using System.Buffers.Binary;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using NATS.Server.JetStream.Models;
using NATS.Server.Internal.TimeHashWheel;
// Storage.StreamState is in this namespace. Use an alias for the API-layer type
// (now named ApiStreamState in the Models namespace) to keep method signatures clear.
using ApiStreamState = NATS.Server.JetStream.Models.ApiStreamState;
namespace NATS.Server.JetStream.Storage;
/// <summary>
/// Block-based file store for JetStream messages. Uses block (.blk) files for
/// on-disk persistence and maintains an in-memory, sequence-keyed message cache
/// for fast reads and subject queries.
/// </summary>
/// <remarks>
/// Reference: golang/nats-server/server/filestore.go — block manager, block rotation,
/// recovery via scanning .blk files, soft-delete via dmap.
/// </remarks>
public sealed class FileStore : IStreamStore, IAsyncDisposable
{
private readonly FileStoreOptions _options;
// In-memory cache: keyed by sequence number. This is the primary data structure
// for reads and queries. The blocks are the on-disk persistence layer.
private readonly Dictionary _messages = new();
// Block-based storage: the active (writable) block and sealed blocks.
private readonly List _blocks = [];
private MsgBlock? _activeBlock;
private int _nextBlockId;
private ulong _last;
// Resolved at construction time: which format family to use.
private readonly bool _useS2; // true -> S2Codec (FSV2 compression path)
private readonly bool _useAead; // true -> AeadEncryptor (FSV2 encryption path)
// Go: filestore.go — per-stream time hash wheel for efficient TTL expiration.
// Created lazily only when MaxAgeMs > 0. Entries are (seq, expires_ns) pairs.
// Reference: golang/nats-server/server/filestore.go:290 (fss/ttl fields).
private HashWheel? _ttlWheel;
public int BlockCount => _blocks.Count;
public bool UsedIndexManifestOnStartup { get; private set; }
// Opens (or creates) the store in options.Directory: migrates any legacy JSONL
// data, then recovers existing blocks into the in-memory cache.
// NOTE(review): this mutates the caller-supplied options instance when
// BlockSizeBytes is unset — confirm callers do not share one options object.
public FileStore(FileStoreOptions options)
{
    _options = options;
    if (_options.BlockSizeBytes <= 0)
        _options.BlockSizeBytes = 64 * 1024; // Default block size: 64 KiB.
    // Determine which format path is active.
    _useS2 = _options.Compression == StoreCompression.S2Compression;
    _useAead = _options.Cipher != StoreCipher.NoCipher;
    Directory.CreateDirectory(options.Directory);
    // Attempt legacy JSONL migration first, then recover from blocks.
    MigrateLegacyJsonl();
    RecoverBlocks();
}
// Appends one message: assigns the next sequence, caches the plaintext in
// memory, schedules TTL expiry (if configured), and persists the transformed
// payload into the active block (rotating once on a sealed-block write).
public async ValueTask AppendAsync(string subject, ReadOnlyMemory payload, CancellationToken ct)
{
    // Go: check and remove expired messages before each append.
    // Reference: golang/nats-server/server/filestore.go — storeMsg, expire check.
    ExpireFromWheel();
    _last++;
    var now = DateTime.UtcNow;
    // Nanosecond timestamp for block records. NOTE(review): derived from
    // ToUnixTimeMilliseconds, so sub-millisecond digits are always zero —
    // confirm ns precision is not required for Go parity.
    var timestamp = new DateTimeOffset(now).ToUnixTimeMilliseconds() * 1_000_000L;
    // Transform (compress/encrypt per options) once; blocks store these bytes.
    var persistedPayload = TransformForPersist(payload.Span);
    // The in-memory cache keeps the plaintext copy for fast reads.
    var stored = new StoredMessage
    {
        Sequence = _last,
        Subject = subject,
        Payload = payload.ToArray(),
        TimestampUtc = now,
    };
    _messages[_last] = stored;
    // Go: register new message in TTL wheel when MaxAgeMs is configured.
    // Reference: golang/nats-server/server/filestore.go:6820 (storeMsg TTL schedule).
    RegisterTtl(_last, timestamp, _options.MaxAgeMs > 0 ? (long)_options.MaxAgeMs * 1_000_000L : 0);
    // Write to MsgBlock. The payload stored in the block is the transformed
    // (compressed/encrypted) payload, not the plaintext.
    EnsureActiveBlock();
    try
    {
        _activeBlock!.WriteAt(_last, subject, ReadOnlyMemory.Empty, persistedPayload, timestamp);
    }
    catch (InvalidOperationException)
    {
        // Block is sealed. Rotate to a new block and retry.
        RotateBlock();
        _activeBlock!.WriteAt(_last, subject, ReadOnlyMemory.Empty, persistedPayload, timestamp);
    }
    // Check if the block just became sealed after this write.
    if (_activeBlock!.IsSealed)
        RotateBlock();
    return _last;
}
// Pure in-memory cache lookup; yields null when the sequence is absent
// (never written, removed, or expired). Blocks are not consulted on reads.
public ValueTask LoadAsync(ulong sequence, CancellationToken ct)
{
    _messages.TryGetValue(sequence, out var found);
    return ValueTask.FromResult(found);
}
// Returns the newest (highest-sequence) message whose subject equals the
// given subject exactly (ordinal comparison, no wildcards), or null.
public ValueTask LoadLastBySubjectAsync(string subject, CancellationToken ct)
{
    StoredMessage? newest = null;
    foreach (var candidate in _messages.Values)
    {
        if (!string.Equals(candidate.Subject, subject, StringComparison.Ordinal))
            continue;
        if (newest is null || candidate.Sequence > newest.Sequence)
            newest = candidate;
    }
    return ValueTask.FromResult(newest);
}
// Snapshot of every cached message in ascending sequence order.
public ValueTask> ListAsync(CancellationToken ct)
{
    var ordered = _messages.Values
        .OrderBy(m => m.Sequence)
        .ToArray();
    return ValueTask.FromResult>(ordered);
}
// Removes a single message from the cache and soft-deletes it in its block.
// Returns false if the sequence was not present.
public ValueTask RemoveAsync(ulong sequence, CancellationToken ct)
{
    var removed = _messages.Remove(sequence);
    if (removed)
    {
        // NOTE(review): rewinding _last here lets a later append reuse a
        // sequence number; Go's filestore keeps last_seq monotonic after
        // deletes — confirm callers depend on this rewind before changing it.
        if (sequence == _last)
            _last = _messages.Count == 0 ? 0UL : _messages.Keys.Max();
        // Soft-delete in the block that contains this sequence.
        DeleteInBlock(sequence);
    }
    return ValueTask.FromResult(removed);
}
// Empties the store entirely: cache, blocks, block files, and any leftover
// legacy artifacts (JSONL store and index manifest).
public ValueTask PurgeAsync(CancellationToken ct)
{
    _messages.Clear();
    _last = 0;
    // Dispose and delete all blocks.
    DisposeAllBlocks();
    CleanBlockFiles();
    // Best-effort removal of legacy files that may still exist on disk.
    var legacyFiles = new[]
    {
        Path.Combine(_options.Directory, "messages.jsonl"),
        Path.Combine(_options.Directory, _options.IndexManifestFileName),
    };
    foreach (var legacy in legacyFiles)
    {
        if (File.Exists(legacy))
            File.Delete(legacy);
    }
    return ValueTask.CompletedTask;
}
// Serializes every cached message (oldest first) to a JSON array of
// FileRecord. Payloads are run through the same persist transform
// (compress/encrypt) used for block storage, then base64-encoded.
public ValueTask CreateSnapshotAsync(CancellationToken ct)
{
    var ordered = _messages.Values.OrderBy(m => m.Sequence);
    var records = ordered
        .Select(m => new FileRecord
        {
            Sequence = m.Sequence,
            Subject = m.Subject,
            PayloadBase64 = Convert.ToBase64String(TransformForPersist(m.Payload.Span)),
            TimestampUtc = m.TimestampUtc,
        })
        .ToArray();
    return ValueTask.FromResult(JsonSerializer.SerializeToUtf8Bytes(records));
}
// Replaces the entire store contents with the given snapshot: wipes memory and
// disk, rebuilds the cache from the serialized FileRecord array, then rewrites
// fresh blocks.
public ValueTask RestoreSnapshotAsync(ReadOnlyMemory snapshot, CancellationToken ct)
{
    _messages.Clear();
    _last = 0;
    // Dispose existing blocks and clean files.
    DisposeAllBlocks();
    CleanBlockFiles();
    if (!snapshot.IsEmpty)
    {
        var records = JsonSerializer.Deserialize(snapshot.Span);
        if (records != null)
        {
            foreach (var record in records)
            {
                // Snapshot payloads are persisted-format (see CreateSnapshotAsync);
                // reverse the transform to recover plaintext for the cache.
                var restoredPayload = RestorePayload(Convert.FromBase64String(record.PayloadBase64 ?? string.Empty));
                var message = new StoredMessage
                {
                    Sequence = record.Sequence,
                    Subject = record.Subject ?? string.Empty,
                    Payload = restoredPayload,
                    TimestampUtc = record.TimestampUtc,
                };
                _messages[record.Sequence] = message;
                _last = Math.Max(_last, record.Sequence);
            }
        }
    }
    // Write all messages to fresh blocks.
    RewriteBlocks();
    return ValueTask.CompletedTask;
}
// API-layer summary of the stream: message count, first/last sequence, and
// total plaintext payload bytes.
public ValueTask GetStateAsync(CancellationToken ct)
{
    ulong firstSeq = 0;
    long totalBytes = 0;
    if (_messages.Count > 0)
    {
        firstSeq = _messages.Keys.Min();
        foreach (var m in _messages.Values)
            totalBytes += m.Payload.Length;
    }
    var state = new ApiStreamState
    {
        Messages = (ulong)_messages.Count,
        FirstSeq = firstSeq,
        LastSeq = _last,
        Bytes = (ulong)totalBytes,
    };
    return ValueTask.FromResult(state);
}
// Drops the oldest messages until at most maxMessages remain, then rewrites
// the blocks so disk matches memory.
// FIX: the old loop recomputed Keys.Min() per removal — accidentally O(n^2).
// Sorting the keys once and removing the leading excess is O(n log n).
public void TrimToMaxMessages(ulong maxMessages)
{
    if ((ulong)_messages.Count > maxMessages)
    {
        // Count fits in int, so the excess does too.
        var excess = (int)((ulong)_messages.Count - maxMessages);
        foreach (var seq in _messages.Keys.OrderBy(k => k).Take(excess).ToArray())
            _messages.Remove(seq);
    }
    // Rewrite blocks to reflect the trim (removes trimmed messages from disk).
    RewriteBlocks();
}
// -------------------------------------------------------------------------
// Go-parity sync interface implementations
// Reference: golang/nats-server/server/filestore.go
// -------------------------------------------------------------------------
/// <summary>
/// Synchronously stores a message, optionally with a per-message TTL override.
/// Returns the assigned sequence number and timestamp in nanoseconds.
/// When <paramref name="ttl"/> is greater than zero it overrides MaxAgeMs for
/// this specific message; otherwise the stream's MaxAgeMs applies.
/// Reference: golang/nats-server/server/filestore.go:6790 (storeMsg).
/// </summary>
public (ulong Seq, long Ts) StoreMsg(string subject, byte[]? hdr, byte[] msg, long ttl)
{
    // Go: expire check before each store (same as AppendAsync).
    // Reference: golang/nats-server/server/filestore.go:6793 (expireMsgs call).
    ExpireFromWheel();
    _last++;
    var now = DateTime.UtcNow;
    // NOTE(review): ms-derived "ns" timestamp — sub-millisecond digits are zero.
    var timestamp = new DateTimeOffset(now).ToUnixTimeMilliseconds() * 1_000_000L;
    // Combine headers and payload (headers precede the body in NATS wire format).
    byte[] combined;
    if (hdr is { Length: > 0 })
    {
        combined = new byte[hdr.Length + msg.Length];
        hdr.CopyTo(combined, 0);
        msg.CopyTo(combined, hdr.Length);
    }
    else
    {
        // NOTE(review): aliases the caller's array (no defensive copy); a caller
        // mutating `msg` afterwards would corrupt the cached payload — confirm
        // all callers hand over ownership.
        combined = msg;
    }
    var persistedPayload = TransformForPersist(combined.AsSpan());
    var stored = new StoredMessage
    {
        Sequence = _last,
        Subject = subject,
        Payload = combined,
        TimestampUtc = now,
    };
    _messages[_last] = stored;
    // Determine effective TTL: per-message ttl (ns) takes priority over MaxAgeMs.
    // Go: filestore.go:6830 — if msg.ttl > 0 use it, else use cfg.MaxAge.
    var effectiveTtlNs = ttl > 0 ? ttl : (_options.MaxAgeMs > 0 ? (long)_options.MaxAgeMs * 1_000_000L : 0L);
    RegisterTtl(_last, timestamp, effectiveTtlNs);
    EnsureActiveBlock();
    try
    {
        _activeBlock!.WriteAt(_last, subject, ReadOnlyMemory.Empty, persistedPayload, timestamp);
    }
    catch (InvalidOperationException)
    {
        // Sealed block: rotate and retry once.
        RotateBlock();
        _activeBlock!.WriteAt(_last, subject, ReadOnlyMemory.Empty, persistedPayload, timestamp);
    }
    if (_activeBlock!.IsSealed)
        RotateBlock();
    return (_last, timestamp);
}
/// <summary>
/// Removes all messages from the store and returns the count purged.
/// NOTE(review): resets _last to 0; Go's purge preserves last_seq — confirm.
/// Reference: golang/nats-server/server/filestore.go — purge / purgeMsgs.
/// </summary>
public ulong Purge()
{
    var purged = (ulong)_messages.Count;
    _messages.Clear();
    _last = 0;
    DisposeAllBlocks();
    CleanBlockFiles();
    return purged;
}
/// <summary>
/// Purge messages on a given subject (wildcards supported), at or below
/// sequence <paramref name="seq"/> (0 = no upper bound), keeping the newest
/// <paramref name="keep"/> matching messages.
/// If subject is empty or null, behaves like <see cref="Purge"/>.
/// Returns the number of messages removed.
/// Reference: golang/nats-server/server/filestore.go — PurgeEx.
/// </summary>
public ulong PurgeEx(string subject, ulong seq, ulong keep)
{
    if (string.IsNullOrEmpty(subject))
        return Purge();
    // Collect all messages matching the subject (with wildcard support) at or below seq, ordered by sequence.
    var candidates = _messages.Values
        .Where(m => SubjectMatchesFilter(m.Subject, subject))
        .Where(m => seq == 0 || m.Sequence <= seq)
        .OrderBy(m => m.Sequence)
        .ToList();
    if (candidates.Count == 0)
        return 0;
    // Keep the newest `keep` messages; purge the rest.
    //   keep > 0 with more candidates than keep -> drop all but trailing `keep`;
    //   keep == 0                               -> drop every candidate;
    //   keep >= candidate count                 -> drop nothing.
    var toRemove = keep > 0 && (ulong)candidates.Count > keep
        ? candidates.Take(candidates.Count - (int)keep).ToList()
        : (keep == 0 ? candidates : []);
    if (toRemove.Count == 0)
        return 0;
    foreach (var msg in toRemove)
    {
        _messages.Remove(msg.Sequence);
        DeleteInBlock(msg.Sequence);
    }
    // Update _last if required.
    // NOTE(review): rewinds _last when the old last message was purged; Go keeps
    // last_seq monotonic — confirm intended semantics.
    if (_messages.Count == 0)
        _last = 0;
    else if (!_messages.ContainsKey(_last))
        _last = _messages.Keys.Max();
    return (ulong)toRemove.Count;
}
/// <summary>
/// Removes all messages with sequence number strictly less than
/// <paramref name="seq"/> and returns the count removed.
/// Reference: golang/nats-server/server/filestore.go — Compact.
/// </summary>
public ulong Compact(ulong seq)
{
    if (seq == 0)
        return 0;
    // Materialize first — the dictionary cannot be mutated while enumerating keys.
    var victims = _messages.Keys.Where(k => k < seq).ToArray();
    if (victims.Length == 0)
        return 0;
    foreach (var victim in victims)
    {
        _messages.Remove(victim);
        DeleteInBlock(victim);
    }
    // Re-anchor _last if the store emptied or the old last message vanished.
    if (_messages.Count == 0)
        _last = 0;
    else if (!_messages.ContainsKey(_last))
        _last = _messages.Keys.Max();
    return (ulong)victims.Length;
}
/// <summary>
/// Removes all messages with sequence number strictly greater than
/// <paramref name="seq"/> and updates the last sequence pointer.
/// Truncate(0) empties the store entirely (cache, blocks, and files).
/// Reference: golang/nats-server/server/filestore.go — Truncate.
/// </summary>
public void Truncate(ulong seq)
{
    if (seq == 0)
    {
        // Truncate to nothing.
        _messages.Clear();
        _last = 0;
        DisposeAllBlocks();
        CleanBlockFiles();
        return;
    }
    var toRemove = _messages.Keys.Where(k => k > seq).ToArray();
    foreach (var s in toRemove)
    {
        _messages.Remove(s);
        DeleteInBlock(s);
    }
    // Update _last to the new highest existing sequence (or seq if it exists,
    // or the highest below seq).
    // NOTE(review): Go's Truncate pins last_seq to `seq` even when that message
    // is deleted; here _last can land below seq — confirm which semantic
    // replication callers expect.
    _last = _messages.Count == 0 ? 0 : _messages.Keys.Max();
}
/// <summary>
/// Returns the first sequence number whose message timestamp is at or after
/// the given time (normalized to UTC). Returns _last + 1 when nothing matches.
/// Reference: golang/nats-server/server/filestore.go — GetSeqFromTime.
/// </summary>
public ulong GetSeqFromTime(DateTime t)
{
    var utc = t.Kind == DateTimeKind.Utc ? t : t.ToUniversalTime();
    // Sequences start at 1, so 0 serves as a "not found" sentinel.
    ulong best = 0;
    foreach (var m in _messages.Values)
    {
        if (m.TimestampUtc < utc)
            continue;
        if (best == 0 || m.Sequence < best)
            best = m.Sequence;
    }
    return best == 0 ? _last + 1 : best;
}
/// <summary>
/// Returns compact state (count, first, last) for non-deleted messages on
/// <paramref name="subject"/> (empty = all subjects) at or after sequence
/// <paramref name="seq"/>.
/// Reference: golang/nats-server/server/filestore.go — FilteredState.
/// </summary>
public SimpleState FilteredState(ulong seq, string subject)
{
    ulong count = 0;
    ulong first = 0;
    ulong lastMatch = 0;
    foreach (var m in _messages.Values)
    {
        if (m.Sequence < seq)
            continue;
        if (!string.IsNullOrEmpty(subject) && !SubjectMatchesFilter(m.Subject, subject))
            continue;
        count++;
        if (first == 0 || m.Sequence < first)
            first = m.Sequence;
        if (m.Sequence > lastMatch)
            lastMatch = m.Sequence;
    }
    if (count == 0)
        return new SimpleState();
    return new SimpleState
    {
        Msgs = count,
        First = first,
        Last = lastMatch,
    };
}
/// <summary>
/// Returns per-subject SimpleState (count, first, last sequence) for all
/// subjects matching <paramref name="filterSubject"/> (empty = all).
/// Supports NATS wildcard filters.
/// Reference: golang/nats-server/server/filestore.go — SubjectsState.
/// </summary>
public Dictionary SubjectsState(string filterSubject)
{
    var result = new Dictionary(StringComparer.Ordinal);
    foreach (var msg in _messages.Values)
    {
        if (!string.IsNullOrEmpty(filterSubject) && !SubjectMatchesFilter(msg.Subject, filterSubject))
            continue;
        if (result.TryGetValue(msg.Subject, out var existing))
        {
            result[msg.Subject] = new SimpleState
            {
                Msgs = existing.Msgs + 1,
                // FIX: the old `existing.First == 0 ? msg.Sequence : ...` probe was
                // dead code — First is always seeded from a real sequence (>= 1)
                // when the entry is created below, so a plain min suffices.
                First = Math.Min(existing.First, msg.Sequence),
                Last = Math.Max(existing.Last, msg.Sequence),
            };
        }
        else
        {
            result[msg.Subject] = new SimpleState
            {
                Msgs = 1,
                First = msg.Sequence,
                Last = msg.Sequence,
            };
        }
    }
    return result;
}
/// <summary>
/// Returns per-subject message counts for all subjects matching
/// <paramref name="filterSubject"/> (empty = all). Supports NATS wildcards.
/// Reference: golang/nats-server/server/filestore.go — SubjectsTotals.
/// </summary>
public Dictionary SubjectsTotals(string filterSubject)
{
    var totals = new Dictionary(StringComparer.Ordinal);
    var unfiltered = string.IsNullOrEmpty(filterSubject);
    foreach (var msg in _messages.Values)
    {
        if (!unfiltered && !SubjectMatchesFilter(msg.Subject, filterSubject))
            continue;
        // Missing key reads as 0, so this is "increment or insert 1".
        totals.TryGetValue(msg.Subject, out var count);
        totals[msg.Subject] = count + 1;
    }
    return totals;
}
/// <summary>
/// Returns the full stream state, including the list of deleted (interior gap)
/// sequences and per-subject counts.
/// Reference: golang/nats-server/server/filestore.go — State.
/// </summary>
public StreamState State()
{
    var state = new StreamState();
    FastState(ref state);
    // Populate deleted sequences: sequences in [firstSeq, lastSeq] that are
    // not present in _messages. Cost is O(lastSeq - firstSeq), i.e. the live
    // span of the stream, not the message count.
    if (state.FirstSeq > 0 && state.LastSeq >= state.FirstSeq)
    {
        var deletedList = new List();
        for (var s = state.FirstSeq; s <= state.LastSeq; s++)
        {
            if (!_messages.ContainsKey(s))
                deletedList.Add(s);
        }
        if (deletedList.Count > 0)
        {
            state.Deleted = [.. deletedList];
            state.NumDeleted = deletedList.Count;
        }
    }
    // Populate per-subject counts.
    var subjectCounts = new Dictionary(StringComparer.Ordinal);
    foreach (var msg in _messages.Values)
    {
        subjectCounts.TryGetValue(msg.Subject, out var cnt);
        subjectCounts[msg.Subject] = cnt + 1;
    }
    state.NumSubjects = subjectCounts.Count;
    // Subjects is null (omitted) when the store is empty.
    state.Subjects = subjectCounts.Count > 0 ? subjectCounts : null;
    return state;
}
/// <summary>
/// Populates a pre-allocated StreamState with the minimum fields needed for
/// replication without allocating a new struct. Does not populate the Deleted
/// array or the Subjects dictionary.
/// Reference: golang/nats-server/server/filestore.go — FastState.
/// </summary>
public void FastState(ref StreamState state)
{
    state.Msgs = (ulong)_messages.Count;
    state.Bytes = (ulong)_messages.Values.Sum(m => (long)m.Payload.Length);
    state.LastSeq = _last;
    state.LastTime = default;
    if (_messages.Count == 0)
    {
        state.FirstSeq = 0;
        state.FirstTime = default;
    }
    else
    {
        var firstSeq = _messages.Keys.Min();
        state.FirstSeq = firstSeq;
        state.FirstTime = _messages[firstSeq].TimestampUtc;
        // FIX: _last can point at a message that no longer exists — TTL expiry
        // (ExpireFromWheel) removes entries without rewinding _last, so
        // `_messages[_last]` threw KeyNotFoundException. Fall back to the
        // highest sequence actually present for the timestamp; LastSeq itself
        // stays at _last intentionally (sequence monotonicity).
        var lastPresent = _messages.ContainsKey(_last) ? _last : _messages.Keys.Max();
        state.LastTime = _messages[lastPresent].TimestampUtc;
    }
}
// -------------------------------------------------------------------------
// Subject matching helper
// -------------------------------------------------------------------------
/// <summary>
/// Returns true if <paramref name="subject"/> matches <paramref name="filter"/>.
/// An empty filter matches everything; a literal filter uses exact ordinal
/// equality; a filter containing NATS wildcards (* or &gt;) goes through the
/// subscription matcher.
/// Reference: golang/nats-server/server/filestore.go — subjectMatch helper.
/// </summary>
private static bool SubjectMatchesFilter(string subject, string filter)
{
    if (string.IsNullOrEmpty(filter))
        return true;
    return NATS.Server.Subscriptions.SubjectMatch.IsLiteral(filter)
        ? string.Equals(subject, filter, StringComparison.Ordinal)
        : NATS.Server.Subscriptions.SubjectMatch.MatchLiteral(subject, filter);
}
// Disposes all message blocks (file handles); the in-memory cache needs no cleanup.
public ValueTask DisposeAsync()
{
    DisposeAllBlocks();
    return ValueTask.CompletedTask;
}
// -------------------------------------------------------------------------
// Block management
// -------------------------------------------------------------------------
/// <summary>
/// Ensures an active (writable) block exists. Creates one (via RotateBlock)
/// when there is no active block yet or the current one has been sealed.
/// </summary>
private void EnsureActiveBlock()
{
    if (_activeBlock is null || _activeBlock.IsSealed)
        RotateBlock();
}
/// <summary>
/// Creates a new active block. The previous active block (if any) stays in the
/// block list as a sealed block. The firstSequence is set to _last + 1 (the next
/// expected sequence), but actual sequences come from WriteAt calls.
/// When rotating, the previously active block's write cache is cleared to free memory.
/// Reference: golang/nats-server/server/filestore.go — clearCache called on block seal.
/// </summary>
private void RotateBlock()
{
    // Clear the write cache on the outgoing active block — it is now sealed.
    // This frees memory; future reads on sealed blocks go to disk.
    _activeBlock?.ClearCache();
    var firstSeq = _last + 1;
    var block = MsgBlock.Create(_nextBlockId, _options.Directory, _options.BlockSizeBytes, firstSeq);
    _blocks.Add(block);
    _activeBlock = block;
    _nextBlockId++;
}
/// <summary>
/// Soft-deletes a message in the block whose sequence range contains it.
/// Block ranges do not overlap, so the first hit is the only one; a sequence
/// owned by no block is silently ignored.
/// </summary>
private void DeleteInBlock(ulong sequence)
{
    var owner = _blocks.FirstOrDefault(
        b => b.FirstSequence <= sequence && sequence <= b.LastSequence);
    owner?.Delete(sequence);
}
/// <summary>
/// Disposes every block and resets all block bookkeeping, so a subsequent
/// write starts over from block id 0 with no active block.
/// </summary>
private void DisposeAllBlocks()
{
    for (var i = 0; i < _blocks.Count; i++)
        _blocks[i].Dispose();
    _blocks.Clear();
    _activeBlock = null;
    _nextBlockId = 0;
}
/// <summary>
/// Deletes all .blk files in the store directory. Best effort: individual
/// delete failures (locked or already-removed files) are swallowed.
/// </summary>
private void CleanBlockFiles()
{
    if (!Directory.Exists(_options.Directory))
        return;
    foreach (var path in Directory.EnumerateFiles(_options.Directory, "*.blk"))
    {
        try
        {
            File.Delete(path);
        }
        catch
        {
            // Best effort — skip files we cannot delete.
        }
    }
}
/// <summary>
/// Rewrites all blocks from the in-memory message cache. Used after trim,
/// snapshot restore, or legacy migration. Note: resets _last to the highest
/// cached sequence, so callers must not use this where sequence monotonicity
/// across removals matters (see ExpireFromWheel).
/// </summary>
private void RewriteBlocks()
{
    DisposeAllBlocks();
    CleanBlockFiles();
    _last = _messages.Count == 0 ? 0UL : _messages.Keys.Max();
    // Replay every message, oldest first, into fresh blocks using the same
    // write/rotate sequence as AppendAsync.
    foreach (var message in _messages.OrderBy(kv => kv.Key).Select(kv => kv.Value))
    {
        var persistedPayload = TransformForPersist(message.Payload.Span);
        var timestamp = new DateTimeOffset(message.TimestampUtc).ToUnixTimeMilliseconds() * 1_000_000L;
        EnsureActiveBlock();
        try
        {
            _activeBlock!.WriteAt(message.Sequence, message.Subject, ReadOnlyMemory.Empty, persistedPayload, timestamp);
        }
        catch (InvalidOperationException)
        {
            // Sealed block: rotate and retry once.
            RotateBlock();
            _activeBlock!.WriteAt(message.Sequence, message.Subject, ReadOnlyMemory.Empty, persistedPayload, timestamp);
        }
        if (_activeBlock!.IsSealed)
            RotateBlock();
    }
}
// -------------------------------------------------------------------------
// Recovery: scan .blk files on startup and rebuild in-memory state.
// -------------------------------------------------------------------------
/// <summary>
/// Recovers all blocks from .blk files in the store directory and rebuilds
/// the in-memory cache. Integrity/key failures (InvalidDataException)
/// propagate so the caller knows the store cannot be opened; any other
/// per-block recovery error skips that block.
/// </summary>
private void RecoverBlocks()
{
    var blkFiles = Directory.GetFiles(_options.Directory, "*.blk");
    if (blkFiles.Length == 0)
        return;
    // Sort by block ID (filename is like "000000.blk", "000001.blk", ...).
    // Zero-padded names make string order equal numeric order.
    Array.Sort(blkFiles, StringComparer.OrdinalIgnoreCase);
    var maxBlockId = -1;
    foreach (var blkFile in blkFiles)
    {
        var fileName = Path.GetFileNameWithoutExtension(blkFile);
        if (!int.TryParse(fileName, out var blockId))
            continue; // Not a block file we recognize.
        try
        {
            var block = MsgBlock.Recover(blockId, _options.Directory);
            _blocks.Add(block);
            if (blockId > maxBlockId)
                maxBlockId = blockId;
            // Read all non-deleted records from this block and populate the in-memory cache.
            RecoverMessagesFromBlock(block);
        }
        catch (Exception ex) when (ex is not InvalidDataException)
        {
            // Skip corrupted blocks — non-critical recovery errors.
            // FIX: replaces the redundant `catch (InvalidDataException) { throw; }`
            // branch with an exception filter; behavior is identical (integrity
            // failures still propagate) but the intent is explicit.
        }
    }
    _nextBlockId = maxBlockId + 1;
    // The newest block becomes the active block; if it is already sealed,
    // EnsureActiveBlock rotates past it on the next write.
    if (_blocks.Count > 0)
    {
        var lastBlock = _blocks[^1];
        _activeBlock = lastBlock;
    }
    PruneExpired(DateTime.UtcNow);
}
/// <summary>
/// Reads all non-deleted records from a block and adds them to the in-memory
/// cache, re-registering TTLs when MaxAgeMs is configured.
/// </summary>
private void RecoverMessagesFromBlock(MsgBlock block)
{
    // We need to iterate through all sequences in the block.
    // MsgBlock tracks first/last sequence, so we try each one.
    var first = block.FirstSequence;
    var last = block.LastSequence;
    if (first == 0 && last == 0)
        return; // Empty block.
    for (var seq = first; seq <= last; seq++)
    {
        var record = block.Read(seq);
        if (record is null)
            continue; // Deleted or not present.
        // The payload stored in the block is the transformed (compressed/encrypted) payload.
        // We need to reverse-transform it to get the original plaintext.
        // InvalidDataException (e.g., wrong key) propagates to the caller.
        var originalPayload = RestorePayload(record.Payload.Span);
        var message = new StoredMessage
        {
            Sequence = record.Sequence,
            Subject = record.Subject,
            Payload = originalPayload,
            // Inverse of the write-side encoding: block timestamps are
            // ms-precision Unix time scaled to ns.
            TimestampUtc = DateTimeOffset.FromUnixTimeMilliseconds(record.Timestamp / 1_000_000L).UtcDateTime,
        };
        _messages[message.Sequence] = message;
        if (message.Sequence > _last)
            _last = message.Sequence;
        // Go: re-register unexpired TTLs in the wheel after recovery.
        // Reference: golang/nats-server/server/filestore.go — recoverMsgs, TTL re-registration.
        if (_options.MaxAgeMs > 0)
        {
            var msgTs = new DateTimeOffset(message.TimestampUtc).ToUnixTimeMilliseconds() * 1_000_000L;
            RegisterTtl(message.Sequence, msgTs, (long)_options.MaxAgeMs * 1_000_000L);
        }
    }
}
// -------------------------------------------------------------------------
// Legacy JSONL migration: if messages.jsonl exists, migrate to blocks.
// -------------------------------------------------------------------------
/// <summary>
/// Migrates data from the legacy JSONL format to block-based storage.
/// If messages.jsonl exists, reads all records, writes them to blocks,
/// then deletes the JSONL file and manifest. Corrupted JSON lines are skipped;
/// integrity failures from RestorePayload (e.g. wrong key) propagate.
/// </summary>
private void MigrateLegacyJsonl()
{
    var jsonlPath = Path.Combine(_options.Directory, "messages.jsonl");
    if (!File.Exists(jsonlPath))
        return;
    // Read all records from the JSONL file.
    var legacyMessages = new List<(ulong Sequence, string Subject, byte[] Payload, DateTime TimestampUtc)>();
    foreach (var line in File.ReadLines(jsonlPath))
    {
        if (string.IsNullOrWhiteSpace(line))
            continue;
        FileRecord? record;
        try
        {
            record = JsonSerializer.Deserialize(line);
        }
        catch
        {
            continue; // Skip corrupted lines.
        }
        if (record == null)
            continue;
        // FIX: removed a catch-all wrapper that immediately rethrew — it was a
        // no-op, and its comment falsely suggested it filtered exception types.
        // RestorePayload's integrity failures now visibly propagate.
        var originalPayload = RestorePayload(Convert.FromBase64String(record.PayloadBase64 ?? string.Empty));
        legacyMessages.Add((record.Sequence, record.Subject ?? string.Empty, originalPayload, record.TimestampUtc));
    }
    if (legacyMessages.Count == 0)
    {
        // Delete the empty JSONL file.
        File.Delete(jsonlPath);
        var manifestPath = Path.Combine(_options.Directory, _options.IndexManifestFileName);
        if (File.Exists(manifestPath))
            File.Delete(manifestPath);
        return;
    }
    // Add to the in-memory cache.
    foreach (var (seq, subject, payload, ts) in legacyMessages)
    {
        _messages[seq] = new StoredMessage
        {
            Sequence = seq,
            Subject = subject,
            Payload = payload,
            TimestampUtc = ts,
        };
        if (seq > _last)
            _last = seq;
    }
    // Write all messages to fresh blocks.
    RewriteBlocks();
    // Delete the legacy files.
    File.Delete(jsonlPath);
    var manifestFile = Path.Combine(_options.Directory, _options.IndexManifestFileName);
    if (File.Exists(manifestFile))
        File.Delete(manifestFile);
}
// -------------------------------------------------------------------------
// Expiry
// -------------------------------------------------------------------------
/// <summary>
/// Registers a message in the TTL wheel. The wheel's expiration scan uses
/// Stopwatch-relative nanoseconds, so expiresNs is computed as the current
/// Stopwatch position plus the TTL duration. If ttlNs is 0, this is a no-op.
/// Reference: golang/nats-server/server/filestore.go:6820 — storeMsg TTL scheduling.
/// </summary>
private void RegisterTtl(ulong seq, long timestampNs, long ttlNs)
{
    if (ttlNs <= 0)
        return;
    // Lazily create the wheel on first TTL registration.
    _ttlWheel ??= new HashWheel();
    // Convert to Stopwatch-domain nanoseconds to match ExpireTasks' time source.
    // We intentionally discard timestampNs (Unix epoch ns) and use "now + ttl"
    // relative to the Stopwatch epoch used by ExpireTasks.
    var nowStopwatchNs = (long)((double)System.Diagnostics.Stopwatch.GetTimestamp()
        / System.Diagnostics.Stopwatch.Frequency * 1_000_000_000);
    var expiresNs = nowStopwatchNs + ttlNs;
    _ttlWheel.Add(seq, expiresNs);
}
/// <summary>
/// Checks the TTL wheel for expired entries and removes them from the store.
/// Uses the wheel's expiration scan which is O(expired) rather than O(total).
/// Expired messages are removed from the in-memory cache and soft-deleted in blocks,
/// but _last is preserved (sequence numbers are monotonically increasing
/// even when messages expire).
/// Reference: golang/nats-server/server/filestore.go — expireMsgs using thw.ExpireTasks.
/// </summary>
private void ExpireFromWheel()
{
    if (_ttlWheel is null)
    {
        // Fall back to linear scan if wheel is not yet initialised.
        // PruneExpiredLinear is only used during recovery (before first write).
        // NOTE(review): this path also runs when MaxAgeMs > 0 but nothing has
        // been appended yet this process — confirm that is intended.
        PruneExpiredLinear(DateTime.UtcNow);
        return;
    }
    var expired = new List();
    _ttlWheel.ExpireTasks((seq, _) =>
    {
        expired.Add(seq);
        return true; // Remove from wheel.
    });
    if (expired.Count == 0)
        return;
    // Remove from in-memory cache and soft-delete in the block layer.
    // We do NOT call RewriteBlocks here — that would reset _last and create a
    // discontinuity in the sequence space. Soft-delete is sufficient for expiry.
    // Reference: golang/nats-server/server/filestore.go:expireMsgs — dmap-based removal.
    foreach (var seq in expired)
    {
        _messages.Remove(seq);
        DeleteInBlock(seq);
    }
}
/// <summary>
/// O(n) fallback expiry scan used during recovery (before the wheel is warm)
/// or when MaxAgeMs is set but no messages have been appended yet.
/// Expired messages are removed from the cache and soft-deleted in their
/// blocks; _last is preserved, matching ExpireFromWheel.
/// </summary>
private void PruneExpiredLinear(DateTime nowUtc)
{
    if (_options.MaxAgeMs <= 0)
        return;
    var cutoff = nowUtc.AddMilliseconds(-_options.MaxAgeMs);
    var expired = _messages
        .Where(kv => kv.Value.TimestampUtc < cutoff)
        .Select(kv => kv.Key)
        .ToArray();
    if (expired.Length == 0)
        return;
    // FIX: this previously called RewriteBlocks(), which resets _last to the
    // highest surviving key. If the newest message had expired, _last rewound
    // and the next append reused a sequence number — violating the monotonicity
    // invariant documented in ExpireFromWheel. Soft-delete in place instead.
    foreach (var sequence in expired)
    {
        _messages.Remove(sequence);
        DeleteInBlock(sequence);
    }
}
// Keep the old PruneExpired name as a convenience wrapper for recovery path.
// (RecoverBlocks calls this after rebuilding the cache.)
private void PruneExpired(DateTime nowUtc) => PruneExpiredLinear(nowUtc);
// -------------------------------------------------------------------------
// Payload transform: compress + encrypt on write; reverse on read.
//
// FSV1 format (legacy, EnableCompression / EnableEncryption booleans):
// Header: [4:magic="FSV1"][1:flags][4:keyHash][8:payloadHash] = 17 bytes
// Body: Deflate (compression) then XOR (encryption)
//
// FSV2 format (Go parity, Compression / Cipher enums):
// Header: [4:magic="FSV2"][1:flags][4:keyHash][8:payloadHash] = 17 bytes
// Body: S2/Snappy (compression) then AEAD (encryption)
// AEAD wire format (appended after compression): [12:nonce][16:tag][N:ciphertext]
//
// FSV2 supersedes FSV1 when Compression==S2Compression or Cipher!=NoCipher.
// On read, magic bytes select the decode path; FSV1 files remain readable.
// -------------------------------------------------------------------------
// Wraps a plaintext payload in the persist envelope:
//   [4:magic][1:flags][4:keyHash LE][8:payloadHash LE][N:body]
// FSV2 (S2 compression and/or AEAD cipher configured) applies S2 then AEAD;
// FSV1 legacy applies Deflate then XOR. keyHash/payloadHash are always
// computed from the configured key and the ORIGINAL plaintext respectively.
private byte[] TransformForPersist(ReadOnlySpan payload)
{
    var plaintext = payload.ToArray();
    var transformed = plaintext;
    byte flags = 0;
    byte[] magic;
    if (_useS2 || _useAead)
    {
        // FSV2 path: S2 compression and/or AEAD encryption.
        magic = EnvelopeMagicV2;
        if (_useS2)
        {
            transformed = S2Codec.Compress(transformed);
            flags |= CompressionFlag;
        }
        if (_useAead)
        {
            // AEAD requires an exactly 32-byte key; pad/truncate as needed.
            var key = NormalizeKey(_options.EncryptionKey);
            transformed = AeadEncryptor.Encrypt(transformed, key, _options.Cipher);
            flags |= EncryptionFlag;
        }
    }
    else
    {
        // FSV1 legacy path: Deflate + XOR.
        magic = EnvelopeMagicV1;
        if (_options.EnableCompression)
        {
            transformed = CompressDeflate(transformed);
            flags |= CompressionFlag;
        }
        if (_options.EnableEncryption)
        {
            transformed = Xor(transformed, _options.EncryptionKey);
            flags |= EncryptionFlag;
        }
    }
    // Assemble header + body. Offsets 5 and 9 mirror TryReadEnvelope.
    var output = new byte[EnvelopeHeaderSize + transformed.Length];
    magic.AsSpan().CopyTo(output.AsSpan(0, magic.Length));
    output[magic.Length] = flags;
    BinaryPrimitives.WriteUInt32LittleEndian(output.AsSpan(5, 4), ComputeKeyHash(_options.EncryptionKey));
    BinaryPrimitives.WriteUInt64LittleEndian(output.AsSpan(9, 8), ComputePayloadHash(plaintext));
    transformed.CopyTo(output.AsSpan(EnvelopeHeaderSize));
    return output;
}
// Inverse of TransformForPersist: parses the envelope header and reverses the
// transforms (decrypt then decompress) selected by the flags byte.
// Throws InvalidDataException on key mismatch (FSV1) or integrity failure.
// Data with no recognizable envelope falls back to the pre-envelope legacy path.
private byte[] RestorePayload(ReadOnlySpan persisted)
{
    if (TryReadEnvelope(persisted, out var version, out var flags, out var keyHash, out var payloadHash, out var body))
    {
        var data = body.ToArray();
        if (version == 2)
        {
            // FSV2: AEAD decrypt then S2 decompress (reverse of write order).
            // Key mismatch surfaces as an AEAD authentication failure.
            if ((flags & EncryptionFlag) != 0)
            {
                var key = NormalizeKey(_options.EncryptionKey);
                data = AeadEncryptor.Decrypt(data, key, _options.Cipher);
            }
            if ((flags & CompressionFlag) != 0)
                data = S2Codec.Decompress(data);
        }
        else
        {
            // FSV1: XOR decrypt then Deflate decompress. XOR cannot detect a
            // wrong key by itself, so the stored key fingerprint is checked first.
            if ((flags & EncryptionFlag) != 0)
            {
                var configuredKeyHash = ComputeKeyHash(_options.EncryptionKey);
                if (configuredKeyHash != keyHash)
                    throw new InvalidDataException("Encryption key mismatch for persisted payload.");
                data = Xor(data, _options.EncryptionKey);
            }
            if ((flags & CompressionFlag) != 0)
                data = DecompressDeflate(data);
        }
        // Optional end-to-end integrity check against the stored plaintext hash.
        if (_options.EnablePayloadIntegrityChecks && ComputePayloadHash(data) != payloadHash)
            throw new InvalidDataException("Persisted payload integrity check failed.");
        return data;
    }
    // Legacy format fallback for pre-envelope data (no header at all).
    var legacy = persisted.ToArray();
    if (_options.EnableEncryption)
        legacy = Xor(legacy, _options.EncryptionKey);
    if (_options.EnableCompression)
        legacy = DecompressDeflate(legacy);
    return legacy;
}
// -------------------------------------------------------------------------
// Helpers
// -------------------------------------------------------------------------
/// <summary>
/// Ensures the encryption key is exactly AeadEncryptor.KeySize (32) bytes:
/// short keys are zero-padded, long keys truncated. Matches the Go server's
/// key normalisation for AEAD ciphers. Only called on the FSV2 AEAD path;
/// FSV1 XOR accepts arbitrary key lengths.
/// </summary>
private static byte[] NormalizeKey(byte[]? key)
{
    var buffer = new byte[AeadEncryptor.KeySize];
    if (key is not { Length: > 0 })
        return buffer; // No key configured: all-zero key buffer.
    var copyLen = Math.Min(key.Length, buffer.Length);
    key.AsSpan(0, copyLen).CopyTo(buffer.AsSpan());
    return buffer;
}
// XORs each byte with the key, repeating the key cyclically; a null/empty key
// is a pass-through copy. The transform is its own inverse.
// FIX: restored the stripped generic type argument (ReadOnlySpan<byte>) so the
// signature is valid C# again.
private static byte[] Xor(ReadOnlySpan<byte> data, byte[]? key)
{
    var output = data.ToArray();
    if (key is not { Length: > 0 })
        return output;
    for (int i = 0, k = 0; i < output.Length; i++)
    {
        output[i] ^= key[k];
        k = (k + 1) % key.Length;
    }
    return output;
}
// Deflate-compresses the input at the fastest level. leaveOpen keeps the
// MemoryStream usable after the DeflateStream is disposed (disposal flushes
// the final compressed block, so ToArray must run after the using scope).
// FIX: restored the stripped generic type argument (ReadOnlySpan<byte>).
private static byte[] CompressDeflate(ReadOnlySpan<byte> data)
{
    using var output = new MemoryStream();
    using (var stream = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Fastest, leaveOpen: true))
    {
        stream.Write(data);
    }
    return output.ToArray();
}
// Inflates a Deflate-compressed buffer back to its original bytes.
// FIX: restored the stripped generic type argument (ReadOnlySpan<byte>).
private static byte[] DecompressDeflate(ReadOnlySpan<byte> data)
{
    using var input = new MemoryStream(data.ToArray());
    using var stream = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress);
    using var output = new MemoryStream();
    stream.CopyTo(output);
    return output.ToArray();
}
// Parses the persist-envelope header:
//   [4:magic "FSV1"/"FSV2"][1:flags][4:keyHash LE][8:payloadHash LE][N:payload]
// Returns false (all outs zeroed) for data too short or with an unknown magic,
// which RestorePayload treats as pre-envelope legacy data.
// FIX: restored the stripped generic type arguments (ReadOnlySpan<byte>).
private static bool TryReadEnvelope(
    ReadOnlySpan<byte> persisted,
    out int version,
    out byte flags,
    out uint keyHash,
    out ulong payloadHash,
    out ReadOnlySpan<byte> payload)
{
    version = 0;
    flags = 0;
    keyHash = 0;
    payloadHash = 0;
    payload = ReadOnlySpan<byte>.Empty;
    if (persisted.Length < EnvelopeHeaderSize)
        return false;
    // Both magics are 4 bytes, so V1's length is valid for either comparison.
    var magic = persisted[..EnvelopeMagicV1.Length];
    if (magic.SequenceEqual(EnvelopeMagicV1))
        version = 1;
    else if (magic.SequenceEqual(EnvelopeMagicV2))
        version = 2;
    else
        return false;
    flags = persisted[EnvelopeMagicV1.Length];
    keyHash = BinaryPrimitives.ReadUInt32LittleEndian(persisted.Slice(5, 4));
    payloadHash = BinaryPrimitives.ReadUInt64LittleEndian(persisted.Slice(9, 8));
    payload = persisted[EnvelopeHeaderSize..];
    return true;
}
// 4-byte fingerprint of the key: the first 4 bytes (little-endian) of its
// SHA-256. Used only to detect a wrong key on FSV1 reads — not a MAC.
// A null/empty key hashes to 0 (sentinel for "no key configured").
// FIX: restored the stripped generic type argument (Span<byte>).
private static uint ComputeKeyHash(byte[]? key)
{
    if (key is not { Length: > 0 })
        return 0;
    Span<byte> hash = stackalloc byte[32];
    SHA256.HashData(key, hash);
    return BinaryPrimitives.ReadUInt32LittleEndian(hash);
}
// 8-byte integrity fingerprint of the plaintext payload: the first 8 bytes
// (little-endian) of its SHA-256. Checked on read when
// EnablePayloadIntegrityChecks is set.
// FIX: restored the stripped generic type arguments (ReadOnlySpan<byte>/Span<byte>).
private static ulong ComputePayloadHash(ReadOnlySpan<byte> payload)
{
    Span<byte> hash = stackalloc byte[32];
    SHA256.HashData(payload, hash);
    return BinaryPrimitives.ReadUInt64LittleEndian(hash);
}
private const byte CompressionFlag = 0b0000_0001;
private const byte EncryptionFlag = 0b0000_0010;
// FSV1: legacy Deflate + XOR envelope
private static readonly byte[] EnvelopeMagicV1 = "FSV1"u8.ToArray();
// FSV2: Go-parity S2 + AEAD envelope (filestore.go block header region; magic "FSV2")
private static readonly byte[] EnvelopeMagicV2 = "FSV2"u8.ToArray();
private const int EnvelopeHeaderSize = 17; // 4 magic + 1 flags + 4 keyHash + 8 payloadHash
// JSON-serializable record shape shared by snapshots and the legacy JSONL
// store. PayloadBase64 holds the TRANSFORMED (persist-format) payload bytes,
// base64-encoded; RestorePayload reverses the transform on load.
private sealed class FileRecord
{
    public ulong Sequence { get; init; }
    public string? Subject { get; init; }
    public string? PayloadBase64 { get; init; }
    public DateTime TimestampUtc { get; init; }
}
}