refactor: extract NATS.Server.JetStream.Tests project
Move 225 JetStream-related test files from NATS.Server.Tests into a dedicated NATS.Server.JetStream.Tests project. This includes root-level JetStream*.cs files, storage test files (FileStore, MemStore, StreamStoreContract), and the full JetStream/ subfolder tree (Api, Cluster, Consumers, MirrorSource, Snapshots, Storage, Streams). Update all namespaces, add InternalsVisibleTo, register the new project in the solution file, and add the JETSTREAM_INTEGRATION_MATRIX define.
This commit is contained in:
@@ -0,0 +1,200 @@ (new file — AeadEncryptorTests.cs; path presumably JetStream/Storage/ per its namespace — confirm)
|
||||
// Reference: golang/nats-server/server/filestore.go
|
||||
// Go FileStore uses ChaCha20-Poly1305 and AES-256-GCM for block encryption:
|
||||
// - StoreCipher=ChaCha → ChaCha20-Poly1305 (filestore.go ~line 300)
|
||||
// - StoreCipher=AES → AES-256-GCM (filestore.go ~line 310)
|
||||
// Wire format: [12:nonce][16:tag][N:ciphertext]
|
||||
|
||||
using System.Security.Cryptography;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class AeadEncryptorTests
{
    // Fixed 256-bit (32-byte) key shared by every test in this class.
    private static byte[] TestKey => "nats-aead-test-key-for-32bytes!!"u8.ToArray();

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204 (ChaCha permutation)
    [Fact]
    public void ChaCha_encrypt_decrypt_round_trips()
    {
        var key = TestKey;
        var message = "Hello, ChaCha20-Poly1305!"u8.ToArray();

        var blob = AeadEncryptor.Encrypt(message, key, StoreCipher.ChaCha);
        var restored = AeadEncryptor.Decrypt(blob, key, StoreCipher.ChaCha);

        restored.ShouldBe(message);
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204 (AES permutation)
    [Fact]
    public void AesGcm_encrypt_decrypt_round_trips()
    {
        var key = TestKey;
        var message = "Hello, AES-256-GCM!"u8.ToArray();

        var blob = AeadEncryptor.Encrypt(message, key, StoreCipher.Aes);
        var restored = AeadEncryptor.Decrypt(blob, key, StoreCipher.Aes);

        restored.ShouldBe(message);
    }

    [Fact]
    public void ChaCha_empty_plaintext_round_trips()
    {
        var blob = AeadEncryptor.Encrypt([], TestKey, StoreCipher.ChaCha);

        AeadEncryptor.Decrypt(blob, TestKey, StoreCipher.ChaCha).ShouldBeEmpty();
    }

    [Fact]
    public void AesGcm_empty_plaintext_round_trips()
    {
        var blob = AeadEncryptor.Encrypt([], TestKey, StoreCipher.Aes);

        AeadEncryptor.Decrypt(blob, TestKey, StoreCipher.Aes).ShouldBeEmpty();
    }

    [Fact]
    public void ChaCha_encrypted_blob_has_correct_overhead()
    {
        var message = new byte[100];

        var blob = AeadEncryptor.Encrypt(message, TestKey, StoreCipher.ChaCha);

        // Wire layout is [12:nonce][16:tag][N:ciphertext], so 12 + 16 + 100 = 128.
        blob.Length.ShouldBe(AeadEncryptor.NonceSize + AeadEncryptor.TagSize + message.Length);
    }

    [Fact]
    public void AesGcm_encrypted_blob_has_correct_overhead()
    {
        var message = new byte[100];

        var blob = AeadEncryptor.Encrypt(message, TestKey, StoreCipher.Aes);

        // Wire layout is [12:nonce][16:tag][N:ciphertext], so 12 + 16 + 100 = 128.
        blob.Length.ShouldBe(AeadEncryptor.NonceSize + AeadEncryptor.TagSize + message.Length);
    }

    // Go: TestFileStoreRestoreEncryptedWithNoKeyFuncFails filestore_test.go:5134
    [Fact]
    public void ChaCha_wrong_key_throws_CryptographicException()
    {
        var blob = AeadEncryptor.Encrypt("secret data"u8.ToArray(), TestKey, StoreCipher.ChaCha);
        var wrongKey = "wrong-key-wrong-key-wrong-key!!!"u8.ToArray();

        Should.Throw<CryptographicException>(
            () => AeadEncryptor.Decrypt(blob, wrongKey, StoreCipher.ChaCha));
    }

    [Fact]
    public void AesGcm_wrong_key_throws_CryptographicException()
    {
        var blob = AeadEncryptor.Encrypt("secret data"u8.ToArray(), TestKey, StoreCipher.Aes);
        var wrongKey = "wrong-key-wrong-key-wrong-key!!!"u8.ToArray();

        Should.Throw<CryptographicException>(
            () => AeadEncryptor.Decrypt(blob, wrongKey, StoreCipher.Aes));
    }

    [Fact]
    public void ChaCha_tampered_ciphertext_throws_CryptographicException()
    {
        var blob = AeadEncryptor.Encrypt("tamper me"u8.ToArray(), TestKey, StoreCipher.ChaCha);

        // Invert the last byte — that lies in the ciphertext region (after nonce+tag).
        blob[^1] ^= 0xFF;

        Should.Throw<CryptographicException>(
            () => AeadEncryptor.Decrypt(blob, TestKey, StoreCipher.ChaCha));
    }

    [Fact]
    public void AesGcm_tampered_ciphertext_throws_CryptographicException()
    {
        var blob = AeadEncryptor.Encrypt("tamper me"u8.ToArray(), TestKey, StoreCipher.Aes);

        // Invert the last ciphertext byte.
        blob[^1] ^= 0xFF;

        Should.Throw<CryptographicException>(
            () => AeadEncryptor.Decrypt(blob, TestKey, StoreCipher.Aes));
    }

    [Fact]
    public void ChaCha_tampered_tag_throws_CryptographicException()
    {
        var blob = AeadEncryptor.Encrypt("tamper tag"u8.ToArray(), TestKey, StoreCipher.ChaCha);

        // Invert the first tag byte — the tag sits immediately after the nonce.
        blob[AeadEncryptor.NonceSize] ^= 0xFF;

        Should.Throw<CryptographicException>(
            () => AeadEncryptor.Decrypt(blob, TestKey, StoreCipher.ChaCha));
    }

    [Fact]
    public void Key_shorter_than_32_bytes_throws_ArgumentException()
    {
        // A 128-bit key is rejected; only 256-bit keys are accepted.
        var shortKey = new byte[16];

        Should.Throw<ArgumentException>(
            () => AeadEncryptor.Encrypt("data"u8.ToArray(), shortKey, StoreCipher.ChaCha));
    }

    [Fact]
    public void Key_longer_than_32_bytes_throws_ArgumentException()
    {
        var longKey = new byte[64];

        Should.Throw<ArgumentException>(
            () => AeadEncryptor.Encrypt("data"u8.ToArray(), longKey, StoreCipher.ChaCha));
    }

    [Fact]
    public void Decrypt_data_too_short_throws_ArgumentException()
    {
        // Anything below nonce (12) + tag (16) = 28 bytes cannot be a valid blob.
        var truncated = new byte[10];

        Should.Throw<ArgumentException>(
            () => AeadEncryptor.Decrypt(truncated, TestKey, StoreCipher.ChaCha));
    }

    [Fact]
    public void ChaCha_each_encrypt_produces_different_ciphertext()
    {
        // A fresh random nonce per call means identical plaintexts never
        // yield identical blobs.
        var message = "same plaintext"u8.ToArray();

        var first = AeadEncryptor.Encrypt(message, TestKey, StoreCipher.ChaCha);
        var second = AeadEncryptor.Encrypt(message, TestKey, StoreCipher.ChaCha);

        first.ShouldNotBe(second);
    }

    [Fact]
    public void ChaCha_large_payload_round_trips()
    {
        var message = new byte[64 * 1024]; // 64 KB
        Random.Shared.NextBytes(message);

        var blob = AeadEncryptor.Encrypt(message, TestKey, StoreCipher.ChaCha);

        AeadEncryptor.Decrypt(blob, TestKey, StoreCipher.ChaCha).ShouldBe(message);
    }

    [Fact]
    public void AesGcm_large_payload_round_trips()
    {
        var message = new byte[64 * 1024]; // 64 KB
        Random.Shared.NextBytes(message);

        var blob = AeadEncryptor.Encrypt(message, TestKey, StoreCipher.Aes);

        AeadEncryptor.Decrypt(blob, TestKey, StoreCipher.Aes).ShouldBe(message);
    }
}
|
||||
@@ -0,0 +1,149 @@ (new file — AtomicFileWriterTests.cs; path presumably JetStream/Storage/ per its namespace — confirm)
|
||||
// Go ref: filestore.go:10599 (_writeFullState)
|
||||
// AtomicFileWriter wraps the write-to-temp-then-rename pattern used by
|
||||
// Go's fileStore._writeFullState to guarantee crash-safe state persistence.
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class AtomicFileWriterTests : IDisposable
{
    // Per-instance scratch directory; removed again in Dispose.
    private readonly DirectoryInfo _scratch;

    public AtomicFileWriterTests() =>
        _scratch = Directory.CreateTempSubdirectory("atomic_writer_tests_");

    public void Dispose() => _scratch.Delete(recursive: true);

    private string TempPath(string name) => Path.Combine(_scratch.FullName, name);

    // -------------------------------------------------------------------------
    // byte[] overload
    // -------------------------------------------------------------------------

    [Fact]
    public async Task WriteAtomicallyAsync_creates_file()
    {
        var target = TempPath("state.json");
        var payload = "{ \"seq\": 1 }"u8.ToArray();

        await AtomicFileWriter.WriteAtomicallyAsync(target, payload);

        File.Exists(target).ShouldBeTrue();
        (await File.ReadAllBytesAsync(target)).ShouldBe(payload);
    }

    [Fact]
    public async Task WriteAtomicallyAsync_no_temp_file_remains()
    {
        var target = TempPath("state.json");

        await AtomicFileWriter.WriteAtomicallyAsync(target, "hello world"u8.ToArray());

        // A successful write must leave no intermediate file behind. Temp names
        // carry a random component ({path}.{random}.tmp), so scan by extension
        // rather than exact name.
        _scratch.EnumerateFiles("*.tmp").ShouldBeEmpty();
    }

    [Fact]
    public async Task WriteAtomicallyAsync_overwrites_existing()
    {
        var target = TempPath("state.json");

        await AtomicFileWriter.WriteAtomicallyAsync(target, "first"u8.ToArray());
        await AtomicFileWriter.WriteAtomicallyAsync(target, "second"u8.ToArray());

        (await File.ReadAllTextAsync(target)).ShouldBe("second");
    }

    [Fact]
    public async Task WriteAtomicallyAsync_concurrent_writes_are_safe()
    {
        // Racing writers must never leave torn data: once all writes finish,
        // the file holds exactly one intact payload (never a mix of two).
        var target = TempPath("concurrent.json");
        const int concurrency = 20;

        var writers = new Task[concurrency];
        for (var i = 0; i < concurrency; i++)
            writers[i] = AtomicFileWriter.WriteAtomicallyAsync(
                target, System.Text.Encoding.UTF8.GetBytes($"payload-{i:D3}"));

        await Task.WhenAll(writers);

        File.Exists(target).ShouldBeTrue();
        (await File.ReadAllTextAsync(target)).ShouldMatch(@"^payload-\d{3}$");
    }

    // -------------------------------------------------------------------------
    // ReadOnlyMemory<byte> overload
    // -------------------------------------------------------------------------

    [Fact]
    public async Task WriteAtomicallyAsync_memory_overload_creates_file()
    {
        var target = TempPath("state_mem.json");
        ReadOnlyMemory<byte> payload = "{ \"seq\": 42 }"u8.ToArray();

        await AtomicFileWriter.WriteAtomicallyAsync(target, payload);

        File.Exists(target).ShouldBeTrue();
        (await File.ReadAllBytesAsync(target)).ShouldBe(payload.ToArray());
    }

    [Fact]
    public async Task WriteAtomicallyAsync_memory_overload_no_temp_file_remains()
    {
        var target = TempPath("state_mem.json");
        ReadOnlyMemory<byte> payload = "memory data"u8.ToArray();

        await AtomicFileWriter.WriteAtomicallyAsync(target, payload);

        // Temp names carry a random component ({path}.{random}.tmp); scan by extension.
        _scratch.EnumerateFiles("*.tmp").ShouldBeEmpty();
    }

    [Fact]
    public async Task WriteAtomicallyAsync_memory_overload_overwrites_existing()
    {
        var target = TempPath("state_mem.json");

        await AtomicFileWriter.WriteAtomicallyAsync(target, (ReadOnlyMemory<byte>)"first"u8.ToArray());
        await AtomicFileWriter.WriteAtomicallyAsync(target, (ReadOnlyMemory<byte>)"second"u8.ToArray());

        (await File.ReadAllTextAsync(target)).ShouldBe("second");
    }

    [Fact]
    public async Task WriteAtomicallyAsync_writes_empty_data()
    {
        var target = TempPath("empty.json");

        // Zero bytes is a legal payload and must produce an empty file.
        await AtomicFileWriter.WriteAtomicallyAsync(target, Array.Empty<byte>());

        File.Exists(target).ShouldBeTrue();
        (await File.ReadAllBytesAsync(target)).ShouldBeEmpty();
    }

    [Fact]
    public async Task WriteAtomicallyAsync_writes_large_payload()
    {
        var target = TempPath("large.bin");
        var payload = new byte[256 * 1024]; // 256 KB
        Random.Shared.NextBytes(payload);

        await AtomicFileWriter.WriteAtomicallyAsync(target, payload);

        (await File.ReadAllBytesAsync(target)).ShouldBe(payload);
    }
}
|
||||
@@ -0,0 +1,764 @@ (new file — FileStoreBasicTests.cs; path presumably JetStream/Storage/ per its namespace — confirm; rendering is truncated partway through this file)
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported: TestFileStoreBasics, TestFileStoreMsgHeaders,
|
||||
// TestFileStoreBasicWriteMsgsAndRestore, TestFileStoreRemove,
|
||||
// TestFileStoreWriteAndReadSameBlock, TestFileStoreAndRetrieveMultiBlock,
|
||||
// TestFileStoreCollapseDmap, TestFileStoreTimeStamps,
|
||||
// TestFileStoreEraseMsg, TestFileStoreSelectNextFirst,
|
||||
// TestFileStoreSkipMsg, TestFileStoreWriteExpireWrite,
|
||||
// TestFileStoreStreamStateDeleted, TestFileStoreMsgLimitBug,
|
||||
// TestFileStoreStreamTruncate, TestFileStoreSnapshot,
|
||||
// TestFileStoreSnapshotAndSyncBlocks, TestFileStoreMeta,
|
||||
// TestFileStoreInitialFirstSeq, TestFileStoreCompactAllWithDanglingLMB
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class FileStoreBasicTests : IDisposable
|
||||
{
|
||||
private readonly string _dir;
|
||||
|
||||
public FileStoreBasicTests()
{
    // One unique scratch directory per test-class instance.
    var root = Path.GetTempPath();
    _dir = Path.Combine(root, $"nats-js-fs-basic-{Guid.NewGuid():N}");
    Directory.CreateDirectory(_dir);
}
|
||||
|
||||
public void Dispose()
{
    // Remove the scratch directory created in the constructor, if it still exists.
    if (!Directory.Exists(_dir))
        return;

    Directory.Delete(_dir, recursive: true);
}
|
||||
|
||||
// Builds a FileStore rooted at _dir (or a named subdirectory of it), using the
// caller's options when supplied and always pointing Directory at the chosen path.
private FileStore CreateStore(string? subdirectory = null, FileStoreOptions? options = null)
{
    var target = subdirectory is null
        ? _dir
        : Path.Combine(_dir, subdirectory);

    var storeOptions = options ?? new FileStoreOptions();
    storeOptions.Directory = target;

    return new FileStore(storeOptions);
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86
[Fact]
public async Task Store_and_load_messages()
{
    await using var store = CreateStore();

    const string subject = "foo";
    var body = "Hello World"u8.ToArray();

    // Sequences are assigned 1..5 in append order.
    for (var n = 1; n <= 5; n++)
        (await store.AppendAsync(subject, body, default)).ShouldBe((ulong)n);

    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)5);

    var second = await store.LoadAsync(2, default);
    second.ShouldNotBeNull();
    second!.Subject.ShouldBe(subject);
    second.Payload.ToArray().ShouldBe(body);

    (await store.LoadAsync(3, default)).ShouldNotBeNull();
}
|
||||
|
||||
// Go: TestFileStoreMsgHeaders server/filestore_test.go:152
[Fact]
public async Task Store_message_with_headers()
{
    await using var store = CreateStore();

    // Full payload = wire headers immediately followed by the body.
    var headers = "NATS/1.0\r\nname:derek\r\n\r\n"u8.ToArray();
    var body = "Hello World"u8.ToArray();
    var combined = headers.Concat(body).ToArray();

    await store.AppendAsync("foo", combined, default);

    var loaded = await store.LoadAsync(1, default);
    loaded.ShouldNotBeNull();
    loaded!.Payload.ToArray().ShouldBe(combined);
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:181
[Fact]
public async Task Stop_and_restart_preserves_messages()
{
    const int firstBatch = 100;
    const int secondBatch = 100;

    // Appends messages numbered [from..to] and checks each assigned sequence.
    static async Task AppendRangeAsync(FileStore store, int from, int to)
    {
        for (var i = from; i <= to; i++)
        {
            var body = Encoding.UTF8.GetBytes($"[{i:D8}] Hello World!");
            (await store.AppendAsync("foo", body, default)).ShouldBe((ulong)i);
        }
    }

    await using (var store = CreateStore())
    {
        await AppendRangeAsync(store, 1, firstBatch);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)firstBatch);
    }

    // First reopen: the original batch must survive; then extend it.
    await using (var store = CreateStore())
    {
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)firstBatch);

        await AppendRangeAsync(store, firstBatch + 1, firstBatch + secondBatch);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)(firstBatch + secondBatch));
    }

    // Second reopen: the extended batch must survive as well.
    await using (var store = CreateStore())
    {
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)(firstBatch + secondBatch));
    }
}
|
||||
|
||||
// Go: TestFileStoreBasics (remove section) server/filestore_test.go:129
[Fact]
public async Task Remove_messages_updates_state()
{
    await using var store = CreateStore();

    const string subject = "foo";
    var body = "Hello World"u8.ToArray();

    for (var n = 0; n < 5; n++)
        await store.AppendAsync(subject, body, default);

    // Remove first, last, then a middle message; the count drops each time.
    (await store.RemoveAsync(1, default)).ShouldBeTrue();
    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)4);

    (await store.RemoveAsync(5, default)).ShouldBeTrue();
    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)3);

    (await store.RemoveAsync(3, default)).ShouldBeTrue();
    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)2);

    // Survivors stay loadable; removed sequences come back null.
    (await store.LoadAsync(2, default)).ShouldNotBeNull();
    (await store.LoadAsync(4, default)).ShouldNotBeNull();

    foreach (var gone in new ulong[] { 1, 3, 5 })
        (await store.LoadAsync(gone, default)).ShouldBeNull();
}
|
||||
|
||||
// Go: TestFileStoreWriteAndReadSameBlock server/filestore_test.go:1510
[Fact]
public async Task Write_and_read_same_block()
{
    await using var store = CreateStore(subdirectory: "same-blk");

    const string subject = "foo";
    var body = "Hello World!"u8.ToArray();

    // Each append is immediately read back from the same (still-open) block.
    for (ulong seq = 1; seq <= 10; seq++)
    {
        (await store.AppendAsync(subject, body, default)).ShouldBe(seq);

        var loaded = await store.LoadAsync(seq, default);
        loaded.ShouldNotBeNull();
        loaded!.Subject.ShouldBe(subject);
        loaded.Payload.ToArray().ShouldBe(body);
    }
}
|
||||
|
||||
// Go: TestFileStoreTimeStamps server/filestore_test.go:682
[Fact]
public async Task Stored_messages_have_non_decreasing_timestamps()
{
    await using var store = CreateStore(subdirectory: "timestamps");

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

    var all = await store.ListAsync(default);
    all.Count.ShouldBe(10);

    // Timestamps must be monotonically non-decreasing in storage order.
    DateTime? last = null;
    foreach (var message in all)
    {
        if (last is { } prior)
            message.TimestampUtc.ShouldBeGreaterThanOrEqualTo(prior);

        last = message.TimestampUtc;
    }
}
|
||||
|
||||
// Go: TestFileStoreAndRetrieveMultiBlock server/filestore_test.go:1527
[Fact]
public async Task Store_and_retrieve_multi_block()
{
    const string subDir = "multi-blk";

    // A tiny block size forces the 20 messages to span several blocks.
    static FileStoreOptions SmallBlocks() => new FileStoreOptions { BlockSizeBytes = 256 };

    await using (var store = CreateStore(subdirectory: subDir, options: SmallBlocks()))
    {
        for (var n = 0; n < 20; n++)
            await store.AppendAsync("foo", "Hello World!"u8.ToArray(), default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)20);
    }

    // Reopen with the same block geometry and confirm every sequence loads.
    await using (var store = CreateStore(subdirectory: subDir, options: SmallBlocks()))
    {
        for (ulong seq = 1; seq <= 20; seq++)
        {
            var loaded = await store.LoadAsync(seq, default);
            loaded.ShouldNotBeNull();
            loaded!.Subject.ShouldBe("foo");
        }
    }
}
|
||||
|
||||
// Go: TestFileStoreCollapseDmap server/filestore_test.go:1561
[Fact]
public async Task Remove_out_of_order_collapses_properly()
{
    await using var store = CreateStore(subdirectory: "dmap");

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("foo", "Hello World!"u8.ToArray(), default);

    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)10);

    // Punch interior holes (delete-map entries) out of order.
    foreach (var seq in new ulong[] { 2, 4, 8 })
        (await store.RemoveAsync(seq, default)).ShouldBeTrue();

    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)7);

    // Removing seq 1 lets the first sequence collapse past the hole at 2.
    (await store.RemoveAsync(1, default)).ShouldBeTrue();
    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)6);
    state.FirstSeq.ShouldBe((ulong)3);

    // Removing seq 3 collapses over the hole at 4 straight to 5.
    (await store.RemoveAsync(3, default)).ShouldBeTrue();
    state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)5);
    state.FirstSeq.ShouldBe((ulong)5);
}
|
||||
|
||||
// Go: TestFileStoreSelectNextFirst server/filestore_test.go:303
[Fact]
public async Task Remove_across_blocks_updates_first_sequence()
{
    await using var store = CreateStore(subdirectory: "sel-next");

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("zzz", "Hello World"u8.ToArray(), default);

    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)10);

    // Carve out the middle (2..7), crossing block boundaries; the first
    // sequence stays at 1 for now.
    for (ulong seq = 2; seq <= 7; seq++)
        (await store.RemoveAsync(seq, default)).ShouldBeTrue();

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)4);
    state.FirstSeq.ShouldBe((ulong)1);

    // Dropping seq 1 must skip over the hole and land first-seq on 8.
    (await store.RemoveAsync(1, default)).ShouldBeTrue();
    state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)3);
    state.FirstSeq.ShouldBe((ulong)8);
}
|
||||
|
||||
// Go: TestFileStoreEraseMsg server/filestore_test.go:1304
// The .NET FileStore does not have a separate EraseMsg method yet;
// RemoveAsync is the equivalent. This test verifies remove semantics.
[Fact]
public async Task Remove_message_makes_it_unloadable()
{
    await using var store = CreateStore(subdirectory: "erase");

    var body = "Hello World"u8.ToArray();
    await store.AppendAsync("foo", body, default);
    await store.AppendAsync("foo", body, default);

    var first = await store.LoadAsync(1, default);
    first.ShouldNotBeNull();
    first!.Payload.ToArray().ShouldBe("Hello World"u8.ToArray());

    // After removal, seq 1 is gone while seq 2 is untouched.
    (await store.RemoveAsync(1, default)).ShouldBeTrue();
    (await store.LoadAsync(1, default)).ShouldBeNull();
    (await store.LoadAsync(2, default)).ShouldNotBeNull();
}
|
||||
|
||||
// Go: TestFileStoreStreamStateDeleted server/filestore_test.go:2794
[Fact]
public async Task Remove_non_existent_returns_false()
{
    await using var store = CreateStore(subdirectory: "no-exist");

    await store.AppendAsync("foo", "msg"u8.ToArray(), default);

    // A sequence that was never stored cannot be removed, and the attempt
    // must leave the message count untouched.
    (await store.RemoveAsync(99, default)).ShouldBeFalse();
    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)1);
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:220
// Store after stop should not succeed (or at least not modify persisted state).
[Fact]
public async Task Purge_then_restart_shows_empty_state()
{
    await using (var store = CreateStore(subdirectory: "purge-restart"))
    {
        for (var n = 0; n < 10; n++)
            await store.AppendAsync("foo", "Hello"u8.ToArray(), default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)10);

        // Purge zeroes both the message count and the byte count.
        await store.PurgeAsync(default);
        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)0);
        state.Bytes.ShouldBe((ulong)0);
    }

    // The purge must also survive a close/reopen cycle.
    await using (var store = CreateStore(subdirectory: "purge-restart"))
    {
        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)0);
        state.Bytes.ShouldBe((ulong)0);
    }
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:284
// After purge, sequence numbers should continue from where they left off.
[Fact]
public async Task Purge_then_store_continues_sequence()
{
    await using var store = CreateStore(subdirectory: "purge-seq");

    for (var n = 0; n < 5; n++)
        await store.AppendAsync("foo", "Hello"u8.ToArray(), default);

    (await store.GetStateAsync(default)).LastSeq.ShouldBe((ulong)5);

    await store.PurgeAsync(default);

    // NOTE(review): the test name implies sequences continue past 5 after a
    // purge, but this assertion only requires a positive sequence, so a store
    // that resets to 1 also passes — confirm the intended .NET semantics and
    // tighten the assertion accordingly.
    var nextSeq = await store.AppendAsync("foo", "After purge"u8.ToArray(), default);
    nextSeq.ShouldBeGreaterThan((ulong)0);
}
|
||||
|
||||
// Go: TestFileStoreSnapshot server/filestore_test.go:1799
[Fact]
public async Task Snapshot_and_restore_preserves_messages()
{
    await using var store = CreateStore(subdirectory: "snap-src");

    for (var i = 0; i < 50; i++)
        await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

    var snap = await store.CreateSnapshotAsync(default);
    snap.Length.ShouldBeGreaterThan(0);

    // Restore into a brand-new store and compare top-level state.
    await using var restored = CreateStore(subdirectory: "snap-dst");
    await restored.RestoreSnapshotAsync(snap, default);

    var srcState = await store.GetStateAsync(default);
    var dstState = await restored.GetStateAsync(default);
    dstState.Messages.ShouldBe(srcState.Messages);
    dstState.FirstSeq.ShouldBe(srcState.FirstSeq);
    dstState.LastSeq.ShouldBe(srcState.LastSeq);

    // Verify each message round-trips. Assert on the source load explicitly:
    // previously `original` was dereferenced with the null-forgiving operator
    // only, so a failed source load surfaced as an unhelpful
    // NullReferenceException instead of an assertion failure.
    for (ulong i = 1; i <= srcState.Messages; i++)
    {
        var original = await store.LoadAsync(i, default);
        var copy = await restored.LoadAsync(i, default);

        original.ShouldNotBeNull();
        copy.ShouldNotBeNull();
        copy!.Subject.ShouldBe(original!.Subject);
        copy.Payload.ToArray().ShouldBe(original.Payload.ToArray());
    }
}
|
||||
|
||||
// Go: TestFileStoreSnapshot server/filestore_test.go:1904
[Fact]
public async Task Snapshot_after_removes_preserves_remaining()
{
    await using var store = CreateStore(subdirectory: "snap-rm");

    for (var i = 0; i < 20; i++)
        await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

    // Drop the first five messages before taking the snapshot.
    for (ulong seq = 1; seq <= 5; seq++)
        await store.RemoveAsync(seq, default);

    var snap = await store.CreateSnapshotAsync(default);

    await using var restored = CreateStore(subdirectory: "snap-rm-dst");
    await restored.RestoreSnapshotAsync(snap, default);

    var state = await restored.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)15);
    state.FirstSeq.ShouldBe((ulong)6);

    // The removed prefix must stay removed after restore.
    for (ulong seq = 1; seq <= 5; seq++)
        (await restored.LoadAsync(seq, default)).ShouldBeNull();
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:113
[Fact]
public async Task Load_with_null_sequence_returns_null()
{
    await using var store = CreateStore(subdirectory: "null-seq");

    await store.AppendAsync("foo", "Hello"u8.ToArray(), default);

    // Only sequence 1 exists; an arbitrary higher sequence yields null.
    (await store.LoadAsync(99, default)).ShouldBeNull();
}
|
||||
|
||||
// Go: TestFileStoreMsgHeaders server/filestore_test.go:158
[Fact]
public async Task Store_preserves_empty_payload()
{
    await using var store = CreateStore(subdirectory: "empty-payload");

    // A zero-length payload is valid and must round-trip as zero-length.
    await store.AppendAsync("foo", ReadOnlyMemory<byte>.Empty, default);

    var loaded = await store.LoadAsync(1, default);
    loaded.ShouldNotBeNull();
    loaded!.Payload.Length.ShouldBe(0);
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86
[Fact]
public async Task State_tracks_first_and_last_seq()
{
    await using var store = CreateStore(subdirectory: "first-last");

    for (var n = 0; n < 5; n++)
        await store.AppendAsync("foo", "data"u8.ToArray(), default);

    var state = await store.GetStateAsync(default);
    state.FirstSeq.ShouldBe((ulong)1);
    state.LastSeq.ShouldBe((ulong)5);

    // Removing the head advances FirstSeq; LastSeq is unaffected.
    await store.RemoveAsync(1, default);
    state = await store.GetStateAsync(default);
    state.FirstSeq.ShouldBe((ulong)2);
    state.LastSeq.ShouldBe((ulong)5);
}
|
||||
|
||||
// Go: TestFileStoreMsgLimitBug server/filestore_test.go:518
[Fact]
public async Task TrimToMaxMessages_enforces_limit()
{
    await using var store = CreateStore(subdirectory: "trim");

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

    // Trimming to 5 keeps only the newest half (sequences 6..10).
    store.TrimToMaxMessages(5);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)5);
    state.FirstSeq.ShouldBe((ulong)6);
    state.LastSeq.ShouldBe((ulong)10);

    // Evicted sequences are unloadable; the retained ones still load.
    for (ulong seq = 1; seq <= 10; seq++)
    {
        var loaded = await store.LoadAsync(seq, default);
        if (seq <= 5)
            loaded.ShouldBeNull();
        else
            loaded.ShouldNotBeNull();
    }
}
|
||||
|
||||
// Go: TestFileStoreMsgLimit server/filestore_test.go:484
|
||||
[Fact]
|
||||
public async Task TrimToMaxMessages_to_one()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "trim-one");
|
||||
|
||||
await store.AppendAsync("foo", "first"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "second"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "third"u8.ToArray(), default);
|
||||
|
||||
store.TrimToMaxMessages(1);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)1);
|
||||
state.FirstSeq.ShouldBe((ulong)3);
|
||||
state.LastSeq.ShouldBe((ulong)3);
|
||||
|
||||
var msg = await store.LoadAsync(3, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.ToArray().ShouldBe("third"u8.ToArray());
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:285
|
||||
[Fact]
|
||||
public async Task Remove_then_restart_preserves_state()
|
||||
{
|
||||
var subDir = "rm-restart";
|
||||
await using (var store = CreateStore(subdirectory: subDir))
|
||||
{
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", "Hello"u8.ToArray(), default);
|
||||
|
||||
await store.RemoveAsync(3, default);
|
||||
await store.RemoveAsync(7, default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)8);
|
||||
}
|
||||
|
||||
// Reopen and verify.
|
||||
await using (var store = CreateStore(subdirectory: subDir))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)8);
|
||||
|
||||
(await store.LoadAsync(3, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(7, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(1, default)).ShouldNotBeNull();
|
||||
(await store.LoadAsync(10, default)).ShouldNotBeNull();
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86
|
||||
[Fact]
|
||||
public async Task Multiple_subjects_stored_and_loadable()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "multi-subj");
|
||||
|
||||
await store.AppendAsync("foo.bar", "one"u8.ToArray(), default);
|
||||
await store.AppendAsync("baz.qux", "two"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo.bar", "three"u8.ToArray(), default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)3);
|
||||
|
||||
var msg1 = await store.LoadAsync(1, default);
|
||||
msg1.ShouldNotBeNull();
|
||||
msg1!.Subject.ShouldBe("foo.bar");
|
||||
|
||||
var msg2 = await store.LoadAsync(2, default);
|
||||
msg2.ShouldNotBeNull();
|
||||
msg2!.Subject.ShouldBe("baz.qux");
|
||||
|
||||
var msg3 = await store.LoadAsync(3, default);
|
||||
msg3.ShouldNotBeNull();
|
||||
msg3!.Subject.ShouldBe("foo.bar");
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:104
|
||||
[Fact]
|
||||
public async Task State_bytes_tracks_total_payload()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "bytes");
|
||||
|
||||
var payload = new byte[100];
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("foo", payload, default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)5);
|
||||
state.Bytes.ShouldBe((ulong)(5 * 100));
|
||||
}
|
||||
|
||||
// Go: TestFileStoreWriteExpireWrite server/filestore_test.go:424
|
||||
[Fact]
|
||||
public async Task Large_batch_store_then_load_all()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "large-batch");
|
||||
|
||||
const int count = 200;
|
||||
for (var i = 0; i < count; i++)
|
||||
await store.AppendAsync("zzz", Encoding.UTF8.GetBytes($"Hello World! - {i}"), default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)count);
|
||||
|
||||
for (ulong i = 1; i <= count; i++)
|
||||
{
|
||||
var msg = await store.LoadAsync(i, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Subject.ShouldBe("zzz");
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:124
|
||||
[Fact]
|
||||
public async Task Load_returns_null_for_sequence_zero()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "seq-zero");
|
||||
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
// Sequence 0 should never match a stored message.
|
||||
(await store.LoadAsync(0, default)).ShouldBeNull();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86
|
||||
[Fact]
|
||||
public async Task LoadLastBySubject_returns_most_recent()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "last-by-subj");
|
||||
|
||||
await store.AppendAsync("foo", "first"u8.ToArray(), default);
|
||||
await store.AppendAsync("bar", "other"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "second"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "third"u8.ToArray(), default);
|
||||
|
||||
var last = await store.LoadLastBySubjectAsync("foo", default);
|
||||
last.ShouldNotBeNull();
|
||||
last!.Payload.ToArray().ShouldBe("third"u8.ToArray());
|
||||
last.Sequence.ShouldBe((ulong)4);
|
||||
|
||||
// No match.
|
||||
(await store.LoadLastBySubjectAsync("does.not.exist", default)).ShouldBeNull();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86
|
||||
[Fact]
|
||||
public async Task ListAsync_returns_all_messages_ordered()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "list-ordered");
|
||||
|
||||
await store.AppendAsync("foo", "one"u8.ToArray(), default);
|
||||
await store.AppendAsync("bar", "two"u8.ToArray(), default);
|
||||
await store.AppendAsync("baz", "three"u8.ToArray(), default);
|
||||
|
||||
var messages = await store.ListAsync(default);
|
||||
messages.Count.ShouldBe(3);
|
||||
messages[0].Sequence.ShouldBe((ulong)1);
|
||||
messages[1].Sequence.ShouldBe((ulong)2);
|
||||
messages[2].Sequence.ShouldBe((ulong)3);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:268
|
||||
[Fact]
|
||||
public async Task Purge_then_append_works()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "purge-append");
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
await store.PurgeAsync(default);
|
||||
(await store.GetStateAsync(default)).Messages.ShouldBe((ulong)0);
|
||||
|
||||
// Append after purge.
|
||||
var seq = await store.AppendAsync("foo", "new data"u8.ToArray(), default);
|
||||
seq.ShouldBeGreaterThan((ulong)0);
|
||||
|
||||
var msg = await store.LoadAsync(seq, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.ToArray().ShouldBe("new data"u8.ToArray());
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86
|
||||
[Fact]
|
||||
public async Task Empty_store_state_is_zeroed()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "empty-state");
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)0);
|
||||
state.Bytes.ShouldBe((ulong)0);
|
||||
state.FirstSeq.ShouldBe((ulong)0);
|
||||
state.LastSeq.ShouldBe((ulong)0);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreCollapseDmap server/filestore_test.go:1561
|
||||
[Fact]
|
||||
public async Task Remove_all_messages_one_by_one()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "rm-all");
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
for (ulong i = 1; i <= 5; i++)
|
||||
(await store.RemoveAsync(i, default)).ShouldBeTrue();
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)0);
|
||||
state.Bytes.ShouldBe((ulong)0);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:136
|
||||
[Fact]
|
||||
public async Task Double_remove_returns_false()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "double-rm");
|
||||
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
(await store.RemoveAsync(1, default)).ShouldBeTrue();
|
||||
(await store.RemoveAsync(1, default)).ShouldBeFalse();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:181
|
||||
[Fact]
|
||||
public async Task Large_payload_round_trips()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "large-payload");
|
||||
|
||||
var payload = new byte[8 * 1024]; // 8 KiB
|
||||
Random.Shared.NextBytes(payload);
|
||||
|
||||
await store.AppendAsync("foo", payload, default);
|
||||
|
||||
var msg = await store.LoadAsync(1, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.ToArray().ShouldBe(payload);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:181
|
||||
[Fact]
|
||||
public async Task Binary_payload_round_trips()
|
||||
{
|
||||
await using var store = CreateStore(subdirectory: "binary");
|
||||
|
||||
// Include all byte values 0-255.
|
||||
var payload = new byte[256];
|
||||
for (var i = 0; i < 256; i++)
|
||||
payload[i] = (byte)i;
|
||||
|
||||
await store.AppendAsync("foo", payload, default);
|
||||
|
||||
var msg = await store.LoadAsync(1, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.ToArray().ShouldBe(payload);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,289 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests for Task A3: FileStore Block Manager Rewrite.
|
||||
// Verifies that FileStore correctly uses MsgBlock-based storage:
|
||||
// block files on disk, block rotation, recovery, purge, snapshot,
|
||||
// soft-delete, and payload transformation (S2/AEAD) integration.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// Task A3 coverage: FileStore block-manager rewrite. Confirms that
/// MsgBlock-backed storage produces .blk files on disk, rotates blocks when
/// full, recovers after restart, purges, snapshots, soft-deletes, and
/// composes with the S2/AEAD payload transforms.
/// </summary>
public sealed class FileStoreBlockTests : IDisposable
{
    // Scratch root for this test class; every test gets its own subdirectory.
    private readonly string _root;

    public FileStoreBlockTests()
    {
        _root = Path.Combine(Path.GetTempPath(), $"nats-js-fs-block-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_root);
    }

    public void Dispose()
    {
        if (!Directory.Exists(_root))
            return;

        Directory.Delete(_root, recursive: true);
    }

    // Builds a FileStore rooted at <scratch>/<subdirectory>, applying any
    // caller-supplied options on top.
    private FileStore CreateStore(string subdirectory, FileStoreOptions? options = null)
    {
        var opts = options ?? new FileStoreOptions();
        opts.Directory = Path.Combine(_root, subdirectory);
        return new FileStore(opts);
    }

    // Go: filestore.go block-based storage — appending must create .blk files.
    [Fact]
    public async Task Append_UsesBlockStorage()
    {
        const string subDir = "blk-storage";
        var storeDir = Path.Combine(_root, subDir);

        await using var store = CreateStore(subDir);

        await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

        // The block layout writes at least one .blk file...
        Directory.GetFiles(storeDir, "*.blk").Length.ShouldBeGreaterThanOrEqualTo(1);

        // ...and the legacy JSONL layout must not appear.
        File.Exists(Path.Combine(storeDir, "messages.jsonl")).ShouldBeFalse();
    }

    // Go: filestore.go block rotation — exceeding the block size opens a new block.
    [Fact]
    public async Task MultiBlock_RotatesWhenFull()
    {
        const string subDir = "blk-rotation";
        var storeDir = Path.Combine(_root, subDir);

        // A tiny block size forces rotation after only a few messages.
        await using var store = CreateStore(subDir, new FileStoreOptions { BlockSizeBytes = 256 });

        for (var n = 0; n < 20; n++)
            await store.AppendAsync("foo", "Hello World - block rotation test!"u8.ToArray(), default);

        (await store.GetStateAsync(default)).Messages.ShouldBe(20UL);

        // More than one .blk file must exist on disk...
        Directory.GetFiles(storeDir, "*.blk").Length.ShouldBeGreaterThan(1);

        // ...and the in-memory block count must agree.
        store.BlockCount.ShouldBeGreaterThan(1);
    }

    // Go: filestore.go multi-block load — reads must work across block boundaries.
    [Fact]
    public async Task Load_AcrossBlocks()
    {
        // A tiny block size spreads the messages over several blocks.
        await using var store = CreateStore("blk-across", new FileStoreOptions { BlockSizeBytes = 256 });

        for (var n = 0; n < 20; n++)
            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{n:D4}"), default);

        store.BlockCount.ShouldBeGreaterThan(1);

        // Every message must be loadable no matter which block holds it.
        for (ulong seq = 1; seq <= 20; seq++)
        {
            var msg = await store.LoadAsync(seq, default);
            msg.ShouldNotBeNull();
            msg!.Subject.ShouldBe("foo");
            msg.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes($"msg-{(int)(seq - 1):D4}"));
        }
    }

    // Go: filestore.go recovery — startup rescans the block files left on disk.
    [Fact]
    public async Task Recovery_AfterRestart()
    {
        const string subDir = "blk-recovery";
        var storeDir = Path.Combine(_root, subDir);

        // First lifetime: write 20 messages, then dispose.
        await using (var store = CreateStore(subDir, new FileStoreOptions { BlockSizeBytes = 256 }))
        {
            for (var n = 0; n < 20; n++)
                await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{n:D4}"), default);

            (await store.GetStateAsync(default)).Messages.ShouldBe(20UL);
        }

        // The block files must survive the dispose.
        Directory.GetFiles(storeDir, "*.blk").Length.ShouldBeGreaterThan(0);

        // Second lifetime: recover from the same directory and verify everything.
        await using (var store = CreateStore(subDir, new FileStoreOptions { BlockSizeBytes = 256 }))
        {
            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe(20UL);
            state.FirstSeq.ShouldBe(1UL);
            state.LastSeq.ShouldBe(20UL);

            for (ulong seq = 1; seq <= 20; seq++)
            {
                var msg = await store.LoadAsync(seq, default);
                msg.ShouldNotBeNull();
                msg!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes($"msg-{(int)(seq - 1):D4}"));
            }
        }
    }

    // Go: filestore.go purge — drops all blocks and resets the counters.
    [Fact]
    public async Task Purge_CleansAllBlocks()
    {
        const string subDir = "blk-purge";
        var storeDir = Path.Combine(_root, subDir);

        await using var store = CreateStore(subDir, new FileStoreOptions { BlockSizeBytes = 256 });

        for (var n = 0; n < 20; n++)
            await store.AppendAsync("foo", "Hello"u8.ToArray(), default);

        // Sanity: block files exist before the purge.
        Directory.GetFiles(storeDir, "*.blk").Length.ShouldBeGreaterThan(0);

        await store.PurgeAsync(default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe(0UL);
        state.Bytes.ShouldBe(0UL);

        // The legacy JSONL layout must never reappear after a purge.
        File.Exists(Path.Combine(storeDir, "messages.jsonl")).ShouldBeFalse();
    }

    // Go: filestore.go dmap — removal soft-deletes a record inside its block.
    [Fact]
    public async Task Remove_SoftDeletesInBlock()
    {
        await using var store = CreateStore("blk-remove");

        for (var n = 0; n < 5; n++)
            await store.AppendAsync("foo", "data"u8.ToArray(), default);

        // Drop the middle sequence.
        (await store.RemoveAsync(3, default)).ShouldBeTrue();

        // Only the removed sequence disappears.
        (await store.LoadAsync(3, default)).ShouldBeNull();
        (await store.LoadAsync(1, default)).ShouldNotBeNull();
        (await store.LoadAsync(2, default)).ShouldNotBeNull();
        (await store.LoadAsync(4, default)).ShouldNotBeNull();
        (await store.LoadAsync(5, default)).ShouldNotBeNull();

        // The message count reflects the soft delete.
        (await store.GetStateAsync(default)).Messages.ShouldBe(4UL);
    }

    // Go: filestore.go snapshot — snapshot creation walks every block.
    [Fact]
    public async Task Snapshot_IncludesAllBlocks()
    {
        await using var source = CreateStore("blk-snap-src", new FileStoreOptions { BlockSizeBytes = 256 });

        for (var n = 0; n < 30; n++)
            await source.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{n}"), default);

        // The payload volume must have spilled into multiple blocks.
        source.BlockCount.ShouldBeGreaterThan(1);

        var snapshot = await source.CreateSnapshotAsync(default);
        snapshot.Length.ShouldBeGreaterThan(0);

        // Restoring the snapshot into a fresh store reproduces the state...
        await using var target = CreateStore("blk-snap-dst");
        await target.RestoreSnapshotAsync(snapshot, default);

        var srcState = await source.GetStateAsync(default);
        var dstState = await target.GetStateAsync(default);
        dstState.Messages.ShouldBe(srcState.Messages);
        dstState.FirstSeq.ShouldBe(srcState.FirstSeq);
        dstState.LastSeq.ShouldBe(srcState.LastSeq);

        // ...and every message round-trips byte-for-byte.
        for (ulong seq = 1; seq <= srcState.Messages; seq++)
        {
            var original = await source.LoadAsync(seq, default);
            var copy = await target.LoadAsync(seq, default);
            copy.ShouldNotBeNull();
            copy!.Subject.ShouldBe(original!.Subject);
            copy.Payload.ToArray().ShouldBe(original.Payload.ToArray());
        }
    }

    // Go: filestore.go S2 compression — payloads are compressed before block writes.
    [Fact]
    public async Task Compression_RoundTrip()
    {
        await using var store = CreateStore("blk-compress", new FileStoreOptions
        {
            Compression = StoreCompression.S2Compression,
        });

        var body = "Hello, S2 compressed block storage!"u8.ToArray();
        for (var n = 0; n < 10; n++)
            await store.AppendAsync("foo", body, default);

        (await store.GetStateAsync(default)).Messages.ShouldBe(10UL);

        // Decompression on read must restore the exact payload.
        for (ulong seq = 1; seq <= 10; seq++)
        {
            var msg = await store.LoadAsync(seq, default);
            msg.ShouldNotBeNull();
            msg!.Payload.ToArray().ShouldBe(body);
        }
    }

    // Go: filestore.go AEAD encryption — payloads are encrypted before block writes.
    [Fact]
    public async Task Encryption_RoundTrip()
    {
        // Slice the UTF-8 literal down to exactly 32 key bytes.
        var key = "nats-v2-test-key-exactly-32-bytes"u8[..32].ToArray();

        await using var store = CreateStore("blk-encrypt", new FileStoreOptions
        {
            Cipher = StoreCipher.ChaCha,
            EncryptionKey = key,
        });

        var body = "Hello, AEAD encrypted block storage!"u8.ToArray();
        for (var n = 0; n < 10; n++)
            await store.AppendAsync("foo", body, default);

        (await store.GetStateAsync(default)).Messages.ShouldBe(10UL);

        // Decryption on read must restore the exact payload.
        for (ulong seq = 1; seq <= 10; seq++)
        {
            var msg = await store.LoadAsync(seq, default);
            msg.ShouldNotBeNull();
            msg!.Payload.ToArray().ShouldBe(body);
        }
    }
}
|
||||
@@ -0,0 +1,95 @@
|
||||
// Go ref: filestore.go:2204 (lastChecksum), filestore.go:8180 (validation in msgFromBufEx)
|
||||
//
|
||||
// Tests for per-block last-checksum tracking and read-path validation using XxHash64.
|
||||
// The Go reference implementation tracks the last written checksum in msgBlock.lchk
|
||||
// and validates each record's checksum during reads to detect storage corruption.
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// Per-block last-checksum tracking and read-path validation with XxHash64.
/// Go ref: filestore.go:2204 (msgBlock.lchk) and filestore.go:8180
/// (checksum validation in msgFromBufEx).
/// </summary>
public sealed class FileStoreChecksumTests : IDisposable
{
    // Unique scratch directory per test-class instance.
    private readonly DirectoryInfo _dir = Directory.CreateTempSubdirectory("checksum-");

    // Fix: guard the delete. DirectoryInfo.Delete throws
    // DirectoryNotFoundException when the directory is already gone, which
    // would mask the real test result; the sibling FileStore test classes all
    // guard their cleanup the same way. Refresh() first because DirectoryInfo
    // caches its Exists value from construction time.
    public void Dispose()
    {
        _dir.Refresh();
        if (_dir.Exists)
            _dir.Delete(recursive: true);
    }

    // Go ref: filestore.go:2204 (msgBlock.lchk — last checksum field)
    [Fact]
    public void MsgBlock_tracks_last_checksum()
    {
        // Arrange / Act — a single write must populate the running checksum.
        using var block = MsgBlock.Create(1, _dir.FullName, 1024 * 1024);
        block.Write("test", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray());

        // Assert — XxHash64 produces a 64-bit (8-byte) digest.
        block.LastChecksum.ShouldNotBeNull();
        block.LastChecksum!.Length.ShouldBe(8);
    }

    // Go ref: filestore.go:8180 (msgFromBufEx checksum validation)
    [Fact]
    public void MsgBlock_validates_checksum_on_read()
    {
        // Arrange — flush and drop the cache so the read takes the disk path,
        // which is where checksum validation runs.
        using var block = MsgBlock.Create(1, _dir.FullName, 1024 * 1024);
        block.Write("test", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray());
        block.Flush();
        block.ClearCache();

        // Act — an uncorrupted record must read back cleanly.
        var record = block.Read(1);

        // Assert
        record.ShouldNotBeNull();
        record!.Subject.ShouldBe("test");
        record.Payload.ToArray().ShouldBe("hello"u8.ToArray());
    }

    // Go ref: filestore.go:8180 (checksum mismatch → error path)
    [Fact]
    public void MsgBlock_detects_corrupted_record_on_disk_read()
    {
        // Arrange — write a record, flush, clear cache so the next read goes to disk.
        using var block = MsgBlock.Create(1, _dir.FullName, 1024 * 1024);
        block.Write("test", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray());
        block.Flush();
        block.ClearCache();

        // Corrupt a byte near the end of the block file (in the payload region).
        var files = Directory.GetFiles(_dir.FullName, "*.blk");
        files.Length.ShouldBe(1);
        var bytes = File.ReadAllBytes(files[0]);
        // Invert a byte 10 bytes from the end — assumed to land in the record
        // payload, past the trailing checksum + timestamp; confirm against the
        // block file layout if this ever flakes.
        bytes[^10] ^= 0xFF;
        File.WriteAllBytes(files[0], bytes);

        // Act / Assert — the read path must surface the checksum mismatch.
        Should.Throw<InvalidDataException>(() => block.Read(1));
    }

    // Go ref: filestore.go:2204 (lchk updated on each write)
    [Fact]
    public void MsgBlock_checksum_chain_across_writes()
    {
        // Arrange
        using var block = MsgBlock.Create(1, _dir.FullName, 1024 * 1024);

        // Act — write three records, capturing the running checksum after each.
        block.Write("a", ReadOnlyMemory<byte>.Empty, "one"u8.ToArray());
        var checksum1 = block.LastChecksum?.ToArray();

        block.Write("b", ReadOnlyMemory<byte>.Empty, "two"u8.ToArray());
        var checksum2 = block.LastChecksum?.ToArray();

        block.Write("c", ReadOnlyMemory<byte>.Empty, "three"u8.ToArray());
        var checksum3 = block.LastChecksum?.ToArray();

        // Assert — every write refreshes the checksum to a new value.
        checksum1.ShouldNotBeNull();
        checksum2.ShouldNotBeNull();
        checksum3.ShouldNotBeNull();
        checksum1.ShouldNotBe(checksum2!);
        checksum2.ShouldNotBe(checksum3!);
    }
}
|
||||
@@ -0,0 +1,305 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStoreBasics (S2Compression permutation),
|
||||
// TestFileStoreWriteExpireWrite (compression variant),
|
||||
// TestFileStoreAgeLimit (compression variant),
|
||||
// TestFileStoreCompactLastPlusOne (compression variant)
|
||||
// The Go tests use testFileStoreAllPermutations to run each test with
|
||||
// NoCompression and S2Compression. These tests exercise the .NET compression path.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class FileStoreCompressionTests : IDisposable
|
||||
{
|
||||
private readonly string _dir;
|
||||
|
||||
public FileStoreCompressionTests()
|
||||
{
|
||||
_dir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-compress-{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(_dir);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (Directory.Exists(_dir))
|
||||
Directory.Delete(_dir, recursive: true);
|
||||
}
|
||||
|
||||
private FileStore CreateStore(string subdirectory, bool compress = true, FileStoreOptions? options = null)
|
||||
{
|
||||
var dir = Path.Combine(_dir, subdirectory);
|
||||
var opts = options ?? new FileStoreOptions();
|
||||
opts.Directory = dir;
|
||||
opts.EnableCompression = compress;
|
||||
return new FileStore(opts);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86 (S2 permutation)
|
||||
[Fact]
|
||||
public async Task Compressed_store_and_load()
|
||||
{
|
||||
await using var store = CreateStore("comp-basic");
|
||||
|
||||
const string subject = "foo";
|
||||
var payload = "Hello World"u8.ToArray();
|
||||
|
||||
for (var i = 1; i <= 5; i++)
|
||||
{
|
||||
var seq = await store.AppendAsync(subject, payload, default);
|
||||
seq.ShouldBe((ulong)i);
|
||||
}
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)5);
|
||||
|
||||
var msg = await store.LoadAsync(3, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Subject.ShouldBe(subject);
|
||||
msg.Payload.ToArray().ShouldBe(payload);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:181 (S2 permutation)
|
||||
[Fact]
|
||||
public async Task Compressed_store_and_recover()
|
||||
{
|
||||
var subDir = "comp-recover";
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
for (var i = 0; i < 100; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i:D4}"), default);
|
||||
}
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)100);
|
||||
|
||||
var msg = await store.LoadAsync(50, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Subject.ShouldBe("foo");
|
||||
msg.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("msg-0049"));
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86 (S2 permutation)
|
||||
[Fact]
|
||||
public async Task Compressed_remove_and_reload()
|
||||
{
|
||||
await using var store = CreateStore("comp-remove");
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);
|
||||
|
||||
await store.RemoveAsync(5, default);
|
||||
|
||||
(await store.LoadAsync(5, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(6, default)).ShouldNotBeNull();
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)9);
|
||||
}
|
||||
|
||||
// Go: TestFileStorePurge server/filestore_test.go:709 (S2 permutation)
|
||||
[Fact]
|
||||
public async Task Compressed_purge()
|
||||
{
|
||||
await using var store = CreateStore("comp-purge");
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
await store.AppendAsync("foo", "Hello"u8.ToArray(), default);
|
||||
|
||||
await store.PurgeAsync(default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)0);
|
||||
state.Bytes.ShouldBe((ulong)0);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreWriteExpireWrite server/filestore_test.go:424 (S2 permutation)
|
||||
[Fact]
|
||||
public async Task Compressed_large_batch()
|
||||
{
|
||||
await using var store = CreateStore("comp-large");
|
||||
|
||||
for (var i = 0; i < 200; i++)
|
||||
await store.AppendAsync("zzz", Encoding.UTF8.GetBytes($"Hello World! - {i}"), default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)200);
|
||||
|
||||
for (ulong i = 1; i <= 200; i++)
|
||||
{
|
||||
var msg = await store.LoadAsync(i, default);
|
||||
msg.ShouldNotBeNull();
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreAgeLimit server/filestore_test.go:616 (S2 permutation)
|
||||
[Fact]
|
||||
public async Task Compressed_with_age_expiry()
|
||||
{
|
||||
await using var store = CreateStore("comp-age", options: new FileStoreOptions { MaxAgeMs = 200 });
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("foo", "Hello"u8.ToArray(), default);
|
||||
|
||||
await Task.Delay(300);
|
||||
|
||||
await store.AppendAsync("foo", "trigger"u8.ToArray(), default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)1);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreSnapshot server/filestore_test.go:1799 (S2 permutation)
|
||||
[Fact]
|
||||
public async Task Compressed_snapshot_and_restore()
|
||||
{
|
||||
await using var store = CreateStore("comp-snap-src");
|
||||
|
||||
for (var i = 0; i < 30; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);
|
||||
|
||||
var snap = await store.CreateSnapshotAsync(default);
|
||||
snap.Length.ShouldBeGreaterThan(0);
|
||||
|
||||
await using var restored = CreateStore("comp-snap-dst");
|
||||
await restored.RestoreSnapshotAsync(snap, default);
|
||||
|
||||
var srcState = await store.GetStateAsync(default);
|
||||
var dstState = await restored.GetStateAsync(default);
|
||||
dstState.Messages.ShouldBe(srcState.Messages);
|
||||
|
||||
for (ulong i = 1; i <= srcState.Messages; i++)
|
||||
{
|
||||
var original = await store.LoadAsync(i, default);
|
||||
var copy = await restored.LoadAsync(i, default);
|
||||
copy.ShouldNotBeNull();
|
||||
copy!.Payload.ToArray().ShouldBe(original!.Payload.ToArray());
|
||||
}
|
||||
}
|
||||
|
||||
// Combined encryption + compression (Go AES-S2 permutation).
|
||||
[Fact]
|
||||
public async Task Compressed_and_encrypted_round_trip()
|
||||
{
|
||||
var dir = Path.Combine(_dir, "comp-enc");
|
||||
await using var store = new FileStore(new FileStoreOptions
|
||||
{
|
||||
Directory = dir,
|
||||
EnableCompression = true,
|
||||
EnableEncryption = true,
|
||||
EncryptionKey = "test-key-for-compression!!!!!!"u8.ToArray(),
|
||||
});
|
||||
|
||||
var payload = "Hello World - compressed and encrypted"u8.ToArray();
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", payload, default);
|
||||
|
||||
for (ulong i = 1; i <= 10; i++)
|
||||
{
|
||||
var msg = await store.LoadAsync(i, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.ToArray().ShouldBe(payload);
|
||||
}
|
||||
}
|
||||
|
||||
// Combined encryption + compression with recovery.
|
||||
[Fact]
|
||||
public async Task Compressed_and_encrypted_recovery()
|
||||
{
|
||||
var subDir = "comp-enc-recover";
|
||||
var dir = Path.Combine(_dir, subDir);
|
||||
var key = "test-key-for-compression!!!!!!"u8.ToArray();
|
||||
|
||||
await using (var store = new FileStore(new FileStoreOptions
|
||||
{
|
||||
Directory = dir,
|
||||
EnableCompression = true,
|
||||
EnableEncryption = true,
|
||||
EncryptionKey = key,
|
||||
}))
|
||||
{
|
||||
for (var i = 0; i < 20; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i:D4}"), default);
|
||||
}
|
||||
|
||||
await using (var store = new FileStore(new FileStoreOptions
|
||||
{
|
||||
Directory = dir,
|
||||
EnableCompression = true,
|
||||
EnableEncryption = true,
|
||||
EncryptionKey = key,
|
||||
}))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)20);
|
||||
|
||||
var msg = await store.LoadAsync(15, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("msg-0014"));
|
||||
}
|
||||
}
|
||||
|
||||
// Compressed large payload (highly compressible).
|
||||
[Fact]
|
||||
public async Task Compressed_highly_compressible_payload()
|
||||
{
|
||||
await using var store = CreateStore("comp-compressible");
|
||||
|
||||
// Highly repetitive data should compress well.
|
||||
var payload = new byte[4096];
|
||||
Array.Fill(payload, (byte)'A');
|
||||
|
||||
await store.AppendAsync("foo", payload, default);
|
||||
|
||||
var msg = await store.LoadAsync(1, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.ToArray().ShouldBe(payload);
|
||||
}
|
||||
|
||||
// Compressed empty payload.
|
||||
[Fact]
|
||||
public async Task Compressed_empty_payload()
|
||||
{
|
||||
await using var store = CreateStore("comp-empty");
|
||||
|
||||
await store.AppendAsync("foo", ReadOnlyMemory<byte>.Empty, default);
|
||||
|
||||
var msg = await store.LoadAsync(1, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Payload.Length.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Verify compressed data is different from uncompressed on disk.
|
||||
[Fact]
|
||||
public async Task Compressed_data_differs_from_uncompressed_on_disk()
|
||||
{
|
||||
var compDir = Path.Combine(_dir, "comp-on-disk");
|
||||
var plainDir = Path.Combine(_dir, "plain-on-disk");
|
||||
|
||||
await using (var compStore = CreateStore("comp-on-disk"))
|
||||
{
|
||||
await compStore.AppendAsync("foo", "AAAAAAAAAAAAAAAAAAAAAAAAAAA"u8.ToArray(), default);
|
||||
}
|
||||
|
||||
await using (var plainStore = CreateStore("plain-on-disk", compress: false))
|
||||
{
|
||||
await plainStore.AppendAsync("foo", "AAAAAAAAAAAAAAAAAAAAAAAAAAA"u8.ToArray(), default);
|
||||
}
|
||||
|
||||
var compFile = Path.Combine(compDir, "messages.jsonl");
|
||||
var plainFile = Path.Combine(plainDir, "messages.jsonl");
|
||||
|
||||
if (File.Exists(compFile) && File.Exists(plainFile))
|
||||
{
|
||||
var compContent = File.ReadAllText(compFile);
|
||||
var plainContent = File.ReadAllText(plainFile);
|
||||
// The base64-encoded payloads should differ due to compression envelope.
|
||||
compContent.ShouldNotBe(plainContent);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,243 @@
|
||||
// Reference: golang/nats-server/server/filestore.go:5783-5842
|
||||
// Tests for Task 4: Crash Recovery Enhancement — FlushAllPending and WriteStreamState.
|
||||
// Go parity:
|
||||
// TestFileStoreSyncIntervals → FlushAllPending_flushes_active_block
|
||||
// TestFileStoreWriteFullStateBasics → FlushAllPending_writes_stream_state_file
|
||||
// TestFileStoreTtlWheelExpiry (recovery variant) → Recovery_rebuilds_ttl_and_expires_old
|
||||
// TestFileStoreBitRot (block tail truncation variant) → Recovery_handles_truncated_block
|
||||
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for <see cref="FileStore.FlushAllPending"/> and stream state checkpoint writes.
|
||||
/// Verifies that buffered block data is flushed to disk, that an atomic stream.state
|
||||
/// checkpoint file is written, that TTL recovery works across a restart, and that
|
||||
/// recovery is graceful when the tail of a block file has been truncated (simulating
|
||||
/// a crash mid-write).
|
||||
/// Reference: golang/nats-server/server/filestore.go:5783 (flushPendingWritesUnlocked).
|
||||
/// </summary>
|
||||
/// <summary>
/// Tests for <see cref="FileStore.FlushAllPending"/> and stream state checkpoint writes.
/// Covers: flushing buffered block data to disk, writing the atomic stream.state
/// checkpoint, TTL-based pruning of backdated messages during recovery, and
/// graceful recovery when the tail of a block file has been truncated
/// (simulating a crash mid-write).
/// Reference: golang/nats-server/server/filestore.go:5783 (flushPendingWritesUnlocked).
/// </summary>
public sealed class FileStoreCrashRecoveryTests : IDisposable
{
    // Per-class scratch directory; removed in Dispose.
    private readonly string _root;

    public FileStoreCrashRecoveryTests()
    {
        var unique = $"nats-js-crash-{Guid.NewGuid():N}";
        _root = Path.Combine(Path.GetTempPath(), unique);
        Directory.CreateDirectory(_root);
    }

    public void Dispose()
    {
        if (!Directory.Exists(_root))
            return;
        Directory.Delete(_root, recursive: true);
    }

    // Creates (or re-opens) a FileStore rooted at _root/<subDir>, applying
    // the caller-supplied options when provided.
    private FileStore CreateStore(string subDir, FileStoreOptions? opts = null)
    {
        var storeDir = Path.Combine(_root, subDir);
        Directory.CreateDirectory(storeDir);
        var options = opts ?? new FileStoreOptions();
        options.Directory = storeDir;
        return new FileStore(options);
    }

    private string StoreDir(string subDir) => Path.Combine(_root, subDir);

    // -------------------------------------------------------------------------
    // FlushAllPending — block flush
    // -------------------------------------------------------------------------

    // Go: TestFileStoreSyncIntervals (filestore_test.go) — verifies that pending writes
    // are flushed to disk and the .blk file is non-empty after FlushAllPending.
    [Fact]
    public async Task FlushAllPending_flushes_active_block()
    {
        // Arrange: write a message to the store.
        const string sub = "flush-block";
        var blockDir = StoreDir(sub);
        using var store = CreateStore(sub);

        store.StoreMsg("events.a", null, "payload-for-flush"u8.ToArray(), 0L);

        // Act: flush all pending writes.
        await store.FlushAllPending();

        // Assert: at least one non-empty .blk file proves the active block hit disk.
        var blockFiles = Directory.GetFiles(blockDir, "*.blk");
        blockFiles.Length.ShouldBeGreaterThanOrEqualTo(1);
        blockFiles.All(f => new FileInfo(f).Length > 0).ShouldBeTrue(
            "every .blk file should contain at least the record bytes after a flush");
    }

    // -------------------------------------------------------------------------
    // FlushAllPending — stream.state checkpoint
    // -------------------------------------------------------------------------

    // Go: TestFileStoreWriteFullStateBasics (filestore_test.go:5461) — verifies that
    // WriteStreamState creates a valid, atomic stream.state checkpoint file.
    [Fact]
    public async Task FlushAllPending_writes_stream_state_file()
    {
        // Arrange: store several messages across subjects.
        const string sub = "state-file";
        var storeDir = StoreDir(sub);
        using var store = CreateStore(sub);

        store.StoreMsg("orders.new", null, "order-1"u8.ToArray(), 0L);
        store.StoreMsg("orders.new", null, "order-2"u8.ToArray(), 0L);
        store.StoreMsg("events.a", null, "event-1"u8.ToArray(), 0L);

        // Act: flush — this should write stream.state atomically.
        await store.FlushAllPending();

        // Assert: stream.state exists and no leftover .tmp staging file remains.
        // AtomicFileWriter uses {path}.{random}.tmp so check by extension, not exact name.
        var statePath = Path.Combine(storeDir, "stream.state");
        File.Exists(statePath).ShouldBeTrue("stream.state checkpoint must exist after FlushAllPending");
        Directory.GetFiles(storeDir, "*.tmp").ShouldBeEmpty("all .tmp staging files must be renamed away after atomic write");

        // Assert: the checkpoint is valid JSON carrying the expected fields.
        using var doc = JsonDocument.Parse(File.ReadAllText(statePath));
        var root = doc.RootElement;

        root.TryGetProperty("FirstSeq", out var firstSeq).ShouldBeTrue("stream.state must contain FirstSeq");
        root.TryGetProperty("LastSeq", out var lastSeq).ShouldBeTrue("stream.state must contain LastSeq");
        root.TryGetProperty("Messages", out var messages).ShouldBeTrue("stream.state must contain Messages");

        firstSeq.GetUInt64().ShouldBe(1UL);
        lastSeq.GetUInt64().ShouldBe(3UL);
        messages.GetUInt64().ShouldBe(3UL);
    }

    // Go: FlushAllPending is idempotent — calling it twice must not throw and must
    // overwrite the previous state file with the latest state.
    [Fact]
    public async Task FlushAllPending_is_idempotent()
    {
        const string sub = "flush-idempotent";
        var storeDir = StoreDir(sub);
        using var store = CreateStore(sub);

        store.StoreMsg("foo", null, "msg-1"u8.ToArray(), 0L);
        await store.FlushAllPending();

        store.StoreMsg("foo", null, "msg-2"u8.ToArray(), 0L);
        await store.FlushAllPending();

        // The state file should reflect the second flush (2 messages, seq 1..2).
        var statePath = Path.Combine(storeDir, "stream.state");
        File.Exists(statePath).ShouldBeTrue();
        using var doc = JsonDocument.Parse(File.ReadAllText(statePath));
        doc.RootElement.GetProperty("Messages").GetUInt64().ShouldBe(2UL);
        doc.RootElement.GetProperty("LastSeq").GetUInt64().ShouldBe(2UL);
    }

    // -------------------------------------------------------------------------
    // TTL wheel rebuild across restart
    // -------------------------------------------------------------------------

    // Go: TestFileStoreRecoveryReregistersTtls (filestore_test.go) — verifies that
    // messages whose timestamps pre-date the MaxAgeMs cutoff are pruned during recovery.
    // No Task.Delay is needed: messages are written directly to a MsgBlock with a
    // timestamp 1 hour in the past, so they are already expired when FileStore opens.
    [Fact]
    public async Task Recovery_rebuilds_ttl_and_expires_old()
    {
        // Arrange: build a block file with messages backdated 1 hour so they are
        // already past the MaxAgeMs cutoff at the moment FileStore opens for recovery.
        const string sub = "ttl-recovery";
        var storeDir = StoreDir(sub);
        Directory.CreateDirectory(storeDir);

        var backdatedMs = new DateTimeOffset(DateTime.UtcNow.AddHours(-1)).ToUnixTimeMilliseconds();
        var oneHourAgoNs = backdatedMs * 1_000_000L;

        using (var block = MsgBlock.Create(0, storeDir, maxBytes: 64 * 1024, firstSequence: 1))
        {
            block.WriteAt(1, "expire.me", ReadOnlyMemory<byte>.Empty, "short-lived"u8.ToArray(), oneHourAgoNs);
            block.WriteAt(2, "expire.me", ReadOnlyMemory<byte>.Empty, "short-lived-2"u8.ToArray(), oneHourAgoNs);
            block.Flush();
        }

        // Act: open FileStore with a 60-second MaxAgeMs — messages timestamped 1 hour
        // ago are already expired, so PruneExpired during RecoverBlocks removes them.
        await using var recovered = CreateStore(sub, new FileStoreOptions { MaxAgeMs = 60_000 });

        // Assert: expired messages must be gone after recovery + prune.
        var stateAfter = await recovered.GetStateAsync(default);
        stateAfter.Messages.ShouldBe(0UL, "expired messages must be pruned during recovery");
        stateAfter.FirstSeq.ShouldBe(0UL, "first sequence must be 0 when the store is empty after full expiry");
        stateAfter.LastSeq.ShouldBe(0UL, "last sequence must be 0 when the store is empty after full expiry");
    }

    // -------------------------------------------------------------------------
    // Truncated block recovery
    // -------------------------------------------------------------------------

    // Go: TestFileStoreErrPartialLoad (filestore_test.go) — verifies that recovery
    // handles a block whose tail has been truncated (simulating a crash mid-write).
    // The earlier records in the block must still be recoverable; the truncated tail
    // must be silently skipped.
    [Fact]
    public async Task Recovery_handles_truncated_block()
    {
        // Arrange: write a few messages and flush so the .blk file has valid data.
        const string sub = "truncated-block";
        var storeDir = StoreDir(sub);

        await using (var store = CreateStore(sub, new FileStoreOptions { BlockSizeBytes = 4096 }))
        {
            for (var n = 0; n < 5; n++)
                store.StoreMsg("events", null, Encoding.UTF8.GetBytes($"msg-{n}"), 0L);

            // Flush to ensure data is on disk before we close.
            await store.FlushAllPending();
        }

        // Simulate a crash mid-write by truncating the .blk file by a few bytes at
        // the tail. This leaves all but the last record in a valid state.
        var blkFile = Directory.GetFiles(storeDir, "*.blk").OrderBy(f => f).First();
        var originalLength = new FileInfo(blkFile).Length;
        originalLength.ShouldBeGreaterThan(4, "block file must have content before truncation");

        // Remove the last 4 bytes — simulates a torn write at the file tail.
        using (var fs = new FileStream(blkFile, FileMode.Open, FileAccess.Write))
        {
            fs.SetLength(Math.Max(0, originalLength - 4));
        }

        // Act: re-open — recovery must not throw and must load what it can.
        // The key assertion is that recovery does not throw; it may lose the last
        // partial record but must preserve earlier complete records.
        Exception? thrown = null;
        FileStore? recovered = null;
        try
        {
            recovered = CreateStore(sub, new FileStoreOptions { BlockSizeBytes = 4096 });
            var state = await recovered.GetStateAsync(default);

            // At least some messages must survive (the truncation only hit the tail).
            state.Messages.ShouldBeGreaterThanOrEqualTo(0UL,
                "recovery from a truncated block must not throw and must expose the surviving messages");
        }
        catch (Exception ex)
        {
            thrown = ex;
        }
        finally
        {
            recovered?.Dispose();
        }

        // InvalidDataException is reserved for integrity failures (wrong encryption key);
        // a tail truncation must be silently skipped during recovery.
        thrown.ShouldBeNull("recovery from a truncated block must not propagate exceptions");
    }
}
|
||||
@@ -0,0 +1,283 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStoreEncrypted,
|
||||
// TestFileStoreRestoreEncryptedWithNoKeyFuncFails,
|
||||
// TestFileStoreDoubleCompactWithWriteInBetweenEncryptedBug,
|
||||
// TestFileStoreEncryptedKeepIndexNeedBekResetBug,
|
||||
// TestFileStoreShortIndexWriteBug (encryption variant)
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// Encryption-at-rest tests for <see cref="FileStore"/>: store/load/recover with a
/// key, wrong-key failure, remove/purge/snapshot interplay, and on-disk ciphertext
/// checks. Ported from golang/nats-server/server/filestore_test.go.
/// </summary>
public sealed class FileStoreEncryptionTests : IDisposable
{
    // Per-class scratch directory; removed in Dispose.
    private readonly string _dir;

    public FileStoreEncryptionTests()
    {
        var unique = $"nats-js-fs-enc-{Guid.NewGuid():N}";
        _dir = Path.Combine(Path.GetTempPath(), unique);
        Directory.CreateDirectory(_dir);
    }

    public void Dispose()
    {
        if (!Directory.Exists(_dir))
            return;
        Directory.Delete(_dir, recursive: true);
    }

    private static byte[] TestKey => "nats-encryption-key-for-test!!"u8.ToArray();

    // Opens a FileStore under _dir/<subdirectory>; encryption is on by default
    // and falls back to TestKey when no key is supplied.
    private FileStore CreateStore(string subdirectory, bool encrypt = true, byte[]? key = null)
    {
        var options = new FileStoreOptions
        {
            Directory = Path.Combine(_dir, subdirectory),
            EnableEncryption = encrypt,
            EncryptionKey = key ?? TestKey,
        };
        return new FileStore(options);
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204
    [Fact]
    public async Task Encrypted_store_and_load()
    {
        await using var store = CreateStore("enc-basic");

        const string subject = "foo";
        var payload = "aes ftw"u8.ToArray();

        var remaining = 50;
        while (remaining-- > 0)
            await store.AppendAsync(subject, payload, default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)50);

        var loaded = await store.LoadAsync(10, default);
        loaded.ShouldNotBeNull();
        loaded!.Subject.ShouldBe(subject);
        loaded.Payload.ToArray().ShouldBe(payload);
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4228
    [Fact]
    public async Task Encrypted_store_and_recover()
    {
        const string subDir = "enc-recover";

        await using (var writer = CreateStore(subDir))
        {
            for (var n = 0; n < 50; n++)
                await writer.AppendAsync("foo", "aes ftw"u8.ToArray(), default);
        }

        // Reopen with the same key.
        await using (var reader = CreateStore(subDir))
        {
            var loaded = await reader.LoadAsync(10, default);
            loaded.ShouldNotBeNull();
            loaded!.Payload.ToArray().ShouldBe("aes ftw"u8.ToArray());

            var state = await reader.GetStateAsync(default);
            state.Messages.ShouldBe((ulong)50);
        }
    }

    // Go: TestFileStoreRestoreEncryptedWithNoKeyFuncFails server/filestore_test.go:5134
    [Fact]
    public async Task Encrypted_data_without_key_throws_on_load()
    {
        const string subDir = "enc-no-key";
        var storeDir = Path.Combine(_dir, subDir);

        // Store with encryption.
        await using (var writer = CreateStore(subDir))
        {
            await writer.AppendAsync("foo", "secret data"u8.ToArray(), default);
        }

        // Reopen with a wrong key. The FileStore constructor calls LoadExisting()
        // which calls RestorePayload(), and that throws InvalidDataException when
        // the envelope key-hash does not match the configured key.
        Should.Throw<InvalidDataException>(() => new FileStore(new FileStoreOptions
        {
            Directory = storeDir,
            EnableEncryption = true,
            EncryptionKey = "wrong-key-wrong-key-wrong-key!!"u8.ToArray(),
            EnablePayloadIntegrityChecks = true,
        }));

        await Task.CompletedTask;
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204
    [Fact]
    public async Task Encrypted_store_remove_and_reload()
    {
        await using var store = CreateStore("enc-remove");

        for (var n = 0; n < 10; n++)
            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{n}"), default);

        await store.RemoveAsync(5, default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)9);

        // The removed sequence is gone; its neighbor is still loadable.
        (await store.LoadAsync(5, default)).ShouldBeNull();
        (await store.LoadAsync(6, default)).ShouldNotBeNull();
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204
    [Fact]
    public async Task Encrypted_purge_and_continue()
    {
        await using var store = CreateStore("enc-purge");

        for (var n = 0; n < 10; n++)
            await store.AppendAsync("foo", "data"u8.ToArray(), default);

        await store.PurgeAsync(default);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)0);

        // The store must keep accepting writes after a purge.
        var seq = await store.AppendAsync("foo", "after purge"u8.ToArray(), default);
        seq.ShouldBeGreaterThan((ulong)0);

        var loaded = await store.LoadAsync(seq, default);
        loaded.ShouldNotBeNull();
        loaded!.Payload.ToArray().ShouldBe("after purge"u8.ToArray());
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204
    [Fact]
    public async Task Encrypted_snapshot_and_restore()
    {
        await using var source = CreateStore("enc-snap-src");

        for (var n = 0; n < 20; n++)
            await source.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{n}"), default);

        var snapshot = await source.CreateSnapshotAsync(default);
        snapshot.Length.ShouldBeGreaterThan(0);

        await using var target = CreateStore("enc-snap-dst");
        await target.RestoreSnapshotAsync(snapshot, default);

        var sourceState = await source.GetStateAsync(default);
        var targetState = await target.GetStateAsync(default);
        targetState.Messages.ShouldBe(sourceState.Messages);

        // Every restored message must match the original payload exactly.
        for (ulong seq = 1; seq <= sourceState.Messages; seq++)
        {
            var expected = await source.LoadAsync(seq, default);
            var actual = await target.LoadAsync(seq, default);
            actual.ShouldNotBeNull();
            actual!.Payload.ToArray().ShouldBe(expected!.Payload.ToArray());
        }
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204
    [Fact]
    public async Task Encrypted_large_payload()
    {
        await using var store = CreateStore("enc-large");

        var randomBytes = new byte[8192];
        Random.Shared.NextBytes(randomBytes);

        await store.AppendAsync("foo", randomBytes, default);

        var loaded = await store.LoadAsync(1, default);
        loaded.ShouldNotBeNull();
        loaded!.Payload.ToArray().ShouldBe(randomBytes);
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204
    [Fact]
    public async Task Encrypted_binary_payload_round_trips()
    {
        await using var store = CreateStore("enc-binary");

        // All byte values.
        var allBytes = Enumerable.Range(0, 256).Select(v => (byte)v).ToArray();

        await store.AppendAsync("foo", allBytes, default);

        var loaded = await store.LoadAsync(1, default);
        loaded.ShouldNotBeNull();
        loaded!.Payload.ToArray().ShouldBe(allBytes);
    }

    // Go: TestFileStoreEncrypted server/filestore_test.go:4204
    [Fact]
    public async Task Encrypted_empty_payload()
    {
        await using var store = CreateStore("enc-empty");

        await store.AppendAsync("foo", ReadOnlyMemory<byte>.Empty, default);

        var loaded = await store.LoadAsync(1, default);
        loaded.ShouldNotBeNull();
        loaded!.Payload.Length.ShouldBe(0);
    }

    // Go: TestFileStoreDoubleCompactWithWriteInBetweenEncryptedBug server/filestore_test.go:3924
    [Fact(Skip = "Compact not yet implemented in .NET FileStore")]
    public async Task Encrypted_double_compact_with_write_in_between()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreEncryptedKeepIndexNeedBekResetBug server/filestore_test.go:3956
    [Fact(Skip = "Block encryption key reset not yet implemented in .NET FileStore")]
    public async Task Encrypted_keep_index_bek_reset()
    {
        await Task.CompletedTask;
    }

    // Verify encryption with no-op key (empty key) does not crash.
    [Fact]
    public async Task Encrypted_with_empty_key_is_noop()
    {
        var options = new FileStoreOptions
        {
            Directory = Path.Combine(_dir, "enc-noop"),
            EnableEncryption = true,
            EncryptionKey = [],
        };
        await using var store = new FileStore(options);

        await store.AppendAsync("foo", "data"u8.ToArray(), default);

        var loaded = await store.LoadAsync(1, default);
        loaded.ShouldNotBeNull();
        loaded!.Payload.ToArray().ShouldBe("data"u8.ToArray());
    }

    // Verify data at rest is not plaintext when encrypted.
    // NOTE(review): the check only runs when "messages.jsonl" exists — verify the
    // file name against the FileStore implementation, or the test is vacuous.
    [Fact]
    public async Task Encrypted_data_not_plaintext_on_disk()
    {
        const string subDir = "enc-disk-check";
        var storeDir = Path.Combine(_dir, subDir);

        await using (var writer = CreateStore(subDir))
        {
            await writer.AppendAsync("foo", "THIS IS SENSITIVE DATA"u8.ToArray(), default);
        }

        // Read the raw data file and verify the plaintext payload does not appear.
        var dataFile = Path.Combine(storeDir, "messages.jsonl");
        if (!File.Exists(dataFile))
            return;

        // The payload is base64-encoded after encryption, so the original
        // plaintext string should not appear verbatim.
        File.ReadAllText(dataFile).ShouldNotContain("THIS IS SENSITIVE DATA");
    }
}
|
||||
@@ -0,0 +1,369 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Go refs:
|
||||
// FilteredState — filestore.go:3191
|
||||
// LoadMsg — filestore.go:8308
|
||||
// NumFiltered — fss generation-based subject-state cache (filestore.go:4950+)
|
||||
//
|
||||
// Tests for Gap 1.10: Query/Filter Operations
|
||||
// - FilteredState with literal, wildcard, and > subject filters
|
||||
// - FilteredState respects start-sequence parameter
|
||||
// - NumFiltered basic count and generation-based cache hit
|
||||
// - LoadMsg block-level binary search fallback
|
||||
// - CheckSkipFirstBlock helper
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class FileStoreFilterQueryTests : IDisposable
|
||||
{
|
||||
private readonly string _dir =
|
||||
Path.Combine(Path.GetTempPath(), $"nats-js-fq-{Guid.NewGuid():N}");
|
||||
|
||||
public FileStoreFilterQueryTests() => Directory.CreateDirectory(_dir);
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (Directory.Exists(_dir))
|
||||
Directory.Delete(_dir, recursive: true);
|
||||
}
|
||||
|
||||
private FileStore CreateStore(string sub, FileStoreOptions? opts = null)
|
||||
{
|
||||
var dir = Path.Combine(_dir, sub);
|
||||
Directory.CreateDirectory(dir);
|
||||
opts ??= new FileStoreOptions { BlockSizeBytes = 64 * 1024 };
|
||||
opts.Directory = dir;
|
||||
return new FileStore(opts);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FilteredState — literal subject filter
|
||||
// Go: TestFileStoreFilteredState (filestore_test.go)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task FilteredState_with_literal_subject()
|
||||
{
|
||||
await using var store = CreateStore("fs-literal");
|
||||
|
||||
await store.AppendAsync("foo", "a"u8.ToArray(), default);
|
||||
await store.AppendAsync("bar", "b"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "c"u8.ToArray(), default);
|
||||
await store.AppendAsync("baz", "d"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "e"u8.ToArray(), default);
|
||||
|
||||
var state = store.FilteredState(seq: 1, subject: "foo");
|
||||
|
||||
state.Msgs.ShouldBe(3UL);
|
||||
state.First.ShouldBe(1UL);
|
||||
state.Last.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task FilteredState_with_literal_subject_no_matches()
|
||||
{
|
||||
await using var store = CreateStore("fs-literal-none");
|
||||
|
||||
await store.AppendAsync("foo", "a"u8.ToArray(), default);
|
||||
await store.AppendAsync("bar", "b"u8.ToArray(), default);
|
||||
|
||||
var state = store.FilteredState(seq: 1, subject: "qux");
|
||||
|
||||
state.Msgs.ShouldBe(0UL);
|
||||
state.First.ShouldBe(0UL);
|
||||
state.Last.ShouldBe(0UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FilteredState — single-token wildcard (*)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task FilteredState_with_wildcard_subject()
|
||||
{
|
||||
await using var store = CreateStore("fs-pwc");
|
||||
|
||||
await store.AppendAsync("foo.a", "1"u8.ToArray(), default); // seq 1
|
||||
await store.AppendAsync("foo.b", "2"u8.ToArray(), default); // seq 2
|
||||
await store.AppendAsync("bar.a", "3"u8.ToArray(), default); // seq 3
|
||||
await store.AppendAsync("foo.c", "4"u8.ToArray(), default); // seq 4
|
||||
|
||||
var state = store.FilteredState(seq: 1, subject: "foo.*");
|
||||
|
||||
state.Msgs.ShouldBe(3UL);
|
||||
state.First.ShouldBe(1UL);
|
||||
state.Last.ShouldBe(4UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FilteredState — full wildcard (>)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task FilteredState_with_gt_wildcard()
|
||||
{
|
||||
await using var store = CreateStore("fs-fwc");
|
||||
|
||||
await store.AppendAsync("events.created.user", "1"u8.ToArray(), default); // seq 1
|
||||
await store.AppendAsync("events.deleted.post", "2"u8.ToArray(), default); // seq 2
|
||||
await store.AppendAsync("metrics.cpu", "3"u8.ToArray(), default); // seq 3
|
||||
await store.AppendAsync("events.updated.user", "4"u8.ToArray(), default); // seq 4
|
||||
|
||||
var state = store.FilteredState(seq: 1, subject: "events.>");
|
||||
|
||||
state.Msgs.ShouldBe(3UL);
|
||||
state.First.ShouldBe(1UL);
|
||||
state.Last.ShouldBe(4UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FilteredState — respects start sequence
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task FilteredState_starts_from_sequence()
|
||||
{
|
||||
await using var store = CreateStore("fs-startseq");
|
||||
|
||||
await store.AppendAsync("foo", "1"u8.ToArray(), default); // seq 1
|
||||
await store.AppendAsync("foo", "2"u8.ToArray(), default); // seq 2
|
||||
await store.AppendAsync("bar", "3"u8.ToArray(), default); // seq 3
|
||||
await store.AppendAsync("foo", "4"u8.ToArray(), default); // seq 4
|
||||
await store.AppendAsync("foo", "5"u8.ToArray(), default); // seq 5
|
||||
|
||||
// Start at seq=3: should see foo@4 and foo@5 (not foo@1 or foo@2).
|
||||
var state = store.FilteredState(seq: 3, subject: "foo");
|
||||
|
||||
state.Msgs.ShouldBe(2UL);
|
||||
state.First.ShouldBe(4UL);
|
||||
state.Last.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task FilteredState_empty_subject_counts_all()
|
||||
{
|
||||
await using var store = CreateStore("fs-empty-subj");
|
||||
|
||||
await store.AppendAsync("a", "1"u8.ToArray(), default);
|
||||
await store.AppendAsync("b", "2"u8.ToArray(), default);
|
||||
await store.AppendAsync("c", "3"u8.ToArray(), default);
|
||||
|
||||
var state = store.FilteredState(seq: 1, subject: "");
|
||||
|
||||
state.Msgs.ShouldBe(3UL);
|
||||
state.First.ShouldBe(1UL);
|
||||
state.Last.ShouldBe(3UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// NumFiltered — basic count
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task NumFiltered_counts_matching_messages()
|
||||
{
|
||||
await using var store = CreateStore("nf-basic");
|
||||
|
||||
await store.AppendAsync("orders.new", "a"u8.ToArray(), default);
|
||||
await store.AppendAsync("orders.paid", "b"u8.ToArray(), default);
|
||||
await store.AppendAsync("shipments.new", "c"u8.ToArray(), default);
|
||||
await store.AppendAsync("orders.new", "d"u8.ToArray(), default);
|
||||
|
||||
store.NumFiltered("orders.>").ShouldBe(3UL);
|
||||
store.NumFiltered("orders.new").ShouldBe(2UL);
|
||||
store.NumFiltered("shipments.new").ShouldBe(1UL);
|
||||
store.NumFiltered("other.*").ShouldBe(0UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// NumFiltered — generation-based cache hit
|
||||
// The same filter called twice with no writes in between must return the same
|
||||
// count. We verify correctness rather than timing; the generation counter
|
||||
// ensures the second call hits the cache rather than re-scanning.
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task NumFiltered_caches_result_across_identical_calls()
{
    await using var store = CreateStore("nf-cache");

    await store.AppendAsync("telemetry.a", "x"u8.ToArray(), default);
    await store.AppendAsync("telemetry.b", "y"u8.ToArray(), default);
    await store.AppendAsync("other", "z"u8.ToArray(), default);

    // Two identical queries with no intervening writes: the second is served
    // from the generation-keyed cache and must agree with the first scan.
    var direct = store.NumFiltered("telemetry.*");
    var cached = store.NumFiltered("telemetry.*");

    direct.ShouldBe(2UL);
    cached.ShouldBe(2UL, "cached result should equal direct scan result");
}
|
||||
|
||||
[Fact]
public async Task NumFiltered_cache_invalidated_after_append()
{
    await using var store = CreateStore("nf-invalidate");

    await store.AppendAsync("foo", "a"u8.ToArray(), default);
    store.NumFiltered("foo").ShouldBe(1UL);

    // Appending bumps the generation counter, so the cached count for the
    // same filter must not be reused.
    await store.AppendAsync("foo", "b"u8.ToArray(), default);
    store.NumFiltered("foo").ShouldBe(2UL);
}
|
||||
|
||||
[Fact]
public async Task NumFiltered_cache_invalidated_after_remove()
{
    await using var store = CreateStore("nf-remove");

    var firstSeq = await store.AppendAsync("foo", "a"u8.ToArray(), default);
    await store.AppendAsync("foo", "b"u8.ToArray(), default);
    store.NumFiltered("foo").ShouldBe(2UL);

    // Removals must invalidate the cached count just like appends do.
    await store.RemoveAsync(firstSeq, default);
    store.NumFiltered("foo").ShouldBe(1UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// LoadMsg — finds correct message (fast path: in-memory)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
public void LoadMsg_finds_correct_block()
{
    // A tiny block size spreads the three messages over multiple block files,
    // so LoadMsg has to locate the right block before reading.
    using var store = CreateStore("lm-block", new FileStoreOptions
    {
        BlockSizeBytes = 128,
    });

    foreach (var payload in new[] { "first"u8.ToArray(), "second"u8.ToArray(), "third"u8.ToArray() })
        _ = store.StoreMsg($"subj.{payload.Length}", hdr: null, msg: payload, ttl: 0);

    // Seq 2 holds "second" (6 bytes), stored under "subj.6".
    var sm = store.LoadMsg(2, null);
    sm.ShouldNotBeNull();
    sm.Sequence.ShouldBe(2UL);
    sm.Subject.ShouldBe("subj.6");
}
|
||||
|
||||
[Fact]
public void LoadMsg_throws_for_missing_sequence()
{
    using var store = CreateStore("lm-missing");
    store.StoreMsg("foo", null, "data"u8.ToArray(), 0);

    // Only seq 1 exists; an out-of-range lookup must throw, not return null.
    var act = () => store.LoadMsg(99, null);
    Should.Throw<KeyNotFoundException>(act);
}
|
||||
|
||||
[Fact]
public void LoadMsg_reuses_provided_sm()
{
    using var store = CreateStore("lm-reuse");
    store.StoreMsg("reuse.subject", null, "payload"u8.ToArray(), 0);

    // A caller-owned StoreMsg is filled in place and handed straight back,
    // avoiding a per-load allocation.
    var scratch = new StoreMsg();
    store.LoadMsg(1, scratch).ShouldBeSameAs(scratch);

    scratch.Subject.ShouldBe("reuse.subject");
    scratch.Sequence.ShouldBe(1UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// CheckSkipFirstBlock — skips empty blocks
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void CheckSkipFirstBlock_skips_empty_block()
|
||||
{
|
||||
using var block = MsgBlock.Create(
|
||||
blockId: 1,
|
||||
directoryPath: Path.Combine(_dir, "skip-empty"),
|
||||
maxBytes: 1024 * 1024);
|
||||
|
||||
// An empty block (no messages) should always be skippable.
|
||||
FileStore.CheckSkipFirstBlock("foo.*", block).ShouldBeTrue(
|
||||
"an empty block has no messages and can always be skipped");
|
||||
}
|
||||
|
||||
[Fact]
public void CheckSkipFirstBlock_does_not_skip_block_with_messages()
{
    var blockDir = Path.Combine(_dir, "skip-nonempty");
    Directory.CreateDirectory(blockDir);

    using var block = MsgBlock.Create(1, blockDir, 1024 * 1024);
    block.Write("foo.bar", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray());

    // With no per-block subject metadata the conservative answer is "scan it".
    var skip = FileStore.CheckSkipFirstBlock("foo.*", block);
    skip.ShouldBeFalse(
        "a non-empty block cannot be skipped without per-block subject metadata");
}
|
||||
|
||||
[Fact]
public void CheckSkipFirstBlock_does_not_skip_for_empty_filter()
{
    var blockDir = Path.Combine(_dir, "skip-emptyfilter");
    Directory.CreateDirectory(blockDir);

    using var block = MsgBlock.Create(1, blockDir, 1024 * 1024);
    block.Write("foo", ReadOnlyMemory<byte>.Empty, "x"u8.ToArray());

    // An empty filter matches every subject, so skipping is never legal here.
    var skip = FileStore.CheckSkipFirstBlock("", block);
    skip.ShouldBeFalse(
        "empty filter matches all subjects, so the block must not be skipped");
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FindFirstBlockAtOrAfter — block-range binary search (via FilteredState)
|
||||
// Verifies that FilteredState returns the correct result when data spans
|
||||
// multiple sealed blocks.
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
public void FilteredState_correct_across_multiple_blocks()
{
    // A 128-byte block size guarantees the data spans several sealed blocks.
    using var store = CreateStore("fs-multiblock", new FileStoreOptions
    {
        BlockSizeBytes = 128,
    });

    // Alternate even/odd subjects across ten messages.
    for (var i = 0; i < 10; i++)
    {
        var subject = i % 2 == 0 ? "events.even" : "events.odd";
        store.StoreMsg(subject, null, System.Text.Encoding.UTF8.GetBytes($"msg{i}"), 0);
    }

    store.BlockCount.ShouldBeGreaterThan(1, "test requires multiple blocks");

    store.FilteredState(seq: 1, subject: "events.even").Msgs.ShouldBe(5UL);
    store.FilteredState(seq: 1, subject: "events.odd").Msgs.ShouldBe(5UL);
}
|
||||
|
||||
[Fact]
public void FilteredState_start_seq_beyond_first_block()
{
    // Tiny blocks force the ten messages across several block files, so the
    // start sequence lands beyond the first block.
    using var store = CreateStore("fs-beyondblock", new FileStoreOptions
    {
        BlockSizeBytes = 128,
    });

    for (var seq = 1; seq <= 10; seq++)
        store.StoreMsg("topic", null, System.Text.Encoding.UTF8.GetBytes($"m{seq}"), 0);

    store.BlockCount.ShouldBeGreaterThan(1, "test requires multiple blocks");

    // Only the tail half (seq 6..10) should be reported.
    var state = store.FilteredState(seq: 6, subject: "topic");

    state.Msgs.ShouldBe(5UL);
    state.First.ShouldBe(6UL);
    state.Last.ShouldBe(10UL);
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,362 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStoreMsgLimit, TestFileStoreMsgLimitBug,
|
||||
// TestFileStoreBytesLimit, TestFileStoreBytesLimitWithDiscardNew,
|
||||
// TestFileStoreAgeLimit, TestFileStoreMaxMsgsPerSubject,
|
||||
// TestFileStoreMaxMsgsAndMaxMsgsPerSubject,
|
||||
// TestFileStoreUpdateMaxMsgsPerSubject
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class FileStoreLimitsTests : IDisposable
{
    // Per-fixture temp root; every test works in its own subdirectory.
    private readonly string _dir;

    public FileStoreLimitsTests()
    {
        _dir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-limits-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_dir);
    }

    public void Dispose()
    {
        if (!Directory.Exists(_dir))
            return;
        Directory.Delete(_dir, recursive: true);
    }

    /// <summary>Opens (or re-opens) a FileStore rooted at a per-test subdirectory.</summary>
    private FileStore CreateStore(string subdirectory, FileStoreOptions? options = null)
    {
        var opts = options ?? new FileStoreOptions();
        opts.Directory = Path.Combine(_dir, subdirectory);
        return new FileStore(opts);
    }

    /// <summary>Appends <paramref name="count"/> copies of <paramref name="payload"/> on <paramref name="subject"/>.</summary>
    private static async Task AppendManyAsync(FileStore store, string subject, byte[] payload, int count)
    {
        for (var i = 0; i < count; i++)
            await store.AppendAsync(subject, payload, default);
    }

    // Go: TestFileStoreMsgLimit server/filestore_test.go:484
    [Fact]
    public async Task TrimToMaxMessages_maintains_limit()
    {
        await using var store = CreateStore("msg-limit");
        var payload = "Hello World"u8.ToArray();

        await AppendManyAsync(store, "foo", payload, 10);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)10);

        // Go one over the limit, then trim back down to 10.
        await store.AppendAsync("foo", payload, default);
        store.TrimToMaxMessages(10);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)10);
        state.LastSeq.ShouldBe((ulong)11);
        state.FirstSeq.ShouldBe((ulong)2);

        // The oldest message (seq 1) was evicted by the trim.
        (await store.LoadAsync(1, default)).ShouldBeNull();
    }

    // Go: TestFileStoreMsgLimitBug server/filestore_test.go:518
    [Fact]
    public async Task TrimToMaxMessages_one_across_restart()
    {
        const string subDir = "msg-limit-bug";

        await using (var store = CreateStore(subDir))
        {
            await AppendManyAsync(store, "foo", "Hello World"u8.ToArray(), 2);
            store.TrimToMaxMessages(1);
        }

        // Re-open: the trimmed state must have persisted, and trimming again
        // after one more append must still leave exactly one message.
        await using (var store = CreateStore(subDir))
        {
            (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)1);

            await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);
            store.TrimToMaxMessages(1);

            (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)1);
        }
    }

    // Go: TestFileStoreMsgLimit server/filestore_test.go:484
    [Fact]
    public async Task TrimToMaxMessages_repeated_trims()
    {
        await using var store = CreateStore("repeated-trim");

        for (var i = 0; i < 20; i++)
            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

        // Each successive trim drops the oldest messages and advances FirstSeq.
        foreach (var (limit, expectedFirstSeq) in new (int, ulong)[] { (10, 11), (5, 16), (1, 20) })
        {
            store.TrimToMaxMessages(limit);

            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe((ulong)limit);
            state.FirstSeq.ShouldBe(expectedFirstSeq);
        }
    }

    // Go: TestFileStoreBytesLimit server/filestore_test.go:537
    [Fact]
    public async Task Bytes_accumulate_correctly()
    {
        await using var store = CreateStore("bytes-accum");

        const int count = 10;
        await AppendManyAsync(store, "foo", new byte[512], count);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)count);
        state.Bytes.ShouldBe((ulong)(count * 512));
    }

    // Go: TestFileStoreBytesLimit server/filestore_test.go:537
    [Fact]
    public async Task TrimToMaxMessages_reduces_bytes()
    {
        await using var store = CreateStore("bytes-trim");

        await AppendManyAsync(store, "foo", new byte[100], 10);
        (await store.GetStateAsync(default)).Bytes.ShouldBe((ulong)1000);

        store.TrimToMaxMessages(5);

        var after = await store.GetStateAsync(default);
        after.Messages.ShouldBe((ulong)5);
        after.Bytes.ShouldBe((ulong)500);
    }

    // Go: TestFileStoreAgeLimit server/filestore_test.go:616
    [Fact]
    public async Task MaxAge_expires_old_messages()
    {
        await using var store = CreateStore("age-limit", new FileStoreOptions { MaxAgeMs = 200 });

        await AppendManyAsync(store, "foo", "Hello World"u8.ToArray(), 5);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)5);

        // Let everything age past MaxAgeMs, then append to trigger pruning.
        await Task.Delay(300);
        await store.AppendAsync("foo", "trigger"u8.ToArray(), default);

        // Only the freshly-appended trigger message should survive.
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)1);
    }

    // Go: TestFileStoreAgeLimit server/filestore_test.go:660
    [Fact]
    public async Task MaxAge_timer_fires_again_for_second_batch()
    {
        await using var store = CreateStore("age-second-batch", new FileStoreOptions { MaxAgeMs = 200 });

        // Two rounds: each batch must expire independently, proving the age
        // timer keeps firing after the first expiration pass.
        foreach (var round in new[] { 1, 2 })
        {
            await AppendManyAsync(store, "foo", Encoding.UTF8.GetBytes($"batch{round}"), 3);
            await Task.Delay(300);

            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"trigger{round}"), default);
            (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)1);
        }
    }

    // Go: TestFileStoreAgeLimit server/filestore_test.go:616
    [Fact]
    public async Task MaxAge_zero_means_no_expiration()
    {
        await using var store = CreateStore("age-zero", new FileStoreOptions { MaxAgeMs = 0 });

        await AppendManyAsync(store, "foo", "Hello"u8.ToArray(), 5);
        await Task.Delay(100);

        // With MaxAgeMs disabled, the pruning pass on append must remove nothing.
        await store.AppendAsync("foo", "trigger"u8.ToArray(), default);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)6);
    }

    // Go: TestFileStoreMsgLimit server/filestore_test.go:484
    [Fact]
    public async Task TrimToMaxMessages_zero_removes_all()
    {
        await using var store = CreateStore("trim-zero");

        await AppendManyAsync(store, "foo", "data"u8.ToArray(), 5);
        store.TrimToMaxMessages(0);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)0);
    }

    // Go: TestFileStoreMsgLimit server/filestore_test.go:484
    [Fact]
    public async Task TrimToMaxMessages_larger_than_count_is_noop()
    {
        await using var store = CreateStore("trim-noop");

        await AppendManyAsync(store, "foo", "data"u8.ToArray(), 5);
        store.TrimToMaxMessages(100);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)5);
        state.FirstSeq.ShouldBe((ulong)1);
    }

    // Go: TestFileStoreBytesLimit server/filestore_test.go:537
    [Fact]
    public async Task Bytes_decrease_after_remove()
    {
        await using var store = CreateStore("bytes-rm");

        await AppendManyAsync(store, "foo", new byte[100], 5);
        (await store.GetStateAsync(default)).Bytes.ShouldBe((ulong)500);

        await store.RemoveAsync(1, default);
        await store.RemoveAsync(3, default);

        (await store.GetStateAsync(default)).Bytes.ShouldBe((ulong)300);
    }

    // Go: TestFileStoreBytesLimitWithDiscardNew server/filestore_test.go:583
    [Fact(Skip = "DiscardNew policy not yet implemented in .NET FileStore")]
    public async Task Bytes_limit_with_discard_new_rejects_over_limit()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreMaxMsgsPerSubject server/filestore_test.go:4065
    [Fact(Skip = "MaxMsgsPerSubject not yet implemented in .NET FileStore")]
    public async Task MaxMsgsPerSubject_enforces_per_subject_limit()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreMaxMsgsAndMaxMsgsPerSubject server/filestore_test.go:4098
    [Fact(Skip = "MaxMsgsPerSubject not yet implemented in .NET FileStore")]
    public async Task MaxMsgs_and_MaxMsgsPerSubject_combined()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreUpdateMaxMsgsPerSubject server/filestore_test.go:4563
    [Fact(Skip = "UpdateConfig not yet implemented in .NET FileStore")]
    public async Task UpdateConfig_changes_MaxMsgsPerSubject()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreMsgLimit server/filestore_test.go:484
    [Fact]
    public async Task TrimToMaxMessages_persists_across_restart()
    {
        const string subDir = "trim-persist";

        await using (var store = CreateStore(subDir))
        {
            await AppendManyAsync(store, "foo", "data"u8.ToArray(), 20);
            store.TrimToMaxMessages(5);
        }

        await using (var store = CreateStore(subDir))
        {
            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe((ulong)5);
            state.FirstSeq.ShouldBe((ulong)16);
            state.LastSeq.ShouldBe((ulong)20);
        }
    }

    // Go: TestFileStoreAgeLimit server/filestore_test.go:616
    [Fact]
    public async Task MaxAge_with_interior_deletes()
    {
        await using var store = CreateStore("age-interior", new FileStoreOptions { MaxAgeMs = 200 });

        await AppendManyAsync(store, "foo", "Hello"u8.ToArray(), 10);

        // Punch holes in the middle of the stream before anything expires.
        foreach (var seq in new ulong[] { 3, 5, 7 })
            await store.RemoveAsync(seq, default);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)7);

        await Task.Delay(300);

        // Age-based pruning must cope with the interior delete map.
        await store.AppendAsync("foo", "trigger"u8.ToArray(), default);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)1);
    }

    // Go: TestFileStoreMsgLimit server/filestore_test.go:484
    [Fact]
    public async Task Sequence_numbers_monotonically_increase_through_trimming()
    {
        await using var store = CreateStore("seq-mono");

        for (var i = 1; i <= 15; i++)
            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

        store.TrimToMaxMessages(5);

        var state = await store.GetStateAsync(default);
        state.LastSeq.ShouldBe((ulong)15);
        state.FirstSeq.ShouldBe((ulong)11);

        // Sequences keep climbing after a trim — they are never reused.
        (await store.AppendAsync("foo", "after-trim"u8.ToArray(), default)).ShouldBe((ulong)16);

        state = await store.GetStateAsync(default);
        state.LastSeq.ShouldBe((ulong)16);
        state.Messages.ShouldBe((ulong)6);
    }
}
|
||||
@@ -0,0 +1,930 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Go's testFileStoreAllPermutations (line 55) runs every test across 6 combinations:
|
||||
// {NoCipher, ChaCha, AES} x {NoCompression, S2Compression}
|
||||
// This file ports 16 representative tests from that matrix to .NET using
|
||||
// [Theory] + [MemberData] so each test case executes all 6 permutations
|
||||
// automatically, giving ~96 total executions.
|
||||
//
|
||||
// Covered Go tests (each appears 6 times):
|
||||
// TestFileStoreBasics (line 86)
|
||||
// TestFileStoreMsgHeaders (line 152)
|
||||
// TestFileStoreBasicWriteMsgsAndRestore (line 181)
|
||||
// TestFileStoreSelectNextFirst (line 304)
|
||||
// TestFileStoreMsgLimit (line 484)
|
||||
// TestFileStoreMsgLimitBug (line 518)
|
||||
// TestFileStoreBytesLimit (line 537)
|
||||
// TestFileStoreAgeLimit (line 616)
|
||||
// TestFileStoreTimeStamps (line 683)
|
||||
// TestFileStorePurge (line 710)
|
||||
// TestFileStoreCollapseDmap (line 1561)
|
||||
// TestFileStoreWriteAndReadSameBlock (line 1510)
|
||||
// TestFileStoreAndRetrieveMultiBlock (line 1527)
|
||||
// TestFileStoreSnapshot (line 1799)
|
||||
// TestFileStoreBasics (large payload variant)
|
||||
// TestFileStoreBasics (sequential ordering variant)
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class FileStorePermutationTests : IDisposable
|
||||
{
|
||||
private readonly string _dir;
|
||||
|
||||
public FileStorePermutationTests()
{
    // Unique per-fixture temp root so parallel test runs never collide.
    var root = Path.Combine(Path.GetTempPath(), $"nats-js-fs-perm-{Guid.NewGuid():N}");
    Directory.CreateDirectory(root);
    _dir = root;
}
|
||||
|
||||
public void Dispose()
{
    // Best-effort cleanup of the fixture's temp root.
    if (!Directory.Exists(_dir))
        return;
    Directory.Delete(_dir, recursive: true);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Permutation matrix: {NoCipher, ChaCha, Aes} x {NoCompression, S2Compression}
|
||||
// Mirrors Go's testFileStoreAllPermutations (filestore_test.go:55).
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
public static IEnumerable<object[]> AllPermutations()
{
    // 3 ciphers x 2 compressions = 6 permutations per [Theory].
    StoreCipher[] ciphers = [StoreCipher.NoCipher, StoreCipher.ChaCha, StoreCipher.Aes];
    StoreCompression[] compressions = [StoreCompression.NoCompression, StoreCompression.S2Compression];

    foreach (var cipher in ciphers)
    {
        foreach (var compression in compressions)
            yield return [cipher, compression];
    }
}
|
||||
|
||||
/// <summary>
/// Builds a FileStore configured for one cipher/compression permutation,
/// generating a fresh 32-byte key when a cipher is in use.
/// Mirrors Go's prf() + newFileStoreWithCreated() pattern (filestore_test.go:73-84).
/// </summary>
private FileStore CreatePermutedStore(string subdir, StoreCipher cipher, StoreCompression compression,
    FileStoreOptions? extraOptions = null)
{
    byte[]? key = null;
    if (cipher != StoreCipher.NoCipher)
    {
        key = new byte[32];
        Random.Shared.NextBytes(key);
    }

    var options = extraOptions ?? new FileStoreOptions();
    options.Directory = Path.Combine(_dir, subdir);
    options.Cipher = cipher;
    options.Compression = compression;
    options.EncryptionKey = key;
    // Legacy boolean flags kept in sync with the enum-based settings so
    // existing code paths are not confused.
    options.EnableEncryption = cipher != StoreCipher.NoCipher;
    options.EnableCompression = compression != StoreCompression.NoCompression;
    return new FileStore(options);
}
|
||||
|
||||
/// <summary>
/// Re-opens a permuted store with a caller-supplied key (the one used when
/// the store was first created) so encrypted recovery tests can decrypt
/// previously written blocks.
/// </summary>
private FileStore ReopenPermutedStore(string subdir, StoreCipher cipher, StoreCompression compression,
    byte[]? key, FileStoreOptions? extraOptions = null)
{
    var options = extraOptions ?? new FileStoreOptions();
    options.Directory = Path.Combine(_dir, subdir);
    options.Cipher = cipher;
    options.Compression = compression;
    options.EncryptionKey = key;
    // Legacy boolean flags kept in sync with the enum-based settings.
    options.EnableEncryption = cipher != StoreCipher.NoCipher;
    options.EnableCompression = compression != StoreCompression.NoCompression;
    return new FileStore(options);
}
|
||||
|
||||
// Helper: build a stable subdir name from the permutation so test isolation is clear.
|
||||
// Stable per-permutation subdirectory name, e.g. "basic-ChaCha-S2Compression".
private static string PermSubdir(string prefix, StoreCipher cipher, StoreCompression compression)
{
    return string.Join("-", prefix, cipher, compression);
}
|
||||
|
||||
// Helper: extract the key from an already-created store's options by re-reading the
|
||||
// options that were supplied. Because we cannot reach into the store's private field,
|
||||
// we use a separate dictionary keyed by subdir name.
|
||||
private readonly Dictionary<string, byte[]?> _keyStore = new();
|
||||
|
||||
// Like CreatePermutedStore, but remembers the generated key in _keyStore
// (keyed by subdir) so ReopenTrackedStore can re-open with the same key.
private FileStore CreatePermutedStoreTracked(string subdir, StoreCipher cipher, StoreCompression compression,
    FileStoreOptions? extraOptions = null)
{
    static byte[] NewKey()
    {
        var k = new byte[32];
        Random.Shared.NextBytes(k);
        return k;
    }

    var key = cipher == StoreCipher.NoCipher ? null : NewKey();
    _keyStore[subdir] = key;

    var options = extraOptions ?? new FileStoreOptions();
    options.Directory = Path.Combine(_dir, subdir);
    options.Cipher = cipher;
    options.Compression = compression;
    options.EncryptionKey = key;
    // Legacy boolean flags kept in sync with the enum-based settings.
    options.EnableEncryption = cipher != StoreCipher.NoCipher;
    options.EnableCompression = compression != StoreCompression.NoCompression;
    return new FileStore(options);
}
|
||||
|
||||
// Re-opens a store created by CreatePermutedStoreTracked, recovering the
// original encryption key from _keyStore (null for NoCipher permutations).
private FileStore ReopenTrackedStore(string subdir, StoreCipher cipher, StoreCompression compression,
    FileStoreOptions? extraOptions = null)
{
    var key = _keyStore.GetValueOrDefault(subdir);

    var options = extraOptions ?? new FileStoreOptions();
    options.Directory = Path.Combine(_dir, subdir);
    options.Cipher = cipher;
    options.Compression = compression;
    options.EncryptionKey = key;
    // Legacy boolean flags kept in sync with the enum-based settings.
    options.EnableEncryption = cipher != StoreCipher.NoCipher;
    options.EnableCompression = compression != StoreCompression.NoCompression;
    return new FileStore(options);
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 1: Basic store and load round-trip
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86
|
||||
// =========================================================================
|
||||
|
||||
[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Store_and_load_basic(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics line 86 — store 5 messages and load by sequence.
    await using var store = CreatePermutedStore(
        PermSubdir("basic", cipher, compression), cipher, compression);

    var body = "Hello World"u8.ToArray();

    // AppendAsync hands back monotonically increasing sequence numbers.
    for (ulong expected = 1; expected <= 5; expected++)
        (await store.AppendAsync("foo", body, default)).ShouldBe(expected);

    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)5);

    // Payload round-trips through the cipher/compression pipeline intact.
    var msg2 = await store.LoadAsync(2, default);
    msg2.ShouldNotBeNull();
    msg2!.Subject.ShouldBe("foo");
    msg2.Payload.ToArray().ShouldBe(body);

    var msg3 = await store.LoadAsync(3, default);
    msg3.ShouldNotBeNull();
    msg3!.Subject.ShouldBe("foo");
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 2: Store multiple messages, load by sequence
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86 (extended variant)
|
||||
// =========================================================================
|
||||
|
||||
[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Store_multiple_messages_load_by_sequence(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics — every stored message is independently loadable.
    await using var store = CreatePermutedStore(
        PermSubdir("multi-seq", cipher, compression), cipher, compression);

    const int count = 20;
    for (var i = 0; i < count; i++)
        await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i:D4}"), default);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)count);
    state.FirstSeq.ShouldBe((ulong)1);
    state.LastSeq.ShouldBe((ulong)count);

    // Read every sequence back and check both subject and payload.
    for (ulong seq = 1; seq <= count; seq++)
    {
        var msg = await store.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Subject.ShouldBe("foo");
        msg.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes($"msg-{(int)(seq - 1):D4}"));
    }
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 3: LoadLastBySubjectAsync
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:86 (per-subject lookup)
|
||||
// =========================================================================
|
||||
|
||||
[Theory]
[MemberData(nameof(AllPermutations))]
public async Task LoadLastBySubject_returns_most_recent_for_subject(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics — per-subject last-message lookup.
    await using var store = CreatePermutedStore(
        PermSubdir("last-subj", cipher, compression), cipher, compression);

    await store.AppendAsync("foo", "first"u8.ToArray(), default);
    await store.AppendAsync("bar", "other"u8.ToArray(), default);
    await store.AppendAsync("foo", "second"u8.ToArray(), default);
    await store.AppendAsync("foo", "third"u8.ToArray(), default);

    // "foo" was last written at seq 4 with payload "third".
    var newest = await store.LoadLastBySubjectAsync("foo", default);
    newest.ShouldNotBeNull();
    newest!.Payload.ToArray().ShouldBe("third"u8.ToArray());
    newest.Sequence.ShouldBe((ulong)4);
    newest.Subject.ShouldBe("foo");

    // Unknown subjects yield null rather than throwing.
    (await store.LoadLastBySubjectAsync("does.not.exist", default)).ShouldBeNull();
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 4: RemoveAsync single message
|
||||
// Go: TestFileStoreBasics server/filestore_test.go:129
|
||||
// =========================================================================
|
||||
|
||||
[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Remove_single_message_updates_state(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics remove section (line 129).
    await using var store = CreatePermutedStore(
        PermSubdir("remove-single", cipher, compression), cipher, compression);

    var body = "Hello World"u8.ToArray();
    for (var i = 0; i < 5; i++)
        await store.AppendAsync("foo", body, default);

    // Remove first, last, then a middle message; the count drops each time.
    ulong remaining = 5;
    foreach (var seq in new ulong[] { 1, 5, 3 })
    {
        (await store.RemoveAsync(seq, default)).ShouldBeTrue();
        remaining--;
        (await store.GetStateAsync(default)).Messages.ShouldBe(remaining);
    }

    // Survivors stay loadable.
    (await store.LoadAsync(2, default)).ShouldNotBeNull();
    (await store.LoadAsync(4, default)).ShouldNotBeNull();

    // Removed sequences read back as null.
    (await store.LoadAsync(1, default)).ShouldBeNull();
    (await store.LoadAsync(3, default)).ShouldBeNull();
    (await store.LoadAsync(5, default)).ShouldBeNull();
}
|
||||
|
||||
// =========================================================================
// Test 5: PurgeAsync clears all messages
// Go: TestFileStorePurge server/filestore_test.go:710
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Purge_clears_all_messages(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStorePurge line 710 — purge empties the store.
    var dirName = PermSubdir("purge", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    for (var n = 0; n < 20; n++)
        await store.AppendAsync("foo", "Hello"u8.ToArray(), default);
    (await store.GetStateAsync(default)).Messages.ShouldBe(20UL);

    await store.PurgeAsync(default);

    // Purge zeroes both message count and byte accounting.
    var after = await store.GetStateAsync(default);
    after.Messages.ShouldBe(0UL);
    after.Bytes.ShouldBe(0UL);

    // The store stays usable: a post-purge append round-trips.
    var seq = await store.AppendAsync("foo", "after purge"u8.ToArray(), default);
    seq.ShouldBeGreaterThan(0UL);

    var loaded = await store.LoadAsync(seq, default);
    loaded.ShouldNotBeNull();
    loaded!.Payload.ToArray().ShouldBe("after purge"u8.ToArray());
}
|
||||
|
||||
// =========================================================================
// Test 6: TrimToMaxMessages enforcement
// Go: TestFileStoreMsgLimitBug server/filestore_test.go:518
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task TrimToMaxMessages_enforces_limit(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreMsgLimitBug line 518.
    var dirName = PermSubdir("trim-limit", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

    store.TrimToMaxMessages(5);

    // Trimming evicts the oldest half; first/last sequences shift accordingly.
    var s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(5UL);
    s.FirstSeq.ShouldBe(6UL);
    s.LastSeq.ShouldBe(10UL);

    // Sequences 1-5 are gone; 6-10 remain loadable.
    for (ulong seq = 1; seq <= 10; seq++)
    {
        var loaded = await store.LoadAsync(seq, default);
        if (seq <= 5)
            loaded.ShouldBeNull();
        else
            loaded.ShouldNotBeNull();
    }
}
|
||||
|
||||
// =========================================================================
// Test 7: Block rotation when exceeding block size
// Go: TestFileStoreAndRetrieveMultiBlock server/filestore_test.go:1527
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Block_rotation_when_exceeding_block_size(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreAndRetrieveMultiBlock line 1527 — small block forces rotation.
    // Both the initial and the reopened store must share the same key so
    // the encrypted data file can be decrypted on reopen.
    var subdir = PermSubdir("multi-block", cipher, compression);

    // Generate a single key for the lifetime of this test (reopen must reuse it).
    byte[]? key = null;
    if (cipher != StoreCipher.NoCipher)
    {
        key = new byte[32];
        Random.Shared.NextBytes(key);
    }

    // Both opens must use byte-identical options; build them in one place so
    // the two copies cannot drift apart (the original duplicated the literal).
    FileStoreOptions BuildOptions() => new()
    {
        BlockSizeBytes = 256,
        Cipher = cipher,
        Compression = compression,
        EncryptionKey = key,
        EnableCompression = compression != StoreCompression.NoCompression,
        EnableEncryption = cipher != StoreCipher.NoCipher,
        Directory = Path.Combine(_dir, subdir),
    };

    await using (var store = new FileStore(BuildOptions()))
    {
        for (var i = 0; i < 20; i++)
            await store.AppendAsync("foo", "Hello World!"u8.ToArray(), default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)20);
        // With a 256-byte block and ~100 bytes per record, multiple blocks form.
        store.BlockCount.ShouldBeGreaterThan(1);
    }

    // Reopen with the same key — all messages must survive block rotation.
    await using (var store = new FileStore(BuildOptions()))
    {
        for (ulong i = 1; i <= 20; i++)
        {
            var msg = await store.LoadAsync(i, default);
            msg.ShouldNotBeNull();
            msg!.Subject.ShouldBe("foo");
        }
    }
}
|
||||
|
||||
// =========================================================================
// Test 8: GetStateAsync returns correct counts
// Go: TestFileStoreBasics server/filestore_test.go:104
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task GetState_returns_correct_counts(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics — state tracks Msgs, Bytes, FirstSeq, LastSeq.
    var dirName = PermSubdir("state-counts", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    // Fresh store: every counter starts at zero.
    var s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(0UL);
    s.Bytes.ShouldBe(0UL);
    s.FirstSeq.ShouldBe(0UL);
    s.LastSeq.ShouldBe(0UL);

    var body = new byte[100];
    for (var n = 0; n < 5; n++)
        await store.AppendAsync("foo", body, default);

    // Five 100-byte messages: counters reflect the appends.
    s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(5UL);
    s.Bytes.ShouldBe(500UL);
    s.FirstSeq.ShouldBe(1UL);
    s.LastSeq.ShouldBe(5UL);

    // Removing head and tail moves the sequence bounds inward.
    await store.RemoveAsync(1, default);
    await store.RemoveAsync(5, default);

    s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(3UL);
    s.FirstSeq.ShouldBe(2UL);
    s.LastSeq.ShouldBe(4UL);
    s.Bytes.ShouldBe(300UL);
}
|
||||
|
||||
// =========================================================================
// Test 9: CreateSnapshotAsync and RestoreSnapshotAsync round-trip
// Go: TestFileStoreSnapshot server/filestore_test.go:1799
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Snapshot_and_restore_round_trip(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreSnapshot line 1799.
    //
    // The snapshot blob is produced by CreateSnapshotAsync which calls
    // TransformForPersist on each message (i.e. the data is encrypted with the
    // src store's key before being embedded in the snapshot). RestoreSnapshotAsync
    // then calls RestorePayload on those bytes using its own store's key.
    // Therefore src and dst MUST share the same key for encrypted permutations.
    var srcSubdir = PermSubdir("snap-src", cipher, compression);
    var dstSubdir = PermSubdir("snap-dst", cipher, compression);

    // One key shared by both stores.
    byte[]? sharedKey = null;
    if (cipher != StoreCipher.NoCipher)
    {
        sharedKey = new byte[32];
        Random.Shared.NextBytes(sharedKey);
    }

    // src and dst need identical options apart from the directory; building
    // them in one helper removes the duplicated literal the original had.
    FileStoreOptions BuildOptions(string sub) => new()
    {
        Cipher = cipher,
        Compression = compression,
        EncryptionKey = sharedKey,
        EnableCompression = compression != StoreCompression.NoCompression,
        EnableEncryption = cipher != StoreCipher.NoCipher,
        Directory = Path.Combine(_dir, sub),
    };

    await using var src = new FileStore(BuildOptions(srcSubdir));

    for (var i = 0; i < 30; i++)
        await src.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

    var snap = await src.CreateSnapshotAsync(default);
    snap.Length.ShouldBeGreaterThan(0);

    await using var dst = new FileStore(BuildOptions(dstSubdir));
    await dst.RestoreSnapshotAsync(snap, default);

    // Restored store mirrors the source's sequence bounds and count.
    var srcState = await src.GetStateAsync(default);
    var dstState = await dst.GetStateAsync(default);
    dstState.Messages.ShouldBe(srcState.Messages);
    dstState.FirstSeq.ShouldBe(srcState.FirstSeq);
    dstState.LastSeq.ShouldBe(srcState.LastSeq);

    // Verify every message round-trips correctly.
    for (ulong i = 1; i <= srcState.Messages; i++)
    {
        var original = await src.LoadAsync(i, default);
        var copy = await dst.LoadAsync(i, default);
        copy.ShouldNotBeNull();
        copy!.Subject.ShouldBe(original!.Subject);
        copy.Payload.ToArray().ShouldBe(original.Payload.ToArray());
    }
}
|
||||
|
||||
// =========================================================================
// Test 10: ListAsync returns ordered messages
// Go: TestFileStoreTimeStamps server/filestore_test.go:683
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task ListAsync_returns_ordered_messages(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreTimeStamps line 683 — messages returned in sequence order.
    var dirName = PermSubdir("list-ordered", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    await store.AppendAsync("foo", "one"u8.ToArray(), default);
    await store.AppendAsync("bar", "two"u8.ToArray(), default);
    await store.AppendAsync("baz", "three"u8.ToArray(), default);

    var messages = await store.ListAsync(default);
    messages.Count.ShouldBe(3);

    // Listing order follows append order: (seq, subject) pairs line up.
    var expected = new[] { (1UL, "foo"), (2UL, "bar"), (3UL, "baz") };
    for (var i = 0; i < expected.Length; i++)
    {
        messages[i].Sequence.ShouldBe(expected[i].Item1);
        messages[i].Subject.ShouldBe(expected[i].Item2);
    }
}
|
||||
|
||||
// =========================================================================
// Test 11: Max age TTL prunes expired messages
// Go: TestFileStoreAgeLimit server/filestore_test.go:616 (partial — skip
// compression/cipher guard that Go applies to some variants)
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task MaxAge_prunes_expired_messages(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreAgeLimit line 616.
    var dirName = PermSubdir("max-age", cipher, compression);
    var ageOpts = new FileStoreOptions { MaxAgeMs = 200 };
    await using var store = CreatePermutedStore(dirName, cipher, compression, ageOpts);

    for (var n = 0; n < 5; n++)
        await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);
    (await store.GetStateAsync(default)).Messages.ShouldBe(5UL);

    // Let all five messages exceed the 200 ms TTL.
    await Task.Delay(350);

    // An append drives the pruning pass.
    await store.AppendAsync("foo", "trigger"u8.ToArray(), default);

    // Everything aged out; only the trigger message is left.
    var s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(1UL);
}
|
||||
|
||||
// =========================================================================
// Test 12: Recovery after reopen
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:181
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Recovery_after_reopen_preserves_messages(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasicWriteMsgsAndRestore line 181 — stop and restart.
    var dirName = PermSubdir("recovery", cipher, compression);

    // A single key spans both opens so encrypted data stays readable.
    byte[]? key = null;
    if (cipher != StoreCipher.NoCipher)
    {
        key = new byte[32];
        Random.Shared.NextBytes(key);
    }
    _keyStore[dirName] = key;

    await using (var store = ReopenPermutedStore(dirName, cipher, compression, key))
    {
        for (var n = 1; n <= 100; n++)
        {
            var body = Encoding.UTF8.GetBytes($"[{n:D8}] Hello World!");
            var seq = await store.AppendAsync("foo", body, default);
            seq.ShouldBe((ulong)n);
        }

        (await store.GetStateAsync(default)).Messages.ShouldBe(100UL);
    }

    // Reopen with same key and verify all 100 messages survived.
    await using (var store = ReopenPermutedStore(dirName, cipher, compression, key))
    {
        var s = await store.GetStateAsync(default);
        s.Messages.ShouldBe(100UL);
        s.FirstSeq.ShouldBe(1UL);
        s.LastSeq.ShouldBe(100UL);

        // Spot-check a few payloads byte-for-byte.
        foreach (var (seq, text) in new (ulong, string)[]
        {
            (1, "[00000001] Hello World!"),
            (50, "[00000050] Hello World!"),
        })
        {
            var loaded = await store.LoadAsync(seq, default);
            loaded.ShouldNotBeNull();
            loaded!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes(text));
        }
    }
}
|
||||
|
||||
// =========================================================================
// Test 13: Large payload (64 KB) store and load
// Go: TestFileStoreBasics server/filestore_test.go:86 (large payload variant)
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Large_payload_store_and_load(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics — large random payloads must round-trip exactly.
    var dirName = PermSubdir("large-payload", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    // 64 KiB of random bytes — must survive encryption/compression unchanged.
    var body = new byte[64 * 1024];
    Random.Shared.NextBytes(body);

    var seq = await store.AppendAsync("foo", body, default);
    seq.ShouldBe(1UL);

    var loaded = await store.LoadAsync(1, default);
    loaded.ShouldNotBeNull();
    loaded!.Subject.ShouldBe("foo");
    loaded.Payload.ToArray().ShouldBe(body);
}
|
||||
|
||||
// =========================================================================
// Test 14: Multiple subjects, filter by subject
// Go: TestFileStoreBasics server/filestore_test.go:86 (multi-subject variant)
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Multiple_subjects_filter_by_subject(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics — multiple subjects stored; each LoadLastBySubject
    // returns the correct one.
    var dirName = PermSubdir("multi-subj", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    // Interleave two subjects.
    await store.AppendAsync("foo.bar", "one"u8.ToArray(), default);
    await store.AppendAsync("baz.qux", "two"u8.ToArray(), default);
    await store.AppendAsync("foo.bar", "three"u8.ToArray(), default);
    await store.AppendAsync("baz.qux", "four"u8.ToArray(), default);

    (await store.GetStateAsync(default)).Messages.ShouldBe(4UL);

    // Each sequence carries the subject it was appended under.
    var subjectBySeq = new[] { "foo.bar", "baz.qux", "foo.bar", "baz.qux" };
    for (ulong seq = 1; seq <= 4; seq++)
        (await store.LoadAsync(seq, default))!.Subject.ShouldBe(subjectBySeq[seq - 1]);

    // LoadLastBySubject picks the newest message per subject.
    var lastFoo = await store.LoadLastBySubjectAsync("foo.bar", default);
    lastFoo.ShouldNotBeNull();
    lastFoo!.Sequence.ShouldBe(3UL);
    lastFoo.Payload.ToArray().ShouldBe("three"u8.ToArray());

    var lastBaz = await store.LoadLastBySubjectAsync("baz.qux", default);
    lastBaz.ShouldNotBeNull();
    lastBaz!.Sequence.ShouldBe(4UL);
    lastBaz.Payload.ToArray().ShouldBe("four"u8.ToArray());
}
|
||||
|
||||
// =========================================================================
// Test 15: Sequential writes maintain sequence ordering
// Go: TestFileStoreSelectNextFirst server/filestore_test.go:304
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Sequential_writes_maintain_ordering(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreSelectNextFirst line 304 — remove a run, verify FirstSeq jumps.
    var dirName = PermSubdir("seq-order", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("zzz", "Hello World"u8.ToArray(), default);
    (await store.GetStateAsync(default)).Messages.ShouldBe(10UL);

    // Carve a contiguous gap by deleting sequences 2 through 7.
    for (ulong seq = 2; seq <= 7; seq++)
        (await store.RemoveAsync(seq, default)).ShouldBeTrue();

    // Seq 1 still anchors FirstSeq despite the interior gap.
    var s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(4UL);
    s.FirstSeq.ShouldBe(1UL);

    // Removing seq 1 lets FirstSeq leap the gap straight to 8.
    (await store.RemoveAsync(1, default)).ShouldBeTrue();
    s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(3UL);
    s.FirstSeq.ShouldBe(8UL);

    // The tail (8-10) must remain loadable.
    for (ulong seq = 8; seq <= 10; seq++)
        (await store.LoadAsync(seq, default)).ShouldNotBeNull();
}
|
||||
|
||||
// =========================================================================
// Test 16: Store to new directory, verify files created on disk
// Go: TestFileStoreBasics server/filestore_test.go:86 (disk-presence variant)
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Store_creates_files_on_disk(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreBasics — the store must actually persist data on disk.
    var dirName = PermSubdir("disk-presence", cipher, compression);
    var fullPath = Path.Combine(_dir, dirName);

    await using var store = CreatePermutedStore(dirName, cipher, compression);
    await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

    // The store directory must exist on disk...
    Directory.Exists(fullPath).ShouldBeTrue();

    // ...and contain at least one file (data file or manifest).
    Directory.GetFiles(fullPath, "*", SearchOption.AllDirectories)
        .Length.ShouldBeGreaterThan(0);
}
|
||||
|
||||
// =========================================================================
// Test 17: Write-and-read in the same block
// Go: TestFileStoreWriteAndReadSameBlock server/filestore_test.go:1510
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Write_and_read_same_block(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreWriteAndReadSameBlock line 1510 — interleaved store+load.
    var dirName = PermSubdir("same-block", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    const string subject = "foo";
    var body = "Hello World!"u8.ToArray();

    // Append and immediately read back, ten times in a row.
    for (ulong expectedSeq = 1; expectedSeq <= 10; expectedSeq++)
    {
        var seq = await store.AppendAsync(subject, body, default);
        seq.ShouldBe(expectedSeq);

        var loaded = await store.LoadAsync(expectedSeq, default);
        loaded.ShouldNotBeNull();
        loaded!.Subject.ShouldBe(subject);
        loaded.Payload.ToArray().ShouldBe(body);
    }
}
|
||||
|
||||
// =========================================================================
// Test 18: Timestamps are non-decreasing
// Go: TestFileStoreTimeStamps server/filestore_test.go:683
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Stored_messages_have_non_decreasing_timestamps(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreTimeStamps line 683.
    var dirName = PermSubdir("timestamps", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

    var messages = await store.ListAsync(default);
    messages.Count.ShouldBe(10);

    // Compare each timestamp against its predecessor: never goes backwards.
    for (var i = 1; i < messages.Count; i++)
        messages[i].TimestampUtc.ShouldBeGreaterThanOrEqualTo(messages[i - 1].TimestampUtc);
}
|
||||
|
||||
// =========================================================================
// Test 19: CollapseDmap — out-of-order removes, FirstSeq collapses properly
// Go: TestFileStoreCollapseDmap server/filestore_test.go:1561
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Remove_out_of_order_collapses_first_seq(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreCollapseDmap line 1561.
    var dirName = PermSubdir("dmap", cipher, compression);
    await using var store = CreatePermutedStore(dirName, cipher, compression);

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("foo", "Hello World!"u8.ToArray(), default);
    (await store.GetStateAsync(default)).Messages.ShouldBe(10UL);

    // Punch holes out of order at 2, 4, and 8.
    foreach (var seq in new ulong[] { 2, 4, 8 })
        (await store.RemoveAsync(seq, default)).ShouldBeTrue();

    var s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(7UL);

    // Removing seq 1 collapses over the hole at 2: FirstSeq lands on 3.
    (await store.RemoveAsync(1, default)).ShouldBeTrue();
    s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(6UL);
    s.FirstSeq.ShouldBe(3UL);

    // Removing seq 3 collapses over the hole at 4: FirstSeq lands on 5.
    (await store.RemoveAsync(3, default)).ShouldBeTrue();
    s = await store.GetStateAsync(default);
    s.Messages.ShouldBe(5UL);
    s.FirstSeq.ShouldBe(5UL);
}
|
||||
|
||||
// =========================================================================
// Test 20: Snapshot after removes — removed sequences absent from restore
// Go: TestFileStoreSnapshot server/filestore_test.go:1904
// =========================================================================

[Theory]
[MemberData(nameof(AllPermutations))]
public async Task Snapshot_after_removes_preserves_remaining(StoreCipher cipher, StoreCompression compression)
{
    // Go: TestFileStoreSnapshot line 1904 — snapshot taken after removes; removed
    // sequences must not appear in the restored store.
    //
    // src and dst share the same key: see comment in Snapshot_and_restore_round_trip.
    var srcSubdir = PermSubdir("snap-rm-src", cipher, compression);
    var dstSubdir = PermSubdir("snap-rm-dst", cipher, compression);

    byte[]? sharedKey = null;
    if (cipher != StoreCipher.NoCipher)
    {
        sharedKey = new byte[32];
        Random.Shared.NextBytes(sharedKey);
    }

    // src and dst need identical options apart from the directory; building
    // them in one helper removes the duplicated literal the original had.
    FileStoreOptions BuildOptions(string sub) => new()
    {
        Cipher = cipher,
        Compression = compression,
        EncryptionKey = sharedKey,
        EnableCompression = compression != StoreCompression.NoCompression,
        EnableEncryption = cipher != StoreCipher.NoCipher,
        Directory = Path.Combine(_dir, sub),
    };

    await using var src = new FileStore(BuildOptions(srcSubdir));

    for (var i = 0; i < 20; i++)
        await src.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

    // Remove first 5 messages.
    for (ulong i = 1; i <= 5; i++)
        await src.RemoveAsync(i, default);

    var snap = await src.CreateSnapshotAsync(default);

    await using var dst = new FileStore(BuildOptions(dstSubdir));
    await dst.RestoreSnapshotAsync(snap, default);

    var dstState = await dst.GetStateAsync(default);
    dstState.Messages.ShouldBe((ulong)15);
    dstState.FirstSeq.ShouldBe((ulong)6);

    // Removed sequences must not be present.
    for (ulong i = 1; i <= 5; i++)
        (await dst.LoadAsync(i, default)).ShouldBeNull();

    // Remaining sequences must be present.
    for (ulong i = 6; i <= 20; i++)
        (await dst.LoadAsync(i, default)).ShouldNotBeNull();
}
|
||||
}
|
||||
@@ -0,0 +1,419 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStorePurgeEx, TestFileStorePurgeExWithSubject,
|
||||
// TestFileStorePurgeExKeepOneBug, TestFileStoreCompact, TestFileStoreStreamTruncate,
|
||||
// TestFileStoreState, TestFileStoreFilteredState, TestFileStoreSubjectsState,
|
||||
// TestFileStoreGetSeqFromTime
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for FileStore tombstone tracking and purge operations:
|
||||
/// PurgeEx, Compact, Truncate, FilteredState, SubjectsState, SubjectsTotals,
|
||||
/// State (with deleted sequences), and GetSeqFromTime.
|
||||
/// Reference: golang/nats-server/server/filestore_test.go
|
||||
/// </summary>
|
||||
public sealed class FileStorePurgeBlockTests : IDisposable
|
||||
{
|
||||
private readonly string _dir;
|
||||
|
||||
public FileStorePurgeBlockTests()
{
    // Each test class instance gets its own unique temp root.
    var root = Path.Combine(Path.GetTempPath(), $"nats-js-purgeblock-{Guid.NewGuid():N}");
    Directory.CreateDirectory(root);
    _dir = root;
}
|
||||
|
||||
public void Dispose()
{
    // Best-effort cleanup of the per-test temp directory. On Windows a
    // still-open file handle (e.g. a store not fully released yet) can make
    // the recursive delete throw; a failed cleanup must not turn a passing
    // test into a failure, so I/O errors are swallowed here.
    try
    {
        if (Directory.Exists(_dir))
            Directory.Delete(_dir, recursive: true);
    }
    catch (IOException)
    {
        // Leave the directory behind; the OS temp cleaner will reclaim it.
    }
    catch (UnauthorizedAccessException)
    {
        // Same rationale as above — never fail teardown on cleanup.
    }
}
|
||||
|
||||
// Opens a FileStore rooted under this test's temp directory; the caller may
// supply options (the Directory is always overwritten with the subdirectory).
private FileStore CreateStore(string subdirectory, FileStoreOptions? options = null)
{
    var opts = options ?? new FileStoreOptions();
    opts.Directory = Path.Combine(_dir, subdirectory);
    return new FileStore(opts);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// PurgeEx tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStorePurgeExWithSubject — filestore_test.go:~867
[Fact]
public async Task PurgeEx_BySubject_RemovesMatchingMessages()
{
    await using var store = CreateStore("purgex-subject");

    // Interleave 5 "foo" and 5 "bar" messages.
    for (var n = 0; n < 5; n++)
        await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"foo-{n}"), default);
    for (var n = 0; n < 5; n++)
        await store.AppendAsync("bar", Encoding.UTF8.GetBytes($"bar-{n}"), default);

    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)10);

    // seq=0 means no upper limit; keep=0 means keep none of the matches.
    store.PurgeEx("foo", 0, 0).ShouldBe((ulong)5);

    // Only the "bar" messages survive.
    (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)5);
    var survivors = await store.ListAsync(default);
    survivors.All(m => m.Subject == "bar").ShouldBeTrue();
}
|
||||
|
||||
// Go: TestFileStorePurgeExKeepOneBug — filestore_test.go:~910
[Fact]
public async Task PurgeEx_WithKeep_RetainsNewestMessages()
{
    await using var store = CreateStore("purgex-keep");

    for (var n = 0; n < 10; n++)
        await store.AppendAsync("events", Encoding.UTF8.GetBytes($"msg-{n}"), default);

    // keep=3 retains the newest three; the older seven are purged.
    store.PurgeEx("events", 0, 3).ShouldBe((ulong)7);

    var survivors = await store.ListAsync(default);
    survivors.Count.ShouldBe(3);

    // The retained messages are the three highest sequences: 8, 9, 10.
    var seqs = survivors.Select(m => m.Sequence).OrderBy(s => s).ToArray();
    seqs.Length.ShouldBe(3);
    for (var i = 0; i < 3; i++)
        seqs[i].ShouldBe((ulong)(8 + i));
}
|
||||
|
||||
// Go: TestFileStorePurgeEx — filestore_test.go:~855
|
||||
[Fact]
|
||||
public async Task PurgeEx_WithSeqLimit_OnlyPurgesBelowSequence()
|
||||
{
|
||||
await using var store = CreateStore("purgex-seqlimit");
|
||||
|
||||
// Store 10 messages on "data"
|
||||
for (var i = 1; i <= 10; i++)
|
||||
await store.AppendAsync("data", Encoding.UTF8.GetBytes($"d{i}"), default);
|
||||
|
||||
// Purge "data" messages with seq <= 5 (keep=0)
|
||||
var purged = store.PurgeEx("data", 5, 0);
|
||||
purged.ShouldBe(5UL);
|
||||
|
||||
// Messages 6-10 should remain
|
||||
var remaining = await store.ListAsync(default);
|
||||
remaining.Count.ShouldBe(5);
|
||||
remaining.Min(m => m.Sequence).ShouldBe(6UL);
|
||||
remaining.Max(m => m.Sequence).ShouldBe(10UL);
|
||||
}
|
||||
|
||||
// Go: PurgeEx with wildcard subject — filestore_test.go:~867
|
||||
[Fact]
|
||||
public async Task PurgeEx_WithWildcardSubject_RemovesAllMatchingSubjects()
|
||||
{
|
||||
await using var store = CreateStore("purgex-wildcard");
|
||||
|
||||
await store.AppendAsync("foo.a", "m1"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo.b", "m2"u8.ToArray(), default);
|
||||
await store.AppendAsync("bar.a", "m3"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo.c", "m4"u8.ToArray(), default);
|
||||
|
||||
var purged = store.PurgeEx("foo.*", 0, 0);
|
||||
purged.ShouldBe(3UL);
|
||||
|
||||
var remaining = await store.ListAsync(default);
|
||||
remaining.Count.ShouldBe(1);
|
||||
remaining[0].Subject.ShouldBe("bar.a");
|
||||
}
|
||||
|
||||
// Go: PurgeEx with > wildcard — filestore_test.go:~867
|
||||
[Fact]
|
||||
public async Task PurgeEx_WithGtWildcard_RemovesAllMatchingSubjects()
|
||||
{
|
||||
await using var store = CreateStore("purgex-gt-wildcard");
|
||||
|
||||
await store.AppendAsync("a.b.c", "m1"u8.ToArray(), default);
|
||||
await store.AppendAsync("a.b.d", "m2"u8.ToArray(), default);
|
||||
await store.AppendAsync("a.x", "m3"u8.ToArray(), default);
|
||||
await store.AppendAsync("b.x", "m4"u8.ToArray(), default);
|
||||
|
||||
var purged = store.PurgeEx("a.>", 0, 0);
|
||||
purged.ShouldBe(3UL);
|
||||
|
||||
var remaining = await store.ListAsync(default);
|
||||
remaining.Count.ShouldBe(1);
|
||||
remaining[0].Subject.ShouldBe("b.x");
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Compact tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreCompact — filestore_test.go:~964
|
||||
[Fact]
|
||||
public async Task Compact_RemovesMessagesBeforeSequence()
|
||||
{
|
||||
await using var store = CreateStore("compact-basic");
|
||||
|
||||
// Store 10 messages
|
||||
for (var i = 1; i <= 10; i++)
|
||||
await store.AppendAsync("test", Encoding.UTF8.GetBytes($"msg{i}"), default);
|
||||
|
||||
// Compact to remove messages with seq < 5 (removes 1, 2, 3, 4)
|
||||
var removed = store.Compact(5);
|
||||
removed.ShouldBe(4UL);
|
||||
|
||||
var remaining = await store.ListAsync(default);
|
||||
remaining.Count.ShouldBe(6); // 5-10
|
||||
|
||||
remaining.Min(m => m.Sequence).ShouldBe(5UL);
|
||||
remaining.Max(m => m.Sequence).ShouldBe(10UL);
|
||||
|
||||
// Sequence 1-4 should no longer be loadable
|
||||
(await store.LoadAsync(1, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(4, default)).ShouldBeNull();
|
||||
|
||||
// Sequence 5 should still exist
|
||||
(await store.LoadAsync(5, default)).ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Truncate tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreStreamTruncate — filestore_test.go:~1035
|
||||
[Fact]
|
||||
public async Task Truncate_RemovesMessagesAfterSequence()
|
||||
{
|
||||
await using var store = CreateStore("truncate-basic");
|
||||
|
||||
// Store 10 messages
|
||||
for (var i = 1; i <= 10; i++)
|
||||
await store.AppendAsync("stream", Encoding.UTF8.GetBytes($"m{i}"), default);
|
||||
|
||||
// Truncate at seq=5 (removes 6, 7, 8, 9, 10)
|
||||
store.Truncate(5);
|
||||
|
||||
var remaining = await store.ListAsync(default);
|
||||
remaining.Count.ShouldBe(5); // 1-5
|
||||
|
||||
remaining.Min(m => m.Sequence).ShouldBe(1UL);
|
||||
remaining.Max(m => m.Sequence).ShouldBe(5UL);
|
||||
|
||||
// Messages 6-10 should be gone
|
||||
(await store.LoadAsync(6, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(10, default)).ShouldBeNull();
|
||||
|
||||
// Message 5 should still exist
|
||||
(await store.LoadAsync(5, default)).ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FilteredState tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreFilteredState — filestore_test.go:~1200
|
||||
[Fact]
|
||||
public async Task FilteredState_ReturnsCorrectState()
|
||||
{
|
||||
await using var store = CreateStore("filteredstate");
|
||||
|
||||
// Store 5 messages on "orders" and 5 on "invoices"
|
||||
for (var i = 1; i <= 5; i++)
|
||||
await store.AppendAsync("orders", Encoding.UTF8.GetBytes($"o{i}"), default);
|
||||
for (var i = 1; i <= 5; i++)
|
||||
await store.AppendAsync("invoices", Encoding.UTF8.GetBytes($"inv{i}"), default);
|
||||
|
||||
// FilteredState for "orders" from seq=1
|
||||
var ordersState = store.FilteredState(1, "orders");
|
||||
ordersState.Msgs.ShouldBe(5UL);
|
||||
ordersState.First.ShouldBe(1UL);
|
||||
ordersState.Last.ShouldBe(5UL);
|
||||
|
||||
// FilteredState for "invoices" from seq=1
|
||||
var invoicesState = store.FilteredState(1, "invoices");
|
||||
invoicesState.Msgs.ShouldBe(5UL);
|
||||
invoicesState.First.ShouldBe(6UL);
|
||||
invoicesState.Last.ShouldBe(10UL);
|
||||
|
||||
// FilteredState from seq=7 (only 4 invoices remain)
|
||||
var lateInvoices = store.FilteredState(7, "invoices");
|
||||
lateInvoices.Msgs.ShouldBe(4UL);
|
||||
lateInvoices.First.ShouldBe(7UL);
|
||||
lateInvoices.Last.ShouldBe(10UL);
|
||||
|
||||
// No match for non-existent subject
|
||||
var noneState = store.FilteredState(1, "orders.unknown");
|
||||
noneState.Msgs.ShouldBe(0UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// SubjectsState tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreSubjectsState — filestore_test.go:~1266
|
||||
[Fact]
|
||||
public async Task SubjectsState_ReturnsPerSubjectState()
|
||||
{
|
||||
await using var store = CreateStore("subjectsstate");
|
||||
|
||||
await store.AppendAsync("a.1", "msg"u8.ToArray(), default);
|
||||
await store.AppendAsync("a.2", "msg"u8.ToArray(), default);
|
||||
await store.AppendAsync("a.1", "msg"u8.ToArray(), default);
|
||||
await store.AppendAsync("b.1", "msg"u8.ToArray(), default);
|
||||
|
||||
var state = store.SubjectsState("a.>");
|
||||
|
||||
state.ShouldContainKey("a.1");
|
||||
state.ShouldContainKey("a.2");
|
||||
state.ShouldNotContainKey("b.1");
|
||||
|
||||
state["a.1"].Msgs.ShouldBe(2UL);
|
||||
state["a.1"].First.ShouldBe(1UL);
|
||||
state["a.1"].Last.ShouldBe(3UL);
|
||||
|
||||
state["a.2"].Msgs.ShouldBe(1UL);
|
||||
state["a.2"].First.ShouldBe(2UL);
|
||||
state["a.2"].Last.ShouldBe(2UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// SubjectsTotals tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreSubjectsTotals — filestore_test.go:~1300
|
||||
[Fact]
|
||||
public async Task SubjectsTotals_ReturnsPerSubjectCounts()
|
||||
{
|
||||
await using var store = CreateStore("subjectstotals");
|
||||
|
||||
await store.AppendAsync("x.1", "m"u8.ToArray(), default);
|
||||
await store.AppendAsync("x.1", "m"u8.ToArray(), default);
|
||||
await store.AppendAsync("x.2", "m"u8.ToArray(), default);
|
||||
await store.AppendAsync("y.1", "m"u8.ToArray(), default);
|
||||
await store.AppendAsync("x.3", "m"u8.ToArray(), default);
|
||||
|
||||
var totals = store.SubjectsTotals("x.*");
|
||||
|
||||
totals.ShouldContainKey("x.1");
|
||||
totals.ShouldContainKey("x.2");
|
||||
totals.ShouldContainKey("x.3");
|
||||
totals.ShouldNotContainKey("y.1");
|
||||
|
||||
totals["x.1"].ShouldBe(2UL);
|
||||
totals["x.2"].ShouldBe(1UL);
|
||||
totals["x.3"].ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// State (with deleted sequences) tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreState — filestore_test.go:~420
|
||||
[Fact]
|
||||
public async Task State_IncludesDeletedSequences()
|
||||
{
|
||||
await using var store = CreateStore("state-deleted");
|
||||
|
||||
// Store 10 messages
|
||||
for (var i = 1; i <= 10; i++)
|
||||
await store.AppendAsync("events", Encoding.UTF8.GetBytes($"e{i}"), default);
|
||||
|
||||
// Remove messages 3, 5, 7
|
||||
await store.RemoveAsync(3, default);
|
||||
await store.RemoveAsync(5, default);
|
||||
await store.RemoveAsync(7, default);
|
||||
|
||||
var state = store.State();
|
||||
|
||||
state.Msgs.ShouldBe(7UL);
|
||||
state.FirstSeq.ShouldBe(1UL);
|
||||
state.LastSeq.ShouldBe(10UL);
|
||||
state.NumDeleted.ShouldBe(3);
|
||||
|
||||
state.Deleted.ShouldNotBeNull();
|
||||
state.Deleted!.ShouldContain(3UL);
|
||||
state.Deleted.ShouldContain(5UL);
|
||||
state.Deleted.ShouldContain(7UL);
|
||||
state.Deleted.Length.ShouldBe(3);
|
||||
|
||||
// NumSubjects: all messages are on "events"
|
||||
state.NumSubjects.ShouldBe(1);
|
||||
state.Subjects.ShouldNotBeNull();
|
||||
state.Subjects!["events"].ShouldBe(7UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// GetSeqFromTime tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreGetSeqFromTime — filestore_test.go:~1570
|
||||
[Fact]
|
||||
public async Task GetSeqFromTime_ReturnsCorrectSequence()
|
||||
{
|
||||
await using var store = CreateStore("getseqfromtime");
|
||||
|
||||
// Store 5 messages; we'll query by the timestamp of the 3rd message
|
||||
var timestamps = new List<DateTime>();
|
||||
for (var i = 1; i <= 5; i++)
|
||||
{
|
||||
await store.AppendAsync("time.test", Encoding.UTF8.GetBytes($"t{i}"), default);
|
||||
var msgs = await store.ListAsync(default);
|
||||
timestamps.Add(msgs[^1].TimestampUtc);
|
||||
// Small delay to ensure distinct timestamps
|
||||
await Task.Delay(5);
|
||||
}
|
||||
|
||||
// Query for first seq at or after the timestamp of msg 3
|
||||
var targetTime = timestamps[2]; // timestamp of sequence 3
|
||||
var seq = store.GetSeqFromTime(targetTime);
|
||||
seq.ShouldBe(3UL);
|
||||
|
||||
// Query with a time before all messages: should return 1
|
||||
var beforeAll = timestamps[0].AddMilliseconds(-100);
|
||||
store.GetSeqFromTime(beforeAll).ShouldBe(1UL);
|
||||
|
||||
// Query with a time after all messages: should return last+1
|
||||
var afterAll = timestamps[^1].AddSeconds(1);
|
||||
store.GetSeqFromTime(afterAll).ShouldBe(6UL); // _last + 1
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// MsgBlock enhancements
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: filestore.go dmap — soft-delete tracking and enumeration
|
||||
[Fact]
|
||||
public async Task MsgBlock_IsDeleted_AndEnumerateNonDeleted_Work()
|
||||
{
|
||||
await using var store = CreateStore("block-enumerate");
|
||||
|
||||
// Store 5 messages on 2 subjects
|
||||
await store.AppendAsync("a.1", "m1"u8.ToArray(), default);
|
||||
await store.AppendAsync("a.2", "m2"u8.ToArray(), default);
|
||||
await store.AppendAsync("a.1", "m3"u8.ToArray(), default);
|
||||
await store.AppendAsync("b.1", "m4"u8.ToArray(), default);
|
||||
await store.AppendAsync("a.2", "m5"u8.ToArray(), default);
|
||||
|
||||
// Delete sequences 2 and 4
|
||||
await store.RemoveAsync(2, default);
|
||||
await store.RemoveAsync(4, default);
|
||||
|
||||
// Verify the state after deletion
|
||||
var all = await store.ListAsync(default);
|
||||
all.Count.ShouldBe(3);
|
||||
all.Select(m => m.Sequence).ShouldBe([1UL, 3UL, 5UL]);
|
||||
|
||||
// FilteredState should only see non-deleted
|
||||
var aState = store.FilteredState(1, "a.1");
|
||||
aState.Msgs.ShouldBe(2UL); // sequences 1 and 3
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,276 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStorePurge, TestFileStoreCompact,
|
||||
// TestFileStoreCompactLastPlusOne, TestFileStoreCompactMsgCountBug,
|
||||
// TestFileStorePurgeExWithSubject, TestFileStorePurgeExKeepOneBug,
|
||||
// TestFileStorePurgeExNoTombsOnBlockRemoval,
|
||||
// TestFileStoreStreamTruncate
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
// Purge-oriented FileStore tests: full purge, purge + restart recovery, and
// skipped stubs for Compact/Truncate/PurgeEx cases not yet ported.
public sealed class FileStorePurgeTests : IDisposable
{
    // Per-test temp root; every CreateStore call gets its own subdirectory.
    private readonly string _dir;

    public FileStorePurgeTests()
    {
        _dir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-purge-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_dir);
    }

    // Best-effort teardown. Fix: the original deleted unguarded, so a cleanup
    // failure (e.g. the OS still holding a handle on a just-closed store
    // file) would throw from Dispose and mask the real test outcome.
    public void Dispose()
    {
        try
        {
            if (Directory.Exists(_dir))
                Directory.Delete(_dir, recursive: true);
        }
        catch (IOException)
        {
            // Leave the temp directory behind rather than fail the test.
        }
        catch (UnauthorizedAccessException)
        {
            // Same: cleanup is best-effort only.
        }
    }

    // Opens (or re-opens, for restart tests) a FileStore rooted under this
    // test's temp directory.
    private FileStore CreateStore(string subdirectory, FileStoreOptions? options = null)
    {
        var dir = Path.Combine(_dir, subdirectory);
        var opts = options ?? new FileStoreOptions();
        opts.Directory = dir;
        return new FileStore(opts);
    }

    // Go: TestFileStorePurge server/filestore_test.go:709
    [Fact]
    public async Task Purge_removes_all_messages()
    {
        await using var store = CreateStore("purge-all");

        for (var i = 0; i < 100; i++)
            await store.AppendAsync("foo", new byte[128], default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)100);

        await store.PurgeAsync(default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)0);
        state.Bytes.ShouldBe((ulong)0);
    }

    // Go: TestFileStorePurge server/filestore_test.go:740
    [Fact]
    public async Task Purge_recovers_same_state_after_restart()
    {
        var subDir = "purge-restart";

        await using (var store = CreateStore(subDir))
        {
            for (var i = 0; i < 50; i++)
                await store.AppendAsync("foo", "Hello"u8.ToArray(), default);

            await store.PurgeAsync(default);
        }

        await using (var store = CreateStore(subDir))
        {
            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe((ulong)0);
            state.Bytes.ShouldBe((ulong)0);
        }
    }

    // Go: TestFileStorePurge server/filestore_test.go:776
    [Fact]
    public async Task Store_after_purge_works()
    {
        await using var store = CreateStore("purge-then-store");

        for (var i = 0; i < 20; i++)
            await store.AppendAsync("foo", "Hello"u8.ToArray(), default);

        await store.PurgeAsync(default);

        // New messages after purge.
        for (var i = 0; i < 10; i++)
        {
            var seq = await store.AppendAsync("foo", "After purge"u8.ToArray(), default);
            seq.ShouldBeGreaterThan((ulong)0);
        }

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)10);
    }

    // Go: TestFileStoreCompact server/filestore_test.go:822
    [Fact(Skip = "Compact not yet implemented in .NET FileStore")]
    public async Task Compact_removes_messages_below_sequence()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreCompact server/filestore_test.go:851
    [Fact(Skip = "Compact not yet implemented in .NET FileStore")]
    public async Task Compact_beyond_last_seq_resets_first()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreCompact server/filestore_test.go:862
    [Fact(Skip = "Compact not yet implemented in .NET FileStore")]
    public async Task Compact_recovers_after_restart()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreCompactLastPlusOne server/filestore_test.go:875
    [Fact(Skip = "Compact not yet implemented in .NET FileStore")]
    public async Task Compact_last_plus_one_clears_all()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreCompactMsgCountBug server/filestore_test.go:916
    [Fact(Skip = "Compact not yet implemented in .NET FileStore")]
    public async Task Compact_with_prior_deletes_counts_correctly()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreStreamTruncate server/filestore_test.go:991
    [Fact(Skip = "Truncate not yet implemented in .NET FileStore")]
    public async Task Truncate_removes_messages_after_sequence()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreStreamTruncate server/filestore_test.go:1025
    [Fact(Skip = "Truncate not yet implemented in .NET FileStore")]
    public async Task Truncate_with_interior_deletes()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStorePurgeExWithSubject server/filestore_test.go:3743
    [Fact(Skip = "PurgeEx not yet implemented in .NET FileStore")]
    public async Task PurgeEx_with_subject_removes_matching()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStorePurgeExKeepOneBug server/filestore_test.go:3382
    [Fact(Skip = "PurgeEx not yet implemented in .NET FileStore")]
    public async Task PurgeEx_keep_one_preserves_last()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStorePurgeExNoTombsOnBlockRemoval server/filestore_test.go:3823
    [Fact(Skip = "PurgeEx not yet implemented in .NET FileStore")]
    public async Task PurgeEx_no_tombstones_on_block_removal()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStorePurge server/filestore_test.go:709
    [Fact]
    public async Task Purge_then_list_returns_empty()
    {
        await using var store = CreateStore("purge-list");

        for (var i = 0; i < 10; i++)
            await store.AppendAsync("foo", "data"u8.ToArray(), default);

        await store.PurgeAsync(default);

        var messages = await store.ListAsync(default);
        messages.Count.ShouldBe(0);
    }

    // Go: TestFileStorePurge server/filestore_test.go:709
    [Fact]
    public async Task Multiple_purges_are_safe()
    {
        await using var store = CreateStore("multi-purge");

        for (var i = 0; i < 5; i++)
            await store.AppendAsync("foo", "data"u8.ToArray(), default);

        await store.PurgeAsync(default);
        await store.PurgeAsync(default); // Double purge should not error.

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)0);
    }

    // Go: TestFileStorePurge server/filestore_test.go:709
    [Fact]
    public async Task Purge_empty_store_is_safe()
    {
        await using var store = CreateStore("purge-empty");

        await store.PurgeAsync(default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)0);
    }

    // Go: TestFileStorePurge server/filestore_test.go:709
    [Fact]
    public async Task Purge_with_prior_removes()
    {
        await using var store = CreateStore("purge-prior-rm");

        for (var i = 0; i < 10; i++)
            await store.AppendAsync("foo", "data"u8.ToArray(), default);

        // Remove some messages first.
        await store.RemoveAsync(2, default);
        await store.RemoveAsync(4, default);
        await store.RemoveAsync(6, default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)7);

        await store.PurgeAsync(default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)0);
        state.Bytes.ShouldBe((ulong)0);
    }

    // Go: TestFileStorePurge server/filestore_test.go:776
    [Fact]
    public async Task Purge_then_store_then_purge_again()
    {
        await using var store = CreateStore("purge-cycle");

        for (var i = 0; i < 5; i++)
            await store.AppendAsync("foo", "data"u8.ToArray(), default);

        await store.PurgeAsync(default);

        for (var i = 0; i < 3; i++)
            await store.AppendAsync("foo", "new data"u8.ToArray(), default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)3);

        await store.PurgeAsync(default);
        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)0);
    }

    // Go: TestFileStorePurge server/filestore_test.go:709
    [Fact]
    public async Task Purge_data_file_is_deleted()
    {
        var subDir = "purge-file";
        var dir = Path.Combine(_dir, subDir);

        await using (var store = CreateStore(subDir))
        {
            for (var i = 0; i < 10; i++)
                await store.AppendAsync("foo", "data"u8.ToArray(), default);

            await store.PurgeAsync(default);
        }

        // The data file should be cleaned up or empty after purge.
        // NOTE(review): assumes FileStore persists to "messages.jsonl" —
        // confirm against FileStore's on-disk layout.
        var dataFile = Path.Combine(dir, "messages.jsonl");
        if (File.Exists(dataFile))
        {
            var content = File.ReadAllText(dataFile);
            content.Trim().ShouldBeEmpty();
        }
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,439 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStoreRemovePartialRecovery,
|
||||
// TestFileStoreRemoveOutOfOrderRecovery,
|
||||
// TestFileStoreAgeLimitRecovery, TestFileStoreBitRot,
|
||||
// TestFileStoreEraseAndNoIndexRecovery,
|
||||
// TestFileStoreExpireMsgsOnStart,
|
||||
// TestFileStoreRebuildStateDmapAccountingBug,
|
||||
// TestFileStoreRecalcFirstSequenceBug,
|
||||
// TestFileStoreFullStateBasics
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class FileStoreRecoveryTests : IDisposable
|
||||
{
|
||||
private readonly string _dir;
|
||||
|
||||
    // xUnit constructs a new test-class instance per test, so every test
    // gets its own unique temp root directory.
    public FileStoreRecoveryTests()
    {
        _dir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-recovery-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_dir);
    }
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (Directory.Exists(_dir))
|
||||
Directory.Delete(_dir, recursive: true);
|
||||
}
|
||||
|
||||
private FileStore CreateStore(string subdirectory, FileStoreOptions? options = null)
|
||||
{
|
||||
var dir = Path.Combine(_dir, subdirectory);
|
||||
var opts = options ?? new FileStoreOptions();
|
||||
opts.Directory = dir;
|
||||
return new FileStore(opts);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreRemovePartialRecovery server/filestore_test.go:1076
|
||||
[Fact]
|
||||
public async Task Remove_half_then_recover()
|
||||
{
|
||||
var subDir = "partial-recovery";
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
for (var i = 0; i < 100; i++)
|
||||
await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);
|
||||
|
||||
// Remove first half.
|
||||
for (ulong i = 1; i <= 50; i++)
|
||||
await store.RemoveAsync(i, default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)50);
|
||||
}
|
||||
|
||||
// Recover and verify state matches.
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)50);
|
||||
state.FirstSeq.ShouldBe((ulong)51);
|
||||
state.LastSeq.ShouldBe((ulong)100);
|
||||
|
||||
// Verify removed messages are gone.
|
||||
for (ulong i = 1; i <= 50; i++)
|
||||
(await store.LoadAsync(i, default)).ShouldBeNull();
|
||||
|
||||
// Verify remaining messages are present.
|
||||
for (ulong i = 51; i <= 100; i++)
|
||||
(await store.LoadAsync(i, default)).ShouldNotBeNull();
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreRemoveOutOfOrderRecovery server/filestore_test.go:1119
|
||||
[Fact]
|
||||
public async Task Remove_evens_then_recover()
|
||||
{
|
||||
var subDir = "ooo-recovery";
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
for (var i = 0; i < 100; i++)
|
||||
await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);
|
||||
|
||||
// Remove even-numbered sequences.
|
||||
for (var i = 2; i <= 100; i += 2)
|
||||
(await store.RemoveAsync((ulong)i, default)).ShouldBeTrue();
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)50);
|
||||
}
|
||||
|
||||
// Recover and verify.
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)50);
|
||||
|
||||
// Seq 1 should exist.
|
||||
(await store.LoadAsync(1, default)).ShouldNotBeNull();
|
||||
|
||||
// Even sequences should be gone.
|
||||
for (var i = 2; i <= 100; i += 2)
|
||||
(await store.LoadAsync((ulong)i, default)).ShouldBeNull();
|
||||
|
||||
// Odd sequences should exist.
|
||||
for (var i = 1; i <= 99; i += 2)
|
||||
(await store.LoadAsync((ulong)i, default)).ShouldNotBeNull();
|
||||
}
|
||||
}
|
||||
|
||||
    // Go: TestFileStoreAgeLimitRecovery server/filestore_test.go:1183
    // Verifies that messages older than MaxAgeMs are pruned across a restart,
    // not only while the store is running. The 300ms wait comfortably exceeds
    // the 200ms age limit, so every pre-restart message is expired on reload.
    [Fact]
    public async Task Age_limit_recovery_expires_on_restart()
    {
        var subDir = "age-recovery";

        await using (var store = CreateStore(subDir, new FileStoreOptions { MaxAgeMs = 200 }))
        {
            for (var i = 0; i < 20; i++)
                await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

            (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)20);
        }

        // Wait for messages to age out.
        await Task.Delay(300);

        // Reopen — expired messages should be pruned on load.
        await using (var store = CreateStore(subDir, new FileStoreOptions { MaxAgeMs = 200 }))
        {
            // Trigger prune by appending.
            // NOTE(review): assumes expiry is applied lazily (on append) rather
            // than eagerly at open — confirm against FileStore; if pruning runs
            // at open this append is benign.
            await store.AppendAsync("foo", "trigger"u8.ToArray(), default);

            // Only the freshly appended trigger message survives.
            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe((ulong)1);
        }
    }
|
||||
|
||||
// Go: TestFileStoreEraseAndNoIndexRecovery server/filestore_test.go:1363
|
||||
[Fact]
|
||||
public async Task Remove_evens_then_recover_without_index()
|
||||
{
|
||||
var subDir = "no-index-recovery";
|
||||
var dir = Path.Combine(_dir, subDir);
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
for (var i = 0; i < 100; i++)
|
||||
await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);
|
||||
|
||||
// Remove even-numbered sequences.
|
||||
for (var i = 2; i <= 100; i += 2)
|
||||
(await store.RemoveAsync((ulong)i, default)).ShouldBeTrue();
|
||||
|
||||
(await store.GetStateAsync(default)).Messages.ShouldBe((ulong)50);
|
||||
}
|
||||
|
||||
// Remove the index manifest file to force a full rebuild.
|
||||
var manifestPath = Path.Combine(dir, "index.manifest.json");
|
||||
if (File.Exists(manifestPath))
|
||||
File.Delete(manifestPath);
|
||||
|
||||
// Recover without index manifest.
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)50);
|
||||
|
||||
// Even sequences should still be gone.
|
||||
for (var i = 2; i <= 100; i += 2)
|
||||
(await store.LoadAsync((ulong)i, default)).ShouldBeNull();
|
||||
|
||||
// Odd sequences should exist.
|
||||
for (var i = 1; i <= 99; i += 2)
|
||||
(await store.LoadAsync((ulong)i, default)).ShouldNotBeNull();
|
||||
}
|
||||
}
|
||||
|
||||
    // Go: TestFileStoreBitRot server/filestore_test.go:1229
    // Flips two bytes in the middle of the data file and verifies that
    // recovery completes without throwing. How many messages survive depends
    // on which record the corruption lands in, so only "no crash" is pinned.
    [Fact]
    public async Task Corrupted_data_file_loses_messages_but_store_recovers()
    {
        var subDir = "bitrot";
        var dir = Path.Combine(_dir, subDir);

        await using (var store = CreateStore(subDir))
        {
            for (var i = 0; i < 20; i++)
                await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);
        }

        // Corrupt the data file by writing random bytes in the middle.
        // NOTE(review): assumes FileStore persists to "messages.jsonl" under
        // its directory — confirm against FileStore's on-disk layout. If the
        // file name ever changes, this test silently stops corrupting anything.
        var dataFile = Path.Combine(dir, "messages.jsonl");
        if (File.Exists(dataFile))
        {
            var content = File.ReadAllBytes(dataFile);
            if (content.Length > 50)
            {
                // Corrupt some bytes in the middle.
                content[content.Length / 2] = 0xFF;
                content[content.Length / 2 + 1] = 0xFE;
                File.WriteAllBytes(dataFile, content);
            }
        }

        // Recovery should not throw; it may lose some messages though.
        await using (var store = CreateStore(subDir))
        {
            var state = await store.GetStateAsync(default);
            // We may lose messages due to corruption, but at least some should survive
            // if the corruption only affected one record.
            // The key point is that the store recovered without throwing.
            state.Messages.ShouldBeGreaterThanOrEqualTo((ulong)0);
        }
    }
|
||||
|
||||
// Go: TestFileStoreFullStateBasics server/filestore_test.go:5461
|
||||
[Fact]
|
||||
public async Task Full_state_recovery_preserves_all_messages()
|
||||
{
|
||||
var subDir = "full-state";
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
for (var i = 0; i < 50; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);
|
||||
|
||||
for (var i = 0; i < 50; i++)
|
||||
await store.AppendAsync("bar", Encoding.UTF8.GetBytes($"msg-{i}"), default);
|
||||
}
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)100);
|
||||
state.FirstSeq.ShouldBe((ulong)1);
|
||||
state.LastSeq.ShouldBe((ulong)100);
|
||||
|
||||
var msg1 = await store.LoadAsync(1, default);
|
||||
msg1.ShouldNotBeNull();
|
||||
msg1!.Subject.ShouldBe("foo");
|
||||
|
||||
var msg51 = await store.LoadAsync(51, default);
|
||||
msg51.ShouldNotBeNull();
|
||||
msg51!.Subject.ShouldBe("bar");
|
||||
}
|
||||
}
|
||||
|
||||
    // Go: TestFileStoreExpireMsgsOnStart server/filestore_test.go:3018
    // Messages written with NO age limit must still be expired when the store
    // is reopened with a MaxAgeMs smaller than their age (100ms wait > 50ms cap).
    [Fact]
    public async Task Expire_on_restart_with_different_maxage()
    {
        var subDir = "expire-on-start";

        // Store with no age limit.
        await using (var store = CreateStore(subDir))
        {
            for (var i = 0; i < 10; i++)
                await store.AppendAsync("foo", "Hello"u8.ToArray(), default);
        }

        // Let the stored messages age past the limit applied below.
        await Task.Delay(100);

        // Reopen with an age limit that will expire all old messages.
        await using (var store = CreateStore(subDir, new FileStoreOptions { MaxAgeMs = 50 }))
        {
            // Trigger pruning.
            // NOTE(review): assumes expiry is applied lazily (on append) rather
            // than eagerly at open — confirm against FileStore.
            await store.AppendAsync("foo", "trigger"u8.ToArray(), default);

            // Only the freshly appended trigger message survives.
            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe((ulong)1);
        }
    }
|
||||
|
||||
// Go: TestFileStoreRemovePartialRecovery server/filestore_test.go:1076
[Fact]
public async Task Remove_then_append_then_recover()
{
    const string subDir = "rm-append-recover";

    await using (var writer = CreateStore(subDir))
    {
        for (var n = 0; n < 10; n++)
            await writer.AppendAsync("foo", "Hello"u8.ToArray(), default);

        // Punch a hole at seq 5, then append one more message (seq 11).
        await writer.RemoveAsync(5, default);
        await writer.AppendAsync("foo", "After remove"u8.ToArray(), default);

        var liveState = await writer.GetStateAsync(default);
        liveState.Messages.ShouldBe((ulong)10);
        liveState.LastSeq.ShouldBe((ulong)11);
    }

    // Both the interior hole and the trailing append must survive recovery.
    await using var reopened = CreateStore(subDir);

    var recovered = await reopened.GetStateAsync(default);
    recovered.Messages.ShouldBe((ulong)10);
    recovered.LastSeq.ShouldBe((ulong)11);

    (await reopened.LoadAsync(5, default)).ShouldBeNull();
    (await reopened.LoadAsync(11, default)).ShouldNotBeNull();
}
// Go: TestFileStoreRecalcFirstSequenceBug server/filestore_test.go:5405
[Fact]
public async Task Recovery_preserves_first_seq_after_removes()
{
    const string subDir = "first-seq-recovery";

    await using (var writer = CreateStore(subDir))
    {
        for (var n = 0; n < 20; n++)
            await writer.AppendAsync("foo", "data"u8.ToArray(), default);

        // Drop seqs 1..10 so the first live sequence becomes 11.
        for (ulong seq = 1; seq <= 10; seq++)
            await writer.RemoveAsync(seq, default);

        (await writer.GetStateAsync(default)).FirstSeq.ShouldBe((ulong)11);
    }

    // FirstSeq must not be recalculated back down on recovery.
    await using var reopened = CreateStore(subDir);

    var state = await reopened.GetStateAsync(default);
    state.FirstSeq.ShouldBe((ulong)11);
    state.Messages.ShouldBe((ulong)10);
}
// Go: TestFileStoreRebuildStateDmapAccountingBug server/filestore_test.go:3692
[Fact]
public async Task Recovery_with_scattered_deletes_preserves_count()
{
    const string subDir = "scattered-deletes";
    const int total = 50;
    // Every 3rd sequence (3, 6, ..., 48) gets deleted: 16 deletes in total.
    const int expectedCount = total - total / 3;

    await using (var writer = CreateStore(subDir))
    {
        for (var n = 0; n < total; n++)
            await writer.AppendAsync("foo", "data"u8.ToArray(), default);

        for (var seq = 3; seq <= total; seq += 3)
            await writer.RemoveAsync((ulong)seq, default);

        (await writer.GetStateAsync(default)).Messages.ShouldBe((ulong)expectedCount);
    }

    // The delete-map accounting must survive a state rebuild on restart.
    await using var reopened = CreateStore(subDir);
    (await reopened.GetStateAsync(default)).Messages.ShouldBe((ulong)expectedCount);
}
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:181
[Fact]
public async Task Recovery_preserves_message_payloads()
{
    const string subDir = "payload-recovery";

    await using (var writer = CreateStore(subDir))
    {
        for (var n = 0; n < 10; n++)
            await writer.AppendAsync("foo", Encoding.UTF8.GetBytes($"message-{n}"), default);
    }

    // Every payload must round-trip byte-for-byte through recovery.
    await using var reopened = CreateStore(subDir);

    for (ulong seq = 1; seq <= 10; seq++)
    {
        var msg = await reopened.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Subject.ShouldBe("foo");
        // Payloads were written 0-based while sequences are 1-based.
        msg.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes($"message-{seq - 1}"));
    }
}
// Go: TestFileStoreBasicWriteMsgsAndRestore server/filestore_test.go:181
[Fact]
public async Task Recovery_preserves_subjects()
{
    const string subDir = "subject-recovery";

    await using (var writer = CreateStore(subDir))
    {
        await writer.AppendAsync("alpha", "one"u8.ToArray(), default);
        await writer.AppendAsync("beta", "two"u8.ToArray(), default);
        await writer.AppendAsync("gamma", "three"u8.ToArray(), default);
    }

    // Each per-message subject must survive the restart intact, in order.
    await using var reopened = CreateStore(subDir);

    var expectedSubjects = new[] { "alpha", "beta", "gamma" };
    for (ulong seq = 1; seq <= 3; seq++)
    {
        var msg = await reopened.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Subject.ShouldBe(expectedSubjects[seq - 1]);
    }
}
// Go: TestFileStoreRemoveOutOfOrderRecovery server/filestore_test.go:1119
[Fact]
public async Task Recovery_with_large_message_count()
{
    const string subDir = "large-recovery";
    const int total = 500;

    await using (var writer = CreateStore(subDir))
    {
        for (var n = 0; n < total; n++)
            await writer.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{n:D4}"), default);
    }

    // Recovery must account for all 500 messages.
    await using var reopened = CreateStore(subDir);

    var state = await reopened.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)total);
    state.FirstSeq.ShouldBe((ulong)1);
    state.LastSeq.ShouldBe((ulong)total);
}
}
|
||||
@@ -0,0 +1,294 @@
|
||||
// Reference: golang/nats-server/server/filestore.go
|
||||
// Tests in this file:
|
||||
// StoreRawMsg_stores_at_specified_sequence — IStreamStore.StoreRawMsg preserves caller seq/ts
|
||||
// LoadPrevMsg_returns_message_before_seq — IStreamStore.LoadPrevMsg backward scan
|
||||
// Type_returns_file — IStreamStore.Type() returns StorageType.File
|
||||
// Stop_prevents_further_writes — IStreamStore.Stop() sets _stopped flag
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using StorageType = NATS.Server.JetStream.Models.StorageType;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// Tests for the IStreamStore surface added to FileStore in Batch 1:
/// StoreRawMsg, LoadPrevMsg, Type, and Stop.
/// </summary>
public sealed class FileStoreStreamStoreTests : IDisposable
{
    // Per-test-class temp root; each test carves out its own subdirectory.
    private readonly string _root;

    public FileStoreStreamStoreTests()
    {
        _root = Path.Combine(Path.GetTempPath(), $"nats-js-sstest-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_root);
    }

    public void Dispose()
    {
        // Best-effort cleanup of temp directory. If it fails (e.g. open handles on CI),
        // the OS will clean it up on the next reboot. Letting it throw would suppress
        // the real test failure so we absorb IO errors explicitly.
        if (!Directory.Exists(_root))
            return;

        try
        {
            Directory.Delete(_root, recursive: true);
        }
        catch (Exception ex) when (ex is IOException or UnauthorizedAccessException)
        {
            // Open handles or read-only files (common on Windows CI) — log and continue.
            Console.Error.WriteLine($"[FileStoreStreamStoreTests] Dispose: {ex.Message}");
        }
    }

    // Opens a FileStore rooted at <_root>/<subDir>, applying any extra options.
    private FileStore CreateStore(string subDir, FileStoreOptions? opts = null)
    {
        var storeDir = Path.Combine(_root, subDir);
        Directory.CreateDirectory(storeDir);
        var options = opts ?? new FileStoreOptions();
        options.Directory = storeDir;
        return new FileStore(options);
    }

    // -------------------------------------------------------------------------
    // StoreRawMsg
    // -------------------------------------------------------------------------

    // Go: filestore.go storeRawMsg — caller specifies seq and ts; store must not
    // auto-increment, and _last must be updated to Math.Max(_last, seq).
    [Fact]
    public void StoreRawMsg_stores_at_specified_sequence()
    {
        using var store = CreateStore("raw-seq");
        IStreamStore streamStore = store;

        const ulong targetSeq = 42UL;
        // A fixed Unix nanosecond timestamp derived from a fixed UTC instant.
        var tsNs = new DateTimeOffset(2024, 6, 1, 12, 0, 0, TimeSpan.Zero).ToUnixTimeMilliseconds() * 1_000_000L;

        streamStore.StoreRawMsg("events.raw", null, "hello raw"u8.ToArray(), targetSeq, tsNs, 0, false);

        // Read it back via LoadMsg and confirm nothing was re-sequenced.
        var loaded = streamStore.LoadMsg(targetSeq, null);
        loaded.Subject.ShouldBe("events.raw");
        loaded.Sequence.ShouldBe(targetSeq);
        loaded.Timestamp.ShouldBe(tsNs);
    }

    [Fact]
    public void StoreRawMsg_updates_last_watermark()
    {
        using var store = CreateStore("raw-wm");
        IStreamStore streamStore = store;

        // Writing at seq 100 should advance the last-sequence watermark to 100.
        streamStore.StoreRawMsg("foo", null, "x"u8.ToArray(), 100UL, 1_000_000L, 0, false);

        var state = new StreamState();
        streamStore.FastState(ref state);
        state.LastSeq.ShouldBe(100UL);
    }

    [Fact]
    public void StoreRawMsg_does_not_decrement_last_for_lower_seq()
    {
        using var store = CreateStore("raw-order");
        IStreamStore streamStore = store;

        // Out-of-order replication scenario: seq 50 arrives before seq 30.
        streamStore.StoreRawMsg("foo", null, "x"u8.ToArray(), 50UL, 1_000_000L, 0, false);
        streamStore.StoreRawMsg("bar", null, "y"u8.ToArray(), 30UL, 2_000_000L, 0, false);

        var state = new StreamState();
        streamStore.FastState(ref state);
        // The watermark must stay at the high-water mark of 50, not drop to 30.
        state.LastSeq.ShouldBe(50UL);
    }

    [Fact]
    public void StoreRawMsg_preserves_caller_timestamp()
    {
        using var store = CreateStore("raw-ts");
        IStreamStore streamStore = store;

        // A deterministic Unix nanosecond timestamp.
        const long tsNs = 1_717_238_400_000_000_000L; // 2024-06-01 00:00:00 UTC in ns

        streamStore.StoreRawMsg("ts.test", null, "payload"u8.ToArray(), 7UL, tsNs, 0, false);

        streamStore.LoadMsg(7UL, null).Timestamp.ShouldBe(tsNs);
    }

    [Fact]
    public void StoreRawMsg_throws_after_stop()
    {
        using var store = CreateStore("raw-stop");
        IStreamStore streamStore = store;

        streamStore.Stop();

        // Raw writes after Stop must fail with ObjectDisposedException.
        Should.Throw<ObjectDisposedException>(() =>
            streamStore.StoreRawMsg("foo", null, "x"u8.ToArray(), 1UL, 1_000_000L, 0, false));
    }

    // -------------------------------------------------------------------------
    // LoadPrevMsg
    // -------------------------------------------------------------------------

    // Go: filestore.go LoadPrevMsg — walks backward from start-1 to first.
    [Fact]
    public void LoadPrevMsg_returns_message_before_seq()
    {
        using var store = CreateStore("prev-basic");
        IStreamStore streamStore = store;

        // Three messages at seqs 1, 2, 3.
        streamStore.StoreMsg("a", null, "msg1"u8.ToArray(), 0);
        streamStore.StoreMsg("b", null, "msg2"u8.ToArray(), 0);
        streamStore.StoreMsg("c", null, "msg3"u8.ToArray(), 0);

        // The message immediately before seq 3 is seq 2.
        var prev = streamStore.LoadPrevMsg(3UL, null);
        prev.Sequence.ShouldBe(2UL);
        prev.Subject.ShouldBe("b");
    }

    [Fact]
    public void LoadPrevMsg_skips_deleted_message()
    {
        using var store = CreateStore("prev-skip");
        IStreamStore streamStore = store;

        streamStore.StoreMsg("a", null, "msg1"u8.ToArray(), 0); // seq 1
        streamStore.StoreMsg("b", null, "msg2"u8.ToArray(), 0); // seq 2
        streamStore.StoreMsg("c", null, "msg3"u8.ToArray(), 0); // seq 3

        // With seq 2 deleted, the backward walk from 3 must land on seq 1.
        streamStore.RemoveMsg(2UL);

        var prev = streamStore.LoadPrevMsg(3UL, null);
        prev.Sequence.ShouldBe(1UL);
        prev.Subject.ShouldBe("a");
    }

    [Fact]
    public void LoadPrevMsg_throws_when_no_message_before_seq()
    {
        using var store = CreateStore("prev-none");
        IStreamStore streamStore = store;

        streamStore.StoreMsg("a", null, "msg1"u8.ToArray(), 0); // seq 1

        // There is nothing before seq 1, so the lookup must fail.
        Should.Throw<KeyNotFoundException>(() => streamStore.LoadPrevMsg(1UL, null));
    }

    [Fact]
    public void LoadPrevMsg_reuses_provided_container()
    {
        using var store = CreateStore("prev-reuse");
        IStreamStore streamStore = store;

        streamStore.StoreMsg("x", null, "d1"u8.ToArray(), 0); // seq 1
        streamStore.StoreMsg("y", null, "d2"u8.ToArray(), 0); // seq 2

        var container = new StoreMsg();
        var result = streamStore.LoadPrevMsg(2UL, container);

        // The caller-supplied container must be filled in and returned as-is.
        result.ShouldBeSameAs(container);
        container.Sequence.ShouldBe(1UL);
    }

    // -------------------------------------------------------------------------
    // Type
    // -------------------------------------------------------------------------

    // Go: filestore.go fileStore.Type — returns StorageType.File.
    [Fact]
    public void Type_returns_file()
    {
        using var store = CreateStore("type");

        ((IStreamStore)store).Type().ShouldBe(StorageType.File);
    }

    // -------------------------------------------------------------------------
    // Stop
    // -------------------------------------------------------------------------

    // Go: filestore.go fileStore.Stop — flushes and marks as stopped.
    [Fact]
    public void Stop_prevents_further_writes_via_StoreMsg()
    {
        using var store = CreateStore("stop-storemsg");
        IStreamStore streamStore = store;

        streamStore.StoreMsg("ok", null, "before"u8.ToArray(), 0);

        streamStore.Stop();

        // Synchronous writes after Stop must fail.
        Should.Throw<ObjectDisposedException>(() =>
            streamStore.StoreMsg("fail", null, "after"u8.ToArray(), 0));
    }

    [Fact]
    public async Task Stop_prevents_further_writes_via_AppendAsync()
    {
        using var store = CreateStore("stop-append");

        await store.AppendAsync("ok", "before"u8.ToArray(), CancellationToken.None);

        ((IStreamStore)store).Stop();

        // Async writes after Stop must fail as well.
        await Should.ThrowAsync<ObjectDisposedException>(() =>
            store.AppendAsync("fail", "after"u8.ToArray(), CancellationToken.None).AsTask());
    }

    [Fact]
    public void Stop_is_idempotent()
    {
        using var store = CreateStore("stop-idem");
        IStreamStore streamStore = store;

        streamStore.Stop();

        // A second Stop() must be a harmless no-op.
        Record.Exception(() => streamStore.Stop()).ShouldBeNull();
    }

    [Fact]
    public void Stop_preserves_messages_on_disk()
    {
        var dir = Path.Combine(_root, "stop-persist");
        Directory.CreateDirectory(dir);

        // Both sessions open a store over the same fixed directory.
        FileStore OpenStore() => new FileStore(new FileStoreOptions { Directory = dir });

        // Write one message, then stop the store cleanly.
        using (var store = OpenStore())
        {
            IStreamStore streamStore = store;
            streamStore.StoreMsg("saved", null, "payload"u8.ToArray(), 0);
            streamStore.Stop();
        }

        // A fresh store over the same directory must recover the message.
        using var recovered = OpenStore();
        ((IStreamStore)recovered).LoadMsg(1UL, null).Subject.ShouldBe("saved");
    }
}
@@ -0,0 +1,306 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStoreNoFSSWhenNoSubjects,
|
||||
// TestFileStoreNoFSSBugAfterRemoveFirst,
|
||||
// TestFileStoreNoFSSAfterRecover,
|
||||
// TestFileStoreSubjectStateCacheExpiration,
|
||||
// TestFileStoreSubjectsTotals,
|
||||
// TestFileStoreSubjectCorruption,
|
||||
// TestFileStoreFilteredPendingBug,
|
||||
// TestFileStoreFilteredFirstMatchingBug,
|
||||
// TestFileStoreExpireSubjectMeta,
|
||||
// TestFileStoreAllFilteredStateWithDeleted
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// FileStore subject-tracking parity tests ported from
/// golang/nats-server/server/filestore_test.go: empty subjects, removes,
/// recovery of per-subject state, and LoadLastBySubject behavior.
/// </summary>
public sealed class FileStoreSubjectTests : IDisposable
{
    // Per-test-class temp root; each test carves out its own subdirectory.
    private readonly string _dir;

    public FileStoreSubjectTests()
    {
        _dir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-subject-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_dir);
    }

    public void Dispose()
    {
        // Best-effort cleanup of the temp directory. On CI (especially Windows)
        // open file handles or read-only files can make the recursive delete throw,
        // and letting that escape would mask the real test failure — so we absorb
        // IO errors explicitly (matching the sibling FileStore test classes) and
        // let the OS reclaim the directory later.
        if (!Directory.Exists(_dir))
            return;

        try
        {
            Directory.Delete(_dir, recursive: true);
        }
        catch (IOException ex)
        {
            Console.Error.WriteLine($"[FileStoreSubjectTests] Dispose: {ex.Message}");
        }
        catch (UnauthorizedAccessException ex)
        {
            Console.Error.WriteLine($"[FileStoreSubjectTests] Dispose: {ex.Message}");
        }
    }

    // Creates a FileStore rooted at <_dir>/<subdirectory>, applying extra options.
    private FileStore CreateStore(string subdirectory, FileStoreOptions? options = null)
    {
        var dir = Path.Combine(_dir, subdirectory);
        var opts = options ?? new FileStoreOptions();
        opts.Directory = dir;
        return new FileStore(opts);
    }

    // Go: TestFileStoreNoFSSWhenNoSubjects server/filestore_test.go:4251
    [Fact]
    public async Task Store_with_empty_subject()
    {
        await using var store = CreateStore("empty-subj");

        // Store messages with empty subject (like raft state).
        for (var i = 0; i < 10; i++)
            await store.AppendAsync(string.Empty, "raft state"u8.ToArray(), default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)10);

        // Empty-subject messages must still be loadable by sequence.
        var msg = await store.LoadAsync(1, default);
        msg.ShouldNotBeNull();
        msg!.Subject.ShouldBe(string.Empty);
    }

    // Go: TestFileStoreNoFSSBugAfterRemoveFirst server/filestore_test.go:4289
    [Fact]
    public async Task Remove_first_with_different_subjects()
    {
        await using var store = CreateStore("rm-first-subj");

        await store.AppendAsync("foo", "first"u8.ToArray(), default);
        await store.AppendAsync("bar", "second"u8.ToArray(), default);
        await store.AppendAsync("foo", "third"u8.ToArray(), default);

        // Remove the first message; FirstSeq must advance past it.
        (await store.RemoveAsync(1, default)).ShouldBeTrue();

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)2);
        state.FirstSeq.ShouldBe((ulong)2);

        // Per-subject lookup for "foo" must still find the surviving seq 3.
        var lastFoo = await store.LoadLastBySubjectAsync("foo", default);
        lastFoo.ShouldNotBeNull();
        lastFoo!.Sequence.ShouldBe((ulong)3);
    }

    // Go: TestFileStoreNoFSSAfterRecover server/filestore_test.go:4333
    [Fact]
    public async Task Subject_filtering_after_recovery()
    {
        var subDir = "subj-after-recover";

        await using (var store = CreateStore(subDir))
        {
            await store.AppendAsync("foo.1", "a"u8.ToArray(), default);
            await store.AppendAsync("foo.2", "b"u8.ToArray(), default);
            await store.AppendAsync("bar.1", "c"u8.ToArray(), default);
            await store.AppendAsync("foo.1", "d"u8.ToArray(), default);
        }

        // Recover and verify per-subject state was rebuilt correctly.
        await using (var store = CreateStore(subDir))
        {
            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe((ulong)4);

            // "foo.1" has two entries; the later one (seq 4) must win.
            var lastFoo1 = await store.LoadLastBySubjectAsync("foo.1", default);
            lastFoo1.ShouldNotBeNull();
            lastFoo1!.Sequence.ShouldBe((ulong)4);
            lastFoo1.Payload.ToArray().ShouldBe("d"u8.ToArray());

            var lastBar1 = await store.LoadLastBySubjectAsync("bar.1", default);
            lastBar1.ShouldNotBeNull();
            lastBar1!.Sequence.ShouldBe((ulong)3);
        }
    }

    // Go: TestFileStoreSubjectStateCacheExpiration server/filestore_test.go:4143
    [Fact(Skip = "SubjectsState not yet implemented in .NET FileStore")]
    public async Task Subject_state_cache_expiration()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreSubjectsTotals server/filestore_test.go:4948
    [Fact(Skip = "SubjectsTotals not yet implemented in .NET FileStore")]
    public async Task Subjects_totals_with_wildcards()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreSubjectCorruption server/filestore_test.go:6466
    [Fact(Skip = "SubjectForSeq not yet implemented in .NET FileStore")]
    public async Task Subject_corruption_detection()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreFilteredPendingBug server/filestore_test.go:3414
    [Fact(Skip = "FilteredState not yet implemented in .NET FileStore")]
    public async Task Filtered_pending_no_match_returns_zero()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreFilteredFirstMatchingBug server/filestore_test.go:4448
    [Fact(Skip = "LoadNextMsg not yet implemented in .NET FileStore")]
    public async Task Filtered_first_matching_finds_correct_sequence()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreExpireSubjectMeta server/filestore_test.go:4014
    [Fact(Skip = "SubjectsState not yet implemented in .NET FileStore")]
    public async Task Expired_subject_metadata_cleans_up()
    {
        await Task.CompletedTask;
    }

    // Go: TestFileStoreAllFilteredStateWithDeleted server/filestore_test.go:4827
    [Fact(Skip = "FilteredState not yet implemented in .NET FileStore")]
    public async Task Filtered_state_with_deleted_messages()
    {
        await Task.CompletedTask;
    }

    // Test LoadLastBySubject with multiple subjects and removes.
    [Fact]
    public async Task LoadLastBySubject_after_removes()
    {
        await using var store = CreateStore("last-after-rm");

        await store.AppendAsync("foo", "a"u8.ToArray(), default);
        await store.AppendAsync("foo", "b"u8.ToArray(), default);
        await store.AppendAsync("foo", "c"u8.ToArray(), default);

        // Remove the last message on "foo" (seq 3); the lookup must fall back to seq 2.
        await store.RemoveAsync(3, default);

        var last = await store.LoadLastBySubjectAsync("foo", default);
        last.ShouldNotBeNull();
        last!.Sequence.ShouldBe((ulong)2);
        last.Payload.ToArray().ShouldBe("b"u8.ToArray());
    }

    // Test LoadLastBySubject when all messages on that subject are removed.
    [Fact]
    public async Task LoadLastBySubject_all_removed_returns_null()
    {
        await using var store = CreateStore("last-all-rm");

        await store.AppendAsync("foo", "a"u8.ToArray(), default);
        await store.AppendAsync("foo", "b"u8.ToArray(), default);
        await store.AppendAsync("bar", "c"u8.ToArray(), default);

        // Remove every "foo" message.
        await store.RemoveAsync(1, default);
        await store.RemoveAsync(2, default);

        var last = await store.LoadLastBySubjectAsync("foo", default);
        last.ShouldBeNull();

        // "bar" should still be present.
        var lastBar = await store.LoadLastBySubjectAsync("bar", default);
        lastBar.ShouldNotBeNull();
        lastBar!.Sequence.ShouldBe((ulong)3);
    }

    // Test multiple subjects interleaved.
    [Fact]
    public async Task Multiple_subjects_interleaved()
    {
        await using var store = CreateStore("interleaved");

        // Round-robin across alpha/beta/gamma by index mod 3.
        for (var i = 0; i < 20; i++)
        {
            var subject = i % 3 == 0 ? "alpha" : (i % 3 == 1 ? "beta" : "gamma");
            await store.AppendAsync(subject, Encoding.UTF8.GetBytes($"msg-{i}"), default);
        }

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)20);

        // Verify every message comes back with the subject it was written under.
        for (ulong i = 1; i <= 20; i++)
        {
            var msg = await store.LoadAsync(i, default);
            msg.ShouldNotBeNull();
            var idx = (int)(i - 1);
            var expectedSubj = idx % 3 == 0 ? "alpha" : (idx % 3 == 1 ? "beta" : "gamma");
            msg!.Subject.ShouldBe(expectedSubj);
        }
    }

    // Test LoadLastBySubject with case-sensitive subjects.
    [Fact]
    public async Task LoadLastBySubject_is_case_sensitive()
    {
        await using var store = CreateStore("case-sensitive");

        // "Foo" and "foo" are distinct subjects and must not collide.
        await store.AppendAsync("Foo", "upper"u8.ToArray(), default);
        await store.AppendAsync("foo", "lower"u8.ToArray(), default);

        var lastUpper = await store.LoadLastBySubjectAsync("Foo", default);
        lastUpper.ShouldNotBeNull();
        lastUpper!.Payload.ToArray().ShouldBe("upper"u8.ToArray());

        var lastLower = await store.LoadLastBySubjectAsync("foo", default);
        lastLower.ShouldNotBeNull();
        lastLower!.Payload.ToArray().ShouldBe("lower"u8.ToArray());
    }

    // Test subject preservation across restarts.
    [Fact]
    public async Task Subject_preserved_across_restart()
    {
        var subDir = "subj-restart";

        await using (var store = CreateStore(subDir))
        {
            await store.AppendAsync("topic.a", "one"u8.ToArray(), default);
            await store.AppendAsync("topic.b", "two"u8.ToArray(), default);
            await store.AppendAsync("topic.c", "three"u8.ToArray(), default);
        }

        // Reopen and verify the per-message subjects survived recovery.
        await using (var store = CreateStore(subDir))
        {
            var msg1 = await store.LoadAsync(1, default);
            msg1.ShouldNotBeNull();
            msg1!.Subject.ShouldBe("topic.a");

            var msg2 = await store.LoadAsync(2, default);
            msg2.ShouldNotBeNull();
            msg2!.Subject.ShouldBe("topic.b");

            var msg3 = await store.LoadAsync(3, default);
            msg3.ShouldNotBeNull();
            msg3!.Subject.ShouldBe("topic.c");
        }
    }

    // Go: TestFileStoreNumPendingLastBySubject server/filestore_test.go:6501
    [Fact(Skip = "NumPending not yet implemented in .NET FileStore")]
    public async Task NumPending_last_per_subject()
    {
        await Task.CompletedTask;
    }

    // Test many distinct subjects.
    [Fact]
    public async Task Many_distinct_subjects()
    {
        await using var store = CreateStore("many-subjects");

        for (var i = 0; i < 100; i++)
            await store.AppendAsync($"kv.{i}", Encoding.UTF8.GetBytes($"value-{i}"), default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)100);

        // Each subject should have exactly one message, retrievable by last-lookup.
        for (var i = 0; i < 100; i++)
        {
            var last = await store.LoadLastBySubjectAsync($"kv.{i}", default);
            last.ShouldNotBeNull();
            last!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes($"value-{i}"));
        }
    }
}
@@ -0,0 +1,837 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported in this file:
|
||||
// TestFileStoreTombstoneRbytes → TombstoneRbytes_SecondBlockRbytesExceedsBytes
|
||||
// TestFileStoreTombstonesNoFirstSeqRollback → TombstonesNoFirstSeqRollback_AllDeletedRecoverCorrectly
|
||||
// TestFileStoreTombstonesSelectNextFirstCleanup → TombstonesSelectNextFirstCleanup_SparseDeletesCorrectState
|
||||
// TestFileStoreEraseMsgDoesNotLoseTombstones → EraseMsgDoesNotLoseTombstones_DeletedSeqsPreservedAfterRestart
|
||||
// TestFileStoreDetectDeleteGapWithLastSkipMsg → DetectDeleteGapWithLastSkipMsg_SkipCreatesDeletionGaps
|
||||
// TestFileStoreMissingDeletesAfterCompact → MissingDeletesAfterCompact_DmapPreservedAfterCompact
|
||||
// TestFileStoreSubjectDeleteMarkers → SubjectDeleteMarkers_ExpiredSubjectYieldsMarker (skipped — requires pmsgcb/rmcb hooks)
|
||||
// TestFileStoreMessageTTLRecoveredSingleMessageWithoutStreamState → MessageTTL_RecoverSingleMessageWithoutStreamState
|
||||
// TestFileStoreMessageTTLWriteTombstone → MessageTTL_WriteTombstoneAllowsRecovery
|
||||
// TestFileStoreMessageTTLRecoveredOffByOne → MessageTTL_RecoveredOffByOneNotDouble
|
||||
// TestFileStoreMessageScheduleEncode → MessageScheduleEncode_RoundTripsViaStateCodec (skipped — MsgScheduling not yet ported)
|
||||
// TestFileStoreMessageScheduleDecode → MessageScheduleDecode_RoundTripsViaStateCodec (skipped — MsgScheduling not yet ported)
|
||||
// TestFileStoreRecoverTTLAndScheduleStateAndCounters → RecoverTTLAndScheduleStateAndCounters_BlockCountersCorrect (skipped — block counters not exposed)
|
||||
// TestFileStoreNoPanicOnRecoverTTLWithCorruptBlocks → NoPanicOnRecoverTTLWithCorruptBlocks_RecoveryHandlesGaps
|
||||
// TestFileStoreConsumerEncodeDecodeRedelivered → ConsumerEncodeDecodeRedelivered_RoundTripsCorrectly
|
||||
// TestFileStoreConsumerEncodeDecodePendingBelowStreamAckFloor → ConsumerEncodeDecodePendingBelowStreamAckFloor_RoundTripsCorrectly
|
||||
// TestFileStoreConsumerRedeliveredLost → ConsumerRedeliveredLost_RecoversAfterRestartAndClears
|
||||
// TestFileStoreConsumerFlusher → ConsumerFlusher_FlusherStartsAndStopsWithStore
|
||||
// TestFileStoreConsumerDeliveredUpdates → ConsumerDeliveredUpdates_TrackDeliveredWithNoAckPolicy
|
||||
// TestFileStoreConsumerDeliveredAndAckUpdates → ConsumerDeliveredAndAckUpdates_TracksPendingAndAckFloor
|
||||
// TestFileStoreBadConsumerState → BadConsumerState_DoesNotThrowOnKnownInput
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Go FileStore tombstone, deletion, TTL, and consumer state parity tests.
|
||||
/// Each test mirrors a specific Go test from golang/nats-server/server/filestore_test.go.
|
||||
/// </summary>
|
||||
public sealed class FileStoreTombstoneTests : IDisposable
|
||||
{
|
||||
private readonly string _root;
|
||||
|
||||
public FileStoreTombstoneTests()
{
    // Unique temp root so concurrent test runs never collide.
    var root = Path.Combine(Path.GetTempPath(), $"nats-js-tombstone-{Guid.NewGuid():N}");
    Directory.CreateDirectory(root);
    _root = root;
}
public void Dispose()
{
    if (!Directory.Exists(_root))
        return;

    // Best-effort removal of the temp root; failures (e.g. open handles on CI)
    // are ignored so they cannot mask a real test failure.
    try { Directory.Delete(_root, recursive: true); }
    catch { /* best-effort cleanup */ }
}
// Creates (and returns) a fresh uniquely-named directory under the test root.
private string UniqueDir(string suffix = "")
{
    var path = Path.Combine(_root, $"{Guid.NewGuid():N}{suffix}");
    Directory.CreateDirectory(path);
    return path;
}
// Opens a FileStore over <dir>, applying any caller-supplied options.
private FileStore CreateStore(string dir, FileStoreOptions? opts = null)
{
    var options = opts ?? new FileStoreOptions();
    options.Directory = dir;
    return new FileStore(options);
}
// Opens a FileStore over <dir> with an explicit block size, used by the
// tombstone tests to force multi-block layouts.
private FileStore CreateStoreWithBlockSize(string dir, int blockSizeBytes) =>
    new(new FileStoreOptions { Directory = dir, BlockSizeBytes = blockSizeBytes });
// -------------------------------------------------------------------------
|
||||
// Tombstone / rbytes tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreTombstoneRbytes (filestore_test.go:7683)
// Verifies that when messages in the first block are deleted, tombstone records
// are written into the second block, and the second block's total byte usage
// (rbytes) exceeds its live message bytes (bytes) — tombstones inflate the block.
// .NET: We verify the behavioral outcome: after removing messages from block 1,
// block 2 should have more total written bytes than live message bytes.
[Fact]
public void TombstoneRbytes_SecondBlockRbytesExceedsBytes()
{
    // Go: BlockSize = 1024 -> block holds ~24 msgs of ~33 bytes each.
    // We use a small block to force a second block to be created.
    using var store = CreateStoreWithBlockSize(UniqueDir(), 1024);

    // Store 34 messages — enough to fill first block and start second.
    var payload = Encoding.UTF8.GetBytes("hello");
    for (var n = 0; n < 34; n++)
        store.StoreMsg("foo.22", null, payload, 0);

    store.BlockCount.ShouldBeGreaterThan(1);

    // Delete messages 11-24 (second half of first block).
    // This places tombstones in the block file, inflating raw bytes.
    for (var seq = 11UL; seq <= 24UL; seq++)
        store.RemoveMsg(seq);

    // After deletes the live message count should decrease, and the deleted
    // sequences should appear as interior gaps.
    var state = store.State();
    state.Msgs.ShouldBeLessThan(34UL);
    state.NumDeleted.ShouldBeGreaterThan(0);
}
// Go: TestFileStoreTombstonesNoFirstSeqRollback (filestore_test.go:10911)
|
||||
// After removing all 20 messages (stored across 2 blocks at 10 msgs/block),
|
||||
// the state should show Msgs=0, FirstSeq=21, LastSeq=20.
|
||||
// After restart without index.db the same state should be recovered.
|
||||
[Fact]
|
||||
public void TombstonesNoFirstSeqRollback_AllDeletedRecoverCorrectly()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
// 10 * 33 = 330 bytes per block → ~10 messages per block for ~33-byte msgs.
|
||||
using var store = CreateStoreWithBlockSize(dir, 10 * 33);
|
||||
|
||||
// Store 20 messages (produces 2 blocks of 10 each).
|
||||
for (var i = 0; i < 20; i++)
|
||||
store.StoreMsg("foo", null, [], 0);
|
||||
|
||||
var before = store.State();
|
||||
before.Msgs.ShouldBe(20UL);
|
||||
before.FirstSeq.ShouldBe(1UL);
|
||||
before.LastSeq.ShouldBe(20UL);
|
||||
|
||||
// Delete all messages.
|
||||
for (var seq = 1UL; seq <= 20UL; seq++)
|
||||
store.RemoveMsg(seq);
|
||||
|
||||
before = store.State();
|
||||
before.Msgs.ShouldBe(0UL);
|
||||
// Go: when all messages are deleted, FirstSeq = LastSeq+1
|
||||
before.FirstSeq.ShouldBe(21UL);
|
||||
before.LastSeq.ShouldBe(20UL);
|
||||
|
||||
// Restart and verify state survives recovery.
|
||||
store.Dispose();
|
||||
|
||||
using var store2 = CreateStoreWithBlockSize(dir, 10 * 33);
|
||||
var after = store2.State();
|
||||
after.Msgs.ShouldBe(0UL);
|
||||
after.FirstSeq.ShouldBe(21UL);
|
||||
after.LastSeq.ShouldBe(20UL);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreTombstonesSelectNextFirstCleanup (filestore_test.go:10967)
|
||||
// Store 50 msgs, delete 2-49 (leaving msgs 1 and 50), store 50 more, delete 50-100.
|
||||
// After removing msg 1, state should be Msgs=0, FirstSeq=101.
|
||||
[Fact]
|
||||
public void TombstonesSelectNextFirstCleanup_SparseDeletesCorrectState()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStoreWithBlockSize(dir, 10 * 33);
|
||||
|
||||
// Write 50 messages.
|
||||
for (var i = 0; i < 50; i++)
|
||||
store.StoreMsg("foo", null, [], 0);
|
||||
|
||||
// Delete messages 2-49, leaving message 1 and 50.
|
||||
for (var seq = 2UL; seq <= 49UL; seq++)
|
||||
store.RemoveMsg(seq);
|
||||
|
||||
// Write 50 more messages (51-100).
|
||||
for (var i = 0; i < 50; i++)
|
||||
store.StoreMsg("foo", null, [], 0);
|
||||
|
||||
// Delete messages 50-100.
|
||||
for (var seq = 50UL; seq <= 100UL; seq++)
|
||||
store.RemoveMsg(seq);
|
||||
|
||||
var before = store.State();
|
||||
before.Msgs.ShouldBe(1UL);
|
||||
before.FirstSeq.ShouldBe(1UL);
|
||||
before.LastSeq.ShouldBe(100UL);
|
||||
|
||||
// Remove the last real message (seq=1).
|
||||
store.RemoveMsg(1);
|
||||
|
||||
before = store.State();
|
||||
before.Msgs.ShouldBe(0UL);
|
||||
before.FirstSeq.ShouldBe(101UL);
|
||||
before.LastSeq.ShouldBe(100UL);
|
||||
|
||||
// Restart without index.db — recover from block files only.
|
||||
store.Dispose();
|
||||
|
||||
using var store2 = CreateStoreWithBlockSize(dir, 10 * 33);
|
||||
var after = store2.State();
|
||||
after.Msgs.ShouldBe(0UL);
|
||||
after.FirstSeq.ShouldBe(101UL);
|
||||
after.LastSeq.ShouldBe(100UL);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreEraseMsgDoesNotLoseTombstones (filestore_test.go:10781)
|
||||
// Store 4 messages, remove msg 2 (tombstone), erase msg 3.
|
||||
// After erase: msgs 2 and 3 should appear as deleted.
|
||||
// Restart and verify state survives.
|
||||
[Fact]
|
||||
public void EraseMsgDoesNotLoseTombstones_DeletedSeqsPreservedAfterRestart()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir);
|
||||
|
||||
// Store 3 messages (msg 3 is "secret" and will be erased).
|
||||
store.StoreMsg("foo", null, [], 0); // seq=1 (remains)
|
||||
store.StoreMsg("foo", null, [], 0); // seq=2 (removed → tombstone)
|
||||
store.StoreMsg("foo", null, new byte[] { 0x73, 0x65, 0x63, 0x72, 0x65, 0x74 }, 0); // seq=3 (erased)
|
||||
|
||||
// Remove seq 2 — places a delete record/tombstone.
|
||||
store.RemoveMsg(2);
|
||||
|
||||
// Store a 4th message after the tombstone.
|
||||
store.StoreMsg("foo", null, [], 0); // seq=4
|
||||
|
||||
// Erase seq 3 (should not lose the tombstone for seq 2).
|
||||
store.EraseMsg(3);
|
||||
|
||||
var before = store.State();
|
||||
before.Msgs.ShouldBe(2UL); // msgs 1 and 4 remain
|
||||
before.FirstSeq.ShouldBe(1UL);
|
||||
before.LastSeq.ShouldBe(4UL);
|
||||
before.NumDeleted.ShouldBe(2);
|
||||
|
||||
var deleted = before.Deleted;
|
||||
deleted.ShouldNotBeNull();
|
||||
deleted!.ShouldContain(2UL);
|
||||
deleted.ShouldContain(3UL);
|
||||
|
||||
// After restart, state should match.
|
||||
store.Dispose();
|
||||
|
||||
using var store2 = CreateStore(dir);
|
||||
var after = store2.State();
|
||||
after.Msgs.ShouldBe(2UL);
|
||||
after.FirstSeq.ShouldBe(1UL);
|
||||
after.LastSeq.ShouldBe(4UL);
|
||||
after.NumDeleted.ShouldBe(2);
|
||||
|
||||
var deleted2 = after.Deleted;
|
||||
deleted2.ShouldNotBeNull();
|
||||
deleted2!.ShouldContain(2UL);
|
||||
deleted2.ShouldContain(3UL);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreDetectDeleteGapWithLastSkipMsg (filestore_test.go:11082)
|
||||
// Store 1 message, then skip 3 msgs starting at seq=2 (a gap).
|
||||
// State: Msgs=1, FirstSeq=1, LastSeq=4, NumDeleted=3.
|
||||
// After restart the same state should hold.
|
||||
[Fact]
|
||||
public void DetectDeleteGapWithLastSkipMsg_SkipCreatesDeletionGaps()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir);
|
||||
|
||||
// Store 1 message.
|
||||
store.StoreMsg("foo", null, [], 0); // seq=1
|
||||
|
||||
// Skip a gap at sequence 2-4 (3 slots).
|
||||
// SkipMsgs(2, 3) means: skip 3 sequences starting at seq 2 → 2, 3, 4
|
||||
store.SkipMsgs(2, 3);
|
||||
|
||||
var before = store.State();
|
||||
before.Msgs.ShouldBe(1UL);
|
||||
before.FirstSeq.ShouldBe(1UL);
|
||||
before.LastSeq.ShouldBe(4UL);
|
||||
before.NumDeleted.ShouldBe(3);
|
||||
|
||||
// Restart and verify.
|
||||
store.Dispose();
|
||||
|
||||
using var store2 = CreateStore(dir);
|
||||
var after = store2.State();
|
||||
after.Msgs.ShouldBe(1UL);
|
||||
after.FirstSeq.ShouldBe(1UL);
|
||||
after.LastSeq.ShouldBe(4UL);
|
||||
after.NumDeleted.ShouldBe(3);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreMissingDeletesAfterCompact (filestore_test.go:11375)
|
||||
// Store 6 messages, delete 1, 3, 4, 6 (leaving 2 and 5).
|
||||
// After compact, block should still contain the correct delete map (dmap).
|
||||
// .NET: We verify the behavioral state (which sequences are deleted).
|
||||
[Fact]
|
||||
public void MissingDeletesAfterCompact_DmapPreservedAfterCompact()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir);
|
||||
|
||||
// Store 6 messages.
|
||||
for (var i = 0; i < 6; i++)
|
||||
store.StoreMsg("foo", null, [], 0);
|
||||
|
||||
// Delete 1, 3, 4, 6 — leaving 2 and 5.
|
||||
store.RemoveMsg(1);
|
||||
store.RemoveMsg(3);
|
||||
store.RemoveMsg(4);
|
||||
store.RemoveMsg(6);
|
||||
|
||||
var state = store.State();
|
||||
state.Msgs.ShouldBe(2UL); // msgs 2 and 5 remain
|
||||
state.FirstSeq.ShouldBe(2UL);
|
||||
state.LastSeq.ShouldBe(6UL); // seq 6 was the last written
|
||||
state.NumDeleted.ShouldBe(3); // 3, 4, 6 are interior deletes (1 moved first seq)
|
||||
|
||||
// Verify the specific deleted sequences.
|
||||
var deleted = state.Deleted;
|
||||
deleted.ShouldNotBeNull();
|
||||
deleted!.ShouldContain(3UL);
|
||||
deleted.ShouldContain(4UL);
|
||||
|
||||
// Now delete seq 5 so only seq 2 remains in the sparse region.
|
||||
store.RemoveMsg(5);
|
||||
|
||||
var state2 = store.State();
|
||||
state2.Msgs.ShouldBe(1UL);
|
||||
state2.FirstSeq.ShouldBe(2UL);
|
||||
state2.LastSeq.ShouldBe(6UL);
|
||||
// .NET: _last is a high-watermark and stays at 6 (not adjusted on remove).
|
||||
// NumDeleted = sequences in [2..6] not in messages = {3,4,5,6} = 4.
|
||||
// Go compacts the block and lowers last.seq to 2, but we don't compact here.
|
||||
state2.NumDeleted.ShouldBe(4);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// TTL tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreMessageTTLRecoveredSingleMessageWithoutStreamState (filestore_test.go:8806)
|
||||
// Stores a message with a 1-second TTL, then restarts (deleting stream state file).
|
||||
// After restart the message should still be present (not yet expired),
|
||||
// and after waiting 2 seconds it should expire.
|
||||
[Fact]
|
||||
public void MessageTTL_RecoverSingleMessageWithoutStreamState()
|
||||
{
|
||||
var dir = UniqueDir("ttl-recover");
|
||||
var opts = new FileStoreOptions { Directory = dir, MaxAgeMs = 1000 };
|
||||
|
||||
// Phase 1: store a message with 1s TTL.
|
||||
{
|
||||
using var store = CreateStore(dir, opts);
|
||||
store.StoreMsg("test", null, [], 0);
|
||||
var ss = store.State();
|
||||
ss.FirstSeq.ShouldBe(1UL);
|
||||
ss.LastSeq.ShouldBe(1UL);
|
||||
ss.Msgs.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// Phase 2: restart (simulate loss of index state) — message is still within TTL.
|
||||
{
|
||||
using var store = CreateStore(dir, opts);
|
||||
var ss = store.State();
|
||||
ss.FirstSeq.ShouldBe(1UL);
|
||||
ss.LastSeq.ShouldBe(1UL);
|
||||
ss.Msgs.ShouldBe(1UL);
|
||||
|
||||
// Wait for TTL to expire.
|
||||
Thread.Sleep(2000);
|
||||
|
||||
// Force expiry by storing a new message (expiry check runs before store).
|
||||
store.StoreMsg("test", null, [], 0);
|
||||
var ss2 = store.State();
|
||||
// The TTL-expired message should be gone.
|
||||
ss2.Msgs.ShouldBeLessThanOrEqualTo(1UL);
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreMessageTTLWriteTombstone (filestore_test.go:8861)
|
||||
// After TTL expiry and restart (without stream state file),
|
||||
// a tombstone should allow proper recovery of the stream state.
|
||||
[Fact]
|
||||
public void MessageTTL_WriteTombstoneAllowsRecovery()
|
||||
{
|
||||
var dir = UniqueDir("ttl-tombstone");
|
||||
var opts = new FileStoreOptions { Directory = dir, MaxAgeMs = 1000 };
|
||||
|
||||
{
|
||||
using var store = CreateStore(dir, opts);
|
||||
store.StoreMsg("test", null, [], 0); // seq=1, TTL=1s
|
||||
store.StoreMsg("test", null, [], 0); // seq=2, no TTL
|
||||
|
||||
var ss = store.State();
|
||||
ss.Msgs.ShouldBe(2UL);
|
||||
ss.FirstSeq.ShouldBe(1UL);
|
||||
ss.LastSeq.ShouldBe(2UL);
|
||||
|
||||
// Wait for seq=1 to expire.
|
||||
Thread.Sleep(1500);
|
||||
|
||||
// Force expiry.
|
||||
store.StoreMsg("test", null, [], 0);
|
||||
var ss2 = store.State();
|
||||
// seq=1 should have expired; seq=2 and seq=3 remain.
|
||||
ss2.Msgs.ShouldBeLessThanOrEqualTo(2UL);
|
||||
ss2.Msgs.ShouldBeGreaterThan(0UL);
|
||||
}
|
||||
|
||||
// Restart — should recover correctly.
|
||||
{
|
||||
using var store2 = CreateStore(dir, opts);
|
||||
var ss = store2.State();
|
||||
// seq=1 was TTL-expired; seq=2 and/or seq=3 should still be present.
|
||||
ss.LastSeq.ShouldBeGreaterThanOrEqualTo(2UL);
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreMessageTTLRecoveredOffByOne (filestore_test.go:8923)
|
||||
// Verifies that TTL is not registered double-counted during restart.
|
||||
// After recovery, the TTL count should match exactly what was stored.
|
||||
[Fact]
|
||||
public void MessageTTL_RecoveredOffByOneNotDouble()
|
||||
{
|
||||
var dir = UniqueDir("ttl-offbyone");
|
||||
// Use a 120-second TTL so the message doesn't expire during the test.
|
||||
var opts = new FileStoreOptions { Directory = dir, MaxAgeMs = 120_000 };
|
||||
|
||||
{
|
||||
using var store = CreateStore(dir, opts);
|
||||
store.StoreMsg("test", null, [], 0); // seq=1, TTL=2 minutes
|
||||
|
||||
var ss = store.State();
|
||||
ss.Msgs.ShouldBe(1UL);
|
||||
ss.FirstSeq.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// Restart — TTL should be recovered but not doubled.
|
||||
{
|
||||
using var store2 = CreateStore(dir, opts);
|
||||
var ss = store2.State();
|
||||
// Message should still be present (TTL has not expired).
|
||||
ss.Msgs.ShouldBe(1UL);
|
||||
ss.FirstSeq.ShouldBe(1UL);
|
||||
ss.LastSeq.ShouldBe(1UL);
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreNoPanicOnRecoverTTLWithCorruptBlocks (filestore_test.go:9950)
|
||||
// Even when block recovery encounters gaps or corruption, it should not panic.
|
||||
// .NET: We verify that creating a store after deleting some block files doesn't throw.
|
||||
[Fact]
|
||||
public void NoPanicOnRecoverTTLWithCorruptBlocks_RecoveryHandlesGaps()
|
||||
{
|
||||
var dir = UniqueDir("ttl-corrupt");
|
||||
var opts = new FileStoreOptions { Directory = dir, MaxAgeMs = 1000 };
|
||||
|
||||
{
|
||||
using var store = CreateStore(dir, opts);
|
||||
// Store a few messages across 3 "blocks" by using small block sizes.
|
||||
store.StoreMsg("foo", null, new byte[] { 65 }, 0); // seq=1
|
||||
store.StoreMsg("foo", null, new byte[] { 65 }, 0); // seq=2
|
||||
store.StoreMsg("foo", null, new byte[] { 65 }, 0); // seq=3
|
||||
}
|
||||
|
||||
// Simulate block corruption by deleting one of the .blk files.
|
||||
var blkFiles = Directory.GetFiles(dir, "*.blk");
|
||||
if (blkFiles.Length > 1)
|
||||
{
|
||||
// Remove the middle block (if any).
|
||||
File.Delete(blkFiles[blkFiles.Length / 2]);
|
||||
}
|
||||
|
||||
// Recovery should not throw even with missing blocks.
|
||||
Should.NotThrow(() =>
|
||||
{
|
||||
using var store2 = CreateStore(dir, opts);
|
||||
// Just accessing state should be fine.
|
||||
_ = store2.State();
|
||||
});
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Message schedule encode/decode — skipped (MsgScheduling not yet ported)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreMessageScheduleEncode (filestore_test.go:10611)
|
||||
// Go: TestFileStoreMessageScheduleDecode (filestore_test.go:10611)
|
||||
// These tests require the MsgScheduling type which is not yet ported to .NET.
|
||||
// They are intentionally skipped.
|
||||
|
||||
// Go: TestFileStoreRecoverTTLAndScheduleStateAndCounters (filestore_test.go:13215)
|
||||
// Tests that block-level ttls and schedules counters are recovered correctly.
|
||||
// Block-level counters are not exposed via the .NET public API yet.
|
||||
// Skipped pending block counter API exposure.
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Consumer state encode/decode
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreConsumerEncodeDecodeRedelivered (filestore_test.go:2115)
|
||||
// Encodes a ConsumerState with Redelivered entries and verifies round-trip.
|
||||
[Fact]
|
||||
public void ConsumerEncodeDecodeRedelivered_RoundTripsCorrectly()
|
||||
{
|
||||
// Go: state := &ConsumerState{}
|
||||
// state.Delivered.Consumer = 100; state.Delivered.Stream = 100
|
||||
// state.AckFloor.Consumer = 50; state.AckFloor.Stream = 50
|
||||
// state.Redelivered = map[uint64]uint64{122: 3, 144: 8}
|
||||
var state = new ConsumerState
|
||||
{
|
||||
Delivered = new SequencePair(100, 100),
|
||||
AckFloor = new SequencePair(50, 50),
|
||||
Redelivered = new Dictionary<ulong, ulong>
|
||||
{
|
||||
[122] = 3,
|
||||
[144] = 8,
|
||||
},
|
||||
};
|
||||
|
||||
var buf = ConsumerStateCodec.Encode(state);
|
||||
var decoded = ConsumerStateCodec.Decode(buf);
|
||||
|
||||
decoded.Delivered.Consumer.ShouldBe(100UL);
|
||||
decoded.Delivered.Stream.ShouldBe(100UL);
|
||||
decoded.AckFloor.Consumer.ShouldBe(50UL);
|
||||
decoded.AckFloor.Stream.ShouldBe(50UL);
|
||||
decoded.Redelivered.ShouldNotBeNull();
|
||||
decoded.Redelivered![122].ShouldBe(3UL);
|
||||
decoded.Redelivered[144].ShouldBe(8UL);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreConsumerEncodeDecodePendingBelowStreamAckFloor (filestore_test.go:2135)
|
||||
// Encodes a ConsumerState with Pending entries and verifies the round-trip.
|
||||
// Pending timestamps are downsampled to seconds and stored as deltas.
|
||||
[Fact]
|
||||
public void ConsumerEncodeDecodePendingBelowStreamAckFloor_RoundTripsCorrectly()
|
||||
{
|
||||
// Go: state.Delivered.Consumer = 1192; state.Delivered.Stream = 10185
|
||||
// state.AckFloor.Consumer = 1189; state.AckFloor.Stream = 10815
|
||||
// now := time.Now().Round(time.Second).Add(-10 * time.Second).UnixNano()
|
||||
// state.Pending = map[uint64]*Pending{
|
||||
// 10782: {1190, now},
|
||||
// 10810: {1191, now + 1e9},
|
||||
// 10815: {1192, now + 2e9},
|
||||
// }
|
||||
var now = DateTimeOffset.UtcNow.ToUnixTimeSeconds() * 1_000_000_000L - 10_000_000_000L;
|
||||
// Round to second boundary.
|
||||
now = (now / 1_000_000_000L) * 1_000_000_000L;
|
||||
|
||||
var state = new ConsumerState
|
||||
{
|
||||
Delivered = new SequencePair(1192, 10185),
|
||||
AckFloor = new SequencePair(1189, 10815),
|
||||
Pending = new Dictionary<ulong, Pending>
|
||||
{
|
||||
[10782] = new Pending(1190, now),
|
||||
[10810] = new Pending(1191, now + 1_000_000_000L),
|
||||
[10815] = new Pending(1192, now + 2_000_000_000L),
|
||||
},
|
||||
};
|
||||
|
||||
var buf = ConsumerStateCodec.Encode(state);
|
||||
var decoded = ConsumerStateCodec.Decode(buf);
|
||||
|
||||
decoded.Delivered.Consumer.ShouldBe(1192UL);
|
||||
decoded.Delivered.Stream.ShouldBe(10185UL);
|
||||
decoded.AckFloor.Consumer.ShouldBe(1189UL);
|
||||
decoded.AckFloor.Stream.ShouldBe(10815UL);
|
||||
|
||||
decoded.Pending.ShouldNotBeNull();
|
||||
decoded.Pending!.Count.ShouldBe(3);
|
||||
|
||||
foreach (var kv in state.Pending)
|
||||
{
|
||||
decoded.Pending.ContainsKey(kv.Key).ShouldBeTrue();
|
||||
var dp = decoded.Pending[kv.Key];
|
||||
dp.Sequence.ShouldBe(kv.Value.Sequence);
|
||||
// Timestamps are rounded to seconds, so allow 1-second delta.
|
||||
Math.Abs(dp.Timestamp - kv.Value.Timestamp).ShouldBeLessThan(2_000_000_000L);
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBadConsumerState (filestore_test.go:3011)
|
||||
// Verifies that a known "bad" but parseable consumer state buffer does not throw
|
||||
// an unhandled exception and returns a non-null ConsumerState.
|
||||
[Fact]
|
||||
public void BadConsumerState_DoesNotThrowOnKnownInput()
|
||||
{
|
||||
// Go: bs := []byte("\x16\x02\x01\x01\x03\x02\x01\x98\xf4\x8a\x8a\f\x01\x03\x86\xfa\n\x01\x00\x01")
|
||||
var bs = new byte[] { 0x16, 0x02, 0x01, 0x01, 0x03, 0x02, 0x01, 0x98, 0xf4, 0x8a, 0x8a, 0x0c, 0x01, 0x03, 0x86, 0xfa, 0x0a, 0x01, 0x00, 0x01 };
|
||||
|
||||
ConsumerState? result = null;
|
||||
Exception? caught = null;
|
||||
try
|
||||
{
|
||||
result = ConsumerStateCodec.Decode(bs);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
caught = ex;
|
||||
}
|
||||
|
||||
// Go: require that this does NOT throw and cs != nil.
|
||||
// Go comment: "Expected to not throw error".
|
||||
// If we do throw, at least it should be a controlled InvalidDataException.
|
||||
if (caught != null)
|
||||
{
|
||||
caught.ShouldBeOfType<InvalidDataException>();
|
||||
}
|
||||
else
|
||||
{
|
||||
result.ShouldNotBeNull();
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Consumer file store tests
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreConsumerRedeliveredLost (filestore_test.go:2530)
|
||||
// Verifies that redelivered state is preserved across consumer restarts.
|
||||
[Fact]
|
||||
public void ConsumerRedeliveredLost_RecoversAfterRestartAndClears()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir);
|
||||
|
||||
var cfg = new ConsumerConfig { AckPolicy = AckPolicy.Explicit };
|
||||
var cs1 = store.ConsumerStore("o22", DateTime.UtcNow, cfg);
|
||||
|
||||
var ts = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() * 1_000_000L;
|
||||
cs1.UpdateDelivered(1, 1, 1, ts);
|
||||
cs1.UpdateDelivered(2, 1, 2, ts); // dc=2 → redelivered
|
||||
cs1.UpdateDelivered(3, 1, 3, ts);
|
||||
cs1.UpdateDelivered(4, 1, 4, ts);
|
||||
cs1.UpdateDelivered(5, 2, 1, ts);
|
||||
|
||||
cs1.Stop();
|
||||
Thread.Sleep(20); // wait for flush
|
||||
|
||||
// Reopen — should recover redelivered.
|
||||
var cs2 = store.ConsumerStore("o22", DateTime.UtcNow, cfg);
|
||||
var state = cs2.State();
|
||||
state.ShouldNotBeNull();
|
||||
state.Redelivered.ShouldNotBeNull();
|
||||
|
||||
cs2.UpdateDelivered(6, 2, 2, ts);
|
||||
cs2.UpdateDelivered(7, 3, 1, ts);
|
||||
|
||||
cs2.Stop();
|
||||
Thread.Sleep(20);
|
||||
|
||||
// Reopen again.
|
||||
var cs3 = store.ConsumerStore("o22", DateTime.UtcNow, cfg);
|
||||
var state3 = cs3.State();
|
||||
// Pending should contain 3 entries (5, 6, 7 — the ones not yet acked).
|
||||
state3.Pending?.Count.ShouldBe(3);
|
||||
|
||||
// Ack 7 and 6.
|
||||
cs3.UpdateAcks(7, 3);
|
||||
cs3.UpdateAcks(6, 2);
|
||||
|
||||
cs3.Stop();
|
||||
Thread.Sleep(20);
|
||||
|
||||
// Reopen and ack 4.
|
||||
var cs4 = store.ConsumerStore("o22", DateTime.UtcNow, cfg);
|
||||
cs4.UpdateAcks(4, 1);
|
||||
|
||||
var finalState = cs4.State();
|
||||
finalState.Pending?.Count.ShouldBe(0);
|
||||
finalState.Redelivered?.Count.ShouldBe(0);
|
||||
|
||||
cs4.Stop();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreConsumerFlusher (filestore_test.go:2596)
|
||||
// Verifies that the consumer flusher task starts when the store is opened
|
||||
// and stops when the store is stopped.
|
||||
[Fact]
|
||||
public async Task ConsumerFlusher_FlusherStartsAndStopsWithStore()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir);
|
||||
|
||||
var cfg = new ConsumerConfig();
|
||||
var cs = (ConsumerFileStore)store.ConsumerStore("o22", DateTime.UtcNow, cfg);
|
||||
|
||||
// Wait for flusher to start (it starts in the constructor's async task).
|
||||
var deadline = DateTime.UtcNow.AddSeconds(1);
|
||||
while (!cs.InFlusher && DateTime.UtcNow < deadline)
|
||||
await Task.Delay(20);
|
||||
|
||||
cs.InFlusher.ShouldBeTrue("Flusher should be running after construction");
|
||||
|
||||
// Stop the store — flusher should stop.
|
||||
cs.Stop();
|
||||
|
||||
var deadline2 = DateTime.UtcNow.AddSeconds(1);
|
||||
while (cs.InFlusher && DateTime.UtcNow < deadline2)
|
||||
await Task.Delay(20);
|
||||
|
||||
cs.InFlusher.ShouldBeFalse("Flusher should have stopped after Stop()");
|
||||
}
|
||||
|
||||
// Go: TestFileStoreConsumerDeliveredUpdates (filestore_test.go:2627)
|
||||
// Verifies delivered tracking with AckNone policy (no pending entries).
|
||||
[Fact]
|
||||
public void ConsumerDeliveredUpdates_TrackDeliveredWithNoAckPolicy()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir);
|
||||
|
||||
// Simple consumer with no ack policy.
|
||||
var cfg = new ConsumerConfig { AckPolicy = AckPolicy.None };
|
||||
using var _ = new ConsumerStopGuard(store.ConsumerStore("o22", DateTime.UtcNow, cfg));
|
||||
var cs = _.Store;
|
||||
|
||||
void TestDelivered(ulong dseq, ulong sseq)
|
||||
{
|
||||
var ts = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() * 1_000_000L;
|
||||
cs.UpdateDelivered(dseq, sseq, 1, ts);
|
||||
var state = cs.State();
|
||||
state.ShouldNotBeNull();
|
||||
state.Delivered.Consumer.ShouldBe(dseq);
|
||||
state.Delivered.Stream.ShouldBe(sseq);
|
||||
state.AckFloor.Consumer.ShouldBe(dseq);
|
||||
state.AckFloor.Stream.ShouldBe(sseq);
|
||||
state.Pending?.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
TestDelivered(1, 100);
|
||||
TestDelivered(2, 110);
|
||||
TestDelivered(5, 130);
|
||||
|
||||
// UpdateAcks on AckNone consumer should throw (ErrNoAckPolicy).
|
||||
var ex = Should.Throw<InvalidOperationException>(() => cs.UpdateAcks(1, 100));
|
||||
ex.Message.ShouldContain("ErrNoAckPolicy");
|
||||
|
||||
// UpdateDelivered with dc > 1 on AckNone should throw.
|
||||
var ts2 = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() * 1_000_000L;
|
||||
var ex2 = Should.Throw<InvalidOperationException>(() => cs.UpdateDelivered(5, 130, 2, ts2));
|
||||
ex2.Message.ShouldContain("ErrNoAckPolicy");
|
||||
}
|
||||
|
||||
// Go: TestFileStoreConsumerDeliveredAndAckUpdates (filestore_test.go:2681)
|
||||
// Full consumer lifecycle: deliver 5 messages, perform bad acks, good acks,
|
||||
// verify ack floor advancement, then persist and recover.
|
||||
[Fact]
|
||||
public void ConsumerDeliveredAndAckUpdates_TracksPendingAndAckFloor()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir);
|
||||
|
||||
var cfg = new ConsumerConfig { AckPolicy = AckPolicy.Explicit };
|
||||
using var guard = new ConsumerStopGuard(store.ConsumerStore("o22", DateTime.UtcNow, cfg));
|
||||
var cs = guard.Store;
|
||||
|
||||
var pending = 0;
|
||||
|
||||
void TestDelivered(ulong dseq, ulong sseq)
|
||||
{
|
||||
var ts = DateTimeOffset.UtcNow.ToUnixTimeSeconds() * 1_000_000_000L;
|
||||
cs.UpdateDelivered(dseq, sseq, 1, ts);
|
||||
pending++;
|
||||
var state = cs.State();
|
||||
state.Delivered.Consumer.ShouldBe(dseq);
|
||||
state.Delivered.Stream.ShouldBe(sseq);
|
||||
state.Pending?.Count.ShouldBe(pending);
|
||||
}
|
||||
|
||||
TestDelivered(1, 100);
|
||||
TestDelivered(2, 110);
|
||||
TestDelivered(3, 130);
|
||||
TestDelivered(4, 150);
|
||||
TestDelivered(5, 165);
|
||||
|
||||
// Bad acks (stream seq does not match pending consumer seq).
|
||||
Should.Throw<InvalidOperationException>(() => cs.UpdateAcks(3, 101));
|
||||
Should.Throw<InvalidOperationException>(() => cs.UpdateAcks(1, 1));
|
||||
|
||||
// Good ack of seq 1.
|
||||
cs.UpdateAcks(1, 100);
|
||||
pending--;
|
||||
cs.State().Pending?.Count.ShouldBe(pending);
|
||||
|
||||
// Good ack of seq 3.
|
||||
cs.UpdateAcks(3, 130);
|
||||
pending--;
|
||||
cs.State().Pending?.Count.ShouldBe(pending);
|
||||
|
||||
// Good ack of seq 2.
|
||||
cs.UpdateAcks(2, 110);
|
||||
pending--;
|
||||
cs.State().Pending?.Count.ShouldBe(pending);
|
||||
|
||||
// Good ack of seq 5.
|
||||
cs.UpdateAcks(5, 165);
|
||||
pending--;
|
||||
cs.State().Pending?.Count.ShouldBe(pending);
|
||||
|
||||
// Good ack of seq 4.
|
||||
cs.UpdateAcks(4, 150);
|
||||
pending--;
|
||||
cs.State().Pending?.Count.ShouldBe(pending);
|
||||
|
||||
TestDelivered(6, 170);
|
||||
TestDelivered(7, 171);
|
||||
TestDelivered(8, 172);
|
||||
TestDelivered(9, 173);
|
||||
TestDelivered(10, 200);
|
||||
|
||||
cs.UpdateAcks(7, 171);
|
||||
pending--;
|
||||
cs.UpdateAcks(8, 172);
|
||||
pending--;
|
||||
|
||||
var stateBefore = cs.State();
|
||||
|
||||
// Restart consumer and verify state is preserved.
|
||||
cs.Stop();
|
||||
Thread.Sleep(50); // allow flush to complete
|
||||
|
||||
var cs2 = store.ConsumerStore("o22", DateTime.UtcNow, cfg);
|
||||
var stateAfter = cs2.State();
|
||||
|
||||
stateAfter.Delivered.Consumer.ShouldBe(stateBefore.Delivered.Consumer);
|
||||
stateAfter.Delivered.Stream.ShouldBe(stateBefore.Delivered.Stream);
|
||||
stateAfter.Pending?.Count.ShouldBe(stateBefore.Pending?.Count ?? 0);
|
||||
|
||||
cs2.Stop();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Helper for automatic consumer stop
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
private sealed class ConsumerStopGuard : IDisposable
|
||||
{
|
||||
public IConsumerStore Store { get; }
|
||||
public ConsumerStopGuard(IConsumerStore store) => Store = store;
|
||||
public void Dispose() => Store.Stop();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,358 @@
|
||||
// Reference: golang/nats-server/server/filestore.go:5267 (removeMsg)
|
||||
// golang/nats-server/server/filestore.go:5890 (eraseMsg)
|
||||
//
|
||||
// Tests verifying:
|
||||
// 1. SequenceSet correctly tracks deleted sequences in MsgBlock
|
||||
// 2. Tombstones survive MsgBlock recovery (RebuildIndex populates SequenceSet)
|
||||
// 3. Secure erase (Delete with secureErase=true) overwrites payload bytes
|
||||
// 4. EraseMsg at FileStore level marks the sequence as deleted
|
||||
//
|
||||
// Go test analogs:
|
||||
// TestFileStoreEraseMsgDoesNotLoseTombstones (filestore_test.go:10781)
|
||||
// TestFileStoreTombstonesNoFirstSeqRollback (filestore_test.go:10911)
|
||||
// TestFileStoreRemoveMsg (filestore_test.go:5267)
|
||||
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for SequenceSet-backed deletion tracking and secure erase in MsgBlock.
|
||||
/// Reference: golang/nats-server/server/filestore.go eraseMsg / removeMsg.
|
||||
/// </summary>
|
||||
public sealed class FileStoreTombstoneTrackingTests : IDisposable
|
||||
{
|
||||
private readonly string _testDir;
|
||||
|
||||
public FileStoreTombstoneTrackingTests()
|
||||
{
|
||||
_testDir = Path.Combine(Path.GetTempPath(), $"nats-tombstone-tracking-{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(_testDir);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (Directory.Exists(_testDir))
|
||||
Directory.Delete(_testDir, recursive: true);
|
||||
}
|
||||
|
||||
private string UniqueDir()
|
||||
{
|
||||
var dir = Path.Combine(_testDir, Guid.NewGuid().ToString("N"));
|
||||
Directory.CreateDirectory(dir);
|
||||
return dir;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// SequenceSet tracking in MsgBlock
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: removeMsg — after Delete, IsDeleted returns true and DeletedCount == 1
|
||||
[Fact]
|
||||
public void MsgBlock_Delete_TracksDeletionInSequenceSet()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var block = MsgBlock.Create(0, dir, maxBytes: 1024 * 1024);
|
||||
|
||||
block.Write("a", ReadOnlyMemory<byte>.Empty, "payload"u8.ToArray());
|
||||
block.Write("b", ReadOnlyMemory<byte>.Empty, "payload"u8.ToArray());
|
||||
block.Write("c", ReadOnlyMemory<byte>.Empty, "payload"u8.ToArray());
|
||||
|
||||
block.Delete(2).ShouldBeTrue();
|
||||
|
||||
block.IsDeleted(2).ShouldBeTrue();
|
||||
block.IsDeleted(1).ShouldBeFalse();
|
||||
block.IsDeleted(3).ShouldBeFalse();
|
||||
block.DeletedCount.ShouldBe(1UL);
|
||||
block.MessageCount.ShouldBe(2UL);
|
||||
}
|
||||
|
||||
// Multiple deletes tracked correctly — SequenceSet merges contiguous ranges.
|
||||
[Fact]
|
||||
public void MsgBlock_MultipleDeletes_AllTrackedInSequenceSet()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var block = MsgBlock.Create(0, dir, maxBytes: 1024 * 1024);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
block.Write($"subj.{i}", ReadOnlyMemory<byte>.Empty, "payload"u8.ToArray());
|
||||
|
||||
// Delete seqs 3, 4, 5 (contiguous — SequenceSet will merge into one range).
|
||||
block.Delete(3).ShouldBeTrue();
|
||||
block.Delete(4).ShouldBeTrue();
|
||||
block.Delete(5).ShouldBeTrue();
|
||||
|
||||
block.DeletedCount.ShouldBe(3UL);
|
||||
block.MessageCount.ShouldBe(7UL);
|
||||
|
||||
block.IsDeleted(3).ShouldBeTrue();
|
||||
block.IsDeleted(4).ShouldBeTrue();
|
||||
block.IsDeleted(5).ShouldBeTrue();
|
||||
block.IsDeleted(2).ShouldBeFalse();
|
||||
block.IsDeleted(6).ShouldBeFalse();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Tombstones survive recovery (RebuildIndex populates SequenceSet)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreTombstonesNoFirstSeqRollback — after restart, deleted seqs still deleted.
// Reference: filestore.go RebuildIndex reads ebit from block file.
[Fact]
public void MsgBlock_Recovery_TombstonesInSequenceSet()
{
    var directory = UniqueDir();

    // Phase 1: persist three messages, tombstone the middle one, then close.
    using (var writer = MsgBlock.Create(0, directory, maxBytes: 1024 * 1024))
    {
        writer.Write("a", ReadOnlyMemory<byte>.Empty, "one"u8.ToArray());
        writer.Write("b", ReadOnlyMemory<byte>.Empty, "two"u8.ToArray());
        writer.Write("c", ReadOnlyMemory<byte>.Empty, "three"u8.ToArray());
        writer.Delete(2); // marks seq 2 with ebit on disk
        writer.Flush();
    }

    // Phase 2: recover from file — RebuildIndex must repopulate the SequenceSet.
    using var recovered = MsgBlock.Recover(0, directory);
    recovered.DeletedCount.ShouldBe(1UL);
    recovered.MessageCount.ShouldBe(2UL);
    recovered.IsDeleted(1).ShouldBeFalse();
    recovered.IsDeleted(2).ShouldBeTrue();
    recovered.IsDeleted(3).ShouldBeFalse();

    // A deleted sequence reads back as null; the live neighbours stay readable.
    recovered.Read(2).ShouldBeNull();
    recovered.Read(1).ShouldNotBeNull();
    recovered.Read(3).ShouldNotBeNull();
}
|
||||
|
||||
// Multiple tombstones survive recovery.
[Fact]
public void MsgBlock_Recovery_MultipleDeletedSeqs_AllInSequenceSet()
{
    var dir = UniqueDir();

    // Phase 1: write ten messages (seqs 1..10), delete every odd seq, close.
    using (var block = MsgBlock.Create(0, dir, maxBytes: 1024 * 1024))
    {
        for (var i = 0; i < 10; i++)
        {
            // Fixed: was a pointless interpolated string ($"subj" with nothing to
            // interpolate — a leftover from $"subj.{i}"). The subject is constant
            // on purpose; only the sequence numbers matter for this test.
            block.Write("subj", ReadOnlyMemory<byte>.Empty, "payload"u8.ToArray());
        }

        // Delete every odd sequence (1, 3, 5, 7, 9).
        for (ulong seq = 1; seq <= 9; seq += 2)
        {
            block.Delete(seq);
        }

        block.Flush();
    }

    // Phase 2: recovery must rebuild the full set of tombstones.
    using var recovered = MsgBlock.Recover(0, dir);
    recovered.DeletedCount.ShouldBe(5UL);
    recovered.MessageCount.ShouldBe(5UL);

    for (ulong seq = 1; seq <= 9; seq += 2)
        recovered.IsDeleted(seq).ShouldBeTrue($"seq {seq} should be deleted");
    for (ulong seq = 2; seq <= 10; seq += 2)
        recovered.IsDeleted(seq).ShouldBeFalse($"seq {seq} should NOT be deleted");
}
|
||||
|
||||
// Skip records (WriteSkip) survive recovery and appear in SequenceSet.
[Fact]
public void MsgBlock_Recovery_SkipRecordsInSequenceSet()
{
    var directory = UniqueDir();

    // Phase 1: interleave real writes with skip tombstones, then close.
    using (var writer = MsgBlock.Create(0, directory, maxBytes: 1024 * 1024, firstSequence: 1))
    {
        writer.Write("a", ReadOnlyMemory<byte>.Empty, "payload"u8.ToArray()); // seq=1
        writer.WriteSkip(2); // tombstone
        writer.WriteSkip(3); // tombstone
        writer.Write("b", ReadOnlyMemory<byte>.Empty, "payload"u8.ToArray()); // seq=4
        writer.Flush();
    }

    // Phase 2: after recovery the skip records (seqs 2, 3) must read as deleted.
    using var recovered = MsgBlock.Recover(0, directory);
    recovered.IsDeleted(2).ShouldBeTrue();
    recovered.IsDeleted(3).ShouldBeTrue();
    recovered.IsDeleted(1).ShouldBeFalse();
    recovered.IsDeleted(4).ShouldBeFalse();
    recovered.DeletedCount.ShouldBe(2UL);
    recovered.MessageCount.ShouldBe(2UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Secure erase — payload bytes are overwritten with random data
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: eraseMsg (filestore.go:5890) — payload bytes replaced with random bytes.
[Fact]
public void MsgBlock_SecureErase_OverwritesPayloadBytes()
{
    var dir = UniqueDir();
    var original = Encoding.UTF8.GetBytes("this is a secret payload");

    using (var block = MsgBlock.Create(0, dir, maxBytes: 1024 * 1024))
    {
        block.Write("secret", ReadOnlyMemory<byte>.Empty, original);

        // Perform secure erase — overwrites payload bytes in-place on disk.
        block.Delete(1, secureErase: true).ShouldBeTrue();
        block.Flush();
    }

    // Read the raw block file and verify the original payload bytes are gone.
    var blockFile = Path.Combine(dir, "000000.blk");
    var rawBytes = File.ReadAllBytes(blockFile);

    // Search for a prefix of the plaintext in the raw file.
    // MemoryExtensions.IndexOf replaces the previous hand-rolled byte scan and
    // returns -1 when the pattern is absent.
    var payloadBytes = Encoding.UTF8.GetBytes("this is a secret");
    var index = rawBytes.AsSpan().IndexOf(payloadBytes);
    index.ShouldBe(-1, "Secret payload bytes should have been overwritten by secure erase");
}
|
||||
|
||||
// After secure erase, the message appears deleted (returns null on Read).
[Fact]
public void MsgBlock_SecureErase_MessageAppearsDeleted()
{
    var directory = UniqueDir();
    using var block = MsgBlock.Create(0, directory, maxBytes: 1024 * 1024);

    block.Write("sensitive", ReadOnlyMemory<byte>.Empty, "secret data"u8.ToArray());
    block.Write("other", ReadOnlyMemory<byte>.Empty, "normal"u8.ToArray());

    // Securely erase seq 1; seq 2 must remain untouched.
    block.Delete(1, secureErase: true).ShouldBeTrue();

    block.IsDeleted(1).ShouldBeTrue();
    block.Read(1).ShouldBeNull();
    block.Read(2).ShouldNotBeNull(); // other message unaffected
    block.DeletedCount.ShouldBe(1UL);
    block.MessageCount.ShouldBe(1UL);
}
|
||||
|
||||
// Secure erase with secureErase=false is identical to regular delete (no overwrite).
[Fact]
public void MsgBlock_Delete_WithSecureEraseFalse_NormalDelete()
{
    var directory = UniqueDir();
    using var block = MsgBlock.Create(0, directory, maxBytes: 1024 * 1024);

    block.Write("x", ReadOnlyMemory<byte>.Empty, "content"u8.ToArray());

    // A non-secure delete must still tombstone the sequence and hide it from Read.
    block.Delete(1, secureErase: false).ShouldBeTrue();
    block.IsDeleted(1).ShouldBeTrue();
    block.Read(1).ShouldBeNull();
}
|
||||
|
||||
// Double secure erase returns false on second call.
[Fact]
public void MsgBlock_SecureErase_DoubleErase_ReturnsFalse()
{
    var directory = UniqueDir();
    using var block = MsgBlock.Create(0, directory, maxBytes: 1024 * 1024);

    block.Write("x", ReadOnlyMemory<byte>.Empty, "content"u8.ToArray());

    // The first erase succeeds; repeating it is a no-op because seq 1 is gone.
    block.Delete(1, secureErase: true).ShouldBeTrue();
    block.Delete(1, secureErase: true).ShouldBeFalse(); // already deleted
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// DeletedSequences property returns snapshot of SequenceSet
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// DeletedSequences snapshot contains all deleted seqs (still IReadOnlySet from HashSet copy).
[Fact]
public void DeletedSequences_ReturnsCorrectSnapshot()
{
    var directory = UniqueDir();
    using var block = MsgBlock.Create(0, directory, maxBytes: 1024 * 1024);

    // Four messages (seqs 1..4); the even sequences get deleted below.
    foreach (var (subject, body) in new[] { ("a", "one"), ("b", "two"), ("c", "three"), ("d", "four") })
        block.Write(subject, ReadOnlyMemory<byte>.Empty, Encoding.UTF8.GetBytes(body));

    block.Delete(2);
    block.Delete(4);

    // The snapshot must contain exactly the deleted sequences and nothing else.
    var snapshot = block.DeletedSequences;
    snapshot.Count.ShouldBe(2);
    snapshot.ShouldContain(2UL);
    snapshot.ShouldContain(4UL);
    snapshot.ShouldNotContain(1UL);
    snapshot.ShouldNotContain(3UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FileStore EraseMsg integration
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: eraseMsg — after EraseMsg, message is gone and state reflects deletion.
[Fact]
public void FileStore_EraseMsg_MessageGoneAfterErase()
{
    var directory = UniqueDir();
    using var store = new FileStore(new FileStoreOptions { Directory = directory });

    store.StoreMsg("foo", null, "secret"u8.ToArray(), 0);
    store.StoreMsg("foo", null, "normal"u8.ToArray(), 0);

    // Both messages are visible before the erase.
    store.State().Msgs.ShouldBe(2UL);

    // Erasing seq 1 removes it from the live count...
    store.EraseMsg(1).ShouldBeTrue();
    store.State().Msgs.ShouldBe(1UL);

    // ...and erasing the same seq a second time reports failure.
    store.EraseMsg(1).ShouldBeFalse();
}
|
||||
|
||||
// Go: TestFileStoreEraseMsgDoesNotLoseTombstones — erase does not disturb other tombstones.
// Reference: filestore_test.go:10781
[Fact]
public void FileStore_EraseMsg_DoesNotLoseTombstones()
{
    var dir = UniqueDir();
    var opts = new FileStoreOptions { Directory = dir };
    using var store = new FileStore(opts);

    // Lay down four messages: seq 2 will be removed normally, seq 3 securely erased.
    store.StoreMsg("foo", null, [], 0); // seq=1
    store.StoreMsg("foo", null, [], 0); // seq=2 (tombstone)
    store.StoreMsg("foo", null, "secret"u8.ToArray(), 0); // seq=3 (erased)

    store.RemoveMsg(2); // tombstone seq=2
    store.StoreMsg("foo", null, [], 0); // seq=4

    store.EraseMsg(3); // erase seq=3

    // The erase of seq 3 must not disturb the pre-existing tombstone for seq 2.
    var state = store.State();
    state.Msgs.ShouldBe(2UL); // msgs 1 and 4 remain
    state.NumDeleted.ShouldBe(2); // seqs 2 and 3 deleted
    state.Deleted.ShouldNotBeNull();
    state.Deleted!.ShouldContain(2UL);
    state.Deleted.ShouldContain(3UL);

    // Restart — state should be identical.
    // NOTE(review): `store` is disposed explicitly here and will be disposed again
    // by its `using` declaration at scope exit — assumes FileStore.Dispose is
    // idempotent; confirm.
    store.Dispose();
    using var store2 = new FileStore(opts);
    var after = store2.State();
    after.Msgs.ShouldBe(2UL);
    after.NumDeleted.ShouldBe(2);
    after.Deleted.ShouldNotBeNull();
    after.Deleted!.ShouldContain(2UL);
    after.Deleted.ShouldContain(3UL);
}
|
||||
}
|
||||
@@ -0,0 +1,324 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported:
|
||||
// TestFileStoreWriteCache — write cache hit (msgBlock.cache)
|
||||
// TestFileStoreClearCache — ClearCache evicts, disk read still works
|
||||
// TestFileStoreTtlWheelExpiry — TTL wheel expires old messages (expireMsgs)
|
||||
// TestFileStoreTtlWheelRetention — TTL wheel retains unexpired messages
|
||||
// TestFileStoreStoreMsg — StoreMsg returns seq + timestamp
|
||||
// TestFileStoreStoreMsgPerMsgTtl — StoreMsg with per-message TTL
|
||||
// TestFileStoreRecoveryReregistersTtls — recovery re-registers unexpired TTL entries
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// Tests for the MsgBlock write cache and FileStore TTL wheel scheduling.
/// Reference: golang/nats-server/server/filestore.go — msgBlock.cache, expireMsgs, storeMsg TTL.
/// </summary>
public sealed class FileStoreTtlTests : IDisposable
{
    // Scratch root for this test class; each test works in its own subdirectory.
    private readonly string _dir;

    public FileStoreTtlTests()
    {
        _dir = Path.Combine(Path.GetTempPath(), $"nats-js-ttl-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_dir);
    }

    public void Dispose()
    {
        // Remove the scratch tree created in the constructor.
        if (Directory.Exists(_dir))
            Directory.Delete(_dir, recursive: true);
    }

    /// <summary>
    /// Opens a FileStore rooted at the scratch directory (or a subdirectory of it).
    /// Note: any Directory already set on <paramref name="options"/> is overwritten
    /// here — callers must pass <paramref name="sub"/> to select a subdirectory.
    /// </summary>
    private FileStore CreateStore(FileStoreOptions? options = null, string? sub = null)
    {
        var dir = sub is null ? _dir : Path.Combine(_dir, sub);
        var opts = options ?? new FileStoreOptions();
        opts.Directory = dir;
        return new FileStore(opts);
    }

    // Current UTC time as Unix nanoseconds at full tick (100 ns) precision.
    // Fix: the previous bound computation truncated to milliseconds
    // (ToUnixTimeMilliseconds() * 1_000_000), so an "after" bound taken later in
    // the same millisecond could be SMALLER than a finer-grained store timestamp,
    // making the range assertions flaky.
    private static long UtcNowUnixNanos()
        => (DateTimeOffset.UtcNow - DateTimeOffset.UnixEpoch).Ticks * 100L;

    // -------------------------------------------------------------------------
    // MsgBlock write cache tests
    // -------------------------------------------------------------------------

    // Go: TestFileStoreWriteCache — filestore_test.go (msgBlock.cache hit path)
    [Fact]
    public async Task WriteCache_ReadReturnsFromCache()
    {
        // The active block maintains a write cache populated on every Write/WriteAt.
        // After writing a message, the active block's cache should contain it so
        // Read() returns without touching disk.
        await using var store = CreateStore();

        var seq = await store.AppendAsync("foo", "hello"u8.ToArray(), default);
        seq.ShouldBe(1UL);

        // Load back through the store's in-memory cache (which calls MsgBlock.Read internally).
        var msg = await store.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Subject.ShouldBe("foo");
        msg.Payload.ToArray().ShouldBe("hello"u8.ToArray());

        // The active block should have a write cache populated.
        // We verify this indirectly: after clearing, the read should still work (disk path).
        // BlockCount == 1 means there is exactly one block (the active one).
        store.BlockCount.ShouldBe(1);
    }

    // Go: TestFileStoreClearCache — filestore_test.go (clearCache eviction)
    [Fact]
    public async Task WriteCache_ClearEvictsButReadStillWorks()
    {
        // Write cache is an optimisation: clearing it should not affect correctness.
        // After clearing, reads fall through to disk and return the same data.
        await using var store = CreateStore(sub: "clear-cache");

        var seq = await store.AppendAsync("bar", "world"u8.ToArray(), default);
        seq.ShouldBe(1UL); // previously unused — pinned so the append is verified

        // We test ClearCache by writing several messages to force a block rotation
        // (the previous block's cache is cleared on rotation). A small block size
        // makes rotation happen quickly.
        // Fix: the directory is now routed through CreateStore's `sub` parameter —
        // the old code set opts.Directory explicitly, which CreateStore silently
        // overwrote with the scratch root.
        var opts = new FileStoreOptions
        {
            BlockSizeBytes = 256, // small block so rotation happens quickly
        };
        await using var storeSmall = CreateStore(opts, "rotate-test");

        // Write several messages; block rotation will clear the cache on the sealed block.
        for (var i = 0; i < 10; i++)
            await storeSmall.AppendAsync($"sub.{i}", Encoding.UTF8.GetBytes($"payload-{i}"), default);

        // All messages should still be readable even though earlier blocks were sealed
        // and their caches were cleared.
        for (ulong s = 1; s <= 10; s++)
        {
            var m = await storeSmall.LoadAsync(s, default);
            m.ShouldNotBeNull();
        }
    }

    // -------------------------------------------------------------------------
    // TTL wheel tests
    // -------------------------------------------------------------------------

    // Go: TestFileStoreTtlWheelExpiry — filestore.go expireMsgs (thw.ExpireTasks)
    [Fact]
    public async Task TtlWheel_ExpiredMessagesRemoved()
    {
        // MaxAgeMs = 50ms: messages older than 50ms should be expired on the next append.
        var opts = new FileStoreOptions { MaxAgeMs = 50 };
        await using var store = CreateStore(opts, "ttl-expire");

        // Write some messages.
        await store.AppendAsync("events.a", "data-a"u8.ToArray(), default);
        await store.AppendAsync("events.b", "data-b"u8.ToArray(), default);

        var stateBefore = await store.GetStateAsync(default);
        stateBefore.Messages.ShouldBe(2UL);

        // Wait longer than the TTL.
        await Task.Delay(150);

        // Trigger expiry by appending a new message (expiry check happens at the start of each append).
        await store.AppendAsync("events.c", "data-c"u8.ToArray(), default);

        // The two old messages should now be gone; only the new one should remain.
        var stateAfter = await store.GetStateAsync(default);
        stateAfter.Messages.ShouldBe(1UL);
        stateAfter.LastSeq.ShouldBe(3UL);
    }

    // Go: TestFileStoreTtlWheelRetention — filestore.go expireMsgs (no expiry when fresh)
    [Fact]
    public async Task TtlWheel_UnexpiredMessagesRetained()
    {
        // MaxAgeMs = 5000ms: messages written just now should not be expired immediately.
        var opts = new FileStoreOptions { MaxAgeMs = 5000 };
        await using var store = CreateStore(opts, "ttl-retain");

        await store.AppendAsync("keep.a", "payload-a"u8.ToArray(), default);
        await store.AppendAsync("keep.b", "payload-b"u8.ToArray(), default);
        await store.AppendAsync("keep.c", "payload-c"u8.ToArray(), default);

        // Trigger the expiry check path via another append.
        await store.AppendAsync("keep.d", "payload-d"u8.ToArray(), default);

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe(4UL, "all four messages should still be present");
    }

    // -------------------------------------------------------------------------
    // StoreMsg sync method tests
    // -------------------------------------------------------------------------

    // Go: TestFileStoreStoreMsg — filestore.go storeMsg returns (seq, ts)
    [Fact]
    public async Task StoreMsg_ReturnsSequenceAndTimestamp()
    {
        await using var store = CreateStore(sub: "storemsg-basic");

        // Bracket the call with tick-precision Unix-nanosecond bounds (see UtcNowUnixNanos).
        var beforeNs = UtcNowUnixNanos();
        var (seq, ts) = store.StoreMsg("orders.new", null, "order-data"u8.ToArray(), 0L);
        var afterNs = UtcNowUnixNanos();

        seq.ShouldBe(1UL);
        ts.ShouldBeGreaterThanOrEqualTo(beforeNs);
        ts.ShouldBeLessThanOrEqualTo(afterNs);

        // Verify the message is retrievable.
        var loaded = await store.LoadAsync(seq, default);
        loaded.ShouldNotBeNull();
        loaded!.Subject.ShouldBe("orders.new");
        loaded.Payload.ToArray().ShouldBe("order-data"u8.ToArray());
    }

    // Go: TestFileStoreStoreMsg — filestore.go storeMsg with headers
    [Fact]
    public async Task StoreMsg_WithHeaders_CombinesHeadersAndPayload()
    {
        await using var store = CreateStore(sub: "storemsg-headers");

        var hdr = "NATS/1.0\r\nX-Custom: value\r\n\r\n"u8.ToArray();
        var body = "message-body"u8.ToArray();
        var (seq, ts) = store.StoreMsg("events.all", hdr, body, 0L);

        seq.ShouldBe(1UL);
        ts.ShouldBeGreaterThan(0L);

        // The stored payload should be the combination of headers + body.
        var loaded = await store.LoadAsync(seq, default);
        loaded.ShouldNotBeNull();
        loaded!.Payload.Length.ShouldBe(hdr.Length + body.Length);
    }

    // Go: TestFileStoreStoreMsgPerMsgTtl — filestore.go per-message TTL override
    [Fact]
    public async Task StoreMsg_WithTtl_ExpiresAfterDelay()
    {
        // No stream-level TTL — only per-message TTL.
        await using var store = CreateStore(sub: "storemsg-ttl");

        // 80ms TTL in nanoseconds.
        const long ttlNs = 80_000_000L;

        var (seq, _) = store.StoreMsg("expire.me", null, "short-lived"u8.ToArray(), ttlNs);
        seq.ShouldBe(1UL);

        // Verify it's present immediately.
        var before = await store.GetStateAsync(default);
        before.Messages.ShouldBe(1UL);

        // Wait for expiry.
        await Task.Delay(200);

        // Trigger expiry by calling StoreMsg again (which calls ExpireFromWheel internally).
        store.StoreMsg("permanent", null, "stays"u8.ToArray(), 0L);

        // The TTL'd message should be gone; only the permanent one remains.
        var after = await store.GetStateAsync(default);
        after.Messages.ShouldBe(1UL);
        after.LastSeq.ShouldBe(2UL);
    }

    // Go: TestFileStoreStoreMsg — multiple sequential StoreMsgs increment sequence
    [Fact]
    public async Task StoreMsg_MultipleMessages_SequenceIncrements()
    {
        await using var store = CreateStore(sub: "storemsg-multi");

        for (var i = 1; i <= 5; i++)
        {
            var (seq, _) = store.StoreMsg($"topic.{i}", null, Encoding.UTF8.GetBytes($"msg-{i}"), 0L);
            seq.ShouldBe((ulong)i);
        }

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe(5UL);
        state.LastSeq.ShouldBe(5UL);
    }

    // -------------------------------------------------------------------------
    // Recovery re-registration test
    // -------------------------------------------------------------------------

    // Go: filestore.go recoverMsgs — TTL re-registration on restart
    [Fact]
    public async Task Recovery_ReregistersUnexpiredTtls()
    {
        // Write messages with a 5-second TTL (well beyond the test duration).
        // After recovering the store, the messages should still be present.
        var dir = Path.Combine(_dir, "ttl-recovery");
        var opts = new FileStoreOptions
        {
            Directory = dir,
            MaxAgeMs = 5000, // 5 second TTL
        };

        ulong seqA, seqB;

        // First open: write messages.
        {
            await using var store = new FileStore(opts);
            seqA = await store.AppendAsync("topic.a", "payload-a"u8.ToArray(), default);
            seqB = await store.AppendAsync("topic.b", "payload-b"u8.ToArray(), default);

            var state = await store.GetStateAsync(default);
            state.Messages.ShouldBe(2UL);
        } // FileStore disposed here.

        // Second open: recovery should re-register TTLs and messages should still be present.
        {
            await using var recovered = new FileStore(opts);

            var state = await recovered.GetStateAsync(default);
            state.Messages.ShouldBe(2UL, "unexpired messages should survive recovery");

            var msgA = await recovered.LoadAsync(seqA, default);
            msgA.ShouldNotBeNull();
            msgA!.Subject.ShouldBe("topic.a");

            var msgB = await recovered.LoadAsync(seqB, default);
            msgB.ShouldNotBeNull();
            msgB!.Subject.ShouldBe("topic.b");
        }
    }

    // Go: filestore.go recoverMsgs — expired messages removed on recovery
    [Fact]
    public async Task Recovery_ExpiredMessagesRemovedOnReopen()
    {
        // Write messages with a very short TTL, wait for them to expire, then
        // reopen the store. The expired messages should be pruned at startup.
        var dir = Path.Combine(_dir, "ttl-recovery-expired");
        var opts = new FileStoreOptions
        {
            Directory = dir,
            MaxAgeMs = 50, // 50ms TTL
        };

        // First open: write messages.
        {
            await using var store = new FileStore(opts);
            await store.AppendAsync("expiring.a", "data-a"u8.ToArray(), default);
            await store.AppendAsync("expiring.b", "data-b"u8.ToArray(), default);
        }

        // Wait for TTL to elapse.
        await Task.Delay(200);

        // Second open: expired messages should be pruned during RecoverBlocks -> PruneExpired.
        {
            await using var recovered = new FileStore(opts);

            var state = await recovered.GetStateAsync(default);
            state.Messages.ShouldBe(0UL, "expired messages should be removed on recovery");
        }
    }
}
|
||||
@@ -0,0 +1,475 @@
|
||||
// Reference: golang/nats-server/server/filestore_test.go
|
||||
// Tests ported from: TestFileStoreEncrypted (AES + ChaCha permutations),
|
||||
// testFileStoreAllPermutations (S2 + cipher cross product),
|
||||
// TestFileStoreS2Compression (filestore_test.go:4180),
|
||||
// TestFileStoreEncryptedChaChaCipher (filestore_test.go:4250)
|
||||
//
|
||||
// The Go server runs testFileStoreAllPermutations which exercises all
|
||||
// combinations of {NoCompression, S2Compression} x {NoCipher, ChaCha, AES}.
|
||||
// These tests cover the FSV2 envelope path added in Task 4.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class FileStoreV2Tests : IDisposable
|
||||
{
|
||||
private readonly string _dir;
|
||||
|
||||
public FileStoreV2Tests()
{
    // Fresh scratch root per test-class instance; deleted again in Dispose.
    var root = Path.Combine(Path.GetTempPath(), $"nats-js-fs-v2-{Guid.NewGuid():N}");
    Directory.CreateDirectory(root);
    _dir = root;
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
if (Directory.Exists(_dir))
|
||||
Directory.Delete(_dir, recursive: true);
|
||||
}
|
||||
|
||||
// 32-byte key for AEAD ciphers.
// NOTE(review): the literal below is 33 bytes long, so the [..32] slice drops
// the trailing 's' — the property name ("exactly-32-bytes") slightly overstates
// the literal; the returned array is nonetheless exactly 32 bytes.
private static byte[] Key32 => "nats-v2-test-key-exactly-32-bytes"u8[..32].ToArray();
|
||||
|
||||
// Opens a FileStore under an isolated subdirectory of the scratch root.
// Note: always overwrites options.Directory with the computed path.
private FileStore CreateStore(string sub, FileStoreOptions options)
{
    var target = Path.Combine(_dir, sub);
    options.Directory = target;
    return new FileStore(options);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// S2 compression (no encryption) — FSV2 envelope
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreS2Compression filestore_test.go:4180
[Fact]
public async Task S2_compression_store_and_load()
{
    await using var store = CreateStore("s2-basic", new FileStoreOptions
    {
        Compression = StoreCompression.S2Compression,
    });

    // Append ten identical messages; sequences must be assigned 1..10 in order.
    var body = "Hello, S2!"u8.ToArray();
    for (var expected = 1UL; expected <= 10UL; expected++)
    {
        var seq = await store.AppendAsync("foo", body, default);
        seq.ShouldBe(expected);
    }

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe(10UL);

    // A mid-stream message must decompress back to the original payload.
    var msg = await store.LoadAsync(5, default);
    msg.ShouldNotBeNull();
    msg!.Payload.ToArray().ShouldBe(body);
}
|
||||
|
||||
[Fact]
public async Task S2_compression_store_and_recover()
{
    const string sub = "s2-recover";

    // Both opens must share an identical compression configuration.
    static FileStoreOptions S2Options() => new()
    {
        Compression = StoreCompression.S2Compression,
    };

    // First open: persist 50 compressed messages.
    await using (var store = CreateStore(sub, S2Options()))
    {
        for (var i = 0; i < 50; i++)
            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i:D4}"), default);
    }

    // Second open: recovery must see all 50 and decompress a sample correctly.
    await using (var store = CreateStore(sub, S2Options()))
    {
        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe(50UL);

        var msg = await store.LoadAsync(25, default);
        msg.ShouldNotBeNull();
        msg!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("msg-0024"));
    }
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// ChaCha20-Poly1305 encryption (no compression) — FSV2 envelope
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreEncryptedChaChaCipher filestore_test.go:4250
[Fact]
public async Task ChaCha_encryption_store_and_load()
{
    await using var store = CreateStore("chacha-basic", new FileStoreOptions
    {
        Cipher = StoreCipher.ChaCha,
        EncryptionKey = Key32,
    });

    // Payload literal kept byte-for-byte from the Go port (reused AES fixture text).
    var body = "aes ftw"u8.ToArray();
    for (var i = 0; i < 50; i++)
        await store.AppendAsync("foo", body, default);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe(50UL);

    // Any stored message must decrypt back to the original plaintext.
    var msg = await store.LoadAsync(10, default);
    msg.ShouldNotBeNull();
    msg!.Payload.ToArray().ShouldBe(body);
}
|
||||
|
||||
[Fact]
public async Task ChaCha_encryption_store_and_recover()
{
    const string sub = "chacha-recover";
    var key = Key32;

    // Both opens must share the same cipher configuration and key.
    FileStoreOptions MakeOptions() => new()
    {
        Cipher = StoreCipher.ChaCha,
        EncryptionKey = key,
    };

    // First open: persist 50 encrypted copies of the plaintext.
    await using (var store = CreateStore(sub, MakeOptions()))
    {
        for (var i = 0; i < 50; i++)
            await store.AppendAsync("foo", "chacha secret"u8.ToArray(), default);
    }

    // Second open: recovery must decrypt a sample and report the full count.
    await using (var store = CreateStore(sub, MakeOptions()))
    {
        var msg = await store.LoadAsync(10, default);
        msg.ShouldNotBeNull();
        msg!.Payload.ToArray().ShouldBe("chacha secret"u8.ToArray());

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe(50UL);
    }
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// AES-256-GCM encryption (no compression) — FSV2 envelope
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreEncrypted (AES permutation) filestore_test.go:4204
[Fact]
public async Task AesGcm_encryption_store_and_load()
{
    await using var store = CreateStore("aes-basic", new FileStoreOptions
    {
        Cipher = StoreCipher.Aes,
        EncryptionKey = Key32,
    });

    // 50 identical messages written through the AES-256-GCM envelope.
    var body = "aes-gcm secret"u8.ToArray();
    for (var i = 0; i < 50; i++)
        await store.AppendAsync("foo", body, default);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe(50UL);

    // A mid-stream message must decrypt back to the plaintext.
    var msg = await store.LoadAsync(25, default);
    msg.ShouldNotBeNull();
    msg!.Payload.ToArray().ShouldBe(body);
}
|
||||
|
||||
[Fact]
public async Task AesGcm_encryption_store_and_recover()
{
    const string sub = "aes-recover";
    var key = Key32;

    // Both opens must share the same cipher configuration and key.
    FileStoreOptions MakeOptions() => new()
    {
        Cipher = StoreCipher.Aes,
        EncryptionKey = key,
    };

    // First open: persist 50 distinct encrypted payloads.
    await using (var store = CreateStore(sub, MakeOptions()))
    {
        for (var i = 0; i < 50; i++)
            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"aes-{i:D4}"), default);
    }

    // Second open: recovery must decrypt a sample and report the full count.
    await using (var store = CreateStore(sub, MakeOptions()))
    {
        var msg = await store.LoadAsync(30, default);
        msg.ShouldNotBeNull();
        msg!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("aes-0029"));

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe(50UL);
    }
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// S2 + ChaCha combined — FSV2 envelope
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task S2_and_ChaCha_combined_round_trip()
{
    // S2 compression and ChaCha20-Poly1305 stacked in one FSV2 envelope.
    await using var store = CreateStore("s2-chacha", new FileStoreOptions
    {
        Compression = StoreCompression.S2Compression,
        Cipher = StoreCipher.ChaCha,
        EncryptionKey = Key32,
    });

    var body = "S2 + ChaCha combined payload"u8.ToArray();
    for (var i = 0; i < 20; i++)
        await store.AppendAsync("foo", body, default);

    // Every message must survive the decrypt-then-decompress round trip.
    for (ulong seq = 1; seq <= 20; seq++)
    {
        var msg = await store.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Payload.ToArray().ShouldBe(body);
    }
}
|
||||
|
||||
[Fact]
public async Task S2_and_AesGcm_combined_round_trip()
{
    // S2 compression and AES-256-GCM stacked in one FSV2 envelope.
    await using var store = CreateStore("s2-aes", new FileStoreOptions
    {
        Compression = StoreCompression.S2Compression,
        Cipher = StoreCipher.Aes,
        EncryptionKey = Key32,
    });

    var body = "S2 + AES-GCM combined payload"u8.ToArray();
    for (var i = 0; i < 20; i++)
        await store.AppendAsync("bar", body, default);

    // Every message must survive the decrypt-then-decompress round trip.
    for (ulong seq = 1; seq <= 20; seq++)
    {
        var msg = await store.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Payload.ToArray().ShouldBe(body);
    }
}
|
||||
|
||||
// After a close/reopen cycle, S2+ChaCha data must be fully recoverable:
// state is rebuilt and payloads are decrypted then decompressed.
[Fact]
public async Task S2_and_ChaCha_combined_store_and_recover()
{
    const string sub = "s2-chacha-recover";
    var key = Key32;

    FileStoreOptions MakeOptions() => new()
    {
        Compression = StoreCompression.S2Compression,
        Cipher = StoreCipher.ChaCha,
        EncryptionKey = key,
    };

    await using (var writer = CreateStore(sub, MakeOptions()))
    {
        for (var n = 0; n < 40; n++)
            await writer.AppendAsync("foo", Encoding.UTF8.GetBytes($"s2-chacha-{n:D3}"), default);
    }

    await using var reader = CreateStore(sub, MakeOptions());

    var state = await reader.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)40);

    // Sequence 20 carries the zero-based payload index 19.
    var msg = await reader.LoadAsync(20, default);
    msg.ShouldNotBeNull();
    msg!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("s2-chacha-019"));
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Legacy FSV1 data still readable after upgrade
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: backward-compat requirement — existing FSV1 files must still load.
// Legacy Deflate data (EnableCompression=true, no StoreCompression enum set)
// written before the FSV2 envelope must remain readable after reopen.
[Fact]
public async Task Legacy_FSV1_deflate_compression_still_readable()
{
    const string sub = "fsv1-compress-legacy";
    var expected = "legacy deflate"u8.ToArray();

    // Write with the legacy boolean switch only.
    await using (var legacyWriter = CreateStore(sub, new FileStoreOptions { EnableCompression = true }))
    {
        await legacyWriter.AppendAsync("foo", expected, default);
    }

    // Reopen with identical options — the message must read back verbatim.
    await using var reader = CreateStore(sub, new FileStoreOptions { EnableCompression = true });

    var msg = await reader.LoadAsync(1, default);
    msg.ShouldNotBeNull();
    msg!.Payload.ToArray().ShouldBe(expected);
}
|
||||
|
||||
// Legacy XOR data (EnableEncryption=true, no cipher enum set) written before
// the FSV2 envelope must remain readable after reopen with the same key.
// NOTE(review): the key literal is actually 23 bytes despite its "16bytes"
// name — harmless for the legacy XOR scheme, but worth confirming.
[Fact]
public async Task Legacy_FSV1_xor_encryption_still_readable()
{
    const string sub = "fsv1-encrypt-legacy";
    var legacyKey = "legacy-xor-key-16bytes!"u8.ToArray();
    var expected = "legacy xor encrypted"u8.ToArray();

    FileStoreOptions MakeOptions() => new()
    {
        EnableEncryption = true,
        EncryptionKey = legacyKey,
    };

    // Write with the legacy boolean switch only.
    await using (var legacyWriter = CreateStore(sub, MakeOptions()))
    {
        await legacyWriter.AppendAsync("foo", expected, default);
    }

    // Reopen with identical options — the message must read back verbatim.
    await using var reader = CreateStore(sub, MakeOptions());

    var msg = await reader.LoadAsync(1, default);
    msg.ShouldNotBeNull();
    msg!.Payload.ToArray().ShouldBe(expected);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// All 6 permutations: {NoCipher, ChaCha, AesGcm} x {NoCompression, S2}
|
||||
// Go: testFileStoreAllPermutations (filestore_test.go:98)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: testFileStoreAllPermutations (filestore_test.go:98)
// Every {cipher} x {compression} combination must store and load transparently
// within a single store instance.
[Theory]
[InlineData(StoreCipher.NoCipher, StoreCompression.NoCompression)]
[InlineData(StoreCipher.NoCipher, StoreCompression.S2Compression)]
[InlineData(StoreCipher.ChaCha, StoreCompression.NoCompression)]
[InlineData(StoreCipher.ChaCha, StoreCompression.S2Compression)]
[InlineData(StoreCipher.Aes, StoreCompression.NoCompression)]
[InlineData(StoreCipher.Aes, StoreCompression.S2Compression)]
public async Task All_permutations_store_and_load(StoreCipher cipher, StoreCompression compression)
{
    const int count = 10;
    // A key is only meaningful (and only supplied) when a cipher is in play.
    var key = cipher == StoreCipher.NoCipher ? null : Key32;
    var payload = Encoding.UTF8.GetBytes($"payload for {cipher}+{compression}");

    await using var store = CreateStore($"perm-{cipher}-{compression}", new FileStoreOptions
    {
        Cipher = cipher,
        Compression = compression,
        EncryptionKey = key,
    });

    for (var n = 0; n < count; n++)
        await store.AppendAsync("test", payload, default);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)count);

    for (ulong seq = 1; seq <= count; seq++)
    {
        var msg = await store.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Payload.ToArray().ShouldBe(payload);
    }
}
|
||||
|
||||
// Every {cipher} x {compression} combination must survive a close/reopen
// cycle with state and payloads intact.
[Theory]
[InlineData(StoreCipher.NoCipher, StoreCompression.NoCompression)]
[InlineData(StoreCipher.NoCipher, StoreCompression.S2Compression)]
[InlineData(StoreCipher.ChaCha, StoreCompression.NoCompression)]
[InlineData(StoreCipher.ChaCha, StoreCompression.S2Compression)]
[InlineData(StoreCipher.Aes, StoreCompression.NoCompression)]
[InlineData(StoreCipher.Aes, StoreCompression.S2Compression)]
public async Task All_permutations_store_and_recover(StoreCipher cipher, StoreCompression compression)
{
    var sub = $"perm-recover-{cipher}-{compression}";
    var key = cipher == StoreCipher.NoCipher ? null : Key32;

    FileStoreOptions MakeOptions() => new() { Cipher = cipher, Compression = compression, EncryptionKey = key };

    // Write phase.
    await using (var writer = CreateStore(sub, MakeOptions()))
    {
        for (var n = 0; n < 20; n++)
            await writer.AppendAsync("x", Encoding.UTF8.GetBytes($"msg-{n:D3}"), default);
    }

    // Reopen and verify the counter plus a mid-stream payload.
    await using var reader = CreateStore(sub, MakeOptions());

    var state = await reader.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)20);

    var msg = await reader.LoadAsync(10, default);
    msg.ShouldNotBeNull();
    msg!.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("msg-009"));
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FSV2 data is not plaintext on disk
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// The on-disk JSONL must carry the FSV2 envelope, never the legacy FSV1 one.
// Payloads are base64-encoded in the JSONL, so the markers are checked via
// their base64 prefixes: "FSV2" → "RlNWMg", "FSV1" → "RlNWMQ".
// NOTE(review): the File.Exists guard makes this a best-effort check that is
// silently skipped if the data-file layout differs — consider asserting existence.
[Fact]
public async Task S2_data_differs_from_plaintext_on_disk()
{
    const string sub = "s2-disk";
    var dir = Path.Combine(_dir, sub);

    await using (var store = CreateStore(sub, new FileStoreOptions
    {
        Compression = StoreCompression.S2Compression,
    }))
    {
        await store.AppendAsync("foo", "AAAAAAAAAAAAAAAAAAAAAAAAA"u8.ToArray(), default);
    }

    var dataFile = Path.Combine(dir, "messages.jsonl");
    if (!File.Exists(dataFile))
        return;

    var raw = File.ReadAllText(dataFile);
    raw.ShouldContain("RlNWMg");
    raw.ShouldNotContain("RlNWMQ");
}
|
||||
|
||||
// A ChaCha-encrypted payload must never appear verbatim in the on-disk file.
// NOTE(review): the File.Exists guard makes this a best-effort check that is
// silently skipped if the data-file layout differs — consider asserting existence.
[Fact]
public async Task ChaCha_encrypted_data_not_plaintext_on_disk()
{
    const string sub = "chacha-disk";
    var dir = Path.Combine(_dir, sub);

    await using (var store = CreateStore(sub, new FileStoreOptions
    {
        Cipher = StoreCipher.ChaCha,
        EncryptionKey = Key32,
    }))
    {
        await store.AppendAsync("foo", "THIS IS SENSITIVE DATA"u8.ToArray(), default);
    }

    var dataFile = Path.Combine(dir, "messages.jsonl");
    if (!File.Exists(dataFile))
        return;

    File.ReadAllText(dataFile).ShouldNotContain("THIS IS SENSITIVE DATA");
}
|
||||
}
|
||||
@@ -0,0 +1,180 @@
|
||||
// Ported from golang/nats-server/server/memstore_test.go:
|
||||
// TestMemStoreBasics, TestMemStorePurge, TestMemStoreMsgHeaders (adapted),
|
||||
// TestMemStoreTimeStamps, TestMemStoreEraseMsg
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public class MemStoreBasicTests
{
    // Go ref: TestMemStoreBasics — append across three subjects, then verify
    // assigned sequences, stream state, and per-message payload round-trips.
    [Fact]
    public async Task Store_and_load_messages()
    {
        var store = new MemStore();

        string[] subjects = ["foo", "foo", "bar", "bar", "baz"];
        byte[][] payloads =
        [
            "Hello World"u8.ToArray(),
            "Second message"u8.ToArray(),
            "Third message"u8.ToArray(),
            "Fourth message"u8.ToArray(),
            "Fifth message"u8.ToArray(),
        ];

        // Sequences are assigned 1..5 in append order.
        for (var i = 0; i < subjects.Length; i++)
        {
            var seq = await store.AppendAsync(subjects[i], payloads[i], default);
            seq.ShouldBe((ulong)(i + 1));
        }

        var state = await store.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)5);
        state.FirstSeq.ShouldBe((ulong)1);
        state.LastSeq.ShouldBe((ulong)5);

        // Spot-check one message per subject.
        var first = await store.LoadAsync(1, default);
        first.ShouldNotBeNull();
        first.Subject.ShouldBe("foo");
        first.Sequence.ShouldBe((ulong)1);
        first.Payload.Span.SequenceEqual(payloads[0]).ShouldBeTrue();

        var third = await store.LoadAsync(3, default);
        third.ShouldNotBeNull();
        third.Subject.ShouldBe("bar");
        third.Payload.Span.SequenceEqual(payloads[2]).ShouldBeTrue();

        var fifth = await store.LoadAsync(5, default);
        fifth.ShouldNotBeNull();
        fifth.Subject.ShouldBe("baz");
        fifth.Payload.Span.SequenceEqual(payloads[4]).ShouldBeTrue();
    }

    // Go ref: TestMemStoreMsgHeaders (adapted) — the .NET StoredMessage has no
    // separate headers field (the protocol layer embeds headers in the payload),
    // so verify a payload containing NATS header framing round-trips byte-for-byte.
    [Fact]
    public async Task Store_preserves_payload_bytes_including_header_framing()
    {
        var store = new MemStore();

        var framing = Encoding.ASCII.GetBytes("NATS/1.0\r\nName: derek\r\n\r\n");
        var body = "Hello World"u8.ToArray();
        byte[] payload = [.. framing, .. body];

        (await store.AppendAsync("foo", payload, default)).ShouldBe((ulong)1);

        var loaded = await store.LoadAsync(1, default);
        loaded.ShouldNotBeNull();
        loaded.Subject.ShouldBe("foo");
        loaded.Payload.Length.ShouldBe(payload.Length);
        loaded.Payload.Span.SequenceEqual(payload).ShouldBeTrue();
    }

    // Go ref: TestMemStoreEraseMsg — removing interior messages updates the count
    // and makes those sequences unloadable; untouched sequences remain loadable.
    [Fact]
    public async Task Remove_messages_updates_state()
    {
        var store = new MemStore();

        string[] bodies = ["one", "two", "three", "four", "five"];
        var seqs = new ulong[bodies.Length];
        for (var i = 0; i < bodies.Length; i++)
            seqs[i] = await store.AppendAsync("foo", Encoding.UTF8.GetBytes(bodies[i]), default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)5);

        // Remove the two interior messages (seq 2 and seq 4).
        (await store.RemoveAsync(seqs[1], default)).ShouldBeTrue();
        (await store.RemoveAsync(seqs[3], default)).ShouldBeTrue();

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)3);

        // Removed sequences are no longer loadable...
        (await store.LoadAsync(seqs[1], default)).ShouldBeNull();
        (await store.LoadAsync(seqs[3], default)).ShouldBeNull();

        // ...the rest still are.
        (await store.LoadAsync(seqs[0], default)).ShouldNotBeNull();
        (await store.LoadAsync(seqs[2], default)).ShouldNotBeNull();
        (await store.LoadAsync(seqs[4], default)).ShouldNotBeNull();

        // Removing an unknown sequence reports false rather than throwing.
        (await store.RemoveAsync(99, default)).ShouldBeFalse();
    }

    // Go ref: TestMemStorePurge — purge drops every message and zeroes counters.
    [Fact]
    public async Task Purge_clears_all_messages()
    {
        var store = new MemStore();

        for (var n = 0; n < 10; n++)
            await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg{n}"), default);

        (await store.GetStateAsync(default)).Messages.ShouldBe((ulong)10);

        await store.PurgeAsync(default);

        var after = await store.GetStateAsync(default);
        after.Messages.ShouldBe((ulong)0);
        after.Bytes.ShouldBe((ulong)0);
    }

    // Go ref: TestMemStoreTimeStamps — timestamps are non-decreasing in
    // sequence order. (Only non-decreasing is asserted here; strict
    // distinctness would depend on clock resolution.)
    [Fact]
    public async Task Stored_messages_have_distinct_non_decreasing_timestamps()
    {
        var store = new MemStore();
        const int count = 5;

        for (var n = 0; n < count; n++)
            await store.AppendAsync("foo", "Hello World"u8.ToArray(), default);

        var messages = await store.ListAsync(default);
        messages.Count.ShouldBe(count);

        DateTime? prior = null;
        foreach (var m in messages)
        {
            if (prior is { } p)
                m.TimestampUtc.ShouldBeGreaterThanOrEqualTo(p);
            prior = m.TimestampUtc;
        }
    }

    // Go ref: TestMemStoreBasics — LoadLastBySubject resolves to the
    // highest-sequence message for that exact subject, or null when none match.
    [Fact]
    public async Task Load_last_by_subject_returns_most_recent_for_that_subject()
    {
        var store = new MemStore();

        await store.AppendAsync("foo", "first"u8.ToArray(), default);
        await store.AppendAsync("bar", "other"u8.ToArray(), default);
        await store.AppendAsync("foo", "second"u8.ToArray(), default);
        await store.AppendAsync("foo", "third"u8.ToArray(), default);

        var newest = await store.LoadLastBySubjectAsync("foo", default);
        newest.ShouldNotBeNull();
        newest.Subject.ShouldBe("foo");
        newest.Payload.Span.SequenceEqual("third"u8).ShouldBeTrue();

        (await store.LoadLastBySubjectAsync("does.not.exist", default)).ShouldBeNull();
    }
}
|
||||
@@ -0,0 +1,951 @@
|
||||
// Reference: golang/nats-server/server/memstore_test.go
|
||||
// Tests ported in this file:
|
||||
// TestMemStoreCompact → Compact_RemovesMessagesBeforeSeq
|
||||
// TestMemStoreStreamStateDeleted → StreamStateDeleted_TracksDmapCorrectly
|
||||
// TestMemStoreStreamTruncate → Truncate_RemovesMessagesAfterSeq
|
||||
// TestMemStoreUpdateMaxMsgsPerSubject → UpdateMaxMsgsPerSubject_EnforcesNewLimit
|
||||
// TestMemStoreStreamCompactMultiBlockSubjectInfo → Compact_AdjustsSubjectCount
|
||||
// TestMemStoreSubjectsTotals → SubjectsTotals_MatchesStoredCounts
|
||||
// TestMemStoreNumPending → NumPending_MatchesFilteredCount
|
||||
// TestMemStoreMultiLastSeqs → MultiLastSeqs_ReturnsLastPerSubject
|
||||
// TestMemStoreSubjectForSeq → SubjectForSeq_ReturnsCorrectSubject
|
||||
// TestMemStoreSubjectDeleteMarkers → SubjectDeleteMarkers_TtlExpiry (skipped: needs pmsgcb)
|
||||
// TestMemStoreAllLastSeqs → AllLastSeqs_ReturnsLastPerSubjectSorted
|
||||
// TestMemStoreGetSeqFromTimeWithLastDeleted → GetSeqFromTime_WithLastDeleted
|
||||
// TestMemStoreSkipMsgs → SkipMsgs_ReservesSequences
|
||||
// TestMemStoreDeleteBlocks → DeleteBlocks_DmapSizeMatchesNumDeleted
|
||||
// TestMemStoreMessageTTL → MessageTTL_ExpiresAfterDelay
|
||||
// TestMemStoreUpdateConfigTTLState → UpdateConfig_TtlStateInitializedAndDestroyed
|
||||
// TestMemStoreNextWildcardMatch → NextWildcardMatch_BoundsAreCorrect
|
||||
// TestMemStoreNextLiteralMatch → NextLiteralMatch_BoundsAreCorrect
|
||||
// TestMemStoreInitialFirstSeq → InitialFirstSeq_StartAtConfiguredSeq
|
||||
// TestMemStoreStreamTruncateReset → TruncateReset_ClearsEverything
|
||||
// TestMemStorePurgeExWithSubject → PurgeEx_WithSubject_PurgesAll
|
||||
// TestMemStorePurgeExWithDeletedMsgs → PurgeEx_WithDeletedMsgs_CorrectFirstSeq
|
||||
// TestMemStoreDeleteAllFirstSequenceCheck → DeleteAll_FirstSeqIsLastPlusOne
|
||||
// TestMemStoreNumPendingBug → NumPending_Bug_CorrectCount
|
||||
// TestMemStorePurgeLeaksDmap → Purge_ClearsDmap
|
||||
// TestMemStoreMultiLastSeqsMaxAllowed → MultiLastSeqs_MaxAllowed_ThrowsWhenExceeded
|
||||
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Go MemStore parity tests. Each test mirrors a specific Go test from
|
||||
/// golang/nats-server/server/memstore_test.go to verify behaviour parity.
|
||||
/// </summary>
|
||||
public sealed class MemStoreGoParityTests
|
||||
{
|
||||
// Helper: view a MemStore through its synchronous IStreamStore surface.
private static IStreamStore Sync(MemStore ms)
{
    return ms;
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Compact
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreCompact server/memstore_test.go:259
// Compact(seq) drops every message below seq and reports how many went away;
// compacting past the end empties the store and pins FirstSeq at the target.
[Fact]
public void Compact_RemovesMessagesBeforeSeq()
{
    var s = Sync(new MemStore());

    for (var n = 0; n < 10; n++)
        s.StoreMsg("foo", null, "Hello World"u8.ToArray(), 0);
    s.State().Msgs.ShouldBe(10UL);

    // Compact away sequences 1..5.
    s.Compact(6).ShouldBe(5UL);

    var afterCompact = s.State();
    afterCompact.Msgs.ShouldBe(5UL);
    afterCompact.FirstSeq.ShouldBe(6UL);

    // Compact far past the end: the remaining 5 go, FirstSeq jumps to the target.
    s.Compact(100).ShouldBe(5UL);
    s.State().FirstSeq.ShouldBe(100UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// StreamStateDeleted
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreStreamStateDeleted server/memstore_test.go:342
// Verifies the delete-map reported via State().Deleted: interior removals are
// listed in order, head removals are absorbed by advancing FirstSeq (dropping
// their dmap entries), and Purge clears the map entirely.
[Fact]
public void StreamStateDeleted_TracksDmapCorrectly()
{
    var ms = new MemStore();
    var s = Sync(ms);

    const ulong toStore = 10;
    for (ulong i = 1; i <= toStore; i++)
        s.StoreMsg("foo", null, new byte[8], 0);

    // No removals yet — Deleted is reported as null, not as an empty array.
    s.State().Deleted.ShouldBeNull();

    // Delete even sequences 2,4,6,8 — interior holes tracked in the dmap.
    var expectedDeleted = new List<ulong>();
    for (ulong seq = 2; seq < toStore; seq += 2)
    {
        s.RemoveMsg(seq);
        expectedDeleted.Add(seq);
    }

    var state = s.State();
    state.Deleted.ShouldNotBeNull();
    state.Deleted!.ShouldBe(expectedDeleted.ToArray());

    // Delete 1 and 3 to fill first gap — deleted should shift forward:
    // with seqs 1-4 all gone, FirstSeq advances to 5 and the dmap entries
    // 2 and 4 (now below FirstSeq) are dropped.
    s.RemoveMsg(1);
    s.RemoveMsg(3);
    expectedDeleted = expectedDeleted.Skip(2).ToList(); // remove 2 and 4 from start
    state = s.State();
    state.Deleted!.ShouldBe(expectedDeleted.ToArray());
    state.FirstSeq.ShouldBe(5UL);

    // Purge resets the dmap along with everything else.
    s.Purge();
    s.State().Deleted.ShouldBeNull();
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Truncate
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreStreamTruncate server/memstore_test.go:385
// Truncate(seq) drops every message ABOVE seq. Interior deletions below the
// truncation point must survive in the dmap and stay excluded from the count.
[Fact]
public void Truncate_RemovesMessagesAfterSeq()
{
    var ms = new MemStore();
    var s = Sync(ms);

    const ulong tseq = 50;
    const ulong toStore = 100;

    // Seqs 1..49 land on "foo", seqs 50..100 on "bar".
    for (ulong i = 1; i < tseq; i++)
        s.StoreMsg("foo", null, "ok"u8.ToArray(), 0);
    for (var i = tseq; i <= toStore; i++)
        s.StoreMsg("bar", null, "ok"u8.ToArray(), 0);

    s.State().Msgs.ShouldBe(toStore);

    // Truncate to 50: keeps seqs 1..50.
    s.Truncate(tseq);
    s.State().Msgs.ShouldBe(tseq);

    // Truncate with some interior deletes. 30 and 40 fall above the next
    // truncation point (25) and vanish with it; only 10 and 20 stay in the dmap.
    s.RemoveMsg(10);
    s.RemoveMsg(20);
    s.RemoveMsg(30);
    s.RemoveMsg(40);

    s.Truncate(25);
    var state = s.State();
    // 25 seqs remaining, minus 2 deleted (10, 20) = 23 messages
    state.Msgs.ShouldBe(tseq - 2 - (tseq - 25));
    state.NumSubjects.ShouldBe(1); // only "foo" left — all "bar" seqs were >= 50
    state.Deleted!.ShouldBe([10UL, 20UL]);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// TruncateReset
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreStreamTruncateReset server/memstore_test.go:490
// Truncate(0) acts as a full reset; afterwards the store must be reusable,
// with sequences restarting at 1.
[Fact]
public void TruncateReset_ClearsEverything()
{
    var s = Sync(new MemStore());
    var payload = "Hello World"u8.ToArray();

    for (var n = 0; n < 1000; n++)
        s.StoreMsg("foo", null, payload, 0);

    s.Truncate(0);

    var cleared = s.State();
    cleared.Msgs.ShouldBe(0UL);
    cleared.Bytes.ShouldBe(0UL);
    cleared.FirstSeq.ShouldBe(0UL);
    cleared.LastSeq.ShouldBe(0UL);
    cleared.NumSubjects.ShouldBe(0);
    cleared.NumDeleted.ShouldBe(0);

    // The store accepts writes again after the reset.
    for (var n = 0; n < 1000; n++)
        s.StoreMsg("foo", null, payload, 0);

    var refilled = s.State();
    refilled.Msgs.ShouldBe(1000UL);
    refilled.FirstSeq.ShouldBe(1UL);
    refilled.LastSeq.ShouldBe(1000UL);
    refilled.NumSubjects.ShouldBe(1);
    refilled.NumDeleted.ShouldBe(0);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// UpdateMaxMsgsPerSubject
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreUpdateMaxMsgsPerSubject server/memstore_test.go:452
// Raising MaxMsgsPer admits more messages per subject; lowering it must
// immediately trim existing subjects down to the new cap.
[Fact]
public void UpdateMaxMsgsPerSubject_EnforcesNewLimit()
{
    var cfg = new StreamConfig
    {
        Name = "TEST",
        Storage = StorageType.Memory,
        Subjects = ["foo"],
        MaxMsgsPer = 10,
    };
    var s = Sync(new MemStore(cfg));

    // Raise the per-subject cap before storing anything.
    cfg.MaxMsgsPer = 50;
    s.UpdateConfig(cfg);

    const int numStored = 22;
    for (var n = 0; n < numStored; n++)
        s.StoreMsg("foo", null, [], 0);

    s.SubjectsState("foo")["foo"].Msgs.ShouldBe((ulong)numStored);

    // Lower the cap below the stored count — the excess must be evicted.
    cfg.MaxMsgsPer = 10;
    s.UpdateConfig(cfg);

    s.SubjectsState("foo")["foo"].Msgs.ShouldBe(10UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// CompactMultiBlockSubjectInfo
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreStreamCompactMultiBlockSubjectInfo server/memstore_test.go:531
// Compacting must also drop the per-subject bookkeeping of removed messages.
[Fact]
public void Compact_AdjustsSubjectCount()
{
    var s = Sync(new MemStore());
    var payload = "Hello World"u8.ToArray();

    // One message per distinct subject foo.0 .. foo.999.
    for (var n = 0; n < 1000; n++)
        s.StoreMsg($"foo.{n}", null, payload, 0);

    s.Compact(501).ShouldBe(500UL);

    // Subjects foo.0 .. foo.499 went with their messages; 500 remain.
    s.State().NumSubjects.ShouldBe(500);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// SubjectsTotals
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreSubjectsTotals server/memstore_test.go:557
// Store 10k messages across randomly chosen foo.N / bar.N subjects (seeded
// RNG, so the distribution is reproducible), then verify SubjectsTotals for
// literal subjects and for the foo.*, bar.*, and *.* wildcard filters.
[Fact]
public void SubjectsTotals_MatchesStoredCounts()
{
    var ms = new MemStore();
    var s = Sync(ms);

    // Expected per-suffix message counts for "foo.{n}" and "bar.{n}".
    var fmap = new Dictionary<int, int>();
    var bmap = new Dictionary<int, int>();
    var rng = new Random(42); // fixed seed keeps counts deterministic

    for (var i = 0; i < 10_000; i++)
    {
        string ft;
        Dictionary<int, int> m;
        if (rng.Next(2) == 0) { ft = "foo"; m = fmap; }
        else { ft = "bar"; m = bmap; }
        var dt = rng.Next(100);
        var subj = $"{ft}.{dt}";
        m.TryGetValue(dt, out var c);
        m[dt] = c + 1;
        s.StoreMsg(subj, null, "Hello World"u8.ToArray(), 0);
    }

    // Check individual foo subjects against the tracked counts.
    foreach (var kv in fmap)
    {
        var subj = $"foo.{kv.Key}";
        var totals = s.SubjectsTotals(subj);
        totals[subj].ShouldBe((ulong)kv.Value);
    }

    // Check foo.* wildcard: one entry per distinct suffix, and the per-subject
    // totals must sum to the overall foo message count.
    var fooTotals = s.SubjectsTotals("foo.*");
    fooTotals.Count.ShouldBe(fmap.Count);
    var fooExpected = (ulong)fmap.Values.Sum(n => n);
    fooTotals.Values.Aggregate(0UL, (a, v) => a + v).ShouldBe(fooExpected);

    // Check bar.* wildcard
    var barTotals = s.SubjectsTotals("bar.*");
    barTotals.Count.ShouldBe(bmap.Count);

    // Check *.* covers every stored subject.
    var allTotals = s.SubjectsTotals("*.*");
    allTotals.Count.ShouldBe(fmap.Count + bmap.Count);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// NumPending
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreNumPending server/memstore_test.go:637
// For several subject filters and start sequences, NumPending must agree with
// a brute-force count of stored messages matching the filter, and must report
// validThrough as the stream's last sequence.
[Fact]
public void NumPending_MatchesFilteredCount()
{
    var ms = new MemStore();
    var s = Sync(ms);
    var tokens = new[] { "foo", "bar", "baz" };
    var rng = new Random(99); // fixed seed keeps the subject mix deterministic

    // Random 4-token subject drawn from a 3-token alphabet.
    string GenSubj() => $"{tokens[rng.Next(3)]}.{tokens[rng.Next(3)]}.{tokens[rng.Next(3)]}.{tokens[rng.Next(3)]}";

    for (var i = 0; i < 5_000; i++)
        s.StoreMsg(GenSubj(), null, "Hello World"u8.ToArray(), 0);

    var state = s.State();
    var startSeqs = new ulong[] { 0, 1, 2, 200, 444, 555, 2222, 4000 };
    var checkSubs = new[] { "foo.>", "*.bar.>", "foo.bar.*.baz", "*.foo.bar.*", "foo.foo.bar.baz" };

    foreach (var filter in checkSubs)
    {
        foreach (var startSeq in startSeqs)
        {
            var (total, validThrough) = s.NumPending(startSeq, filter, false);
            validThrough.ShouldBe(state.LastSeq);

            // Sanity-check: manually count matching msgs from startSeq.
            // startSeq 0 is normalized to 1, mirroring the store's semantics.
            var sseq = startSeq == 0 ? 1 : startSeq;
            ulong expected = 0;
            for (var seq = sseq; seq <= state.LastSeq; seq++)
            {
                try
                {
                    var sm = s.LoadMsg(seq, null);
                    if (SubjectMatchesFilter(sm.Subject, filter)) expected++;
                }
                catch (KeyNotFoundException) { } // missing/deleted sequence — not pending
            }
            total.ShouldBe(expected, $"filter={filter} start={startSeq}");
        }
    }
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// MultiLastSeqs
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreMultiLastSeqs server/memstore_test.go:923
// MultiLastSeqs returns, per subject matched by the filters, the last sequence
// at or below the given ceiling (0 = no ceiling); overlapping filters are
// de-duplicated and results come back in ascending sequence order.
[Fact]
public void MultiLastSeqs_ReturnsLastPerSubject()
{
    var ms = new MemStore();
    var s = Sync(ms);
    var msg = "abc"u8.ToArray();

    // Seqs 1..99: round-robin over foo.foo / foo.bar / foo.baz.
    for (var i = 0; i < 33; i++)
    {
        s.StoreMsg("foo.foo", null, msg, 0);
        s.StoreMsg("foo.bar", null, msg, 0);
        s.StoreMsg("foo.baz", null, msg, 0);
    }
    // Seqs 100..198: round-robin over bar.foo / bar.bar / bar.baz.
    for (var i = 0; i < 33; i++)
    {
        s.StoreMsg("bar.foo", null, msg, 0);
        s.StoreMsg("bar.bar", null, msg, 0);
        s.StoreMsg("bar.baz", null, msg, 0);
    }

    // Up to seq 3
    s.MultiLastSeqs(["foo.*"], 3, -1).ShouldBe([1UL, 2UL, 3UL]);
    // All of foo.*
    s.MultiLastSeqs(["foo.*"], 0, -1).ShouldBe([97UL, 98UL, 99UL]);
    // All of bar.*
    s.MultiLastSeqs(["bar.*"], 0, -1).ShouldBe([196UL, 197UL, 198UL]);
    // bar.* at seq <= 99 — nothing (bar messages only start at seq 100)
    s.MultiLastSeqs(["bar.*"], 99, -1).ShouldBe([]);

    // Explicit subjects behave exactly like the equivalent wildcard.
    s.MultiLastSeqs(["foo.foo", "foo.bar", "foo.baz"], 3, -1).ShouldBe([1UL, 2UL, 3UL]);
    s.MultiLastSeqs(["foo.foo", "foo.bar", "foo.baz"], 0, -1).ShouldBe([97UL, 98UL, 99UL]);
    s.MultiLastSeqs(["bar.foo", "bar.bar", "bar.baz"], 0, -1).ShouldBe([196UL, 197UL, 198UL]);
    s.MultiLastSeqs(["bar.foo", "bar.bar", "bar.baz"], 99, -1).ShouldBe([]);

    // Single filter
    s.MultiLastSeqs(["foo.foo"], 3, -1).ShouldBe([1UL]);

    // De-duplicate overlapping filters
    s.MultiLastSeqs(["foo.*", "foo.bar"], 3, -1).ShouldBe([1UL, 2UL, 3UL]);

    // All subjects
    s.MultiLastSeqs([">"], 0, -1).ShouldBe([97UL, 98UL, 99UL, 196UL, 197UL, 198UL]);
    s.MultiLastSeqs([">"], 99, -1).ShouldBe([97UL, 98UL, 99UL]);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// MultiLastSeqs — maxAllowed
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreMultiLastSeqsMaxAllowed server/memstore_test.go:1010
// MultiLastSeqs must refuse to produce a result set larger than maxAllowed.
[Fact]
public void MultiLastSeqs_MaxAllowed_ThrowsWhenExceeded()
{
    var s = Sync(new MemStore());
    var payload = "abc"u8.ToArray();

    // 100 distinct subjects — far more than the cap of 10 below.
    for (var n = 1; n <= 100; n++)
        s.StoreMsg($"foo.{n}", null, payload, 0);

    Should.Throw<InvalidOperationException>(() => s.MultiLastSeqs(["foo.*"], 0, 10));
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// SubjectForSeq
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreSubjectForSeq server/memstore_test.go:1319
// SubjectForSeq maps a stored sequence to its subject and throws for
// sequences outside the stored range.
[Fact]
public void SubjectForSeq_ReturnsCorrectSubject()
{
    var s = Sync(new MemStore());

    s.StoreMsg("foo.bar", null, [], 0);

    // Below the first stored sequence.
    Should.Throw<KeyNotFoundException>(() => s.SubjectForSeq(0));

    // The single stored message.
    s.SubjectForSeq(1).ShouldBe("foo.bar");

    // Beyond the last stored sequence.
    Should.Throw<KeyNotFoundException>(() => s.SubjectForSeq(2));
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// AllLastSeqs
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreAllLastSeqs server/memstore_test.go:1266
[Fact]
public void AllLastSeqs_ReturnsLastPerSubjectSorted()
{
    var cfg = new StreamConfig
    {
        Name = "zzz",
        Subjects = ["*.*"],
        MaxMsgsPer = 50,
        Storage = StorageType.Memory,
    };
    var store = Sync(new MemStore(cfg));

    string[] subjects = ["foo.foo", "foo.bar", "foo.baz", "bar.foo", "bar.bar", "bar.baz"];
    var payload = "abc"u8.ToArray();
    var rng = new Random(7);

    // Spray 10k messages across the subjects with a fixed seed for determinism.
    for (var i = 0; i < 10_000; i++)
        store.StoreMsg(subjects[rng.Next(subjects.Length)], null, payload, 0);

    // Expected: the last sequence recorded per subject, in ascending order.
    var expected = new List<ulong>();
    foreach (var subject in subjects)
    {
        try
        {
            expected.Add(store.LoadLastMsg(subject, null).Sequence);
        }
        catch (KeyNotFoundException)
        {
            // Subject never hit by the random writer — nothing to expect.
        }
    }
    expected.Sort();

    store.AllLastSeqs().ShouldBe(expected.ToArray());
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// GetSeqFromTime with last deleted
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreGetSeqFromTimeWithLastDeleted server/memstore_test.go:839
[Fact]
public void GetSeqFromTime_WithLastDeleted()
{
    var store = Sync(new MemStore());

    const int total = 1000;
    DateTime midTime = default;
    for (var i = 1; i <= total; i++)
    {
        store.StoreMsg("A", null, "OK"u8.ToArray(), 0);
        if (i == total / 2)
        {
            // Capture a timestamp strictly after the first half was written.
            Thread.Sleep(100);
            midTime = DateTime.UtcNow;
        }
    }

    // Remove the trailing messages (seqs 900..1000 inclusive).
    for (var seq = total - 100; seq <= total; seq++)
        store.RemoveMsg((ulong)seq);

    // Lookup must not blow up on the deleted tail and still lands at 501.
    store.GetSeqFromTime(midTime).ShouldBe(501UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// SkipMsgs
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreSkipMsgs server/memstore_test.go:871
[Fact]
public void SkipMsgs_ReservesSequences()
{
    var store = Sync(new MemStore());

    // A starting sequence that does not line up must be rejected.
    Should.Throw<InvalidOperationException>(() => store.SkipMsgs(10, 100));

    // Reserve 100 sequences starting at 1.
    store.SkipMsgs(1, 100);
    var state = store.State();
    state.FirstSeq.ShouldBe(101UL);
    state.LastSeq.ShouldBe(100UL);

    // And a much larger batch on top.
    store.SkipMsgs(101, 100_000);
    state = store.State();
    state.FirstSeq.ShouldBe(100_101UL);
    state.LastSeq.ShouldBe(100_100UL);

    // Fresh store: one real message, then a skip layered on it.
    var second = Sync(new MemStore());
    second.StoreMsg("foo", null, [], 0);
    second.SkipMsgs(2, 10);

    state = second.State();
    state.FirstSeq.ShouldBe(1UL);
    state.LastSeq.ShouldBe(11UL);
    state.Msgs.ShouldBe(1UL);
    state.NumDeleted.ShouldBe(10);
    state.Deleted.ShouldNotBeNull();
    state.Deleted!.Length.ShouldBe(10);

    // FastState must agree with the full state.
    var fast = new StreamState();
    second.FastState(ref fast);
    fast.FirstSeq.ShouldBe(1UL);
    fast.LastSeq.ShouldBe(11UL);
    fast.Msgs.ShouldBe(1UL);
    fast.NumDeleted.ShouldBe(10);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// DeleteBlocks
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreDeleteBlocks server/memstore_test.go:799
[Fact]
public void DeleteBlocks_DmapSizeMatchesNumDeleted()
{
    var store = Sync(new MemStore());

    const int total = 10_000;
    for (var i = 0; i < total; i++)
        store.StoreMsg("A", null, "OK"u8.ToArray(), 0);

    // Pick 5000 distinct random sequences to delete (seeded for determinism).
    var rng = new Random(13);
    var victims = new HashSet<int>();
    while (victims.Count < 5000)
        victims.Add(rng.Next(total) + 1);

    foreach (var seq in victims)
        store.RemoveMsg((ulong)seq);

    var fast = new StreamState();
    store.FastState(ref fast);

    // The delete map exposed by full State must match FastState's count.
    var dmapSize = store.State().Deleted?.Length ?? 0;
    dmapSize.ShouldBe(fast.NumDeleted);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// MessageTTL
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreMessageTTL server/memstore_test.go:1202
[Fact]
public void MessageTTL_ExpiresAfterDelay()
{
    var cfg = new StreamConfig
    {
        Name = "zzz",
        Subjects = ["test"],
        Storage = StorageType.Memory,
        AllowMsgTtl = true,
    };
    var store = Sync(new MemStore(cfg));

    const long ttlSeconds = 1;
    for (var i = 1; i <= 10; i++)
        store.StoreMsg("test", null, [], ttlSeconds);

    var state = new StreamState();
    store.FastState(ref state);
    state.FirstSeq.ShouldBe(1UL);
    state.LastSeq.ShouldBe(10UL);
    state.Msgs.ShouldBe(10UL);

    // TTL is 1s and the expiry sweep interval is 1s — 2.5s covers both.
    Thread.Sleep(2_500);

    // Everything expired: the stream is empty, FirstSeq points past LastSeq.
    store.FastState(ref state);
    state.FirstSeq.ShouldBe(11UL);
    state.LastSeq.ShouldBe(10UL);
    state.Msgs.ShouldBe(0UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// UpdateConfigTTLState
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreUpdateConfigTTLState server/memstore_test.go:1299
//
// NOTE(review): the earlier version of this test mutated the very StreamConfig
// instance the store was constructed with, so the store could observe the
// AllowMsgTtl flip *before* UpdateConfig was called. Each reconfiguration now
// passes a fresh config object, so only UpdateConfig carries the change.
[Fact]
public void UpdateConfig_TtlStateInitializedAndDestroyed()
{
    // Builds a config with the given TTL flag; all other fields fixed.
    static StreamConfig MakeConfig(bool allowTtl) => new()
    {
        Name = "zzz",
        Subjects = [">"],
        Storage = StorageType.Memory,
        AllowMsgTtl = allowTtl,
    };

    var store = Sync(new MemStore(MakeConfig(allowTtl: false)));

    // TTL initially disabled — the internal TTL wheel is not directly
    // observable, but UpdateConfig must not throw and subsequent behaviour
    // must be correct.
    store.UpdateConfig(MakeConfig(allowTtl: true));

    // Storing with a TTL should now work.
    store.StoreMsg("test", null, [], 3600);
    store.State().Msgs.ShouldBe(1UL);

    // Disable TTL again.
    store.UpdateConfig(MakeConfig(allowTtl: false));

    // Message stored before disabling must still be present
    // (TTL wheel gone, but the message stays).
    store.State().Msgs.ShouldBe(1UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// NextWildcardMatch
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreNextWildcardMatch server/memstore_test.go:1373
[Fact]
public void NextWildcardMatch_BoundsAreCorrect()
{
    var ms = new MemStore();
    var store = Sync(ms);

    // Store `count` copies of the same subject.
    void StoreN(string subject, int count)
    {
        for (var i = 0; i < count; i++)
            store.StoreMsg(subject, null, "msg"u8.ToArray(), 0);
    }

    StoreN("foo.bar.a", 1);             // seq 1
    StoreN("foo.baz.bar", 10);          // seqs 2-11
    StoreN("foo.bar.b", 1);             // seq 12
    StoreN("foo.baz.bar", 10);          // seqs 13-22
    StoreN("foo.baz.bar.no.match", 10); // seqs 23-32

    lock (ms.Gate)
    {
        // From seq 0 and seq 1: both foo.bar.* messages bound the range.
        var (first, last, found) = ms.NextWildcardMatchLocked("foo.bar.*", 0);
        found.ShouldBeTrue();
        first.ShouldBe(1UL);
        last.ShouldBe(12UL);

        (first, last, found) = ms.NextWildcardMatchLocked("foo.bar.*", 1);
        found.ShouldBeTrue();
        first.ShouldBe(1UL);
        last.ShouldBe(12UL);

        // Starting past the first match narrows down to the second one.
        (first, last, found) = ms.NextWildcardMatchLocked("foo.bar.*", 2);
        found.ShouldBeTrue();
        first.ShouldBe(12UL);
        last.ShouldBe(12UL);

        // Nothing beyond seq 12 matches foo.bar.*.
        (_, _, found) = ms.NextWildcardMatchLocked("foo.bar.*", first + 1);
        found.ShouldBeFalse();

        (first, last, found) = ms.NextWildcardMatchLocked("foo.baz.*", 1);
        found.ShouldBeTrue();
        first.ShouldBe(2UL);
        last.ShouldBe(22UL);

        // Unmatched filter: not found, bounds zeroed.
        (first, last, found) = ms.NextWildcardMatchLocked("foo.nope.*", 1);
        found.ShouldBeFalse();
        first.ShouldBe(0UL);
        last.ShouldBe(0UL);

        // The ">" tail wildcard matches everything under foo, including
        // the deeper foo.baz.bar.no.match messages.
        (first, last, found) = ms.NextWildcardMatchLocked("foo.>", 1);
        found.ShouldBeTrue();
        first.ShouldBe(1UL);
        last.ShouldBe(32UL);
    }
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// NextLiteralMatch
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreNextLiteralMatch server/memstore_test.go:1454
[Fact]
public void NextLiteralMatch_BoundsAreCorrect()
{
    var ms = new MemStore();
    var store = Sync(ms);

    // Store `count` copies of the same subject.
    void StoreN(string subject, int count)
    {
        for (var i = 0; i < count; i++)
            store.StoreMsg(subject, null, "msg"u8.ToArray(), 0);
    }

    StoreN("foo.bar.a", 1);             // seq 1
    StoreN("foo.baz.bar", 10);          // seqs 2-11
    StoreN("foo.bar.b", 1);             // seq 12
    StoreN("foo.baz.bar", 10);          // seqs 13-22
    StoreN("foo.baz.bar.no.match", 10); // seqs 23-32

    lock (ms.Gate)
    {
        // A single occurrence: first == last == 1.
        var (first, last, found) = ms.NextLiteralMatchLocked("foo.bar.a", 0);
        found.ShouldBeTrue();
        first.ShouldBe(1UL);
        last.ShouldBe(1UL);

        // Nothing at or after seq 2 for that literal.
        (_, _, found) = ms.NextLiteralMatchLocked("foo.bar.a", 2);
        found.ShouldBeFalse();

        // Both runs of foo.baz.bar bound the range 2..22.
        (first, last, found) = ms.NextLiteralMatchLocked("foo.baz.bar", 1);
        found.ShouldBeTrue();
        first.ShouldBe(2UL);
        last.ShouldBe(22UL);

        // Starting exactly at the final occurrence collapses the range.
        (first, last, found) = ms.NextLiteralMatchLocked("foo.baz.bar", 22);
        found.ShouldBeTrue();
        first.ShouldBe(22UL);
        last.ShouldBe(22UL);

        // Past the final occurrence: not found, bounds zeroed.
        (first, last, found) = ms.NextLiteralMatchLocked("foo.baz.bar", 23);
        found.ShouldBeFalse();
        first.ShouldBe(0UL);
        last.ShouldBe(0UL);

        (_, _, found) = ms.NextLiteralMatchLocked("foo.nope", 1);
        found.ShouldBeFalse();
    }
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// InitialFirstSeq
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreInitialFirstSeq server/memstore_test.go:765
[Fact]
public void InitialFirstSeq_StartAtConfiguredSeq()
{
    var store = Sync(new MemStore(new StreamConfig
    {
        Name = "zzz",
        Storage = StorageType.Memory,
        FirstSeq = 1000,
    }));

    // Sequencing begins at the configured FirstSeq, not at 1.
    var (seq, _) = store.StoreMsg("A", null, "OK"u8.ToArray(), 0);
    seq.ShouldBe(1000UL);

    (seq, _) = store.StoreMsg("B", null, "OK"u8.ToArray(), 0);
    seq.ShouldBe(1001UL);

    var state = new StreamState();
    store.FastState(ref state);
    state.Msgs.ShouldBe(2UL);
    state.FirstSeq.ShouldBe(1000UL);
    state.LastSeq.ShouldBe(1001UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// PurgeEx with subject
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStorePurgeExWithSubject server/memstore_test.go:437
[Fact]
public void PurgeEx_WithSubject_PurgesAll()
{
    var store = Sync(new MemStore());

    for (var i = 0; i < 100; i++)
        store.StoreMsg("foo", null, [], 0);

    // A subject-scoped purge removes every message on "foo".
    store.PurgeEx("foo", 1, 0);
    store.State().Msgs.ShouldBe(0UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// PurgeEx with deleted messages
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStorePurgeExWithDeletedMsgs server/memstore_test.go:1031
[Fact]
public void PurgeEx_WithDeletedMsgs_CorrectFirstSeq()
{
    var store = Sync(new MemStore());
    var payload = "abc"u8.ToArray();

    for (var i = 1; i <= 10; i++)
        store.StoreMsg("foo", null, payload, 0);

    // Punch two holes; seq 9 being already deleted was the original bug trigger.
    store.RemoveMsg(2);
    store.RemoveMsg(9);

    // Purge below seq 9 removes 1,3,4,5,6,7,8 — 2 and 9 are already gone.
    store.PurgeEx("", 9, 0).ShouldBe(7UL);

    var state = new StreamState();
    store.FastState(ref state);
    state.FirstSeq.ShouldBe(10UL);
    state.LastSeq.ShouldBe(10UL);
    state.Msgs.ShouldBe(1UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// DeleteAll FirstSequenceCheck
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreDeleteAllFirstSequenceCheck server/memstore_test.go:1060
[Fact]
public void DeleteAll_FirstSeqIsLastPlusOne()
{
    var store = Sync(new MemStore());

    for (var i = 1; i <= 10; i++)
        store.StoreMsg("foo", null, "abc"u8.ToArray(), 0);

    // Remove every message individually.
    for (ulong seq = 1; seq <= 10; seq++)
        store.RemoveMsg(seq);

    // An emptied stream reports FirstSeq = LastSeq + 1.
    var state = new StreamState();
    store.FastState(ref state);
    state.FirstSeq.ShouldBe(11UL);
    state.LastSeq.ShouldBe(10UL);
    state.Msgs.ShouldBe(0UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// NumPending — bug fix
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStoreNumPendingBug server/memstore_test.go:1137
[Fact]
public void NumPending_Bug_CorrectCount()
{
    var store = Sync(new MemStore());

    // Interleave "foo.aaa" with pairs of each target subject: 12 messages total.
    foreach (var subj in new[] { "foo.foo", "foo.bar", "foo.baz", "foo.zzz" })
    {
        store.StoreMsg("foo.aaa", null, [], 0);
        store.StoreMsg(subj, null, [], 0);
        store.StoreMsg(subj, null, [], 0);
    }

    var (total, _) = store.NumPending(4, "foo.*", false);

    // Recompute the expectation by brute force over seqs 4..12.
    ulong expected = 0;
    for (var seq = 4; seq <= 12; seq++)
    {
        try
        {
            if (SubjectMatchesFilter(store.LoadMsg((ulong)seq, null).Subject, "foo.*"))
                expected++;
        }
        catch (KeyNotFoundException)
        {
            // Sequence missing — nothing to count.
        }
    }

    total.ShouldBe(expected);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Purge clears dmap
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestMemStorePurgeLeaksDmap server/memstore_test.go:1168
[Fact]
public void Purge_ClearsDmap()
{
    var store = Sync(new MemStore());

    for (var i = 0; i < 10; i++)
        store.StoreMsg("foo", null, [], 0);

    // Delete seqs 2..9, leaving 8 interior gaps.
    for (ulong seq = 2; seq <= 9; seq++)
        store.RemoveMsg(seq);

    store.State().NumDeleted.ShouldBe(8);

    // Purge removes the two survivors (seqs 1 and 10) and must also drop the dmap.
    store.Purge().ShouldBe(2UL);

    var state = store.State();
    state.NumDeleted.ShouldBe(0);
    state.Deleted.ShouldBeNull();
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Returns true when <paramref name="subject"/> is selected by
// <paramref name="filter"/>. Empty or ">" matches everything; a literal
// filter is compared ordinally; otherwise defer to SubjectMatch.MatchLiteral,
// which (mirroring the Go helper of the same name) matches the literal
// subject against the wildcard filter.
private static bool SubjectMatchesFilter(string subject, string filter)
{
    if (string.IsNullOrEmpty(filter) || filter == ">")
        return true;

    return NATS.Server.Subscriptions.SubjectMatch.IsLiteral(filter)
        ? string.Equals(subject, filter, StringComparison.Ordinal)
        : NATS.Server.Subscriptions.SubjectMatch.MatchLiteral(subject, filter);
}
|
||||
}
|
||||
@@ -0,0 +1,361 @@
|
||||
// Reference: golang/nats-server/server/memstore_test.go and filestore_test.go
|
||||
// Tests ported from: TestMemStoreBasics, TestMemStorePurge, TestMemStoreMsgHeaders,
|
||||
// TestMemStoreTimeStamps, TestMemStoreEraseMsg,
|
||||
// TestMemStoreMsgLimit, TestMemStoreBytesLimit,
|
||||
// TestMemStoreAgeLimit, plus parity tests matching
|
||||
// filestore behavior in MemStore.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class MemStoreTests
{
    // Go: TestMemStoreBasics server/memstore_test.go
    [Fact]
    public async Task Store_and_load_messages()
    {
        var sut = new MemStore();

        // Two appends take sequences 1 and 2.
        var firstSeq = await sut.AppendAsync("foo", "Hello World"u8.ToArray(), default);
        var secondSeq = await sut.AppendAsync("bar", "Second"u8.ToArray(), default);
        firstSeq.ShouldBe((ulong)1);
        secondSeq.ShouldBe((ulong)2);

        var state = await sut.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)2);
        state.FirstSeq.ShouldBe((ulong)1);
        state.LastSeq.ShouldBe((ulong)2);

        var first = await sut.LoadAsync(1, default);
        first.ShouldNotBeNull();
        first!.Subject.ShouldBe("foo");
        first.Payload.ToArray().ShouldBe("Hello World"u8.ToArray());

        var second = await sut.LoadAsync(2, default);
        second.ShouldNotBeNull();
        second!.Subject.ShouldBe("bar");
    }

    // Go: TestMemStoreBasics server/memstore_test.go
    [Fact]
    public async Task Load_non_existent_returns_null()
    {
        var sut = new MemStore();
        await sut.AppendAsync("foo", "data"u8.ToArray(), default);

        // Neither an unused high sequence nor sequence 0 resolves.
        (await sut.LoadAsync(99, default)).ShouldBeNull();
        (await sut.LoadAsync(0, default)).ShouldBeNull();
    }

    // Go: TestMemStoreEraseMsg server/memstore_test.go
    [Fact]
    public async Task Remove_messages()
    {
        var sut = new MemStore();

        for (var i = 0; i < 5; i++)
            await sut.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

        // Punch holes at sequences 2 and 4.
        (await sut.RemoveAsync(2, default)).ShouldBeTrue();
        (await sut.RemoveAsync(4, default)).ShouldBeTrue();

        (await sut.GetStateAsync(default)).Messages.ShouldBe((ulong)3);

        (await sut.LoadAsync(2, default)).ShouldBeNull();
        (await sut.LoadAsync(4, default)).ShouldBeNull();
        (await sut.LoadAsync(1, default)).ShouldNotBeNull();
        (await sut.LoadAsync(3, default)).ShouldNotBeNull();
        (await sut.LoadAsync(5, default)).ShouldNotBeNull();
    }

    // Go: TestMemStoreEraseMsg server/memstore_test.go
    [Fact]
    public async Task Remove_non_existent_returns_false()
    {
        var sut = new MemStore();
        await sut.AppendAsync("foo", "data"u8.ToArray(), default);

        (await sut.RemoveAsync(99, default)).ShouldBeFalse();
    }

    // Go: TestMemStorePurge server/memstore_test.go
    [Fact]
    public async Task Purge_clears_all()
    {
        var sut = new MemStore();

        for (var i = 0; i < 10; i++)
            await sut.AppendAsync("foo", "data"u8.ToArray(), default);

        (await sut.GetStateAsync(default)).Messages.ShouldBe((ulong)10);

        await sut.PurgeAsync(default);

        var state = await sut.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)0);
        state.Bytes.ShouldBe((ulong)0);
    }

    // Go: TestMemStorePurge server/memstore_test.go
    [Fact]
    public async Task Purge_empty_store_is_safe()
    {
        var sut = new MemStore();

        // Purging with nothing stored must be a no-op, not an error.
        await sut.PurgeAsync(default);

        (await sut.GetStateAsync(default)).Messages.ShouldBe((ulong)0);
    }

    // Go: TestMemStoreTimeStamps server/memstore_test.go
    [Fact]
    public async Task Timestamps_non_decreasing()
    {
        var sut = new MemStore();

        for (var i = 0; i < 10; i++)
            await sut.AppendAsync("foo", "data"u8.ToArray(), default);

        var messages = await sut.ListAsync(default);
        messages.Count.ShouldBe(10);

        // Each message's timestamp must be >= its predecessor's.
        DateTime? previous = null;
        foreach (var message in messages)
        {
            if (previous is { } earlier)
                message.TimestampUtc.ShouldBeGreaterThanOrEqualTo(earlier);
            previous = message.TimestampUtc;
        }
    }

    // Go: TestMemStoreMsgHeaders (adapted) server/memstore_test.go
    [Fact]
    public async Task Payload_with_header_bytes_round_trips()
    {
        var sut = new MemStore();

        var headerBytes = "NATS/1.0\r\nName: derek\r\n\r\n"u8.ToArray();
        var bodyBytes = "Hello World"u8.ToArray();
        byte[] combined = [.. headerBytes, .. bodyBytes];

        await sut.AppendAsync("foo", combined, default);

        // Header + body bytes come back exactly as stored.
        var message = await sut.LoadAsync(1, default);
        message.ShouldNotBeNull();
        message!.Payload.ToArray().ShouldBe(combined);
    }

    // Go: TestMemStoreBasics server/memstore_test.go
    [Fact]
    public async Task LoadLastBySubject_returns_most_recent()
    {
        var sut = new MemStore();

        await sut.AppendAsync("foo", "first"u8.ToArray(), default);
        await sut.AppendAsync("bar", "other"u8.ToArray(), default);
        await sut.AppendAsync("foo", "second"u8.ToArray(), default);
        await sut.AppendAsync("foo", "third"u8.ToArray(), default);

        var latest = await sut.LoadLastBySubjectAsync("foo", default);
        latest.ShouldNotBeNull();
        latest!.Payload.ToArray().ShouldBe("third"u8.ToArray());
        latest.Sequence.ShouldBe((ulong)4);

        // Unknown subjects yield null rather than throwing.
        (await sut.LoadLastBySubjectAsync("does.not.exist", default)).ShouldBeNull();
    }

    // Go: TestMemStoreMsgLimit server/memstore_test.go
    [Fact]
    public async Task TrimToMaxMessages_evicts_oldest()
    {
        var sut = new MemStore();

        for (var i = 0; i < 20; i++)
            await sut.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

        // Trimming to 10 drops seqs 1..10 and keeps 11..20.
        sut.TrimToMaxMessages(10);

        var state = await sut.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)10);
        state.FirstSeq.ShouldBe((ulong)11);
        state.LastSeq.ShouldBe((ulong)20);

        (await sut.LoadAsync(1, default)).ShouldBeNull();
        (await sut.LoadAsync(10, default)).ShouldBeNull();
        (await sut.LoadAsync(11, default)).ShouldNotBeNull();
    }

    // Go: TestMemStoreMsgLimit server/memstore_test.go
    [Fact]
    public async Task TrimToMaxMessages_to_zero()
    {
        var sut = new MemStore();

        for (var i = 0; i < 5; i++)
            await sut.AppendAsync("foo", "data"u8.ToArray(), default);

        // A zero limit empties the store entirely.
        sut.TrimToMaxMessages(0);

        (await sut.GetStateAsync(default)).Messages.ShouldBe((ulong)0);
    }

    // Go: TestMemStoreBytesLimit server/memstore_test.go
    [Fact]
    public async Task Bytes_tracks_payload_sizes()
    {
        var sut = new MemStore();

        var payload = new byte[100];
        for (var i = 0; i < 5; i++)
            await sut.AppendAsync("foo", payload, default);

        // Go parity: MsgSize = subj.Length + hdr + data + 16 overhead
        // "foo"(3) + 100 + 16 = 119 per msg × 5 = 595
        (await sut.GetStateAsync(default)).Bytes.ShouldBe((ulong)595);
    }

    // Go: TestMemStoreBytesLimit server/memstore_test.go
    [Fact]
    public async Task Bytes_decrease_after_remove()
    {
        var sut = new MemStore();

        var payload = new byte[100];
        for (var i = 0; i < 5; i++)
            await sut.AppendAsync("foo", payload, default);

        await sut.RemoveAsync(1, default);
        await sut.RemoveAsync(3, default);

        // Go parity: MsgSize = subj.Length + hdr + data + 16 overhead
        // "foo"(3) + 100 + 16 = 119 per msg × 3 remaining = 357
        (await sut.GetStateAsync(default)).Bytes.ShouldBe((ulong)357);
    }

    // Snapshot/restore round-trip: state and every payload must survive.
    [Fact]
    public async Task Snapshot_and_restore()
    {
        var source = new MemStore();

        for (var i = 0; i < 20; i++)
            await source.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

        var snapshot = await source.CreateSnapshotAsync(default);
        snapshot.Length.ShouldBeGreaterThan(0);

        var target = new MemStore();
        await target.RestoreSnapshotAsync(snapshot, default);

        var sourceState = await source.GetStateAsync(default);
        var targetState = await target.GetStateAsync(default);
        targetState.Messages.ShouldBe(sourceState.Messages);
        targetState.FirstSeq.ShouldBe(sourceState.FirstSeq);
        targetState.LastSeq.ShouldBe(sourceState.LastSeq);

        // Every payload must match byte-for-byte after restore.
        for (ulong seq = 1; seq <= sourceState.Messages; seq++)
        {
            var expected = await source.LoadAsync(seq, default);
            var actual = await target.LoadAsync(seq, default);
            actual.ShouldNotBeNull();
            actual!.Payload.ToArray().ShouldBe(expected!.Payload.ToArray());
        }
    }

    // A snapshot taken after removes must preserve the holes.
    [Fact]
    public async Task Snapshot_after_removes()
    {
        var source = new MemStore();

        for (var i = 0; i < 10; i++)
            await source.AppendAsync("foo", Encoding.UTF8.GetBytes($"msg-{i}"), default);

        await source.RemoveAsync(2, default);
        await source.RemoveAsync(5, default);
        await source.RemoveAsync(8, default);

        var snapshot = await source.CreateSnapshotAsync(default);

        var target = new MemStore();
        await target.RestoreSnapshotAsync(snapshot, default);

        (await target.GetStateAsync(default)).Messages.ShouldBe((ulong)7);

        (await target.LoadAsync(2, default)).ShouldBeNull();
        (await target.LoadAsync(5, default)).ShouldBeNull();
        (await target.LoadAsync(8, default)).ShouldBeNull();
        (await target.LoadAsync(1, default)).ShouldNotBeNull();
    }

    // ListAsync must return messages in ascending sequence order.
    [Fact]
    public async Task ListAsync_returns_ordered()
    {
        var sut = new MemStore();

        await sut.AppendAsync("c", "three"u8.ToArray(), default);
        await sut.AppendAsync("a", "one"u8.ToArray(), default);
        await sut.AppendAsync("b", "two"u8.ToArray(), default);

        var messages = await sut.ListAsync(default);
        messages.Count.ShouldBe(3);
        for (var i = 0; i < messages.Count; i++)
            messages[i].Sequence.ShouldBe((ulong)(i + 1));
    }

    // Appending after a purge must keep working.
    [Fact]
    public async Task Purge_then_append()
    {
        var sut = new MemStore();

        for (var i = 0; i < 5; i++)
            await sut.AppendAsync("foo", "data"u8.ToArray(), default);

        await sut.PurgeAsync(default);

        var seq = await sut.AppendAsync("foo", "after purge"u8.ToArray(), default);
        seq.ShouldBeGreaterThan((ulong)0);

        var message = await sut.LoadAsync(seq, default);
        message.ShouldNotBeNull();
        message!.Payload.ToArray().ShouldBe("after purge"u8.ToArray());
    }

    // Empty payloads are legal and round-trip as zero-length.
    [Fact]
    public async Task Empty_payload_round_trips()
    {
        var sut = new MemStore();

        await sut.AppendAsync("foo", ReadOnlyMemory<byte>.Empty, default);

        var message = await sut.LoadAsync(1, default);
        message.ShouldNotBeNull();
        message!.Payload.Length.ShouldBe(0);
    }

    // A brand-new store reports all-zero state.
    [Fact]
    public async Task Empty_store_state()
    {
        var sut = new MemStore();

        var state = await sut.GetStateAsync(default);
        state.Messages.ShouldBe((ulong)0);
        state.Bytes.ShouldBe((ulong)0);
        state.FirstSeq.ShouldBe((ulong)0);
        state.LastSeq.ShouldBe((ulong)0);
    }
}
|
||||
@@ -0,0 +1,200 @@
|
||||
// Reference: golang/nats-server/server/filestore.go
|
||||
// Go wire format: filestore.go:6720-6724 (writeMsgRecordLocked)
|
||||
// Go decode: filestore.go:8180-8250 (msgFromBufEx)
|
||||
// Go size calc: filestore.go:8770-8777 (fileStoreMsgSizeRaw)
|
||||
//
|
||||
// These tests verify the .NET MessageRecord binary encoder/decoder that matches
|
||||
// the Go message block record format:
|
||||
// [1:flags][varint:subj_len][N:subject][varint:hdr_len][M:headers][varint:payload_len][P:payload][8:sequence_LE][8:checksum]
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using System.Text;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class MessageRecordTests
|
||||
{
|
||||
// Go: writeMsgRecordLocked / msgFromBufEx — basic round-trip
[Fact]
public void RoundTrip_SimpleMessage()
{
    var original = new MessageRecord
    {
        Sequence = 42,
        Subject = "foo.bar",
        Headers = ReadOnlyMemory<byte>.Empty,
        Payload = Encoding.UTF8.GetBytes("hello world"),
        Timestamp = 1_700_000_000_000_000_000L,
        Deleted = false,
    };

    // Encode then decode must reproduce every field.
    var decoded = MessageRecord.Decode(MessageRecord.Encode(original));

    decoded.Sequence.ShouldBe(original.Sequence);
    decoded.Subject.ShouldBe(original.Subject);
    decoded.Headers.Length.ShouldBe(0);
    decoded.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("hello world"));
    decoded.Timestamp.ShouldBe(original.Timestamp);
    decoded.Deleted.ShouldBe(false);
}
|
||||
|
||||
// Go: writeMsgRecordLocked with headers — hdr_len(4) hdr present in record
[Fact]
public void RoundTrip_WithHeaders()
{
    var headerBlock = "NATS/1.0\r\nX-Test: value\r\n\r\n"u8.ToArray();
    var original = new MessageRecord
    {
        Sequence = 99,
        Subject = "test.headers",
        Headers = headerBlock,
        Payload = Encoding.UTF8.GetBytes("payload with headers"),
        Timestamp = 1_700_000_000_000_000_000L,
        Deleted = false,
    };

    // Headers must survive the encode/decode cycle alongside the payload.
    var decoded = MessageRecord.Decode(MessageRecord.Encode(original));

    decoded.Sequence.ShouldBe(99UL);
    decoded.Subject.ShouldBe("test.headers");
    decoded.Headers.ToArray().ShouldBe(headerBlock);
    decoded.Payload.ToArray().ShouldBe(Encoding.UTF8.GetBytes("payload with headers"));
    decoded.Timestamp.ShouldBe(original.Timestamp);
}
|
||||
|
||||
// Verify that the last 8 bytes of the encoded record contain a nonzero XxHash64 checksum.
|
||||
[Fact]
|
||||
public void Encode_SetsChecksumInTrailer()
|
||||
{
|
||||
var record = new MessageRecord
|
||||
{
|
||||
Sequence = 1,
|
||||
Subject = "checksum.test",
|
||||
Headers = ReadOnlyMemory<byte>.Empty,
|
||||
Payload = Encoding.UTF8.GetBytes("data"),
|
||||
Timestamp = 0,
|
||||
Deleted = false,
|
||||
};
|
||||
|
||||
var encoded = MessageRecord.Encode(record);
|
||||
|
||||
// Last 8 bytes are the checksum — should be nonzero for any non-trivial message.
|
||||
var checksumBytes = encoded.AsSpan(encoded.Length - 8);
|
||||
var checksum = BitConverter.ToUInt64(checksumBytes);
|
||||
checksum.ShouldNotBe(0UL);
|
||||
}
|
||||
|
||||
// Flip a byte in the encoded data and verify decode throws InvalidDataException.
|
||||
[Fact]
|
||||
public void Decode_DetectsCorruptChecksum()
|
||||
{
|
||||
var record = new MessageRecord
|
||||
{
|
||||
Sequence = 7,
|
||||
Subject = "corrupt",
|
||||
Headers = ReadOnlyMemory<byte>.Empty,
|
||||
Payload = Encoding.UTF8.GetBytes("will be corrupted"),
|
||||
Timestamp = 0,
|
||||
Deleted = false,
|
||||
};
|
||||
|
||||
var encoded = MessageRecord.Encode(record);
|
||||
|
||||
// Flip a byte in the payload area (not the checksum itself).
|
||||
var corrupted = encoded.ToArray();
|
||||
corrupted[corrupted.Length / 2] ^= 0xFF;
|
||||
|
||||
Should.Throw<InvalidDataException>(() => MessageRecord.Decode(corrupted));
|
||||
}
|
||||
|
||||
// Go: varint encoding matches protobuf convention — high-bit continuation.
|
||||
[Theory]
|
||||
[InlineData(0UL)]
|
||||
[InlineData(1UL)]
|
||||
[InlineData(127UL)]
|
||||
[InlineData(128UL)]
|
||||
[InlineData(16383UL)]
|
||||
[InlineData(16384UL)]
|
||||
public void Varint_RoundTrip(ulong value)
|
||||
{
|
||||
Span<byte> buf = stackalloc byte[10];
|
||||
var written = MessageRecord.WriteVarint(buf, value);
|
||||
written.ShouldBeGreaterThan(0);
|
||||
|
||||
var (decoded, bytesRead) = MessageRecord.ReadVarint(buf);
|
||||
decoded.ShouldBe(value);
|
||||
bytesRead.ShouldBe(written);
|
||||
}
|
||||
|
||||
// Go: ebit (1 << 63) marks deleted/erased messages in the sequence field.
|
||||
[Fact]
|
||||
public void RoundTrip_DeletedFlag()
|
||||
{
|
||||
var record = new MessageRecord
|
||||
{
|
||||
Sequence = 100,
|
||||
Subject = "deleted.msg",
|
||||
Headers = ReadOnlyMemory<byte>.Empty,
|
||||
Payload = ReadOnlyMemory<byte>.Empty,
|
||||
Timestamp = 0,
|
||||
Deleted = true,
|
||||
};
|
||||
|
||||
var encoded = MessageRecord.Encode(record);
|
||||
var decoded = MessageRecord.Decode(encoded);
|
||||
|
||||
decoded.Deleted.ShouldBe(true);
|
||||
decoded.Sequence.ShouldBe(100UL);
|
||||
decoded.Subject.ShouldBe("deleted.msg");
|
||||
}
|
||||
|
||||
// Edge case: empty payload should encode and decode cleanly.
|
||||
[Fact]
|
||||
public void RoundTrip_EmptyPayload()
|
||||
{
|
||||
var record = new MessageRecord
|
||||
{
|
||||
Sequence = 1,
|
||||
Subject = "empty",
|
||||
Headers = ReadOnlyMemory<byte>.Empty,
|
||||
Payload = ReadOnlyMemory<byte>.Empty,
|
||||
Timestamp = 0,
|
||||
Deleted = false,
|
||||
};
|
||||
|
||||
var encoded = MessageRecord.Encode(record);
|
||||
var decoded = MessageRecord.Decode(encoded);
|
||||
|
||||
decoded.Subject.ShouldBe("empty");
|
||||
decoded.Payload.Length.ShouldBe(0);
|
||||
decoded.Headers.Length.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Verify 64KB payload works (large payload stress test).
|
||||
[Fact]
|
||||
public void RoundTrip_LargePayload()
|
||||
{
|
||||
var payload = new byte[64 * 1024];
|
||||
Random.Shared.NextBytes(payload);
|
||||
|
||||
var record = new MessageRecord
|
||||
{
|
||||
Sequence = 999_999,
|
||||
Subject = "large.payload.test",
|
||||
Headers = ReadOnlyMemory<byte>.Empty,
|
||||
Payload = payload,
|
||||
Timestamp = long.MaxValue,
|
||||
Deleted = false,
|
||||
};
|
||||
|
||||
var encoded = MessageRecord.Encode(record);
|
||||
var decoded = MessageRecord.Decode(encoded);
|
||||
|
||||
decoded.Sequence.ShouldBe(999_999UL);
|
||||
decoded.Subject.ShouldBe("large.payload.test");
|
||||
decoded.Payload.ToArray().ShouldBe(payload);
|
||||
decoded.Timestamp.ShouldBe(long.MaxValue);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,263 @@
|
||||
// Reference: golang/nats-server/server/filestore.go:217-267 (msgBlock struct)
|
||||
// Go block write: filestore.go:6700-6760 (writeMsgRecord)
|
||||
// Go block load: filestore.go:8140-8260 (loadMsgs / msgFromBufEx)
|
||||
// Go deletion: filestore.go dmap (avl.SequenceSet) for soft-deletes
|
||||
//
|
||||
// These tests verify the .NET MsgBlock abstraction — a block of messages stored
|
||||
// in a single append-only file on disk, with in-memory index and soft-delete support.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// Tests for <see cref="MsgBlock"/> — a block of messages stored in a single
/// append-only file on disk, with an in-memory index and soft-delete support.
/// Reference: golang/nats-server/server/filestore.go (msgBlock struct,
/// writeMsgRecord, loadMsgs/msgFromBufEx, dmap soft-deletes).
/// </summary>
public sealed class MsgBlockTests : IDisposable
{
    // Per-test scratch directory; deleted (best effort) in Dispose.
    private readonly string _testDir;

    public MsgBlockTests()
    {
        _testDir = Path.Combine(Path.GetTempPath(), $"msgblock_test_{Guid.NewGuid():N}");
        Directory.CreateDirectory(_testDir);
    }

    public void Dispose()
    {
        try { Directory.Delete(_testDir, recursive: true); }
        catch { /* best effort cleanup */ }
    }

    // Go: writeMsgRecord — single message write returns first sequence
    [Fact]
    public void Write_SingleMessage_ReturnsSequence()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024);
        var seq = block.Write("foo.bar", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray());
        seq.ShouldBe(1UL);
    }

    // Go: writeMsgRecord — sequential writes increment sequence
    [Fact]
    public void Write_MultipleMessages_IncrementsSequence()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024);
        var s1 = block.Write("a", ReadOnlyMemory<byte>.Empty, "one"u8.ToArray());
        var s2 = block.Write("b", ReadOnlyMemory<byte>.Empty, "two"u8.ToArray());
        var s3 = block.Write("c", ReadOnlyMemory<byte>.Empty, "three"u8.ToArray());

        s1.ShouldBe(1UL);
        s2.ShouldBe(2UL);
        s3.ShouldBe(3UL);
        block.MessageCount.ShouldBe(3UL);
    }

    // Go: loadMsgs / msgFromBufEx — read back by sequence number
    [Fact]
    public void Read_BySequence_ReturnsMessage()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024);
        block.Write("test.subject", ReadOnlyMemory<byte>.Empty, "payload data"u8.ToArray());

        var record = block.Read(1);
        record.ShouldNotBeNull();
        record.Sequence.ShouldBe(1UL);
        record.Subject.ShouldBe("test.subject");
        Encoding.UTF8.GetString(record.Payload.Span).ShouldBe("payload data");
    }

    // Go: loadMsgs — reading a non-existent sequence returns nil
    [Fact]
    public void Read_NonexistentSequence_ReturnsNull()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024);
        block.Write("a", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray());

        var record = block.Read(999);
        record.ShouldBeNull();
    }

    // Go: filestore.go rbytes check — block seals when size exceeds maxBytes
    [Fact]
    public void IsSealed_ReturnsTrueWhenFull()
    {
        // Use a very small maxBytes so the block seals quickly.
        // A single record with subject "a", empty headers, and 32-byte payload is ~61 bytes.
        // Set maxBytes to 50 so one write seals the block.
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 50);

        var payload = new byte[32];
        block.Write("a", ReadOnlyMemory<byte>.Empty, payload);
        block.IsSealed.ShouldBeTrue();
    }

    // Go: filestore.go errNoRoom — cannot write to sealed block
    [Fact]
    public void Write_ThrowsWhenSealed()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 50);
        block.Write("a", ReadOnlyMemory<byte>.Empty, new byte[32]);
        block.IsSealed.ShouldBeTrue();

        Should.Throw<InvalidOperationException>(() =>
            block.Write("b", ReadOnlyMemory<byte>.Empty, "more"u8.ToArray()));
    }

    // Go: dmap soft-delete — deleted message reads back as null
    [Fact]
    public void Delete_MarksSequenceAsDeleted()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024);
        block.Write("a", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray());

        block.Delete(1).ShouldBeTrue();
        block.Read(1).ShouldBeNull();
    }

    // Go: dmap — MessageCount reflects only non-deleted messages
    [Fact]
    public void Delete_DecreasesMessageCount()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024);
        block.Write("a", ReadOnlyMemory<byte>.Empty, "one"u8.ToArray());
        block.Write("b", ReadOnlyMemory<byte>.Empty, "two"u8.ToArray());
        block.Write("c", ReadOnlyMemory<byte>.Empty, "three"u8.ToArray());

        block.MessageCount.ShouldBe(3UL);
        block.DeletedCount.ShouldBe(0UL);

        block.Delete(2).ShouldBeTrue();

        block.MessageCount.ShouldBe(2UL);
        block.DeletedCount.ShouldBe(1UL);

        // Double delete returns false
        block.Delete(2).ShouldBeFalse();
    }

    // Go: recovery path — rebuild index from existing block file
    [Fact]
    public void Recover_RebuildsIndexFromFile()
    {
        // Write messages and dispose
        using (var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024))
        {
            block.Write("a.b", ReadOnlyMemory<byte>.Empty, "first"u8.ToArray());
            block.Write("c.d", ReadOnlyMemory<byte>.Empty, "second"u8.ToArray());
            block.Write("e.f", ReadOnlyMemory<byte>.Empty, "third"u8.ToArray());
            block.Flush();
        }

        // Recover and verify all messages readable
        using var recovered = MsgBlock.Recover(0, _testDir);
        recovered.MessageCount.ShouldBe(3UL);
        recovered.FirstSequence.ShouldBe(1UL);
        recovered.LastSequence.ShouldBe(3UL);

        var r1 = recovered.Read(1);
        r1.ShouldNotBeNull();
        r1.Subject.ShouldBe("a.b");
        Encoding.UTF8.GetString(r1.Payload.Span).ShouldBe("first");

        var r2 = recovered.Read(2);
        r2.ShouldNotBeNull();
        r2.Subject.ShouldBe("c.d");

        var r3 = recovered.Read(3);
        r3.ShouldNotBeNull();
        r3.Subject.ShouldBe("e.f");
    }

    // Go: recovery with dmap — deleted records still show as null after recovery
    [Fact]
    public void Recover_PreservesDeletedState()
    {
        // Write messages, delete one, flush and dispose
        using (var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024))
        {
            block.Write("a", ReadOnlyMemory<byte>.Empty, "one"u8.ToArray());
            block.Write("b", ReadOnlyMemory<byte>.Empty, "two"u8.ToArray());
            block.Write("c", ReadOnlyMemory<byte>.Empty, "three"u8.ToArray());
            block.Delete(2);
            block.Flush();
        }

        // Recover — seq 2 should still be deleted
        using var recovered = MsgBlock.Recover(0, _testDir);
        recovered.MessageCount.ShouldBe(2UL);
        recovered.DeletedCount.ShouldBe(1UL);

        recovered.Read(1).ShouldNotBeNull();
        recovered.Read(2).ShouldBeNull();
        recovered.Read(3).ShouldNotBeNull();
    }

    // Go: sync.RWMutex on msgBlock — concurrent reads during writes should not crash
    [Fact]
    public async Task ConcurrentReads_DuringWrite()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024);

        // Pre-populate some messages
        for (var i = 0; i < 10; i++)
            block.Write($"subj.{i}", ReadOnlyMemory<byte>.Empty, Encoding.UTF8.GetBytes($"msg-{i}"));

        // Run concurrent reads and writes.
        // FIX: CancellationTokenSource is IDisposable — declare with `using var`
        // so its timer is released when the test ends.
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(2));
        var exceptions = new List<Exception>();

        var writerTask = Task.Run(() =>
        {
            try
            {
                while (!cts.Token.IsCancellationRequested)
                {
                    try
                    {
                        block.Write("concurrent", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray());
                    }
                    catch (InvalidOperationException)
                    {
                        // Block sealed — expected eventually
                        break;
                    }
                }
            }
            catch (Exception ex) { lock (exceptions) { exceptions.Add(ex); } }
        });

        // FIX: the lambda's range element was unused — use a discard instead of `t`.
        var readerTasks = Enumerable.Range(0, 4).Select(_ => Task.Run(() =>
        {
            try
            {
                while (!cts.Token.IsCancellationRequested)
                {
                    for (ulong seq = 1; seq <= 10; seq++)
                        _ = block.Read(seq);
                }
            }
            catch (Exception ex) { lock (exceptions) { exceptions.Add(ex); } }
        })).ToArray();

        await Task.WhenAll([writerTask, .. readerTasks]).WaitAsync(TimeSpan.FromSeconds(5));

        exceptions.ShouldBeEmpty();
    }

    // Go: msgBlock first/last — custom firstSequence offsets sequence numbering
    [Fact]
    public void Write_WithCustomFirstSequence()
    {
        using var block = MsgBlock.Create(0, _testDir, maxBytes: 1024 * 1024, firstSequence: 100);
        var s1 = block.Write("x", ReadOnlyMemory<byte>.Empty, "a"u8.ToArray());
        var s2 = block.Write("y", ReadOnlyMemory<byte>.Empty, "b"u8.ToArray());

        s1.ShouldBe(100UL);
        s2.ShouldBe(101UL);
        block.FirstSequence.ShouldBe(100UL);
        block.LastSequence.ShouldBe(101UL);

        var r = block.Read(100);
        r.ShouldNotBeNull();
        r.Subject.ShouldBe("x");
    }
}
|
||||
@@ -0,0 +1,154 @@
|
||||
// Reference: golang/nats-server/server/filestore.go
|
||||
// Go uses S2/Snappy compression throughout FileStore:
|
||||
// - msgCompress / msgDecompress (filestore.go ~line 840)
|
||||
// - compressBlock / decompressBlock for block-level data
|
||||
// These tests verify the .NET S2Codec helper used in the FSV2 envelope path.
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using System.Text;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public sealed class S2CodecTests
{
    // Go: TestFileStoreBasics (S2 permutation) filestore_test.go:86
    [Fact]
    public void Compress_then_decompress_round_trips()
    {
        var source = "Hello, NATS JetStream S2 compression!"u8.ToArray();

        var unpacked = S2Codec.Decompress(S2Codec.Compress(source));

        unpacked.ShouldBe(source);
    }

    [Fact]
    public void Compress_empty_returns_empty()
    {
        S2Codec.Compress([]).ShouldBeEmpty();
    }

    [Fact]
    public void Decompress_empty_returns_empty()
    {
        S2Codec.Decompress([]).ShouldBeEmpty();
    }

    [Fact]
    public void Compress_large_highly_compressible_payload()
    {
        // 1 MB of repeated 'A' — highly compressible.
        var source = new byte[1024 * 1024];
        Array.Fill(source, (byte)'A');

        var packed = S2Codec.Compress(source);
        var unpacked = S2Codec.Decompress(packed);

        // S2/Snappy should shrink this substantially.
        packed.Length.ShouldBeLessThan(source.Length);
        unpacked.ShouldBe(source);
    }

    [Fact]
    public void Compress_large_incompressible_payload_round_trips()
    {
        // 1 MB of random data — not compressible, but must still round-trip.
        var source = new byte[1024 * 1024];
        Random.Shared.NextBytes(source);

        var unpacked = S2Codec.Decompress(S2Codec.Compress(source));

        unpacked.ShouldBe(source);
    }

    [Fact]
    public void Compress_single_byte_round_trips()
    {
        byte[] source = [0x42];

        var unpacked = S2Codec.Decompress(S2Codec.Compress(source));

        unpacked.ShouldBe(source);
    }

    [Fact]
    public void Compress_binary_all_byte_values_round_trips()
    {
        // Every possible byte value 0x00..0xFF exactly once.
        var source = new byte[256];
        for (var b = 0; b < 256; b++)
            source[b] = (byte)b;

        var unpacked = S2Codec.Decompress(S2Codec.Compress(source));

        unpacked.ShouldBe(source);
    }

    // Go: msgCompress with trailing CRC (filestore.go ~line 840) — the checksum
    // lives outside the S2 frame so only the body is compressed.
    [Fact]
    public void CompressWithTrailingChecksum_preserves_last_n_bytes_uncompressed()
    {
        const int checksumSize = 8;
        var body = "NATS payload body that should be compressed"u8.ToArray();
        var trailer = new byte[checksumSize];
        Random.Shared.NextBytes(trailer);

        var input = body.Concat(trailer).ToArray();

        var result = S2Codec.CompressWithTrailingChecksum(input, checksumSize);

        // Last checksumSize bytes must be verbatim.
        result[^checksumSize..].ShouldBe(trailer);
    }

    [Fact]
    public void CompressWithTrailingChecksum_zero_checksum_compresses_all()
    {
        var source = "Hello, no checksum"u8.ToArray();

        var packed = S2Codec.CompressWithTrailingChecksum(source, 0);
        var unpacked = S2Codec.Decompress(packed);

        unpacked.ShouldBe(source);
    }

    [Fact]
    public void DecompressWithTrailingChecksum_round_trips()
    {
        const int checksumSize = 8;
        var body = new byte[512];
        Random.Shared.NextBytes(body);
        var trailer = new byte[checksumSize];
        Random.Shared.NextBytes(trailer);

        var input = body.Concat(trailer).ToArray();

        var packed = S2Codec.CompressWithTrailingChecksum(input, checksumSize);
        var unpacked = S2Codec.DecompressWithTrailingChecksum(packed, checksumSize);

        unpacked.ShouldBe(input);
    }

    [Fact]
    public void CompressWithTrailingChecksum_empty_input_returns_empty()
    {
        S2Codec.CompressWithTrailingChecksum([], 0).ShouldBeEmpty();
    }

    [Fact]
    public void CompressWithTrailingChecksum_negative_size_throws()
    {
        Should.Throw<ArgumentOutOfRangeException>(
            () => S2Codec.CompressWithTrailingChecksum([1, 2, 3], -1));
    }

    [Fact]
    public void DecompressWithTrailingChecksum_negative_size_throws()
    {
        Should.Throw<ArgumentOutOfRangeException>(
            () => S2Codec.DecompressWithTrailingChecksum([1, 2, 3], -1));
    }
}
|
||||
@@ -0,0 +1,395 @@
|
||||
// Reference: golang/nats-server/server/avl/seqset_test.go
|
||||
// Tests ported / inspired by:
|
||||
// TestSequenceSetBasic → Add_Contains_Count_BasicOperations
|
||||
// TestSequenceSetRange → GetEnumerator_ReturnsAscendingOrder
|
||||
// TestSequenceSetDelete → Remove_SplitsAndTrimsRanges
|
||||
// (range compression) → Add_ContiguousSequences_CompressesToOneRange
|
||||
// (binary search) → Contains_BinarySearchCorrectness
|
||||
// (boundary) → Add_Remove_AtBoundaries
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// Unit tests for <see cref="SequenceSet"/> — the range-compressed sorted set
/// used to track soft-deleted sequences in JetStream FileStore blocks.
///
/// Reference: golang/nats-server/server/avl/seqset_test.go
/// </summary>
public sealed class SequenceSetTests
{
    // -------------------------------------------------------------------------
    // Basic Add / Contains / Count
    // -------------------------------------------------------------------------

    // Go: TestSequenceSetBasic — empty set has zero count
    [Fact]
    public void Count_EmptySet_ReturnsZero()
    {
        var seqSet = new SequenceSet();

        seqSet.Count.ShouldBe(0);
        seqSet.IsEmpty.ShouldBeTrue();
    }

    // Go: TestSequenceSetBasic — single element is found
    [Fact]
    public void Add_SingleSequence_ContainsIt()
    {
        var seqSet = new SequenceSet();

        seqSet.Add(42).ShouldBeTrue();

        seqSet.Contains(42).ShouldBeTrue();
        seqSet.Count.ShouldBe(1);
        seqSet.IsEmpty.ShouldBeFalse();
    }

    // Go: duplicate insert returns false (already present)
    [Fact]
    public void Add_DuplicateSequence_ReturnsFalse()
    {
        var seqSet = new SequenceSet();

        seqSet.Add(10).ShouldBeTrue();
        seqSet.Add(10).ShouldBeFalse();

        seqSet.Count.ShouldBe(1);
    }

    // Go: non-member returns false on Contains
    [Fact]
    public void Contains_NonMember_ReturnsFalse()
    {
        var seqSet = new SequenceSet();
        seqSet.Add(5);

        // Neighbors and extremes must all miss.
        seqSet.Contains(4).ShouldBeFalse();
        seqSet.Contains(6).ShouldBeFalse();
        seqSet.Contains(0).ShouldBeFalse();
        seqSet.Contains(ulong.MaxValue).ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Range compression
    // -------------------------------------------------------------------------

    // Adding three contiguous sequences should compress to a single range.
    // This is the key efficiency property of SequenceSet vs HashSet.
    [Fact]
    public void Add_ContiguousSequences_CompressesToOneRange()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 1, 2, 3 })
            seqSet.Add(seq);

        seqSet.Count.ShouldBe(3);
        seqSet.RangeCount.ShouldBe(1); // single range [1, 3]
        seqSet.Contains(1).ShouldBeTrue();
        seqSet.Contains(2).ShouldBeTrue();
        seqSet.Contains(3).ShouldBeTrue();
    }

    // Adding in reverse order should still compress.
    [Fact]
    public void Add_ContiguousReverse_CompressesToOneRange()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 3, 2, 1 })
            seqSet.Add(seq);

        seqSet.Count.ShouldBe(3);
        seqSet.RangeCount.ShouldBe(1); // single range [1, 3]
    }

    // Two separate gaps should stay as two ranges.
    [Fact]
    public void Add_WithGap_TwoRanges()
    {
        var seqSet = new SequenceSet();
        seqSet.Add(1);
        seqSet.Add(2);
        seqSet.Add(4); // gap at 3
        seqSet.Add(5);

        seqSet.Count.ShouldBe(4);
        seqSet.RangeCount.ShouldBe(2); // [1,2] and [4,5]
    }

    // Filling the gap merges to one range.
    [Fact]
    public void Add_FillsGap_MergesToOneRange()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 1, 2, 4, 5 })
            seqSet.Add(seq);
        seqSet.RangeCount.ShouldBe(2);

        // Fill the gap.
        seqSet.Add(3);

        seqSet.RangeCount.ShouldBe(1); // [1, 5]
        seqSet.Count.ShouldBe(5);
    }

    // Large run of contiguous sequences stays as one range.
    [Fact]
    public void Add_LargeContiguousRun_OnlyOneRange()
    {
        var seqSet = new SequenceSet();
        for (ulong seq = 1; seq <= 10_000; seq++)
            seqSet.Add(seq);

        seqSet.Count.ShouldBe(10_000);
        seqSet.RangeCount.ShouldBe(1);
    }

    // -------------------------------------------------------------------------
    // Remove / split / trim
    // -------------------------------------------------------------------------

    // Removing from an empty set returns false.
    [Fact]
    public void Remove_EmptySet_ReturnsFalse()
    {
        new SequenceSet().Remove(1).ShouldBeFalse();
    }

    // Removing a non-member returns false and doesn't change count.
    [Fact]
    public void Remove_NonMember_ReturnsFalse()
    {
        var seqSet = new SequenceSet();
        seqSet.Add(5);

        seqSet.Remove(4).ShouldBeFalse();

        seqSet.Count.ShouldBe(1);
    }

    // Removing the only element empties the set.
    [Fact]
    public void Remove_SingleElement_EmptiesSet()
    {
        var seqSet = new SequenceSet();
        seqSet.Add(7);

        seqSet.Remove(7).ShouldBeTrue();

        seqSet.Count.ShouldBe(0);
        seqSet.IsEmpty.ShouldBeTrue();
        seqSet.Contains(7).ShouldBeFalse();
    }

    // Removing the left edge of a range trims it.
    [Fact]
    public void Remove_LeftEdge_TrimsRange()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 1, 2, 3 })
            seqSet.Add(seq);
        seqSet.RangeCount.ShouldBe(1);

        seqSet.Remove(1).ShouldBeTrue();

        seqSet.Count.ShouldBe(2);
        seqSet.Contains(1).ShouldBeFalse();
        seqSet.Contains(2).ShouldBeTrue();
        seqSet.Contains(3).ShouldBeTrue();
        seqSet.RangeCount.ShouldBe(1); // still one range [2, 3]
    }

    // Removing the right edge of a range trims it.
    [Fact]
    public void Remove_RightEdge_TrimsRange()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 1, 2, 3 })
            seqSet.Add(seq);

        seqSet.Remove(3).ShouldBeTrue();

        seqSet.Count.ShouldBe(2);
        seqSet.Contains(3).ShouldBeFalse();
        seqSet.RangeCount.ShouldBe(1); // still [1, 2]
    }

    // Removing the middle element splits a range into two.
    [Fact]
    public void Remove_MiddleElement_SplitsRange()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 1, 2, 3, 4, 5 })
            seqSet.Add(seq);
        seqSet.RangeCount.ShouldBe(1);

        seqSet.Remove(3).ShouldBeTrue();

        seqSet.Count.ShouldBe(4);
        seqSet.Contains(3).ShouldBeFalse();
        seqSet.Contains(1).ShouldBeTrue();
        seqSet.Contains(2).ShouldBeTrue();
        seqSet.Contains(4).ShouldBeTrue();
        seqSet.Contains(5).ShouldBeTrue();
        seqSet.RangeCount.ShouldBe(2); // [1,2] and [4,5]
    }

    // -------------------------------------------------------------------------
    // Enumeration
    // -------------------------------------------------------------------------

    // GetEnumerator returns all sequences in ascending order.
    [Fact]
    public void GetEnumerator_ReturnsAscendingOrder()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 5, 3, 1, 2, 4 })
            seqSet.Add(seq);

        seqSet.ToList().ShouldBe([1, 2, 3, 4, 5]);
    }

    // Enumeration over a compressed range expands correctly.
    [Fact]
    public void GetEnumerator_CompressedRange_ExpandsAll()
    {
        var seqSet = new SequenceSet();
        for (ulong seq = 100; seq <= 200; seq++)
            seqSet.Add(seq);

        var all = seqSet.ToList();

        all.Count.ShouldBe(101);
        all[0].ShouldBe(100UL);
        all[^1].ShouldBe(200UL);
    }

    // Enumeration over multiple disjoint ranges returns all in order.
    [Fact]
    public void GetEnumerator_MultipleRanges_AllInOrder()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 10, 11, 20, 21, 22, 30 })
            seqSet.Add(seq);

        seqSet.ToList().ShouldBe([10UL, 11UL, 20UL, 21UL, 22UL, 30UL]);
    }

    // -------------------------------------------------------------------------
    // Clear
    // -------------------------------------------------------------------------

    [Fact]
    public void Clear_RemovesAll()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 1, 2, 3 })
            seqSet.Add(seq);

        seqSet.Clear();

        seqSet.Count.ShouldBe(0);
        seqSet.IsEmpty.ShouldBeTrue();
        seqSet.Contains(1).ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // ToHashSet snapshot
    // -------------------------------------------------------------------------

    [Fact]
    public void ToHashSet_ReturnsAllElements()
    {
        var seqSet = new SequenceSet();
        foreach (var seq in new ulong[] { 1, 2, 5, 6, 7 })
            seqSet.Add(seq);

        var snapshot = seqSet.ToHashSet();

        snapshot.Count.ShouldBe(5);
        snapshot.ShouldContain(1UL);
        snapshot.ShouldContain(2UL);
        snapshot.ShouldContain(5UL);
        snapshot.ShouldContain(6UL);
        snapshot.ShouldContain(7UL);
    }

    // -------------------------------------------------------------------------
    // Binary search correctness — sparse insertions
    // -------------------------------------------------------------------------

    // Reference: Go seqset_test.go — large number of non-contiguous sequences.
    [Fact]
    public void Add_Contains_SparseInsertions_AllFound()
    {
        var seqSet = new SequenceSet();
        var expected = new List<ulong>();
        for (ulong seq = 1; seq <= 1000; seq += 3) // every 3rd: 1, 4, 7, ...
        {
            seqSet.Add(seq);
            expected.Add(seq);
        }

        seqSet.Count.ShouldBe(expected.Count);

        foreach (var seq in expected)
            seqSet.Contains(seq).ShouldBeTrue($"Expected seq {seq} to be present");

        // Non-members should not appear.
        seqSet.Contains(2).ShouldBeFalse();
        seqSet.Contains(3).ShouldBeFalse();
        seqSet.Contains(999).ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Boundary conditions
    // -------------------------------------------------------------------------

    [Fact]
    public void Add_SequenceZero_Works()
    {
        var seqSet = new SequenceSet();

        seqSet.Add(0).ShouldBeTrue();

        seqSet.Contains(0).ShouldBeTrue();
        seqSet.Count.ShouldBe(1);
    }

    [Fact]
    public void Add_AdjacentToZero_Merges()
    {
        var seqSet = new SequenceSet();
        seqSet.Add(0);
        seqSet.Add(1);

        seqSet.RangeCount.ShouldBe(1); // [0, 1]
        seqSet.Count.ShouldBe(2);
    }

    [Fact]
    public void Add_Remove_RoundTrip()
    {
        var seqSet = new SequenceSet();
        for (ulong seq = 1; seq <= 100; seq++)
            seqSet.Add(seq);

        // Remove all odd sequences.
        for (ulong seq = 1; seq <= 100; seq += 2)
            seqSet.Remove(seq);

        seqSet.Count.ShouldBe(50);
        for (ulong seq = 2; seq <= 100; seq += 2)
            seqSet.Contains(seq).ShouldBeTrue();
        for (ulong seq = 1; seq <= 99; seq += 2)
            seqSet.Contains(seq).ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Merging at boundaries of existing ranges (not just single adjacency)
    // -------------------------------------------------------------------------

    [Fact]
    public void Add_BridgesMultipleGaps_CorrectState()
    {
        var seqSet = new SequenceSet();
        // Create three separate ranges: [1,2], [4,5], [7,8]
        foreach (var seq in new ulong[] { 1, 2, 4, 5, 7, 8 })
            seqSet.Add(seq);
        seqSet.RangeCount.ShouldBe(3);
        seqSet.Count.ShouldBe(6);

        // Fill gap between [1,2] and [4,5]: add 3
        seqSet.Add(3);
        seqSet.RangeCount.ShouldBe(2); // [1,5] and [7,8]
        seqSet.Count.ShouldBe(7);

        // Fill gap between [1,5] and [7,8]: add 6
        seqSet.Add(6);
        seqSet.RangeCount.ShouldBe(1); // [1,8]
        seqSet.Count.ShouldBe(8);
    }
}
|
||||
@@ -0,0 +1,165 @@
|
||||
// Ported from golang/nats-server/server/memstore_test.go:
|
||||
// TestMemStoreMsgLimit, TestMemStoreBytesLimit, TestMemStoreAgeLimit
|
||||
//
|
||||
// Retention limits are enforced by StreamManager (which calls MemStore.TrimToMaxMessages,
|
||||
// removes oldest messages by bytes, and prunes by age). These tests exercise the full
|
||||
// Limits-retention path via StreamManager.Capture, which is the code path the Go server
|
||||
// exercises through its StoreMsg integration.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
public class StorageRetentionTests
{
    // Go ref: TestMemStoreMsgLimit — after storing MaxMsgs+N messages only the
    // newest MaxMsgs remain and the sequence window advances past the evicted ones.
    [Fact]
    public async Task Max_msgs_limit_enforced()
    {
        const int maxMsgs = 10;
        const int overCount = 20;

        var streams = new StreamManager();
        streams.CreateOrUpdate(new StreamConfig
        {
            Name = "MSGLIMIT",
            Subjects = ["msglimit.*"],
            MaxMsgs = maxMsgs,
            Storage = StorageType.Memory,
        }).Error.ShouldBeNull();

        for (var n = 0; n < overCount; n++)
        {
            streams.Capture("msglimit.foo", Encoding.UTF8.GetBytes($"msg{n}"));
        }

        streams.TryGet("MSGLIMIT", out var stream).ShouldBeTrue();
        var state = await stream.Store.GetStateAsync(default);

        state.Messages.ShouldBe((ulong)maxMsgs);
        // Last stored sequence is overCount; the oldest survivor is
        // overCount - maxMsgs + 1, everything before it was evicted.
        state.LastSeq.ShouldBe((ulong)overCount);
        state.FirstSeq.ShouldBe((ulong)(overCount - maxMsgs + 1));
    }

    // Go ref: TestMemStoreBytesLimit — storing past MaxBytes purges the oldest
    // messages so the total byte count stays at or below the limit.
    [Fact]
    public async Task Max_bytes_limit_enforced()
    {
        // Go: memStoreMsgSize = subject.Length + headers.Length + data.Length + 16.
        // Here: "byteslimit.foo"(14) + payload(100) + overhead(16) = 130 bytes/msg.
        const string subject = "byteslimit.foo";
        const int msgSize = 14 + 100 + 16; // 130
        const int maxCapacity = 5;
        var payload = new byte[100];
        var maxBytes = (long)(msgSize * maxCapacity); // 650

        var streams = new StreamManager();
        streams.CreateOrUpdate(new StreamConfig
        {
            Name = "BYTESLIMIT",
            Subjects = ["byteslimit.*"],
            MaxBytes = maxBytes,
            Storage = StorageType.Memory,
        }).Error.ShouldBeNull();

        // Exactly maxCapacity messages fit without any eviction.
        for (var n = 0; n < maxCapacity; n++)
        {
            streams.Capture(subject, payload);
        }

        streams.TryGet("BYTESLIMIT", out var stream).ShouldBeTrue();
        var stateAtCapacity = await stream.Store.GetStateAsync(default);
        stateAtCapacity.Messages.ShouldBe((ulong)maxCapacity);
        stateAtCapacity.Bytes.ShouldBe((ulong)(msgSize * maxCapacity));

        // Each additional message must displace one old message.
        for (var n = 0; n < maxCapacity; n++)
        {
            streams.Capture(subject, payload);
        }

        var stateFinal = await stream.Store.GetStateAsync(default);
        stateFinal.Messages.ShouldBe((ulong)maxCapacity);
        stateFinal.Bytes.ShouldBeLessThanOrEqualTo((ulong)maxBytes);
        stateFinal.LastSeq.ShouldBe((ulong)(maxCapacity * 2));
    }

    // Go ref: TestMemStoreAgeLimit — messages older than MaxAge are pruned.
    // The Go memstore uses a background timer; this port prunes synchronously
    // inside StreamManager.Capture (PruneExpiredMessages compares TimestampUtc
    // against now - MaxAge), so we sleep past MaxAge and capture once more.
    [Fact]
    public async Task Max_age_limit_enforced()
    {
        // 1-second MaxAge keeps the cutoff easy to reason about.
        const int maxAgeMs = 1000;
        const int initialCount = 5;

        var streams = new StreamManager();
        streams.CreateOrUpdate(new StreamConfig
        {
            Name = "AGELIMIT",
            Subjects = ["agelimit.*"],
            MaxAgeMs = maxAgeMs,
            Storage = StorageType.Memory,
        }).Error.ShouldBeNull();

        // Store messages now; they become expired once MaxAge elapses.
        for (var n = 0; n < initialCount; n++)
        {
            streams.Capture("agelimit.foo", Encoding.UTF8.GetBytes($"msg{n}"));
        }

        streams.TryGet("AGELIMIT", out var stream).ShouldBeTrue();
        var stateBefore = await stream.Store.GetStateAsync(default);
        stateBefore.Messages.ShouldBe((ulong)initialCount);

        // Let MaxAge elapse so all stored messages fall behind the cutoff.
        await Task.Delay(maxAgeMs + 50);

        // This Capture triggers the pruning pass that removes everything
        // with TimestampUtc < (now - MaxAge).
        streams.Capture("agelimit.foo", "trigger"u8.ToArray());

        var stateAfter = await stream.Store.GetStateAsync(default);
        // Only the freshly appended trigger message remains.
        stateAfter.Messages.ShouldBe((ulong)1);
        stateAfter.Bytes.ShouldBeGreaterThan((ulong)0);
    }

    // Go ref: TestMemStoreMsgLimit — sequence numbers keep incrementing through
    // eviction; the store window moves forward and never wraps.
    [Fact]
    public async Task Sequence_numbers_monotonically_increase_through_eviction()
    {
        const int maxMsgs = 5;
        const int totalToStore = 15;

        var streams = new StreamManager();
        streams.CreateOrUpdate(new StreamConfig
        {
            Name = "SEQMONOT",
            Subjects = ["seqmonot.*"],
            MaxMsgs = maxMsgs,
            Storage = StorageType.Memory,
        }).Error.ShouldBeNull();

        for (var n = 1; n <= totalToStore; n++)
        {
            streams.Capture("seqmonot.foo", Encoding.UTF8.GetBytes($"msg{n}"));
        }

        streams.TryGet("SEQMONOT", out var stream).ShouldBeTrue();
        var state = await stream.Store.GetStateAsync(default);

        state.Messages.ShouldBe((ulong)maxMsgs);
        state.LastSeq.ShouldBe((ulong)totalToStore);
        state.FirstSeq.ShouldBe((ulong)(totalToStore - maxMsgs + 1));

        // Seq 1 (first evicted) and seq totalToStore - maxMsgs (last evicted, = 10)
        // must no longer load; the first surviving sequence must still load.
        (await stream.Store.LoadAsync(1, default)).ShouldBeNull();
        (await stream.Store.LoadAsync((ulong)(totalToStore - maxMsgs), default)).ShouldBeNull();
        (await stream.Store.LoadAsync((ulong)(totalToStore - maxMsgs + 1), default)).ShouldNotBeNull();
    }
}
|
||||
@@ -0,0 +1,536 @@
|
||||
// Reference: golang/nats-server/server/store_test.go
|
||||
// Tests ported in this file:
|
||||
// TestStoreLoadNextMsgWildcardStartBeforeFirstMatch → LoadNextMsg_WildcardStartBeforeFirstMatch
|
||||
// TestStoreSubjectStateConsistency → SubjectStateConsistency_UpdatesFirstAndLast
|
||||
// TestStoreCompactCleansUpDmap → Compact_CleansUpDmap (parameterised)
|
||||
// TestStoreTruncateCleansUpDmap → Truncate_CleansUpDmap (parameterised)
|
||||
// TestStorePurgeExZero → PurgeEx_ZeroSeq_EquivalentToPurge
|
||||
// TestStoreUpdateConfigTTLState → UpdateConfigTTLState_MessageSurvivesWhenTtlDisabled
|
||||
// TestStoreStreamInteriorDeleteAccounting → InteriorDeleteAccounting_StateIsCorrect (subset without FileStore restart)
|
||||
// TestStoreGetSeqFromTimeWithInteriorDeletesGap → GetSeqFromTime_WithInteriorDeletesGap
|
||||
// TestStoreGetSeqFromTimeWithTrailingDeletes → GetSeqFromTime_WithTrailingDeletes
|
||||
// TestStoreMaxMsgsPerUpdateBug → MaxMsgsPerUpdateBug_ReducesOnConfigUpdate
|
||||
// TestFileStoreMultiLastSeqsAndLoadLastMsgWithLazySubjectState → MultiLastSeqs_AndLoadLastMsg_WithLazySubjectState
|
||||
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
/// IStreamStore interface contract tests. Validates behaviour shared by all store
/// implementations using MemStore (the simplest implementation).
/// Each test mirrors a specific Go test from golang/nats-server/server/store_test.go.
/// </summary>
public sealed class StoreInterfaceTests
{
    // Helper: cast MemStore to IStreamStore to access sync interface methods.
    private static IStreamStore Sync(MemStore ms) => ms;

    // -------------------------------------------------------------------------
    // LoadNextMsg — wildcard start before first match
    // -------------------------------------------------------------------------

    // Go: TestStoreLoadNextMsgWildcardStartBeforeFirstMatch server/store_test.go:118
    [Fact]
    public void LoadNextMsg_WildcardStartBeforeFirstMatch()
    {
        var mem = new MemStore();
        var store = Sync(mem);

        // Fill non-matching subjects first so the first wildcard match starts
        // strictly after the requested start sequence.
        for (var i = 0; i < 100; i++)
            store.StoreMsg($"bar.{i}", null, [], 0);

        var (seq, _) = store.StoreMsg("foo.1", null, [], 0);
        seq.ShouldBe(101UL);

        // Loading with wildcard "foo.*" from seq 1 should find the message at seq 101.
        var buffer = new StoreMsg();
        var (msg, skip) = store.LoadNextMsg("foo.*", true, 1, buffer);
        msg.Subject.ShouldBe("foo.1");
        // skip = seq - start, so seq 101 - start 1 = 100.
        skip.ShouldBe(100UL);
        msg.Sequence.ShouldBe(101UL);

        // Loading after seq 101 should throw — no more foo.* messages.
        Should.Throw<KeyNotFoundException>(() => store.LoadNextMsg("foo.*", true, 102, null));
    }

    // -------------------------------------------------------------------------
    // SubjectStateConsistency
    // -------------------------------------------------------------------------

    // Go: TestStoreSubjectStateConsistency server/store_test.go:179
    // Removals at either end of the per-subject window must keep the tracked
    // first/last sequences consistent with what LoadNextMsg/LoadLastMsg report.
    [Fact]
    public void SubjectStateConsistency_UpdatesFirstAndLast()
    {
        var mem = new MemStore();
        var store = Sync(mem);

        SimpleState SubjectState()
        {
            var states = store.SubjectsState("foo");
            states.TryGetValue("foo", out var state);
            return state;
        }

        ulong FirstSeq()
        {
            var buffer = new StoreMsg();
            var (msg, _) = store.LoadNextMsg("foo", false, 0, buffer);
            return msg.Sequence;
        }

        ulong LastSeq()
        {
            var buffer = new StoreMsg();
            var msg = store.LoadLastMsg("foo", buffer);
            return msg.Sequence;
        }

        // Publish an initial batch of messages.
        for (var i = 0; i < 4; i++)
            store.StoreMsg("foo", null, [], 0);

        // Expect 4 msgs, with first=1, last=4.
        var ss = SubjectState();
        ss.Msgs.ShouldBe(4UL);
        ss.First.ShouldBe(1UL);
        FirstSeq().ShouldBe(1UL);
        ss.Last.ShouldBe(4UL);
        LastSeq().ShouldBe(4UL);

        // Remove first message — first should update to seq 2.
        store.RemoveMsg(1).ShouldBeTrue();

        ss = SubjectState();
        ss.Msgs.ShouldBe(3UL);
        ss.First.ShouldBe(2UL);
        FirstSeq().ShouldBe(2UL);
        ss.Last.ShouldBe(4UL);
        LastSeq().ShouldBe(4UL);

        // Remove last message — last should update to seq 3.
        store.RemoveMsg(4).ShouldBeTrue();

        ss = SubjectState();
        ss.Msgs.ShouldBe(2UL);
        ss.First.ShouldBe(2UL);
        FirstSeq().ShouldBe(2UL);
        ss.Last.ShouldBe(3UL);
        LastSeq().ShouldBe(3UL);

        // Remove first message again.
        store.RemoveMsg(2).ShouldBeTrue();

        // Only one message left — first and last should both equal 3.
        ss = SubjectState();
        ss.Msgs.ShouldBe(1UL);
        ss.First.ShouldBe(3UL);
        FirstSeq().ShouldBe(3UL);
        ss.Last.ShouldBe(3UL);
        LastSeq().ShouldBe(3UL);

        // Publish some more messages so we can test another scenario.
        for (var i = 0; i < 3; i++)
            store.StoreMsg("foo", null, [], 0);

        ss = SubjectState();
        ss.Msgs.ShouldBe(4UL);
        ss.First.ShouldBe(3UL);
        FirstSeq().ShouldBe(3UL);
        ss.Last.ShouldBe(7UL);
        LastSeq().ShouldBe(7UL);

        // Remove last (7), first (3), and then the new first (5) in turn.
        store.RemoveMsg(7).ShouldBeTrue();
        store.RemoveMsg(3).ShouldBeTrue();
        store.RemoveMsg(5).ShouldBeTrue();

        // First and last must both be recalculated and equal each other.
        ss = SubjectState();
        ss.Msgs.ShouldBe(1UL);
        ss.First.ShouldBe(6UL);
        FirstSeq().ShouldBe(6UL);
        ss.Last.ShouldBe(6UL);
        LastSeq().ShouldBe(6UL);

        // Store a new message and immediately remove it (marks lastNeedsUpdate),
        // then store another — that new one becomes the real last.
        store.StoreMsg("foo", null, [], 0); // seq 8
        store.RemoveMsg(8).ShouldBeTrue();
        store.StoreMsg("foo", null, [], 0); // seq 9

        ss = SubjectState();
        ss.Msgs.ShouldBe(2UL);
        ss.First.ShouldBe(6UL);
        FirstSeq().ShouldBe(6UL);
        ss.Last.ShouldBe(9UL);
        LastSeq().ShouldBe(9UL);
    }

    // -------------------------------------------------------------------------
    // CompactCleansUpDmap — parameterised over compact sequences 2, 3, 4
    // -------------------------------------------------------------------------

    // Go: TestStoreCompactCleansUpDmap server/store_test.go:449
    [Theory]
    [InlineData(2UL)]
    [InlineData(3UL)]
    [InlineData(4UL)]
    public void Compact_CleansUpDmap(ulong compactSeq)
    {
        var mem = new MemStore();
        var store = Sync(mem);

        // Publish 3 messages — no interior deletes yet.
        for (var i = 0; i < 3; i++)
            store.StoreMsg("foo", null, [], 0);

        var state = store.State();
        state.NumDeleted.ShouldBe(0);
        state.Deleted.ShouldBeNull();

        // Removing the middle message creates an interior delete.
        store.RemoveMsg(2).ShouldBeTrue();
        state = store.State();
        state.NumDeleted.ShouldBe(1);
        state.Deleted.ShouldNotBeNull();
        state.Deleted!.Length.ShouldBe(1);

        // Compacting must always clean up the interior delete.
        store.Compact(compactSeq);
        state = store.State();
        state.NumDeleted.ShouldBe(0);
        state.Deleted.ShouldBeNull();

        // Validate first/last sequence after compaction.
        var expectedFirst = Math.Max(3UL, compactSeq);
        state.FirstSeq.ShouldBe(expectedFirst);
        state.LastSeq.ShouldBe(3UL);
    }

    // -------------------------------------------------------------------------
    // TruncateCleansUpDmap — parameterised over truncate sequences 0, 1
    // -------------------------------------------------------------------------

    // Go: TestStoreTruncateCleansUpDmap server/store_test.go:500
    [Theory]
    [InlineData(0UL)]
    [InlineData(1UL)]
    public void Truncate_CleansUpDmap(ulong truncateSeq)
    {
        var mem = new MemStore();
        var store = Sync(mem);

        // Publish 3 messages — no interior deletes yet.
        for (var i = 0; i < 3; i++)
            store.StoreMsg("foo", null, [], 0);

        var state = store.State();
        state.NumDeleted.ShouldBe(0);
        state.Deleted.ShouldBeNull();

        // Removing the middle message creates an interior delete.
        store.RemoveMsg(2).ShouldBeTrue();
        state = store.State();
        state.NumDeleted.ShouldBe(1);
        state.Deleted.ShouldNotBeNull();
        state.Deleted!.Length.ShouldBe(1);

        // Truncating must always clean up the interior delete.
        store.Truncate(truncateSeq);
        state = store.State();
        state.NumDeleted.ShouldBe(0);
        state.Deleted.ShouldBeNull();

        // Validate first/last sequence after truncate.
        var expectedFirst = Math.Min(1UL, truncateSeq);
        state.FirstSeq.ShouldBe(expectedFirst);
        state.LastSeq.ShouldBe(truncateSeq);
    }

    // -------------------------------------------------------------------------
    // PurgeEx with zero sequence — must equal Purge
    // -------------------------------------------------------------------------

    // Go: TestStorePurgeExZero server/store_test.go:552
    [Fact]
    public void PurgeEx_ZeroSeq_EquivalentToPurge()
    {
        var mem = new MemStore();
        var store = Sync(mem);

        // Simple purge all — stream should be empty but first seq = last seq + 1.
        store.Purge();
        var state = store.State();
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(0UL);

        // PurgeEx with seq=0 must produce the same result.
        store.PurgeEx("", 0, 0);
        state = store.State();
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(0UL);
    }

    // -------------------------------------------------------------------------
    // UpdateConfig TTL state — message survives when TTL is disabled
    // -------------------------------------------------------------------------

    // Go: TestStoreUpdateConfigTTLState server/store_test.go:574
    // Toggles AllowMsgTtl off → on → off and verifies per-message TTLs only take
    // effect while enabled. Uses await Task.Delay (not Thread.Sleep) so the xUnit
    // worker thread is not blocked for the ~6.5 s the three waits take.
    [Fact]
    public async Task UpdateConfigTTLState_MessageSurvivesWhenTtlDisabled()
    {
        var cfg = new StreamConfig
        {
            Name = "TEST",
            Subjects = ["foo"],
            Storage = StorageType.Memory,
            AllowMsgTtl = false,
        };
        var mem = new MemStore(cfg);
        var store = Sync(mem);

        // TTLs disabled — message with ttl=1s should survive even after 2s.
        var (seq, _) = store.StoreMsg("foo", null, [], 1);
        await Task.Delay(2_000);
        // Should not throw — message should still be present.
        var loaded = store.LoadMsg(seq, null);
        loaded.Sequence.ShouldBe(seq);

        // Now enable TTLs.
        cfg.AllowMsgTtl = true;
        store.UpdateConfig(cfg);

        // TTLs enabled — message with ttl=1s should expire.
        var (seq2, _) = store.StoreMsg("foo", null, [], 1);
        await Task.Delay(2_500);
        // Should throw — message should have expired.
        Should.Throw<KeyNotFoundException>(() => store.LoadMsg(seq2, null));

        // Now disable TTLs again.
        cfg.AllowMsgTtl = false;
        store.UpdateConfig(cfg);

        // TTLs disabled — message with ttl=1s should survive.
        var (seq3, _) = store.StoreMsg("foo", null, [], 1);
        await Task.Delay(2_000);
        // Should not throw — TTL wheel is gone so message stays.
        var loaded3 = store.LoadMsg(seq3, null);
        loaded3.Sequence.ShouldBe(seq3);
    }

    // -------------------------------------------------------------------------
    // StreamInteriorDeleteAccounting — MemStore subset (no FileStore restart)
    // -------------------------------------------------------------------------

    // Go: TestStoreStreamInteriorDeleteAccounting server/store_test.go:621
    // Tests TruncateWithRemove/TruncateWithErase/SkipMsg/SkipMsgs variants on MemStore.
    [Theory]
    [InlineData(false, "TruncateWithRemove")]
    [InlineData(false, "TruncateWithErase")]
    [InlineData(false, "SkipMsg")]
    [InlineData(false, "SkipMsgs")]
    [InlineData(true, "TruncateWithRemove")]
    [InlineData(true, "TruncateWithErase")]
    [InlineData(true, "SkipMsg")]
    [InlineData(true, "SkipMsgs")]
    public void InteriorDeleteAccounting_StateIsCorrect(bool empty, string actionTitle)
    {
        var mem = new MemStore();
        var store = Sync(mem);

        // lseq ends up as the sequence the action below operates on:
        // 1 when starting empty, 2 when a seed message was stored first.
        ulong lseq = 0;
        if (!empty)
        {
            var (storedSeq, _) = store.StoreMsg("foo", null, [], 0);
            storedSeq.ShouldBe(1UL);
            lseq = storedSeq;
        }
        lseq++;

        switch (actionTitle)
        {
            case "TruncateWithRemove":
            {
                var (storedSeq, _) = store.StoreMsg("foo", null, [], 0);
                storedSeq.ShouldBe(lseq);
                store.RemoveMsg(lseq).ShouldBeTrue();
                store.Truncate(lseq);
                break;
            }
            case "TruncateWithErase":
            {
                var (storedSeq, _) = store.StoreMsg("foo", null, [], 0);
                storedSeq.ShouldBe(lseq);
                store.EraseMsg(lseq).ShouldBeTrue();
                store.Truncate(lseq);
                break;
            }
            case "SkipMsg":
            {
                store.SkipMsg(0);
                break;
            }
            case "SkipMsgs":
            {
                store.SkipMsgs(lseq, 1);
                break;
            }
        }

        // Confirm state: the acted-on sequence is gone; no phantom interior deletes.
        var before = store.State();
        if (empty)
        {
            before.Msgs.ShouldBe(0UL);
            before.FirstSeq.ShouldBe(2UL);
            before.LastSeq.ShouldBe(1UL);
        }
        else
        {
            before.Msgs.ShouldBe(1UL);
            before.FirstSeq.ShouldBe(1UL);
            before.LastSeq.ShouldBe(2UL);
        }
    }

    // -------------------------------------------------------------------------
    // GetSeqFromTime with interior deletes gap
    // -------------------------------------------------------------------------

    // Go: TestStoreGetSeqFromTimeWithInteriorDeletesGap server/store_test.go:874
    [Fact]
    public void GetSeqFromTime_WithInteriorDeletesGap()
    {
        var mem = new MemStore();
        var store = Sync(mem);

        long startTs = 0;
        for (var i = 0; i < 10; i++)
        {
            var (_, ts) = store.StoreMsg("foo", null, [], 0);
            if (i == 1)
                startTs = ts;
        }

        // Create a delete gap in the middle: seqs 4-7 deleted.
        // A naive binary search would hit deleted sequences and return wrong result.
        for (var seq = 4UL; seq <= 7UL; seq++)
            store.RemoveMsg(seq).ShouldBeTrue();

        // Convert Unix-nanoseconds timestamp to DateTime (1 tick = 100 ns).
        var t = new DateTime(startTs / 100L + DateTime.UnixEpoch.Ticks, DateTimeKind.Utc);
        var found = store.GetSeqFromTime(t);
        found.ShouldBe(2UL);
    }

    // -------------------------------------------------------------------------
    // GetSeqFromTime with trailing deletes
    // -------------------------------------------------------------------------

    // Go: TestStoreGetSeqFromTimeWithTrailingDeletes server/store_test.go:900
    [Fact]
    public void GetSeqFromTime_WithTrailingDeletes()
    {
        var mem = new MemStore();
        var store = Sync(mem);

        long startTs = 0;
        for (var i = 0; i < 3; i++)
        {
            var (_, ts) = store.StoreMsg("foo", null, [], 0);
            if (i == 1)
                startTs = ts;
        }

        // Delete last message — trailing delete.
        store.RemoveMsg(3).ShouldBeTrue();

        // Convert Unix-nanoseconds timestamp to DateTime (1 tick = 100 ns).
        var t = new DateTime(startTs / 100L + DateTime.UnixEpoch.Ticks, DateTimeKind.Utc);
        var found = store.GetSeqFromTime(t);
        found.ShouldBe(2UL);
    }

    // -------------------------------------------------------------------------
    // MaxMsgsPerUpdateBug — per-subject limit enforced on config update
    // -------------------------------------------------------------------------

    // Go: TestStoreMaxMsgsPerUpdateBug server/store_test.go:405
    [Fact]
    public void MaxMsgsPerUpdateBug_ReducesOnConfigUpdate()
    {
        var cfg = new StreamConfig
        {
            Name = "TEST",
            Subjects = ["foo"],
            MaxMsgsPer = 0,
            Storage = StorageType.Memory,
        };
        var mem = new MemStore(cfg);
        var store = Sync(mem);

        for (var i = 0; i < 5; i++)
            store.StoreMsg("foo", null, [], 0);

        var state = store.State();
        state.Msgs.ShouldBe(5UL);
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(5UL);

        // Update max messages per-subject from 0 (infinite) to 1.
        // Since the per-subject limit was not specified before, messages should be
        // removed upon config update, leaving only the most recent.
        cfg.MaxMsgsPer = 1;
        store.UpdateConfig(cfg);

        state = store.State();
        state.Msgs.ShouldBe(1UL);
        state.FirstSeq.ShouldBe(5UL);
        state.LastSeq.ShouldBe(5UL);
    }

    // -------------------------------------------------------------------------
    // MultiLastSeqs and LoadLastMsg with lazy subject state
    // -------------------------------------------------------------------------

    // Go: TestFileStoreMultiLastSeqsAndLoadLastMsgWithLazySubjectState server/store_test.go:921
    [Fact]
    public void MultiLastSeqs_AndLoadLastMsg_WithLazySubjectState()
    {
        var mem = new MemStore();
        var store = Sync(mem);

        for (var i = 0; i < 3; i++)
            store.StoreMsg("foo", null, [], 0);

        // MultiLastSeqs for "foo" should return [3].
        var seqs = store.MultiLastSeqs(["foo"], 0, -1);
        seqs.Length.ShouldBe(1);
        seqs[0].ShouldBe(3UL);

        // Remove last message — lazy last needs update.
        store.RemoveMsg(3).ShouldBeTrue();

        seqs = store.MultiLastSeqs(["foo"], 0, -1);
        seqs.Length.ShouldBe(1);
        seqs[0].ShouldBe(2UL);

        // Store another and load it as last.
        store.StoreMsg("foo", null, [], 0); // seq 4
        var lastMsg = store.LoadLastMsg("foo", null);
        lastMsg.Sequence.ShouldBe(4UL);

        // Remove seq 4 — lazy last update again.
        store.RemoveMsg(4).ShouldBeTrue();
        lastMsg = store.LoadLastMsg("foo", null);
        lastMsg.Sequence.ShouldBe(2UL);
    }
}
|
||||
@@ -0,0 +1,372 @@
|
||||
// Reference: golang/nats-server/server/filestore.go:4443 (setupWriteCache)
|
||||
// Reference: golang/nats-server/server/filestore.go:6148 (expireCache)
|
||||
// Reference: golang/nats-server/server/filestore.go:6220 (expireCacheLocked)
|
||||
//
|
||||
// Tests for WriteCacheManager (Gap 1.8) — bounded write cache with TTL eviction
|
||||
// and background flush inside FileStore.
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for <see cref="FileStore.WriteCacheManager"/>. Uses direct access to the
|
||||
/// internal class where needed, and tests through the public <see cref="FileStore"/>
|
||||
/// API for integration coverage.
|
||||
///
|
||||
/// Timing-sensitive eviction tests use <c>TrackWriteAt</c> to inject an explicit
|
||||
/// past timestamp rather than sleeping, avoiding flaky timing dependencies.
|
||||
/// </summary>
|
||||
public sealed class WriteCacheTests : IDisposable
|
||||
{
|
||||
// Per-test-instance scratch directory; deleted (best effort) in Dispose.
private readonly DirectoryInfo _dir = Directory.CreateTempSubdirectory("wcache-");
|
||||
|
||||
// Best-effort removal of the scratch directory. The OS can briefly keep
// handles open after the store is closed, so deletion failures are swallowed
// rather than failing the test run.
public void Dispose()
{
    try
    {
        _dir.Delete(recursive: true);
    }
    catch (IOException)
    {
        // Transient handle still held — ignore.
    }
    catch (UnauthorizedAccessException)
    {
        // Directory locked — ignore.
    }
}
|
||||
|
||||
// Creates a FileStore rooted at a fresh subdirectory of the scratch dir.
// When the caller supplies options, its Directory is overwritten in place
// (same as before); otherwise default options are used.
private FileStore CreateStore(string sub, FileStoreOptions? opts = null)
{
    var options = opts ?? new FileStoreOptions();
    options.Directory = Path.Combine(_dir.FullName, sub);
    return new FileStore(options);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// TrackWrite / TrackedBlockCount / TotalCachedBytes
|
||||
// Go: filestore.go:4443 (setupWriteCache) — track write for a block.
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Two writes against the same block must accumulate into one tracked entry.
// NOTE(review): the manager is not disposed here — if WriteCacheManager owns a
// background timer, consider awaiting DisposeAsync as other tests do; confirm.
[Fact]
public void TrackWrite_AddsSizeToEntry()
{
    // Arrange
    using var block = MsgBlock.Create(1, Path.Combine(_dir.FullName, "blk1"), 1024 * 1024);
    var cache = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(2),
        blockLookup: id => id == 1 ? block : null);

    // Act: two writes to the same block id.
    cache.TrackWrite(blockId: 1, bytes: 512);
    cache.TrackWrite(blockId: 1, bytes: 256);

    // Assert: one entry totalling 512 + 256 bytes.
    cache.TrackedBlockCount.ShouldBe(1);
    cache.TotalCachedBytes.ShouldBe(768L);
}
|
||||
|
||||
// Writes against distinct block ids must be tracked as separate entries
// while the total byte counter sums across all of them.
[Fact]
public void TrackWrite_MultipleBlocks_AccumulatesSeparately()
{
    // Arrange
    using var block1 = MsgBlock.Create(1, Path.Combine(_dir.FullName, "blk-m1"), 1024 * 1024);
    using var block2 = MsgBlock.Create(2, Path.Combine(_dir.FullName, "blk-m2"), 1024 * 1024);
    var cache = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(2),
        blockLookup: id => id == 1 ? block1 : id == 2 ? block2 : null);

    // Act: interleave writes across the two blocks.
    cache.TrackWrite(blockId: 1, bytes: 100);
    cache.TrackWrite(blockId: 2, bytes: 200);
    cache.TrackWrite(blockId: 1, bytes: 50);

    // Assert: two entries, 100 + 200 + 50 bytes in total.
    cache.TrackedBlockCount.ShouldBe(2);
    cache.TotalCachedBytes.ShouldBe(350L);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// EvictBlock — flush then clear for a single block
|
||||
// Go: filestore.go:4499 (flushPendingMsgsLocked on rotation).
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Evicting a tracked block must clear that block's write cache and drop
// the manager's accounting for it.
[Fact]
public void EvictBlock_ClearsBlockCache()
{
    // Arrange: write a message so the block holds a populated write cache.
    var dir = Path.Combine(_dir.FullName, "evict1");
    Directory.CreateDirectory(dir);
    using var block = MsgBlock.Create(1, dir, 1024 * 1024);
    block.Write("test.subject", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray());

    block.HasCache.ShouldBeTrue("block should have write cache after write");

    var cache = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(10),
        blockLookup: id => id == 1 ? block : null);

    cache.TrackWrite(blockId: 1, bytes: 64);

    // Act
    cache.EvictBlock(blockId: 1);

    // Assert: both the block cache and the manager's bookkeeping are cleared.
    block.HasCache.ShouldBeFalse("block cache should be cleared after EvictBlock");
    cache.TrackedBlockCount.ShouldBe(0);
    cache.TotalCachedBytes.ShouldBe(0L);
}
|
||||
|
||||
// Evicting a block id the manager has never seen must be a silent no-op.
[Fact]
public void EvictBlock_NonExistentBlock_IsNoOp()
{
    // Arrange: lookup resolves nothing for any id.
    var cache = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(2),
        blockLookup: _ => null);

    // Act + Assert: unknown block id must not throw.
    Should.NotThrow(() => cache.EvictBlock(blockId: 99));
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// TTL eviction via RunEviction
|
||||
// Go: filestore.go:6220 (expireCacheLocked) — expire idle cache after TTL.
|
||||
//
|
||||
// Uses TrackWriteAt to inject a past timestamp so TTL tests do not depend
|
||||
// on real elapsed time (no Task.Delay).
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// An entry whose injected write timestamp is older than the TTL must be
// evicted by RunEviction. TrackWriteAt injects the past timestamp so the
// test needs no real sleeping.
[Fact]
public void RunEviction_ExpiresCacheAfterTtl()
{
    // Arrange: block with a populated write cache.
    var dir = Path.Combine(_dir.FullName, "ttl1");
    Directory.CreateDirectory(dir);
    using var block = MsgBlock.Create(1, dir, 1024 * 1024);
    block.Write("ttl.subject", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray());
    block.HasCache.ShouldBeTrue();

    var cache = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(2),
        blockLookup: id => id == 1 ? block : null);

    // Timestamp the entry 5 000 ms in the past — well beyond the 2 s TTL.
    var backdated = Environment.TickCount64 - 5_000;
    cache.TrackWriteAt(blockId: 1, bytes: 128, tickCount64Ms: backdated);

    // Act: trigger eviction immediately, no sleeping required.
    cache.RunEviction();

    // Assert: the expired entry and its block cache are gone.
    block.HasCache.ShouldBeFalse("cache should be cleared after TTL expiry");
    cache.TrackedBlockCount.ShouldBe(0);
}
|
||||
|
||||
// Go: filestore.go:6220 (expireCacheLocked) — entries younger than the TTL survive.
// Fix: the trailing manual DisposeAsync was skipped whenever an assertion threw,
// leaking the background timer on failure; `await using` always disposes.
[Fact]
public async Task RunEviction_DoesNotExpireRecentWrites()
{
    // Arrange: write timestamp is now (fresh), TTL is 30 s — should not evict.
    var dir = Path.Combine(_dir.FullName, "ttl2");
    Directory.CreateDirectory(dir);
    using var block = MsgBlock.Create(1, dir, 1024 * 1024);
    block.Write("ttl2.subject", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray());

    await using var manager = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(30),
        blockLookup: id => id == 1 ? block : null);

    manager.TrackWrite(blockId: 1, bytes: 64);

    // Act: trigger eviction immediately (well before TTL)
    manager.RunEviction();

    // Assert: cache should still be intact
    block.HasCache.ShouldBeTrue("cache should remain since TTL has not expired");
    manager.TrackedBlockCount.ShouldBe(1);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Size-cap eviction via RunEviction
|
||||
// Go: filestore.go:6220 (expireCacheLocked) — evict oldest when over cap.
|
||||
//
|
||||
// Uses TrackWriteAt to inject explicit timestamps, making block1 definitively
|
||||
// older than block2 without relying on Task.Delay.
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: filestore.go:6220 (expireCacheLocked) — when the cache exceeds the size
// cap, the oldest entry is evicted first.
// Fix: the trailing manual DisposeAsync was skipped whenever an assertion threw,
// leaking the background timer on failure; `await using` always disposes.
[Fact]
public async Task RunEviction_EvictsOldestWhenOverSizeCap()
{
    // Arrange: size cap = 300 bytes, two blocks, block1 is older.
    var dir1 = Path.Combine(_dir.FullName, "cap1");
    var dir2 = Path.Combine(_dir.FullName, "cap2");
    Directory.CreateDirectory(dir1);
    Directory.CreateDirectory(dir2);
    using var block1 = MsgBlock.Create(1, dir1, 1024 * 1024);
    using var block2 = MsgBlock.Create(2, dir2, 1024 * 1024);

    block1.Write("s1", ReadOnlyMemory<byte>.Empty, "payload-one"u8.ToArray());
    block2.Write("s2", ReadOnlyMemory<byte>.Empty, "payload-two"u8.ToArray());

    await using var manager = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 300,
        cacheExpiry: TimeSpan.FromSeconds(60),
        blockLookup: id => id == 1 ? block1 : id == 2 ? block2 : null);

    // Inject explicit timestamps so block1 is definitively older than block2
    // without relying on Task.Delay: block1 10 s ago, block2 now.
    var now = Environment.TickCount64;
    manager.TrackWriteAt(blockId: 1, bytes: 200, tickCount64Ms: now - 10_000);
    manager.TrackWriteAt(blockId: 2, bytes: 200, tickCount64Ms: now);

    // Sanity: total is 400 bytes — exceeds cap of 300.
    manager.TotalCachedBytes.ShouldBe(400L);

    // Act
    manager.RunEviction();

    // Assert: oldest (block1) should have been evicted to bring total <= cap.
    block1.HasCache.ShouldBeFalse("oldest block should be evicted to enforce size cap");
    manager.TotalCachedBytes.ShouldBeLessThanOrEqualTo(300L);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FlushAllAsync
|
||||
// Go: filestore.go:5499 (flushPendingMsgsLocked, all blocks).
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: filestore.go:5499 (flushPendingMsgsLocked, all blocks) — FlushAllAsync
// clears every tracked entry and every block's write cache.
// Fix: the trailing manual DisposeAsync was skipped whenever an assertion threw,
// leaking the background timer on failure; `await using` always disposes.
[Fact]
public async Task FlushAllAsync_ClearsAllTrackedBlocks()
{
    // Arrange: two blocks with live write caches, both tracked.
    var dir1 = Path.Combine(_dir.FullName, "flush1");
    var dir2 = Path.Combine(_dir.FullName, "flush2");
    Directory.CreateDirectory(dir1);
    Directory.CreateDirectory(dir2);
    using var block1 = MsgBlock.Create(1, dir1, 1024 * 1024);
    using var block2 = MsgBlock.Create(2, dir2, 1024 * 1024);

    block1.Write("flush.a", ReadOnlyMemory<byte>.Empty, "aaa"u8.ToArray());
    block2.Write("flush.b", ReadOnlyMemory<byte>.Empty, "bbb"u8.ToArray());

    await using var manager = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(60),
        blockLookup: id => id == 1 ? block1 : id == 2 ? block2 : null);

    manager.TrackWrite(blockId: 1, bytes: 64);
    manager.TrackWrite(blockId: 2, bytes: 64);
    manager.TrackedBlockCount.ShouldBe(2);

    // Act
    await manager.FlushAllAsync();

    // Assert: all tracking state and both block caches are cleared.
    manager.TrackedBlockCount.ShouldBe(0);
    manager.TotalCachedBytes.ShouldBe(0L);
    block1.HasCache.ShouldBeFalse("block1 cache should be cleared after FlushAllAsync");
    block2.HasCache.ShouldBeFalse("block2 cache should be cleared after FlushAllAsync");
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Integration with FileStore: TrackWrite called on AppendAsync / StoreMsg
|
||||
// Go: filestore.go:6700 (writeMsgRecord) — cache populated on write.
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Appending through the async path must create at least one block backed by a
// live write cache. Go: filestore.go:6700 (writeMsgRecord).
[Fact]
public async Task FileStore_TracksWriteAfterAppend()
{
    // Arrange: generous cache limits so nothing is evicted mid-test.
    var options = new FileStoreOptions
    {
        BlockSizeBytes = 1024 * 1024,
        MaxCacheSize = 64 * 1024 * 1024,
        CacheExpiry = TimeSpan.FromSeconds(60),
    };
    await using var store = CreateStore("int-append", options);

    // Act: write a few messages
    await store.AppendAsync("foo.bar", "hello world"u8.ToArray(), default);
    await store.AppendAsync("foo.baz", "second message"u8.ToArray(), default);

    // Assert: blocks were created and messages are retrievable (cache is live).
    store.BlockCount.ShouldBeGreaterThanOrEqualTo(1);
}
|
||||
|
||||
// When the configured block size is tiny, writes rotate into new blocks; every
// previously written message must remain loadable afterwards.
[Fact]
public async Task FileStore_EvictsBlockCacheOnRotation()
{
    // Arrange: tiny block size so rotation happens quickly.
    var opts = new FileStoreOptions
    {
        BlockSizeBytes = 128, // Forces rotation after ~2 messages
        MaxCacheSize = 64 * 1024 * 1024,
        CacheExpiry = TimeSpan.FromSeconds(60),
    };
    await using var store = CreateStore("int-rotate", opts);

    // Act: write enough messages to roll over into multiple blocks.
    for (var msgIndex = 0; msgIndex < 10; msgIndex++)
    {
        await store.AppendAsync($"subj.{msgIndex}", new byte[20], default);
    }

    // Assert: rotation actually happened...
    store.BlockCount.ShouldBeGreaterThan(1);

    // ...and every stored message is still readable across block boundaries.
    for (ulong sequence = 1; sequence <= 10; sequence++)
    {
        var loaded = await store.LoadAsync(sequence, default);
        loaded.ShouldNotBeNull($"message at seq={sequence} should be recoverable after block rotation");
    }
}
|
||||
|
||||
// The synchronous StoreMsg path must also populate the store; the very first
// message is assigned sequence 1.
[Fact]
public void FileStore_StoreMsg_TracksWrite()
{
    // Arrange: default-sized blocks, generous cache limits.
    var opts = new FileStoreOptions
    {
        BlockSizeBytes = 1024 * 1024,
        MaxCacheSize = 64 * 1024 * 1024,
        CacheExpiry = TimeSpan.FromSeconds(60),
    };
    using var store = CreateStore("int-storemsg", opts);

    // Act: store a single message synchronously.
    var (sequence, _) = store.StoreMsg("test.subject", hdr: null, msg: "payload"u8.ToArray(), ttl: 0);

    // Assert: message is retrievable (write was tracked, cache is alive)
    sequence.ShouldBe(1UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// IAsyncDisposable: DisposeAsync flushes then stops the timer
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// DisposeAsync is itself the behavior under test: it must flush tracked
// entries and release the block cache before the background timer stops,
// so disposal stays explicit here rather than `await using`.
[Fact]
public async Task Dispose_FlushesAndStopsBackgroundTask()
{
    // Arrange: one block with a live write cache, tracked by the manager.
    var blockDir = Path.Combine(_dir.FullName, "dispose-test");
    Directory.CreateDirectory(blockDir);
    using var msgBlock = MsgBlock.Create(1, blockDir, 1024 * 1024);
    msgBlock.Write("d.subject", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray());

    var cacheManager = new FileStore.WriteCacheManager(
        maxCacheSizeBytes: 64 * 1024 * 1024,
        cacheExpiry: TimeSpan.FromSeconds(60),
        blockLookup: id => id == 1 ? msgBlock : null);
    cacheManager.TrackWrite(blockId: 1, bytes: 64);

    // Act: dispose should complete within a reasonable time and clear entries
    await cacheManager.DisposeAsync();

    // Assert: nothing is tracked any more and the block cache was released.
    cacheManager.TrackedBlockCount.ShouldBe(0);
    msgBlock.HasCache.ShouldBeFalse("cache should be flushed/cleared during DisposeAsync");
}
|
||||
}
|
||||
Reference in New Issue
Block a user