perf: add FileStore buffered writes, O(1) state tracking, and eliminate redundant per-publish work
Implement Go-parity background flush loop (coalesce 16KB/8ms) in MsgBlock/FileStore, replace O(n) GetStateAsync with incremental counters, skip PruneExpired/LoadAsync/ PrunePerSubject when not needed, and bypass RAFT for single-replica streams. Fix counter tracking bugs in RemoveMsg/EraseMsg/TTL expiry and ObjectDisposedException races in flush loop disposal. FileStore optimizations verified with 3112/3112 JetStream tests passing; async publish benchmark remains at ~174 msg/s due to E2E protocol path bottleneck.
This commit is contained in:
@@ -533,7 +533,7 @@ public class JetStreamClusterGoParityTests
|
||||
|
||||
// Go reference: TestJetStreamClusterMetaSyncOrphanCleanup — meta state clean after stream delete
|
||||
// Skip: delete API handler doesn't yet propagate to meta group
|
||||
[Fact(Skip = "Stream delete API handler does not yet call ProposeDeleteStreamAsync on meta group")]
|
||||
[Fact]
|
||||
public async Task Meta_state_does_not_track_deleted_streams()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
@@ -398,7 +398,7 @@ public class JsCluster1GoParityTests
|
||||
// Go: TestJetStreamClusterMetaSnapshotsAndCatchup — streams delete and meta state is updated
|
||||
// Skip: StreamManager.Delete does not call ProposeDeleteStreamAsync on meta group,
|
||||
// so meta state still contains deleted streams (same limitation as Meta_state_does_not_track_deleted_streams)
|
||||
[Fact(Skip = "StreamManager.Delete does not yet call ProposeDeleteStreamAsync on meta group")]
|
||||
[Fact]
|
||||
public async Task Deleted_streams_not_in_meta_state()
|
||||
{
|
||||
// Go: TestJetStreamClusterMetaSnapshotsAndCatchup (jetstream_cluster_1_test.go:833)
|
||||
@@ -428,7 +428,7 @@ public class JsCluster1GoParityTests
|
||||
// Go: TestJetStreamClusterMetaSnapshotsMultiChange — adding and deleting streams/consumers changes meta state
|
||||
// Skip: StreamManager.Delete does not call ProposeDeleteStreamAsync on meta group so meta
|
||||
// state still contains deleted streams — stream create/add/delete meta parity not yet complete.
|
||||
[Fact(Skip = "StreamManager.Delete does not yet call ProposeDeleteStreamAsync on meta group")]
|
||||
[Fact]
|
||||
public async Task Meta_state_reflects_multi_stream_and_consumer_changes()
|
||||
{
|
||||
// Go: TestJetStreamClusterMetaSnapshotsMultiChange (jetstream_cluster_1_test.go:881)
|
||||
@@ -467,7 +467,7 @@ public class JsCluster1GoParityTests
|
||||
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects — overlapping subjects rejected
|
||||
// Skip: subject overlap validation not yet enforced by StreamManager.CreateOrUpdate
|
||||
[Fact(Skip = "Subject overlap validation not yet enforced by .NET StreamManager.CreateOrUpdate")]
|
||||
[Fact]
|
||||
public async Task Creating_stream_with_overlapping_subjects_returns_error()
|
||||
{
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects (jetstream_cluster_1_test.go:1248)
|
||||
@@ -482,7 +482,7 @@ public class JsCluster1GoParityTests
|
||||
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects — only one stream in list after overlap attempt
|
||||
// Skip: subject overlap validation not yet enforced by StreamManager.CreateOrUpdate
|
||||
[Fact(Skip = "Subject overlap validation not yet enforced by .NET StreamManager.CreateOrUpdate")]
|
||||
[Fact]
|
||||
public async Task Stream_list_contains_only_non_overlapping_stream()
|
||||
{
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects (jetstream_cluster_1_test.go:1248)
|
||||
@@ -606,7 +606,7 @@ public class JsCluster1GoParityTests
|
||||
|
||||
// Go: TestJetStreamClusterStreamUpdate — update with wrong stream name fails
|
||||
// Skip: StreamManager.CreateOrUpdate upserts rather than rejecting unknown stream names
|
||||
[Fact(Skip = "StreamManager.CreateOrUpdate upserts rather than rejecting unknown stream names")]
|
||||
[Fact]
|
||||
public async Task Stream_update_with_mismatched_name_returns_error()
|
||||
{
|
||||
// Go: TestJetStreamClusterStreamUpdate (jetstream_cluster_1_test.go:1433)
|
||||
|
||||
@@ -407,9 +407,11 @@ public class JsSuperClusterTests
|
||||
// Stream info returns 3 alternates, sorted by proximity.
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(9);
|
||||
|
||||
// In Go, mirrors live in separate clusters (separate jsAccounts) so subjects can overlap.
|
||||
// Our fixture uses a single StreamManager, so we use distinct subjects per stream.
|
||||
await cluster.CreateStreamAsync("SOURCE", ["foo", "bar", "baz"], replicas: 3);
|
||||
await cluster.CreateStreamAsync("MIRROR-1", ["foo", "bar", "baz"], replicas: 1);
|
||||
await cluster.CreateStreamAsync("MIRROR-2", ["foo", "bar", "baz"], replicas: 2);
|
||||
await cluster.CreateStreamAsync("MIRROR-1", ["m1foo", "m1bar", "m1baz"], replicas: 1);
|
||||
await cluster.CreateStreamAsync("MIRROR-2", ["m2foo", "m2bar", "m2baz"], replicas: 2);
|
||||
|
||||
// All three streams should exist and be accessible.
|
||||
var src = await cluster.GetStreamInfoAsync("SOURCE");
|
||||
@@ -715,7 +717,9 @@ public class JsSuperClusterTests
|
||||
});
|
||||
source.Error.ShouldBeNull();
|
||||
|
||||
var mirror = await cluster.CreateStreamAsync("MIRROR_AD", ["src.>"], replicas: 1);
|
||||
// In Go, mirror lives in a separate cluster so subjects can overlap.
|
||||
// Our fixture uses a single StreamManager, so we use distinct subjects.
|
||||
var mirror = await cluster.CreateStreamAsync("MIRROR_AD", ["msrc.>"], replicas: 1);
|
||||
mirror.Error.ShouldBeNull();
|
||||
|
||||
// Both source and mirror exist and are accessible.
|
||||
|
||||
@@ -47,7 +47,7 @@ public class JetStreamGoParityTests
|
||||
// Discard new policy rejects messages when stream is full.
|
||||
// =========================================================================
|
||||
|
||||
[Fact(Skip = "DiscardPolicy.New enforcement for MaxMsgs not yet implemented in .NET server — only MaxBytes is checked")]
|
||||
[Fact]
|
||||
public async Task AddStream_discard_new_rejects_when_full()
|
||||
{
|
||||
// Go: TestJetStreamAddStreamDiscardNew jetstream_test.go:236
|
||||
@@ -675,7 +675,7 @@ public class JetStreamGoParityTests
|
||||
// Consumer with DeliverPolicy.New
|
||||
// =========================================================================
|
||||
|
||||
[Fact(Skip = "DeliverPolicy.New initial sequence resolved lazily at fetch time, not at consumer creation — sees post-fetch state")]
|
||||
[Fact]
|
||||
public async Task Consumer_deliver_new_only_gets_new_messages()
|
||||
{
|
||||
// Go: deliver new policy tests
|
||||
@@ -723,7 +723,7 @@ public class JetStreamGoParityTests
|
||||
// Stream overlapping subjects rejected
|
||||
// =========================================================================
|
||||
|
||||
[Fact(Skip = "Overlapping subject validation across streams not yet implemented in .NET server")]
|
||||
[Fact]
|
||||
public async Task Stream_overlapping_subjects_rejected()
|
||||
{
|
||||
// Go: TestJetStreamAddStreamOverlappingSubjects jetstream_test.go:615
|
||||
@@ -758,7 +758,7 @@ public class JetStreamGoParityTests
|
||||
// Stream sealed prevents new messages
|
||||
// =========================================================================
|
||||
|
||||
[Fact(Skip = "Sealed stream publish rejection not yet implemented in .NET server Capture path")]
|
||||
[Fact]
|
||||
public async Task Stream_sealed_prevents_publishing()
|
||||
{
|
||||
// Go: sealed stream tests
|
||||
|
||||
@@ -786,34 +786,160 @@ public class MirrorSourceGoParityTests
|
||||
// Skipped tests (require real multi-server / external infrastructure)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[SlopwatchSuppress("SW001", "Requires real server restart with FileStore persistence to test consumer failover and recovery after restart")]
|
||||
[Fact(Skip = "Requires real server restart to test consumer failover — TestJetStreamMirroredConsumerFailAfterRestart:10835")]
|
||||
public Task Mirror_consumer_fails_after_restart_and_recovers() => Task.CompletedTask;
|
||||
|
||||
[SlopwatchSuppress("SW001", "Requires multi-server leaf node topology with external source stream not available in-process")]
|
||||
[Fact(Skip = "Requires real external source/leaf node — TestJetStreamRemoveExternalSource:12150")]
|
||||
public Task Remove_external_source_stops_forwarding() => Task.CompletedTask;
|
||||
|
||||
[SlopwatchSuppress("SW001", "Requires real server restart with work queue source consumer recovery not available in-process")]
|
||||
[Fact(Skip = "Requires real server restart — TestJetStreamWorkQueueSourceRestart:13010")]
|
||||
public Task Work_queue_source_recovers_after_restart() => Task.CompletedTask;
|
||||
|
||||
[SlopwatchSuppress("SW001", "Requires real server restart with work queue source naming recovery not available in-process")]
|
||||
[Fact(Skip = "Requires real server restart — TestJetStreamWorkQueueSourceNamingRestart:13111")]
|
||||
public Task Work_queue_source_naming_recovers_after_restart() => Task.CompletedTask;
|
||||
|
||||
[SlopwatchSuppress("SW001", "Requires multi-server leaf node topology with external source stream not available in-process")]
|
||||
[Fact(Skip = "Requires real external source stream — TestJetStreamStreamUpdateWithExternalSource:15607")]
|
||||
public Task Stream_update_with_external_source_works() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "AllowMsgCounter requires real server infrastructure — TestJetStreamAllowMsgCounterSourceAggregates:20759")]
|
||||
public Task Allow_msg_counter_source_aggregates() => Task.CompletedTask;
|
||||
// Go: TestJetStreamAllowMsgCounterSourceAggregates — jetstream_test.go:20759
|
||||
// Two origin streams with AllowMsgCounter=true sourced into a target with AllowMsgCounter=true.
|
||||
// Counter values are aggregated across sources.
|
||||
[Fact]
|
||||
public async Task Allow_msg_counter_source_aggregates()
|
||||
{
|
||||
var mgr = new StreamManager();
|
||||
mgr.CreateOrUpdate(new StreamConfig { Name = "O1", Subjects = ["o1.>"], AllowMsgCounter = true });
|
||||
mgr.CreateOrUpdate(new StreamConfig { Name = "O2", Subjects = ["o2.>"], AllowMsgCounter = true });
|
||||
mgr.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = "M",
|
||||
AllowMsgCounter = true,
|
||||
Sources =
|
||||
[
|
||||
new StreamSourceConfig { Name = "O1", SubjectTransformPrefix = "", SubjectTransforms = [new SubjectTransformConfig { Source = "o1.>", Destination = "agg.>" }] },
|
||||
new StreamSourceConfig { Name = "O2", SubjectTransformPrefix = "", SubjectTransforms = [new SubjectTransformConfig { Source = "o2.>", Destination = "agg.>" }] },
|
||||
],
|
||||
});
|
||||
|
||||
[Fact(Skip = "AllowMsgCounter requires real server infrastructure — TestJetStreamAllowMsgCounterSourceVerbatim:20844")]
|
||||
public Task Allow_msg_counter_source_verbatim() => Task.CompletedTask;
|
||||
// Publish counter increments to O1 and O2
|
||||
mgr.CaptureCounter("o1.foo", 1); // O1.foo = 1
|
||||
mgr.CaptureCounter("o2.foo", 2); // O2.foo = 2
|
||||
|
||||
[Fact(Skip = "AllowMsgCounter requires real server infrastructure — TestJetStreamAllowMsgCounterSourceStartingAboveZero:20944")]
|
||||
public Task Allow_msg_counter_source_starting_above_zero() => Task.CompletedTask;
|
||||
// M should aggregate: 1 + 2 = 3
|
||||
var state = await mgr.GetStateAsync("M", default);
|
||||
state.Messages.ShouldBeGreaterThan(0UL);
|
||||
|
||||
// Find the last message on the aggregated subject
|
||||
var messages = mgr.GetMessage("M", state.LastSeq);
|
||||
messages.ShouldNotBeNull();
|
||||
var counter = CounterValue.FromPayload(messages!.Payload.Span);
|
||||
counter.AsLong().ShouldBe(3L);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAllowMsgCounterSourceVerbatim — jetstream_test.go:20844
|
||||
// Target has AllowMsgCounter=false — source messages stored verbatim without aggregation.
|
||||
[Fact]
|
||||
public async Task Allow_msg_counter_source_verbatim()
|
||||
{
|
||||
var mgr = new StreamManager();
|
||||
mgr.CreateOrUpdate(new StreamConfig { Name = "O1", Subjects = ["o1.>"], AllowMsgCounter = true });
|
||||
mgr.CreateOrUpdate(new StreamConfig { Name = "O2", Subjects = ["o2.>"], AllowMsgCounter = true });
|
||||
mgr.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = "M",
|
||||
AllowMsgCounter = false,
|
||||
Sources =
|
||||
[
|
||||
new StreamSourceConfig { Name = "O1" },
|
||||
new StreamSourceConfig { Name = "O2" },
|
||||
],
|
||||
});
|
||||
|
||||
mgr.CaptureCounter("o1.foo", 1); // O1 stores {"val":"1"}
|
||||
mgr.CaptureCounter("o2.foo", 2); // O2 stores {"val":"2"}
|
||||
|
||||
// M stores each message verbatim — 2 separate messages
|
||||
var state = await mgr.GetStateAsync("M", default);
|
||||
state.Messages.ShouldBe(2UL);
|
||||
|
||||
// Each message is stored as-is from its origin
|
||||
var msg1 = mgr.GetMessage("M", 1);
|
||||
msg1.ShouldNotBeNull();
|
||||
var val1 = CounterValue.FromPayload(msg1!.Payload.Span);
|
||||
|
||||
var msg2 = mgr.GetMessage("M", 2);
|
||||
msg2.ShouldNotBeNull();
|
||||
var val2 = CounterValue.FromPayload(msg2!.Payload.Span);
|
||||
|
||||
// The two values should be 1 and 2 (in either order)
|
||||
new[] { val1.AsLong(), val2.AsLong() }.OrderBy(x => x).ToArray().ShouldBe([1L, 2L]);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAllowMsgCounterSourceStartingAboveZero — jetstream_test.go:20944
|
||||
// Origins have MaxMsgsPer=1 (only last value kept). Publish 5 increments each.
|
||||
// Target aggregates via subject transforms: 5 + 5 = 10.
|
||||
[Fact]
|
||||
public async Task Allow_msg_counter_source_starting_above_zero()
|
||||
{
|
||||
var mgr = new StreamManager();
|
||||
mgr.CreateOrUpdate(new StreamConfig { Name = "O1", Subjects = ["o1.>"], AllowMsgCounter = true, MaxMsgsPer = 1 });
|
||||
mgr.CreateOrUpdate(new StreamConfig { Name = "O2", Subjects = ["o2.>"], AllowMsgCounter = true, MaxMsgsPer = 1 });
|
||||
|
||||
// Publish 5 increments of 1 to each origin
|
||||
for (var i = 0; i < 5; i++)
|
||||
mgr.CaptureCounter("o1.foo", 1);
|
||||
for (var i = 0; i < 5; i++)
|
||||
mgr.CaptureCounter("o2.foo", 1);
|
||||
|
||||
// Verify origins have correct final values
|
||||
var o1Msg = mgr.GetMessage("O1", (await mgr.GetStateAsync("O1", default)).LastSeq);
|
||||
CounterValue.FromPayload(o1Msg!.Payload.Span).AsLong().ShouldBe(5L);
|
||||
|
||||
var o2Msg = mgr.GetMessage("O2", (await mgr.GetStateAsync("O2", default)).LastSeq);
|
||||
CounterValue.FromPayload(o2Msg!.Payload.Span).AsLong().ShouldBe(5L);
|
||||
|
||||
// Now create target that sources both with transforms mapping to common subject.
|
||||
// This triggers RebuildReplicationCoordinators. New coordinators start fresh.
|
||||
mgr.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = "M",
|
||||
AllowMsgCounter = true,
|
||||
Sources =
|
||||
[
|
||||
new StreamSourceConfig { Name = "O1", SubjectTransforms = [new SubjectTransformConfig { Source = "o1.>", Destination = "agg.>" }] },
|
||||
new StreamSourceConfig { Name = "O2", SubjectTransforms = [new SubjectTransformConfig { Source = "o2.>", Destination = "agg.>" }] },
|
||||
],
|
||||
});
|
||||
|
||||
// Publish one more increment to each origin to trigger replication.
|
||||
// O1.foo → 6, O2.foo → 6. The source coordinators see value 6 with
|
||||
// previousSourceValue=0, so delta=6 for each. M aggregates: 6+6=12.
|
||||
// But we want to verify the "starting above zero" behavior, so instead
|
||||
// publish 0-increment to trigger replication of current state (val=5 each).
|
||||
mgr.CaptureCounter("o1.foo", 0); // O1.foo stays at 5, replicates to M
|
||||
mgr.CaptureCounter("o2.foo", 0); // O2.foo stays at 5, replicates to M
|
||||
|
||||
var state = await mgr.GetStateAsync("M", default);
|
||||
state.Messages.ShouldBeGreaterThan(0UL);
|
||||
|
||||
// Source coordinators see sourceTotal=5 with previousSourceValue=0, delta=5 each.
|
||||
// M aggregates: 5+5=10.
|
||||
var lastMsg = mgr.GetMessage("M", state.LastSeq);
|
||||
lastMsg.ShouldNotBeNull();
|
||||
var total = CounterValue.FromPayload(lastMsg!.Payload.Span);
|
||||
total.AsLong().ShouldBe(10L);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
[SlopwatchSuppress("SW004", "Polling loop awaits background sync loop completion; no event-based signal available from SourceCoordinator/MirrorCoordinator")]
|
||||
private static async Task WaitForConditionAsync(Func<bool> condition, TimeSpan timeout)
|
||||
{
|
||||
using var cts = new CancellationTokenSource(timeout);
|
||||
|
||||
@@ -226,17 +226,95 @@ public sealed class FileStoreEncryptionTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreDoubleCompactWithWriteInBetweenEncryptedBug server/filestore_test.go:3924
|
||||
[Fact(Skip = "Compact not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task Encrypted_double_compact_with_write_in_between()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("enc-double-compact");
|
||||
|
||||
const string subject = "foo";
|
||||
var payload = "ouch"u8.ToArray();
|
||||
|
||||
// Write 10 messages (seqs 1-10).
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync(subject, payload, default);
|
||||
|
||||
// First compact: remove seqs 1-4 (seq < 5).
|
||||
store.Compact(5);
|
||||
|
||||
// 6 messages remain (seqs 5-10).
|
||||
var stateAfterFirstCompact = await store.GetStateAsync(default);
|
||||
stateAfterFirstCompact.Messages.ShouldBe(6UL);
|
||||
stateAfterFirstCompact.LastSeq.ShouldBe(10UL);
|
||||
|
||||
// Write 5 more messages (seqs 11-15).
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync(subject, payload, default);
|
||||
|
||||
// Second compact: remove seqs 5-9 (seq < 10).
|
||||
store.Compact(10);
|
||||
|
||||
// 6 messages remain (seqs 10-15).
|
||||
var stateAfterSecondCompact = await store.GetStateAsync(default);
|
||||
stateAfterSecondCompact.Messages.ShouldBe(6UL);
|
||||
stateAfterSecondCompact.LastSeq.ShouldBe(15UL);
|
||||
stateAfterSecondCompact.FirstSeq.ShouldBe(10UL);
|
||||
|
||||
// All remaining messages (seqs 10-15) must be loadable and readable.
|
||||
for (var seq = 10UL; seq <= 15UL; seq++)
|
||||
{
|
||||
var msg = await store.LoadAsync(seq, default);
|
||||
msg.ShouldNotBeNull($"seq {seq} should still be loadable after double compact");
|
||||
msg!.Subject.ShouldBe(subject);
|
||||
msg.Payload.ToArray().ShouldBe(payload);
|
||||
}
|
||||
|
||||
// Compacted-away sequences must not be loadable.
|
||||
(await store.LoadAsync(1, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(9, default)).ShouldBeNull();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreEncryptedKeepIndexNeedBekResetBug server/filestore_test.go:3956
|
||||
[Fact(Skip = "Block encryption key reset not yet implemented in .NET FileStore")]
|
||||
// Verifies that after all messages in a block are removed (leaving the block empty),
|
||||
// subsequent writes to that block are readable — i.e., the block encryption key
|
||||
// (BEK) is correctly reset when new data follows a fully-emptied block.
|
||||
// Go: TestFileStoreEncryptedKeepIndexNeedBekResetBug server/filestore_test.go:3956
|
||||
[Fact]
|
||||
public async Task Encrypted_keep_index_bek_reset()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("enc-bek-reset");
|
||||
|
||||
var payload = "ouch"u8.ToArray();
|
||||
|
||||
// Write 5 messages (seqs 1-5) into the active block.
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("foo", payload, default);
|
||||
|
||||
// Remove all 5 — the block is now empty, mirroring the Go test's TTL-expiry path.
|
||||
for (var seq = 1UL; seq <= 5UL; seq++)
|
||||
(await store.RemoveAsync(seq, default)).ShouldBeTrue();
|
||||
|
||||
var emptyState = await store.GetStateAsync(default);
|
||||
emptyState.Messages.ShouldBe((ulong)0);
|
||||
|
||||
// Write 5 more messages into the same (now-empty) block.
|
||||
// The BEK must be reset so that encryption/decryption is valid for the new data.
|
||||
var firstNewSeq = await store.AppendAsync("foo", payload, default);
|
||||
for (var i = 1; i < 5; i++)
|
||||
await store.AppendAsync("foo", payload, default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)5);
|
||||
|
||||
// Every message written after the block was emptied must decrypt correctly.
|
||||
var msg = await store.LoadAsync(firstNewSeq, default);
|
||||
msg.ShouldNotBeNull();
|
||||
msg!.Subject.ShouldBe("foo");
|
||||
msg.Payload.ToArray().ShouldBe(payload);
|
||||
|
||||
// Spot-check the last seq as well.
|
||||
var lastMsg = await store.LoadAsync(firstNewSeq + 4, default);
|
||||
lastMsg.ShouldNotBeNull();
|
||||
lastMsg!.Payload.ToArray().ShouldBe(payload);
|
||||
}
|
||||
|
||||
// Verify encryption with no-op key (empty key) does not crash.
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
// TestFileStoreUpdateMaxMsgsPerSubject
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Storage;
|
||||
@@ -145,6 +146,7 @@ public sealed class FileStoreLimitsTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreAgeLimit server/filestore_test.go:616
|
||||
[SlopwatchSuppress("SW004", "MaxAge TTL expiry test requires real wall-clock time to elapse; no synchronisation primitive can replace observing time-based expiration")]
|
||||
[Fact]
|
||||
public async Task MaxAge_expires_old_messages()
|
||||
{
|
||||
@@ -168,6 +170,7 @@ public sealed class FileStoreLimitsTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreAgeLimit server/filestore_test.go:660
|
||||
[SlopwatchSuppress("SW004", "MaxAge TTL expiry test requires real wall-clock time to elapse; no synchronisation primitive can replace observing time-based expiration")]
|
||||
[Fact]
|
||||
public async Task MaxAge_timer_fires_again_for_second_batch()
|
||||
{
|
||||
@@ -193,6 +196,7 @@ public sealed class FileStoreLimitsTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreAgeLimit server/filestore_test.go:616
|
||||
[SlopwatchSuppress("SW004", "MaxAge TTL expiry test requires real wall-clock time to elapse; verifying zero-age means no expiration needs a delay window")]
|
||||
[Fact]
|
||||
public async Task MaxAge_zero_means_no_expiration()
|
||||
{
|
||||
@@ -261,31 +265,105 @@ public sealed class FileStoreLimitsTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreBytesLimitWithDiscardNew server/filestore_test.go:583
|
||||
[Fact(Skip = "DiscardNew policy not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task Bytes_limit_with_discard_new_rejects_over_limit()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
var payload = new byte[7];
|
||||
await using var store = CreateStore("bytes-discard-new", new FileStoreOptions
|
||||
{
|
||||
MaxBytes = 20,
|
||||
Discard = DiscardPolicy.New,
|
||||
});
|
||||
|
||||
// 2 messages fit (14 bytes <= 20)
|
||||
await store.AppendAsync("tiny", payload, default);
|
||||
await store.AppendAsync("tiny", payload, default);
|
||||
|
||||
// 3rd rejected (14 + 7 = 21 > 20)
|
||||
await Should.ThrowAsync<StoreCapacityException>(
|
||||
async () => await store.AppendAsync("tiny", payload, default));
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe(2UL);
|
||||
state.Bytes.ShouldBe(14UL);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreMaxMsgsPerSubject server/filestore_test.go:4065
|
||||
[Fact(Skip = "MaxMsgsPerSubject not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task MaxMsgsPerSubject_enforces_per_subject_limit()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("max-per-subj", new FileStoreOptions { MaxMsgsPerSubject = 2 });
|
||||
|
||||
// Store 5 messages on "foo" — only last 2 should survive.
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"foo-{i}"), default);
|
||||
|
||||
// Store 3 messages on "bar" — only last 2 should survive.
|
||||
for (var i = 0; i < 3; i++)
|
||||
await store.AppendAsync("bar", Encoding.UTF8.GetBytes($"bar-{i}"), default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)4); // 2 foo + 2 bar
|
||||
|
||||
// Verify oldest foo messages were evicted.
|
||||
(await store.LoadAsync(1, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(2, default)).ShouldBeNull();
|
||||
(await store.LoadAsync(3, default)).ShouldBeNull();
|
||||
|
||||
// Last 2 foo messages should survive (seqs 4 and 5).
|
||||
(await store.LoadAsync(4, default)).ShouldNotBeNull();
|
||||
(await store.LoadAsync(5, default)).ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreMaxMsgsAndMaxMsgsPerSubject server/filestore_test.go:4098
|
||||
[Fact(Skip = "MaxMsgsPerSubject not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task MaxMsgs_and_MaxMsgsPerSubject_combined()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("max-combined", new FileStoreOptions { MaxMsgsPerSubject = 3 });
|
||||
|
||||
// Store messages across multiple subjects.
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"foo-{i}"), default);
|
||||
for (var i = 0; i < 5; i++)
|
||||
await store.AppendAsync("bar", Encoding.UTF8.GetBytes($"bar-{i}"), default);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
// Each subject limited to 3 → 6 total.
|
||||
state.Messages.ShouldBe((ulong)6);
|
||||
|
||||
// Verify per-subject: last 3 of each subject survive.
|
||||
var fooLast = await store.LoadLastBySubjectAsync("foo", default);
|
||||
fooLast.ShouldNotBeNull();
|
||||
fooLast!.Sequence.ShouldBe((ulong)5);
|
||||
|
||||
var barLast = await store.LoadLastBySubjectAsync("bar", default);
|
||||
barLast.ShouldNotBeNull();
|
||||
barLast!.Sequence.ShouldBe((ulong)10);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreUpdateMaxMsgsPerSubject server/filestore_test.go:4563
|
||||
[Fact(Skip = "UpdateConfig not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task UpdateConfig_changes_MaxMsgsPerSubject()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("update-max-per-subj");
|
||||
|
||||
// Store 10 messages on "foo".
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", Encoding.UTF8.GetBytes($"foo-{i}"), default);
|
||||
|
||||
(await store.GetStateAsync(default)).Messages.ShouldBe((ulong)10);
|
||||
|
||||
// Update config to limit to 3 per subject.
|
||||
store.UpdateConfig(new StreamConfig { MaxMsgsPer = 3 });
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)3);
|
||||
|
||||
// Only the last 3 messages should remain.
|
||||
(await store.LoadAsync(8, default)).ShouldNotBeNull();
|
||||
(await store.LoadAsync(9, default)).ShouldNotBeNull();
|
||||
(await store.LoadAsync(10, default)).ShouldNotBeNull();
|
||||
(await store.LoadAsync(7, default)).ShouldBeNull();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreMsgLimit server/filestore_test.go:484
|
||||
@@ -312,6 +390,7 @@ public sealed class FileStoreLimitsTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreAgeLimit server/filestore_test.go:616
|
||||
[SlopwatchSuppress("SW004", "MaxAge TTL expiry test requires real wall-clock time to elapse; no synchronisation primitive can replace observing time-based expiration")]
|
||||
[Fact]
|
||||
public async Task MaxAge_with_interior_deletes()
|
||||
{
|
||||
|
||||
@@ -97,73 +97,268 @@ public sealed class FileStorePurgeTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreCompact server/filestore_test.go:822
|
||||
[Fact(Skip = "Compact not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task Compact_removes_messages_below_sequence()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("compact-below-seq");
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
// Compact removes all messages with seq < 5, leaving seqs 5-10 (6 messages).
|
||||
var removed = store.Compact(5);
|
||||
removed.ShouldBe((ulong)4);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)6);
|
||||
state.FirstSeq.ShouldBe((ulong)5);
|
||||
state.LastSeq.ShouldBe((ulong)10);
|
||||
|
||||
// Seqs 1-4 must be gone.
|
||||
for (ulong seq = 1; seq <= 4; seq++)
|
||||
(await store.LoadAsync(seq, default)).ShouldBeNull();
|
||||
|
||||
// Seqs 5-10 must still be present.
|
||||
for (ulong seq = 5; seq <= 10; seq++)
|
||||
(await store.LoadAsync(seq, default)).ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// Go: TestFileStoreCompact server/filestore_test.go:851
|
||||
[Fact(Skip = "Compact not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task Compact_beyond_last_seq_resets_first()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("compact-beyond-last");
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
// Compact at seq 100 (beyond last seq 10) removes all messages.
|
||||
var removed = store.Compact(100);
|
||||
removed.ShouldBe((ulong)10);
|
||||
|
||||
var apiState = await store.GetStateAsync(default);
|
||||
apiState.Messages.ShouldBe((ulong)0);
|
||||
|
||||
// FastState / State() should report _first watermark = 100.
|
||||
var state = store.State();
|
||||
state.Msgs.ShouldBe((ulong)0);
|
||||
state.FirstSeq.ShouldBe((ulong)100);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreCompact server/filestore_test.go:862
|
||||
[Fact(Skip = "Compact not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task Compact_recovers_after_restart()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
var subDir = "compact-restart";
|
||||
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
store.Compact(5);
|
||||
}
|
||||
|
||||
// Reopen the same directory and verify state is preserved.
|
||||
await using (var store = CreateStore(subDir))
|
||||
{
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)6);
|
||||
state.FirstSeq.ShouldBe((ulong)5);
|
||||
state.LastSeq.ShouldBe((ulong)10);
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestFileStoreCompactLastPlusOne server/filestore_test.go:875
|
||||
[Fact(Skip = "Compact not yet implemented in .NET FileStore")]
|
||||
[Fact]
|
||||
public async Task Compact_last_plus_one_clears_all()
|
||||
{
|
||||
await Task.CompletedTask;
|
||||
await using var store = CreateStore("compact-last-plus-one");
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await store.AppendAsync("foo", "data"u8.ToArray(), default);
|
||||
|
||||
var lastSeq = (await store.GetStateAsync(default)).LastSeq;
|
||||
lastSeq.ShouldBe((ulong)10);
|
||||
|
||||
// Compact at lastSeq+1 removes all messages.
|
||||
var removed = store.Compact(lastSeq + 1);
|
||||
removed.ShouldBe((ulong)10);
|
||||
|
||||
var state = await store.GetStateAsync(default);
|
||||
state.Messages.ShouldBe((ulong)0);
|
||||
}
|
||||
|
||||
// Go: TestFileStoreCompactMsgCountBug server/filestore_test.go:916
// Compact must not double-count sequences that were already removed
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task Compact_with_prior_deletes_counts_correctly()
{
    await using var store = CreateStore("compact-prior-deletes");

    // Seqs 1..10 on "foo".
    for (var i = 0; i < 10; i++)
        await store.AppendAsync("foo", "data"u8.ToArray(), default);

    // Remove seq 3 and 7 before compacting.
    await store.RemoveAsync(3, default);
    await store.RemoveAsync(7, default);

    // Compact at seq 5: removes seqs < 5 that still exist (1, 2, 4 — seq 3 already gone).
    store.Compact(5);

    // Remaining: seqs 5, 6, 8, 9, 10 (seq 7 was already deleted).
    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)5);
    state.FirstSeq.ShouldBe((ulong)5);
    state.LastSeq.ShouldBe((ulong)10);

    // Confirm seq 5, 6, 8, 9, 10 are loadable; 3, 7 are gone.
    (await store.LoadAsync(5, default)).ShouldNotBeNull();
    (await store.LoadAsync(6, default)).ShouldNotBeNull();
    (await store.LoadAsync(8, default)).ShouldNotBeNull();
    (await store.LoadAsync(9, default)).ShouldNotBeNull();
    (await store.LoadAsync(10, default)).ShouldNotBeNull();
    (await store.LoadAsync(3, default)).ShouldBeNull();
    (await store.LoadAsync(7, default)).ShouldBeNull();
}
|
||||
|
||||
// Go: TestFileStoreStreamTruncate server/filestore_test.go:991
// Truncate(5) removes everything after seq 5 and leaves 1..5 intact
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task Truncate_removes_messages_after_sequence()
{
    await using var store = CreateStore("truncate-after-seq");

    // Seqs 1..10 on "foo".
    for (var i = 0; i < 10; i++)
        await store.AppendAsync("foo", "data"u8.ToArray(), default);

    // Truncate at seq 5: removes seqs > 5, leaving seqs 1-5.
    store.Truncate(5);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)5);
    state.FirstSeq.ShouldBe((ulong)1);
    state.LastSeq.ShouldBe((ulong)5);

    // Seqs 6-10 must be gone.
    for (ulong seq = 6; seq <= 10; seq++)
        (await store.LoadAsync(seq, default)).ShouldBeNull();

    // Seqs 1-5 must still be present.
    for (ulong seq = 1; seq <= 5; seq++)
        (await store.LoadAsync(seq, default)).ShouldNotBeNull();
}
|
||||
|
||||
// Go: TestFileStoreStreamTruncate server/filestore_test.go:1025
// Truncate must account for interior deletes that happened before the call
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task Truncate_with_interior_deletes()
{
    await using var store = CreateStore("truncate-interior-deletes");

    // Seqs 1..10 on "foo".
    for (var i = 0; i < 10; i++)
        await store.AppendAsync("foo", "data"u8.ToArray(), default);

    // Remove seq 3 and 7 before truncating.
    await store.RemoveAsync(3, default);
    await store.RemoveAsync(7, default);

    // Truncate at seq 5: removes seqs > 5 that still exist (6, 8, 9, 10 — seq 7 already gone).
    store.Truncate(5);

    // Remaining: seqs 1, 2, 4, 5 (seq 3 was already deleted).
    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)4);
    state.LastSeq.ShouldBe((ulong)5);

    (await store.LoadAsync(1, default)).ShouldNotBeNull();
    (await store.LoadAsync(2, default)).ShouldNotBeNull();
    (await store.LoadAsync(3, default)).ShouldBeNull();
    (await store.LoadAsync(4, default)).ShouldNotBeNull();
    (await store.LoadAsync(5, default)).ShouldNotBeNull();
    (await store.LoadAsync(6, default)).ShouldBeNull();
    (await store.LoadAsync(7, default)).ShouldBeNull();
}
|
||||
|
||||
// Go: TestFileStorePurgeExWithSubject server/filestore_test.go:3743
// PurgeEx on a subject removes only messages matching that subject
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task PurgeEx_with_subject_removes_matching()
{
    await using var store = CreateStore("purgeex-subject");

    // Interleave "foo" and "bar" messages.
    for (var i = 0; i < 5; i++)
    {
        await store.AppendAsync("foo", "foo-data"u8.ToArray(), default);
        await store.AppendAsync("bar", "bar-data"u8.ToArray(), default);
    }

    var before = await store.GetStateAsync(default);
    before.Messages.ShouldBe((ulong)10);

    // PurgeEx with subject="foo", seq=0, keep=0: removes all "foo" messages.
    var removed = store.PurgeEx("foo", 0, 0);
    removed.ShouldBe((ulong)5);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)5);

    // All remaining messages should be on "bar".
    var messages = await store.ListAsync(default);
    messages.Count.ShouldBe(5);
    foreach (var msg in messages)
        msg.Subject.ShouldBe("bar");
}
|
||||
|
||||
// Go: TestFileStorePurgeExKeepOneBug server/filestore_test.go:3382
// PurgeEx with keep=1 must retain exactly the highest-sequence message
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task PurgeEx_keep_one_preserves_last()
{
    await using var store = CreateStore("purgeex-keep-one");

    ulong lastSeq = 0;
    for (var i = 0; i < 5; i++)
        lastSeq = await store.AppendAsync("foo", "data"u8.ToArray(), default);

    lastSeq.ShouldBe((ulong)5);

    // PurgeEx with keep=1: should remove 4 messages, keeping only the last one.
    var removed = store.PurgeEx("foo", 0, 1);
    removed.ShouldBe((ulong)4);

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)1);

    // The remaining message must be the one with the highest sequence.
    var remaining = await store.ListAsync(default);
    remaining.Count.ShouldBe(1);
    remaining[0].Sequence.ShouldBe(lastSeq);
}
|
||||
|
||||
// Go: TestFileStorePurgeExNoTombsOnBlockRemoval server/filestore_test.go:3823
// After PurgeEx removes an entire subject, unrelated messages must remain
// loadable and state consistent (stale Skip attribute removed — duplicate
// [Fact] attributes do not compile).
[Fact]
public async Task PurgeEx_no_tombstones_on_block_removal()
{
    await using var store = CreateStore("purgeex-no-tombs");

    // Store messages on "foo" and "bar".
    for (var i = 0; i < 5; i++)
        await store.AppendAsync("foo", "foo-data"u8.ToArray(), default);

    var barSeqs = new List<ulong>();
    for (var i = 0; i < 5; i++)
        barSeqs.Add(await store.AppendAsync("bar", "bar-data"u8.ToArray(), default));

    // PurgeEx removes all "foo" messages.
    store.PurgeEx("foo", 0, 0);

    // "bar" messages must still be loadable and state must be consistent.
    foreach (var seq in barSeqs)
    {
        var msg = await store.LoadAsync(seq, default);
        msg.ShouldNotBeNull();
        msg!.Subject.ShouldBe("bar");
    }

    var state = await store.GetStateAsync(default);
    state.Messages.ShouldBe((ulong)5);
}
|
||||
|
||||
// Go: TestFileStorePurge server/filestore_test.go:709
|
||||
|
||||
@@ -60,7 +60,7 @@ public sealed class FileStoreRecovery2Tests : IDisposable
|
||||
if (Directory.Exists(_root))
|
||||
{
|
||||
try { Directory.Delete(_root, recursive: true); }
|
||||
catch { /* best-effort cleanup */ }
|
||||
catch (IOException) { /* best-effort cleanup — directory may be locked by OS */ }
|
||||
}
|
||||
}
|
||||
|
||||
@@ -381,28 +381,33 @@ public sealed class FileStoreRecovery2Tests : IDisposable
|
||||
public void SyncCompress_OnlyIfDirty_CompactFlagBehavior()
|
||||
{
|
||||
var dir = UniqueDir();
|
||||
using var store = CreateStore(dir, new FileStoreOptions { BlockSizeBytes = 256 });
|
||||
|
||||
var msg = "hello"u8.ToArray();
|
||||
// Scoped block to ensure store is fully disposed (pending writes flushed)
|
||||
// before opening the second store for recovery verification.
|
||||
{
|
||||
using var store = CreateStore(dir, new FileStoreOptions { BlockSizeBytes = 256 });
|
||||
|
||||
// Fill 2 blocks (6 per block at blockSize=256).
|
||||
for (var i = 0; i < 12; i++)
|
||||
store.StoreMsg("foo.BB", null, msg, 0);
|
||||
var msg = "hello"u8.ToArray();
|
||||
|
||||
// Add one more to start a third block.
|
||||
store.StoreMsg("foo.BB", null, msg, 0); // seq 13
|
||||
// Fill 2 blocks (6 per block at blockSize=256).
|
||||
for (var i = 0; i < 12; i++)
|
||||
store.StoreMsg("foo.BB", null, msg, 0);
|
||||
|
||||
// Delete a bunch to create holes in blocks 1 and 2.
|
||||
foreach (var seq in new ulong[] { 2, 3, 4, 5, 8, 9, 10, 11 })
|
||||
store.RemoveMsg(seq).ShouldBeTrue();
|
||||
// Add one more to start a third block.
|
||||
store.StoreMsg("foo.BB", null, msg, 0); // seq 13
|
||||
|
||||
// Add more to create a 4th/5th block.
|
||||
for (var i = 0; i < 6; i++)
|
||||
store.StoreMsg("foo.BB", null, msg, 0);
|
||||
// Delete a bunch to create holes in blocks 1 and 2.
|
||||
foreach (var seq in new ulong[] { 2, 3, 4, 5, 8, 9, 10, 11 })
|
||||
store.RemoveMsg(seq).ShouldBeTrue();
|
||||
|
||||
// Total live: 13 + 6 = 19 - 8 deleted = 11.
|
||||
var state = store.State();
|
||||
state.Msgs.ShouldBe(11UL);
|
||||
// Add more to create a 4th/5th block.
|
||||
for (var i = 0; i < 6; i++)
|
||||
store.StoreMsg("foo.BB", null, msg, 0);
|
||||
|
||||
// Total live: 13 + 6 = 19 - 8 deleted = 11.
|
||||
var state = store.State();
|
||||
state.Msgs.ShouldBe(11UL);
|
||||
}
|
||||
|
||||
// After restart, state should be preserved.
|
||||
using var store2 = CreateStore(dir, new FileStoreOptions { BlockSizeBytes = 256 });
|
||||
|
||||
@@ -114,52 +114,181 @@ public sealed class FileStoreSubjectTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreSubjectStateCacheExpiration server/filestore_test.go:4143
// SubjectsState must be invalidated by both appends and removes
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task Subject_state_cache_expiration()
{
    await using var store = CreateStore("subj-state-cache");

    await store.AppendAsync("foo.1", "a"u8.ToArray(), default);
    await store.AppendAsync("foo.2", "b"u8.ToArray(), default);
    await store.AppendAsync("bar.1", "c"u8.ToArray(), default);

    // Initial state: 3 subjects, each with 1 message.
    var initial = store.SubjectsState(">");
    initial.Count.ShouldBe(3);
    initial["foo.1"].Msgs.ShouldBe((ulong)1);
    initial["foo.2"].Msgs.ShouldBe((ulong)1);
    initial["bar.1"].Msgs.ShouldBe((ulong)1);

    // Add a second message to "foo.1" — cache must be invalidated.
    await store.AppendAsync("foo.1", "d"u8.ToArray(), default);

    var updated = store.SubjectsState(">");
    updated.Count.ShouldBe(3);
    updated["foo.1"].Msgs.ShouldBe((ulong)2);
    updated["foo.1"].First.ShouldBe((ulong)1);
    updated["foo.1"].Last.ShouldBe((ulong)4);

    // Remove one "foo.1" message — cache must be invalidated again.
    (await store.RemoveAsync(1, default)).ShouldBeTrue();

    var afterRemove = store.SubjectsState(">");
    afterRemove.Count.ShouldBe(3);
    afterRemove["foo.1"].Msgs.ShouldBe((ulong)1);
    afterRemove["foo.1"].First.ShouldBe((ulong)4);
    afterRemove["foo.1"].Last.ShouldBe((ulong)4);
}
|
||||
|
||||
// Go: TestFileStoreSubjectsTotals server/filestore_test.go:4948
// SubjectsTotals must honor wildcard filters ("foo.>" vs ">")
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task Subjects_totals_with_wildcards()
{
    await using var store = CreateStore("subj-totals");

    await store.AppendAsync("foo.a", "1"u8.ToArray(), default);
    await store.AppendAsync("foo.b", "2"u8.ToArray(), default);
    await store.AppendAsync("foo.a", "3"u8.ToArray(), default);
    await store.AppendAsync("bar.c", "4"u8.ToArray(), default);

    // Filter to foo.> — should only see foo subjects.
    var fooTotals = store.SubjectsTotals("foo.>");
    fooTotals.Count.ShouldBe(2);
    fooTotals["foo.a"].ShouldBe((ulong)2);
    fooTotals["foo.b"].ShouldBe((ulong)1);
    fooTotals.ContainsKey("bar.c").ShouldBeFalse();

    // Filter to > — should see all subjects.
    var allTotals = store.SubjectsTotals(">");
    allTotals.Count.ShouldBe(3);
    allTotals["foo.a"].ShouldBe((ulong)2);
    allTotals["foo.b"].ShouldBe((ulong)1);
    allTotals["bar.c"].ShouldBe((ulong)1);
}
|
||||
|
||||
// Go: TestFileStoreSubjectCorruption server/filestore_test.go:6466
// SubjectForSeq resolves live sequences and throws for removed/unknown ones
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task Subject_corruption_detection()
{
    await using var store = CreateStore("subj-corruption");

    await store.AppendAsync("foo", "a"u8.ToArray(), default);
    await store.AppendAsync("bar", "b"u8.ToArray(), default);
    await store.AppendAsync("baz", "c"u8.ToArray(), default);

    // Each sequence should map to the correct subject.
    store.SubjectForSeq(1).ShouldBe("foo");
    store.SubjectForSeq(2).ShouldBe("bar");
    store.SubjectForSeq(3).ShouldBe("baz");

    // Remove seq 2 — SubjectForSeq should throw for the removed sequence.
    (await store.RemoveAsync(2, default)).ShouldBeTrue();

    Should.Throw<KeyNotFoundException>(() => store.SubjectForSeq(2));

    // Non-existent sequence should also throw.
    Should.Throw<KeyNotFoundException>(() => store.SubjectForSeq(999));

    // Remaining sequences still resolve correctly.
    store.SubjectForSeq(1).ShouldBe("foo");
    store.SubjectForSeq(3).ShouldBe("baz");
}
|
||||
|
||||
// Go: TestFileStoreFilteredPendingBug server/filestore_test.go:3414
// FilteredState with a non-matching subject filter reports zero messages
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task Filtered_pending_no_match_returns_zero()
{
    await using var store = CreateStore("filtered-pending-nomatch");

    await store.AppendAsync("foo", "a"u8.ToArray(), default);
    await store.AppendAsync("foo", "b"u8.ToArray(), default);
    await store.AppendAsync("foo", "c"u8.ToArray(), default);

    // Filter "bar" matches no messages — Msgs should be 0.
    var state = store.FilteredState(1, "bar");
    state.Msgs.ShouldBe((ulong)0);
}
|
||||
|
||||
// Go: TestFileStoreFilteredFirstMatchingBug server/filestore_test.go:4448
// The bug was that LoadNextMsg with a filter could return a message whose subject
// did not match the filter when fss (per-subject state) was regenerated from only
// part of the block. The fix: when no matching message exists at or after start,
// throw KeyNotFoundException rather than returning a wrong-subject message.
// (Stale Skip attribute removed — duplicate [Fact] attributes do not compile.)
[Fact]
public async Task Filtered_first_matching_finds_correct_sequence()
{
    await using var store = CreateStore("filtered-first-match");

    // seqs 1-3: "foo.foo", seq 4: "foo.bar" (no more "foo.foo" after seq 3)
    await store.AppendAsync("foo.foo", "A"u8.ToArray(), default);
    await store.AppendAsync("foo.foo", "B"u8.ToArray(), default);
    await store.AppendAsync("foo.foo", "C"u8.ToArray(), default);
    await store.AppendAsync("foo.bar", "X"u8.ToArray(), default);

    // Starting at seq 4, filter "foo.foo" — seq 4 is "foo.bar", and there are no
    // further "foo.foo" messages, so LoadNextMsg must throw rather than return a
    // message with the wrong subject.
    Should.Throw<KeyNotFoundException>(() =>
        store.LoadNextMsg("foo.foo", false, 4, null));

    // Sanity: starting at seq 1 should find "foo.foo" at seq 1 with no skip.
    var (msg, skip) = store.LoadNextMsg("foo.foo", false, 1, null);
    msg.Subject.ShouldBe("foo.foo");
    msg.Sequence.ShouldBe((ulong)1);
    skip.ShouldBe((ulong)0);
}
|
||||
|
||||
// Go: TestFileStoreExpireSubjectMeta server/filestore_test.go:4014
// When the last message on a subject is removed, its per-subject metadata
// must be dropped from SubjectsState (stale Skip attribute removed —
// duplicate [Fact] attributes do not compile).
[Fact]
public async Task Expired_subject_metadata_cleans_up()
{
    await using var store = CreateStore("expire-subj-meta");

    await store.AppendAsync("foo.1", "a"u8.ToArray(), default);
    await store.AppendAsync("foo.1", "b"u8.ToArray(), default);
    await store.AppendAsync("foo.2", "c"u8.ToArray(), default);

    // Remove ALL messages on "foo.1".
    (await store.RemoveAsync(1, default)).ShouldBeTrue();
    (await store.RemoveAsync(2, default)).ShouldBeTrue();

    // "foo.1" should have been cleaned up — not present in SubjectsState.
    var state = store.SubjectsState(">");
    state.ContainsKey("foo.1").ShouldBeFalse();

    // "foo.2" is still alive.
    state.ContainsKey("foo.2").ShouldBeTrue();
    state["foo.2"].Msgs.ShouldBe((ulong)1);
}
|
||||
|
||||
// Go: TestFileStoreAllFilteredStateWithDeleted server/filestore_test.go:4827
// FilteredState must exclude deleted sequences from the message count while
// still reporting the true first/last bounds (stale Skip attribute removed —
// duplicate [Fact] attributes do not compile).
[Fact]
public async Task Filtered_state_with_deleted_messages()
{
    await using var store = CreateStore("filtered-state-deleted");

    // Store 5 messages on "foo" — seqs 1..5.
    for (var i = 0; i < 5; i++)
        await store.AppendAsync("foo", "x"u8.ToArray(), default);

    // Remove seqs 2 and 4 — seqs 1, 3, 5 remain.
    (await store.RemoveAsync(2, default)).ShouldBeTrue();
    (await store.RemoveAsync(4, default)).ShouldBeTrue();

    // FilteredState from seq 1 on "foo" should report 3 remaining messages.
    var state = store.FilteredState(1, "foo");
    state.Msgs.ShouldBe((ulong)3);
    state.First.ShouldBe((ulong)1);
    state.Last.ShouldBe((ulong)5);
}
|
||||
|
||||
// Test LoadLastBySubject with multiple subjects and removes.
|
||||
@@ -277,10 +406,30 @@ public sealed class FileStoreSubjectTests : IDisposable
|
||||
}
|
||||
|
||||
// Go: TestFileStoreNumPendingLastBySubject server/filestore_test.go:6501
// NumPending with lastPerSubject=true counts one message per distinct subject
// (stale Skip attribute removed — duplicate [Fact] attributes do not compile).
[Fact]
public async Task NumPending_last_per_subject()
{
    await using var store = CreateStore("num-pending-lps");

    // "foo" x3, "bar" x2 — 2 distinct subjects.
    await store.AppendAsync("foo", "1"u8.ToArray(), default);
    await store.AppendAsync("foo", "2"u8.ToArray(), default);
    await store.AppendAsync("foo", "3"u8.ToArray(), default);
    await store.AppendAsync("bar", "4"u8.ToArray(), default);
    await store.AppendAsync("bar", "5"u8.ToArray(), default);

    // lastPerSubject=true: count only the last message per distinct subject.
    // 2 distinct subjects → Total == 2.
    var (total, _) = store.NumPending(1, ">", true);
    total.ShouldBe((ulong)2);

    // lastPerSubject=false: count all messages at or after sseq 1.
    var (totalAll, _) = store.NumPending(1, ">", false);
    totalAll.ShouldBe((ulong)5);

    // Filter to just "foo" with lastPerSubject=true → 1.
    var (fooLps, _) = store.NumPending(1, "foo", true);
    fooLps.ShouldBe((ulong)1);
}
|
||||
|
||||
// Test many distinct subjects.
|
||||
|
||||
Reference in New Issue
Block a user