refactor: extract NATS.Server.JetStream.Tests project
Move 225 JetStream-related test files from NATS.Server.Tests into a dedicated NATS.Server.JetStream.Tests project. This includes root-level JetStream*.cs files, storage test files (FileStore, MemStore, StreamStoreContract), and the full JetStream/ subfolder tree (Api, Cluster, Consumers, MirrorSource, Snapshots, Storage, Streams). Updated all namespaces, added InternalsVisibleTo, registered in the solution file, and added the JETSTREAM_INTEGRATION_MATRIX define.
This commit is contained in:
@@ -0,0 +1,21 @@
|
||||
using NATS.Server.Configuration;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class ClusterJetStreamConfigProcessorTests
{
    // A config containing both cluster and jetstream blocks should populate the
    // corresponding option groups, including the JetStream store directory.
    [Fact]
    public void ConfigProcessor_maps_jetstream_and_cluster_blocks()
    {
        const string configText =
            """
            cluster { name: C1; listen: 127.0.0.1:6222 }
            jetstream { store_dir: /tmp/js; max_mem_store: 1GB; max_file_store: 10GB }
            """;

        var options = ConfigProcessor.ProcessConfig(configText);

        options.Cluster.ShouldNotBeNull();
        options.JetStream.ShouldNotBeNull();
        options.JetStream!.StoreDir.ShouldBe("/tmp/js");
    }
}
|
||||
104
tests/NATS.Server.JetStream.Tests/FileStoreEncryptionTests.cs
Normal file
104
tests/NATS.Server.JetStream.Tests/FileStoreEncryptionTests.cs
Normal file
@@ -0,0 +1,104 @@
|
||||
// Go: TestFileStoreEncryption server/filestore_test.go
|
||||
// Reference: golang/nats-server/server/filestore.go:816-907 (genEncryptionKeys, recoverAEK, setupAEK)
|
||||
// Tests that block files are encrypted at rest and can be recovered with the same key.
|
||||
|
||||
using System.Security.Cryptography;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class FileStoreEncryptionTests
{
    // Builds FileStoreOptions rooted at the given directory with the requested
    // cipher and encryption key. Shared by all three encryption tests.
    private static FileStoreOptions EncryptedOptions(string directory, StoreCipher cipher, byte[] key) => new()
    {
        Directory = directory,
        Cipher = cipher,
        EncryptionKey = key,
    };

    // Go: TestFileStoreEncryption server/filestore_test.go
    [Fact]
    public async Task Encrypted_block_round_trips_message()
    {
        var tempDir = Directory.CreateTempSubdirectory();
        var encryptionKey = new byte[32];
        RandomNumberGenerator.Fill(encryptionKey);

        await using (var writer = new FileStore(EncryptedOptions(tempDir.FullName, StoreCipher.ChaCha, encryptionKey)))
        {
            await writer.AppendAsync("test.subj", "hello encrypted"u8.ToArray(), default);
        }

        // The on-disk block file must not leak the plaintext payload.
        var blockFiles = Directory.GetFiles(tempDir.FullName, "*.blk");
        blockFiles.ShouldNotBeEmpty();
        var rawBytes = File.ReadAllBytes(blockFiles[0]);
        System.Text.Encoding.UTF8.GetString(rawBytes).ShouldNotContain("hello encrypted");

        // Reopening with the same key must decrypt back to the original payload.
        await using var reopened = new FileStore(EncryptedOptions(tempDir.FullName, StoreCipher.ChaCha, encryptionKey));
        var message = await reopened.LoadAsync(1, default);
        message.ShouldNotBeNull();
        System.Text.Encoding.UTF8.GetString(message.Payload.Span).ShouldBe("hello encrypted");
    }

    // Same round-trip as above but exercising the AES cipher path.
    [Fact]
    public async Task Encrypted_block_with_aes_round_trips()
    {
        var tempDir = Directory.CreateTempSubdirectory();
        var encryptionKey = new byte[32];
        RandomNumberGenerator.Fill(encryptionKey);

        await using (var writer = new FileStore(EncryptedOptions(tempDir.FullName, StoreCipher.Aes, encryptionKey)))
        {
            await writer.AppendAsync("aes.subj", "aes payload"u8.ToArray(), default);
        }

        await using var reopened = new FileStore(EncryptedOptions(tempDir.FullName, StoreCipher.Aes, encryptionKey));
        var message = await reopened.LoadAsync(1, default);
        message.ShouldNotBeNull();
        System.Text.Encoding.UTF8.GetString(message.Payload.Span).ShouldBe("aes payload");
    }

    // Recovery with a mismatched key must fail rather than return garbage.
    [Fact]
    public async Task Wrong_key_fails_to_decrypt()
    {
        var tempDir = Directory.CreateTempSubdirectory();
        var writeKey = new byte[32];
        var otherKey = new byte[32];
        RandomNumberGenerator.Fill(writeKey);
        RandomNumberGenerator.Fill(otherKey);

        await using (var writer = new FileStore(EncryptedOptions(tempDir.FullName, StoreCipher.ChaCha, writeKey)))
        {
            await writer.AppendAsync("secret", "data"u8.ToArray(), default);
        }

        // Wrong key surfaces as InvalidDataException (wrapping the underlying
        // CryptographicException).
        Should.Throw<InvalidDataException>(
            () => new FileStore(EncryptedOptions(tempDir.FullName, StoreCipher.ChaCha, otherKey)));
    }
}
|
||||
18
tests/NATS.Server.JetStream.Tests/FileStoreTests.cs
Normal file
18
tests/NATS.Server.JetStream.Tests/FileStoreTests.cs
Normal file
@@ -0,0 +1,18 @@
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class FileStoreTests
{
    // Appending a message, disposing the store, then opening a fresh store on the
    // same directory should recover the persisted message count.
    [Fact]
    public async Task FileStore_recovers_messages_after_restart()
    {
        var tempDir = Directory.CreateTempSubdirectory();

        await using (var original = new FileStore(new FileStoreOptions { Directory = tempDir.FullName }))
        {
            await original.AppendAsync("foo", "payload"u8.ToArray(), default);
        }

        await using var reopened = new FileStore(new FileStoreOptions { Directory = tempDir.FullName });
        var state = await reopened.GetStateAsync(default);
        state.Messages.ShouldBe(1UL);
    }
}
|
||||
@@ -0,0 +1,206 @@
|
||||
// Go reference: jetstream_api.go — advisory event publication for stream/consumer lifecycle.
|
||||
// Advisory subjects use the pattern $JS.EVENT.ADVISORY.{type}.{stream}[.{consumer}].
|
||||
|
||||
using NATS.Server.Events;
|
||||
using NATS.Server.JetStream.Api;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
public class AdvisoryEventTests
{
    // Wires an AdvisoryPublisher to a callback that records every (subject, body)
    // pair so each test can inspect exactly what was published.
    private static (AdvisoryPublisher Publisher, List<(string Subject, object Body)> Published) CreatePublisher()
    {
        var recorded = new List<(string Subject, object Body)>();
        return (new AdvisoryPublisher((subject, body) => recorded.Add((subject, body))), recorded);
    }

    // Go reference: jetstream_api.go — stream created advisory on $JS.EVENT.ADVISORY.STREAM.CREATED.{stream}.
    [Fact]
    public void StreamCreated_publishes_advisory_to_correct_subject()
    {
        var (pub, events) = CreatePublisher();

        pub.StreamCreated("ORDERS");

        events.Count.ShouldBe(1);
        events[0].Subject.ShouldBe("$JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS");
    }

    // Go reference: jetstream_api.go — stream deleted advisory includes stream name in subject.
    [Fact]
    public void StreamDeleted_publishes_advisory_with_stream_name()
    {
        var (pub, events) = CreatePublisher();

        pub.StreamDeleted("PAYMENTS");

        events.Count.ShouldBe(1);
        events[0].Subject.ShouldBe("$JS.EVENT.ADVISORY.STREAM.DELETED.PAYMENTS");
        events[0].Body.ShouldBeOfType<AdvisoryEvent>().Stream.ShouldBe("PAYMENTS");
    }

    // Go reference: jetstream_api.go — stream updated advisory carries optional detail payload.
    [Fact]
    public void StreamUpdated_publishes_advisory_with_detail()
    {
        var (pub, events) = CreatePublisher();

        pub.StreamUpdated("EVENTS", new { Reason = "config_change" });

        events.Count.ShouldBe(1);
        events[0].Subject.ShouldBe("$JS.EVENT.ADVISORY.STREAM.UPDATED.EVENTS");
        events[0].Body.ShouldBeOfType<AdvisoryEvent>().Detail.ShouldNotBeNull();
    }

    // Go reference: jetstream_api.go — consumer created advisory on $JS.EVENT.ADVISORY.CONSUMER.CREATED.{stream}.{consumer}.
    [Fact]
    public void ConsumerCreated_publishes_advisory_with_stream_and_consumer()
    {
        var (pub, events) = CreatePublisher();

        pub.ConsumerCreated("ORDERS", "push-consumer");

        events.Count.ShouldBe(1);
        events[0].Subject.ShouldBe("$JS.EVENT.ADVISORY.CONSUMER.CREATED.ORDERS.push-consumer");
        var advisory = events[0].Body.ShouldBeOfType<AdvisoryEvent>();
        advisory.Stream.ShouldBe("ORDERS");
        advisory.Consumer.ShouldBe("push-consumer");
    }

    // Go reference: jetstream_api.go — consumer deleted advisory type field identifies event kind.
    [Fact]
    public void ConsumerDeleted_publishes_advisory_with_correct_type()
    {
        var (pub, events) = CreatePublisher();

        pub.ConsumerDeleted("ORDERS", "my-consumer");

        events.Count.ShouldBe(1);
        events[0].Body.ShouldBeOfType<AdvisoryEvent>()
            .Type.ShouldBe("io.nats.jetstream.advisory.consumer_deleted");
    }

    // Go reference: jetstream_api.go — publish count tracks all emitted advisories atomically.
    [Fact]
    public void PublishCount_increments_for_each_advisory()
    {
        var (pub, _) = CreatePublisher();
        pub.PublishCount.ShouldBe(0);

        pub.StreamCreated("S1");
        pub.PublishCount.ShouldBe(1);

        pub.StreamDeleted("S1");
        pub.PublishCount.ShouldBe(2);

        pub.ConsumerCreated("S1", "C1");
        pub.PublishCount.ShouldBe(3);
    }

    // Go reference: jetstream_api.go — each advisory type has its own descriptive type string.
    [Fact]
    public void Advisory_event_has_correct_type_field()
    {
        var (pub, events) = CreatePublisher();

        // Local helper: asserts the type string of the advisory at the given index.
        void TypeAt(int index, string expected)
            => events[index].Body.ShouldBeOfType<AdvisoryEvent>().Type.ShouldBe(expected);

        pub.StreamCreated("S");
        TypeAt(0, "io.nats.jetstream.advisory.stream_created");

        pub.StreamDeleted("S");
        TypeAt(1, "io.nats.jetstream.advisory.stream_deleted");

        pub.StreamUpdated("S");
        TypeAt(2, "io.nats.jetstream.advisory.stream_updated");

        pub.ConsumerCreated("S", "C");
        TypeAt(3, "io.nats.jetstream.advisory.consumer_created");

        pub.ConsumerDeleted("S", "C");
        TypeAt(4, "io.nats.jetstream.advisory.consumer_deleted");
    }

    // Go reference: jetstream_api.go — advisory timestamps use UTC to ensure cross-cluster consistency.
    [Fact]
    public void Advisory_event_has_utc_timestamp()
    {
        var (pub, events) = CreatePublisher();

        var lowerBound = DateTime.UtcNow;
        pub.StreamCreated("TEST");
        var upperBound = DateTime.UtcNow;

        var advisory = events[0].Body.ShouldBeOfType<AdvisoryEvent>();
        advisory.TimeStamp.Kind.ShouldBe(DateTimeKind.Utc);
        advisory.TimeStamp.ShouldBeGreaterThanOrEqualTo(lowerBound);
        advisory.TimeStamp.ShouldBeLessThanOrEqualTo(upperBound);
    }

    // Go reference: jetstream_api.go — advisory subjects are derived from EventSubjects constants.
    [Fact]
    public void Advisory_subjects_format_correctly()
    {
        // Local helper: formats the template with the given args and checks the result.
        static void Check(string template, string expected, params object[] args)
            => string.Format(template, args).ShouldBe(expected);

        Check(EventSubjects.JsAdvisoryStreamCreated,
            "$JS.EVENT.ADVISORY.STREAM.CREATED.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryStreamDeleted,
            "$JS.EVENT.ADVISORY.STREAM.DELETED.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryStreamUpdated,
            "$JS.EVENT.ADVISORY.STREAM.UPDATED.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryConsumerCreated,
            "$JS.EVENT.ADVISORY.CONSUMER.CREATED.MY_STREAM.MY_CONSUMER", "MY_STREAM", "MY_CONSUMER");
        Check(EventSubjects.JsAdvisoryConsumerDeleted,
            "$JS.EVENT.ADVISORY.CONSUMER.DELETED.MY_STREAM.MY_CONSUMER", "MY_STREAM", "MY_CONSUMER");
        Check(EventSubjects.JsAdvisoryStreamSnapshotCreated,
            "$JS.EVENT.ADVISORY.STREAM.SNAPSHOT_CREATE.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryStreamSnapshotCompleted,
            "$JS.EVENT.ADVISORY.STREAM.SNAPSHOT_COMPLETE.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryStreamRestoreCreated,
            "$JS.EVENT.ADVISORY.STREAM.RESTORE_CREATE.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryStreamRestoreCompleted,
            "$JS.EVENT.ADVISORY.STREAM.RESTORE_COMPLETE.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryStreamLeaderElected,
            "$JS.EVENT.ADVISORY.STREAM.LEADER_ELECTED.MY_STREAM", "MY_STREAM");
        Check(EventSubjects.JsAdvisoryStreamQuorumLost,
            "$JS.EVENT.ADVISORY.STREAM.QUORUM_LOST.MY_STREAM", "MY_STREAM");
    }

    // Go reference: jetstream_api.go — full lifecycle sequence (create, update, delete) emits all advisories.
    [Fact]
    public void Multiple_advisories_all_published()
    {
        var (pub, events) = CreatePublisher();

        pub.StreamCreated("LIFECYCLE");
        pub.StreamUpdated("LIFECYCLE", new { Reason = "retention_change" });
        pub.ConsumerCreated("LIFECYCLE", "worker");
        pub.ConsumerDeleted("LIFECYCLE", "worker");
        pub.StreamDeleted("LIFECYCLE");

        string[] expectedSubjects =
        {
            "$JS.EVENT.ADVISORY.STREAM.CREATED.LIFECYCLE",
            "$JS.EVENT.ADVISORY.STREAM.UPDATED.LIFECYCLE",
            "$JS.EVENT.ADVISORY.CONSUMER.CREATED.LIFECYCLE.worker",
            "$JS.EVENT.ADVISORY.CONSUMER.DELETED.LIFECYCLE.worker",
            "$JS.EVENT.ADVISORY.STREAM.DELETED.LIFECYCLE",
        };

        events.Count.ShouldBe(expectedSubjects.Length);
        for (var i = 0; i < expectedSubjects.Length; i++)
            events[i].Subject.ShouldBe(expectedSubjects[i]);
        pub.PublishCount.ShouldBe(5);
    }
}
|
||||
@@ -0,0 +1,124 @@
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
// Go reference: golang/nats-server/server/jetstream.go — $JS.API.* subject dispatch
|
||||
// Covers create/info/update/delete for streams, create/info/list/delete for consumers,
|
||||
// direct-get access, account info, and 404 routing for unknown subjects.
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class ApiEndpointParityTests
{
    // Go ref: jsStreamCreateT handler — stream create persists config and info round-trips correctly.
    [Fact]
    public async Task Stream_create_info_update_delete_lifecycle()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("EVENTS", "events.*");

        var infoResponse = await fx.RequestLocalAsync("$JS.API.STREAM.INFO.EVENTS", "{}");
        infoResponse.Error.ShouldBeNull();
        infoResponse.StreamInfo.ShouldNotBeNull();
        infoResponse.StreamInfo!.Config.Name.ShouldBe("EVENTS");
        infoResponse.StreamInfo.Config.Subjects.ShouldContain("events.*");

        var updateResponse = await fx.RequestLocalAsync(
            "$JS.API.STREAM.UPDATE.EVENTS",
            """{"name":"EVENTS","subjects":["events.*"],"max_msgs":100}""");
        updateResponse.Error.ShouldBeNull();
        updateResponse.StreamInfo.ShouldNotBeNull();
        updateResponse.StreamInfo!.Config.MaxMsgs.ShouldBe(100);

        var deleteResponse = await fx.RequestLocalAsync("$JS.API.STREAM.DELETE.EVENTS", "{}");
        deleteResponse.Error.ShouldBeNull();
        deleteResponse.Success.ShouldBeTrue();

        var postDelete = await fx.RequestLocalAsync("$JS.API.STREAM.INFO.EVENTS", "{}");
        postDelete.Error.ShouldNotBeNull();
        postDelete.Error!.Code.ShouldBe(404);
    }

    // Go ref: jsConsumerCreateT / jsConsumerInfoT handlers — consumer create then info returns config.
    [Fact]
    public async Task Consumer_create_info_list_delete_lifecycle()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ORDERS", "orders.*");

        var created = await fx.CreateConsumerAsync("ORDERS", "MON", "orders.created");
        created.Error.ShouldBeNull();
        created.ConsumerInfo.ShouldNotBeNull();
        created.ConsumerInfo!.Config.DurableName.ShouldBe("MON");

        var infoResponse = await fx.RequestLocalAsync("$JS.API.CONSUMER.INFO.ORDERS.MON", "{}");
        infoResponse.Error.ShouldBeNull();
        infoResponse.ConsumerInfo.ShouldNotBeNull();
        infoResponse.ConsumerInfo!.Config.FilterSubject.ShouldBe("orders.created");

        var namesResponse = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.ORDERS", "{}");
        namesResponse.Error.ShouldBeNull();
        namesResponse.ConsumerNames.ShouldNotBeNull();
        namesResponse.ConsumerNames.ShouldContain("MON");

        var listResponse = await fx.RequestLocalAsync("$JS.API.CONSUMER.LIST.ORDERS", "{}");
        listResponse.Error.ShouldBeNull();
        listResponse.ConsumerNames.ShouldNotBeNull();
        listResponse.ConsumerNames.ShouldContain("MON");

        var deleteResponse = await fx.RequestLocalAsync("$JS.API.CONSUMER.DELETE.ORDERS.MON", "{}");
        deleteResponse.Error.ShouldBeNull();
        deleteResponse.Success.ShouldBeTrue();

        var postDelete = await fx.RequestLocalAsync("$JS.API.CONSUMER.INFO.ORDERS.MON", "{}");
        postDelete.Error.ShouldNotBeNull();
        postDelete.Error!.Code.ShouldBe(404);
    }

    // Go ref: jsDirectMsgGetT handler — direct get returns message payload at correct sequence.
    [Fact]
    public async Task Direct_get_returns_message_at_sequence()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("LOGS", "logs.*");
        var ack = await fx.PublishAndGetAckAsync("logs.app", "hello-direct");

        var directResponse = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.LOGS", $$"""{"seq":{{ack.Seq}}}""");
        directResponse.Error.ShouldBeNull();
        directResponse.DirectMessage.ShouldNotBeNull();
        directResponse.DirectMessage!.Sequence.ShouldBe(ack.Seq);
        directResponse.DirectMessage.Payload.ShouldBe("hello-direct");
    }

    // Go ref: jsStreamNamesT / $JS.API.INFO handler — names list reflects created streams,
    // account info reflects total stream and consumer counts.
    [Fact]
    public async Task Stream_names_and_account_info_reflect_state()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ALPHA", "alpha.*");
        _ = await fx.CreateStreamAsync("BETA", ["beta.*"]);
        _ = await fx.CreateConsumerAsync("ALPHA", "C1", "alpha.>");
        _ = await fx.CreateConsumerAsync("BETA", "C2", "beta.>");

        var namesResponse = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");
        namesResponse.Error.ShouldBeNull();
        namesResponse.StreamNames.ShouldNotBeNull();
        namesResponse.StreamNames.ShouldContain("ALPHA");
        namesResponse.StreamNames.ShouldContain("BETA");

        var accountResponse = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        accountResponse.Error.ShouldBeNull();
        accountResponse.AccountInfo.ShouldNotBeNull();
        accountResponse.AccountInfo!.Streams.ShouldBe(2);
        accountResponse.AccountInfo.Consumers.ShouldBe(2);
    }

    // Go ref: JetStreamApiRouter dispatch — subjects not matching any handler return 404 error shape.
    [Fact]
    public async Task Unknown_api_subject_returns_404_error_response()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ORDERS", "orders.*");

        var response = await fx.RequestLocalAsync("$JS.API.STREAM.FROBNICATE.ORDERS", "{}");
        response.Error.ShouldNotBeNull();
        response.Error!.Code.ShouldBe(404);
        response.StreamInfo.ShouldBeNull();
        response.ConsumerInfo.ShouldBeNull();
        response.Success.ShouldBeFalse();
    }
}
|
||||
@@ -0,0 +1,186 @@
|
||||
// Go reference: jetstream_api.go — rate limiting via maxConcurrentRequests semaphore and
|
||||
// request deduplication via the dedup cache keyed by Nats-Msg-Id header.
|
||||
// The Go server uses a configurable semaphore (default 256) to throttle concurrent API
|
||||
// requests, and caches responses for duplicate request IDs within a TTL window.
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
using NATS.Server.JetStream.Api;
|
||||
|
||||
public class ApiRateLimiterTests : IDisposable
{
    // Four-slot limiter shared by the tests below; xUnit constructs a fresh
    // instance of this class (and therefore a fresh limiter) per test.
    private readonly ApiRateLimiter _limiter = new(maxConcurrent: 4);

    public void Dispose() => _limiter.Dispose();

    // Local helper: acquires `count` slots on the shared limiter, asserting each succeeds.
    private async Task FillSlotsAsync(int count)
    {
        for (var i = 0; i < count; i++)
            (await _limiter.TryAcquireAsync()).ShouldBeTrue();
    }

    // Go reference: jetstream_api.go — semaphore.TryAcquire(0) used for non-blocking attempt.
    [Fact]
    public async Task TryAcquire_succeeds_when_slots_available()
    {
        (await _limiter.TryAcquireAsync()).ShouldBeTrue();
    }

    // Go reference: jetstream_api.go — when all slots are taken, new requests are rejected.
    [Fact]
    public async Task TryAcquire_fails_when_all_slots_taken()
    {
        await FillSlotsAsync(4);

        // The fifth attempt exceeds capacity and must be rejected.
        (await _limiter.TryAcquireAsync()).ShouldBeFalse();
    }

    // Go reference: jetstream_api.go — releasing a slot allows a subsequent request to proceed.
    [Fact]
    public async Task Release_frees_slot_for_next_request()
    {
        await FillSlotsAsync(4);
        (await _limiter.TryAcquireAsync()).ShouldBeFalse();

        _limiter.Release();

        // One slot is free again.
        (await _limiter.TryAcquireAsync()).ShouldBeTrue();
    }

    // Go reference: jetstream_api.go — active count reflects in-flight requests.
    [Fact]
    public async Task ActiveCount_tracks_concurrent_requests()
    {
        _limiter.ActiveCount.ShouldBe(0);

        await FillSlotsAsync(3);

        _limiter.ActiveCount.ShouldBe(3);
    }

    // Go reference: jetstream_api.go — unknown request ID returns null (cache miss).
    [Fact]
    public void GetCachedResponse_returns_null_for_unknown_id()
    {
        _limiter.GetCachedResponse("nonexistent-id").ShouldBeNull();
    }

    // Go reference: jetstream_api.go — dedup cache stores response keyed by Nats-Msg-Id.
    [Fact]
    public void CacheResponse_and_get_returns_cached()
    {
        _limiter.CacheResponse("req-001", JetStreamApiResponse.SuccessResponse());

        var cached = _limiter.GetCachedResponse("req-001");
        cached.ShouldNotBeNull();
        cached!.Success.ShouldBeTrue();
    }

    // Go reference: jetstream_api.go — dedup window expires after TTL (dedupWindow config).
    [SlopwatchSuppress("SW004", "TTL expiry test requires real wall-clock time to elapse; no synchronisation primitive can replace observing a time-based cache eviction")]
    [Fact]
    public async Task GetCachedResponse_returns_null_after_ttl_expiry()
    {
        using var ttlLimiter = new ApiRateLimiter(maxConcurrent: 4, dedupTtl: TimeSpan.FromMilliseconds(50));
        ttlLimiter.CacheResponse("req-ttl", JetStreamApiResponse.SuccessResponse());

        // Present before the TTL window elapses.
        ttlLimiter.GetCachedResponse("req-ttl").ShouldNotBeNull();

        await Task.Delay(120);

        // Evicted once the TTL has passed.
        ttlLimiter.GetCachedResponse("req-ttl").ShouldBeNull();
    }

    // Go reference: jetstream_api.go — null/empty Nats-Msg-Id is ignored for dedup.
    [Fact]
    public void CacheResponse_ignores_null_request_id()
    {
        var response = JetStreamApiResponse.SuccessResponse();

        // None of these should throw, and none should land in the cache.
        foreach (var badId in new string?[] { null, "", string.Empty })
            _limiter.CacheResponse(badId, response);

        _limiter.DedupCacheCount.ShouldBe(0);
        _limiter.GetCachedResponse(null).ShouldBeNull();
        _limiter.GetCachedResponse("").ShouldBeNull();
    }

    // Go reference: jetstream_api.go — periodic sweep removes expired dedup entries.
    [SlopwatchSuppress("SW004", "TTL expiry test requires real wall-clock time to elapse; no synchronisation primitive can replace observing a time-based cache eviction")]
    [Fact]
    public async Task PurgeExpired_removes_old_entries()
    {
        using var ttlLimiter = new ApiRateLimiter(maxConcurrent: 4, dedupTtl: TimeSpan.FromMilliseconds(50));

        foreach (var id in new[] { "req-a", "req-b", "req-c" })
            ttlLimiter.CacheResponse(id, JetStreamApiResponse.SuccessResponse());

        ttlLimiter.DedupCacheCount.ShouldBe(3);

        await Task.Delay(120);

        ttlLimiter.PurgeExpired().ShouldBe(3);
        ttlLimiter.DedupCacheCount.ShouldBe(0);
    }

    // Go reference: jetstream_api.go — dedup cache count is observable.
    [Fact]
    public void DedupCacheCount_tracks_cached_entries()
    {
        _limiter.DedupCacheCount.ShouldBe(0);

        // NOTE(review): sibling tests use SuccessResponse(); confirm Ok() is the
        // intended factory here or unify on one.
        foreach (var id in new[] { "req-1", "req-2", "req-3" })
            _limiter.CacheResponse(id, JetStreamApiResponse.Ok());

        _limiter.DedupCacheCount.ShouldBe(3);
    }

    // Go reference: jetstream_api.go — semaphore enforces max-concurrent across goroutines.
    [Fact]
    public async Task Concurrent_acquire_respects_max()
    {
        using var limiter = new ApiRateLimiter(maxConcurrent: 5);

        // Launch 10 concurrent attempts; exactly 5 should win slots.
        var outcomes = await Task.WhenAll(
            Enumerable.Range(0, 10).Select(_ => limiter.TryAcquireAsync()));

        outcomes.Count(granted => granted).ShouldBe(5);
    }

    // Go reference: jetstream_api.go — default maxConcurrentRequests = 256.
    [Fact]
    public async Task Default_max_concurrent_is_256()
    {
        using var defaultLimiter = new ApiRateLimiter();

        // All 256 default slots should be acquirable.
        var outcomes = await Task.WhenAll(
            Enumerable.Range(0, 256).Select(_ => defaultLimiter.TryAcquireAsync()));
        outcomes.ShouldAllBe(granted => granted);

        // The 257th attempt must be rejected.
        (await defaultLimiter.TryAcquireAsync()).ShouldBeFalse();
    }
}
|
||||
@@ -0,0 +1,241 @@
|
||||
// Go reference: jetstream_cluster.go:7620-8265 — clustered stream/consumer API handlers
|
||||
// propose to the meta RAFT group rather than applying locally to StreamManager/ConsumerManager.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api.Handlers;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
public class ClusteredApiTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Stream clustered handlers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
/// <summary>
|
||||
/// A successful clustered create proposes to the meta group, resulting in a new stream
|
||||
/// assignment tracked under the provided name.
|
||||
/// Go reference: jetstream_cluster.go:7620 jsClusteredStreamRequest.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task HandleClusteredCreate_proposes_to_meta_group()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 1);
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"ORDERS","subjects":["orders.>"]}""");
|
||||
|
||||
var response = await StreamApiHandlers.HandleClusteredCreateAsync(
|
||||
"$JS.API.STREAM.CREATE.ORDERS", payload, metaGroup, CancellationToken.None);
|
||||
|
||||
response.Error.ShouldBeNull();
|
||||
response.Success.ShouldBeTrue();
|
||||
metaGroup.GetStreamAssignment("ORDERS").ShouldNotBeNull();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// A duplicate clustered create for the same stream name returns an error response.
|
||||
/// Go reference: jetstream_cluster.go — duplicate stream proposal returns error.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task HandleClusteredCreate_returns_error_for_duplicate()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 1);
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"ORDERS","subjects":["orders.>"]}""");
|
||||
|
||||
// First create succeeds.
|
||||
var first = await StreamApiHandlers.HandleClusteredCreateAsync(
|
||||
"$JS.API.STREAM.CREATE.ORDERS", payload, metaGroup, CancellationToken.None);
|
||||
first.Error.ShouldBeNull();
|
||||
|
||||
// Second create for same name returns error.
|
||||
var second = await StreamApiHandlers.HandleClusteredCreateAsync(
|
||||
"$JS.API.STREAM.CREATE.ORDERS", payload, metaGroup, CancellationToken.None);
|
||||
second.Error.ShouldNotBeNull();
|
||||
second.Error!.Description.ShouldContain("ORDERS");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// When this node is not the meta-group leader, clustered create returns a not-leader error.
|
||||
/// Go reference: jetstream_cluster.go:7620 — leader check before proposing.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task HandleClusteredCreate_returns_error_when_not_leader()
|
||||
{
|
||||
// selfIndex=2, leaderIndex defaults to 1 — this node is NOT the leader.
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"ORDERS","subjects":["orders.>"]}""");
|
||||
|
||||
var response = await StreamApiHandlers.HandleClusteredCreateAsync(
|
||||
"$JS.API.STREAM.CREATE.ORDERS", payload, metaGroup, CancellationToken.None);
|
||||
|
||||
response.Error.ShouldNotBeNull();
|
||||
response.Error!.Code.ShouldBe(10003);
|
||||
response.Error.Description.ShouldBe("not leader");
|
||||
}
|
||||
|
||||
/// <summary>
/// Clustered update proposes a config change to an existing stream assignment.
/// Go reference: jetstream_cluster.go jsClusteredStreamUpdateRequest.
/// </summary>
[Fact]
public async Task HandleClusteredUpdate_updates_existing_stream()
{
    var meta = new JetStreamMetaGroup(nodes: 1);

    // Seed the meta group with the stream we are about to update.
    await StreamApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.STREAM.CREATE.EVENTS",
        Encoding.UTF8.GetBytes("""{"name":"EVENTS","subjects":["events.>"]}"""),
        meta,
        CancellationToken.None);

    // Apply a max_msgs constraint via the update handler.
    var result = await StreamApiHandlers.HandleClusteredUpdateAsync(
        "$JS.API.STREAM.UPDATE.EVENTS",
        Encoding.UTF8.GetBytes("""{"name":"EVENTS","subjects":["events.>"],"max_msgs":500}"""),
        meta,
        CancellationToken.None);

    result.Error.ShouldBeNull();
    result.Success.ShouldBeTrue();

    // The assignment survives the update.
    meta.GetStreamAssignment("EVENTS").ShouldNotBeNull();
}
|
||||
|
||||
/// <summary>
/// Clustered delete proposes removal of a stream from the meta group.
/// Go reference: jetstream_cluster.go processStreamRemoval via meta leader.
/// </summary>
[Fact]
public async Task HandleClusteredDelete_proposes_deletion()
{
    // Arrange: a single-node group holding the ORDERS assignment.
    var meta = new JetStreamMetaGroup(nodes: 1);
    await StreamApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.STREAM.CREATE.ORDERS",
        Encoding.UTF8.GetBytes("""{"name":"ORDERS","subjects":["orders.>"]}"""),
        meta,
        CancellationToken.None);
    meta.GetStreamAssignment("ORDERS").ShouldNotBeNull();

    // Act: propose deletion through the clustered handler.
    var result = await StreamApiHandlers.HandleClusteredDeleteAsync(
        "$JS.API.STREAM.DELETE.ORDERS", meta, CancellationToken.None);

    // Assert: the assignment is gone.
    result.Error.ShouldBeNull();
    result.Success.ShouldBeTrue();
    meta.GetStreamAssignment("ORDERS").ShouldBeNull();
}
|
||||
|
||||
/// <summary>
/// Clustered delete of a non-existent stream returns a 404 not-found error.
/// Go reference: jetstream_cluster.go — delete missing stream returns error.
/// </summary>
[Fact]
public async Task HandleClusteredDelete_returns_error_for_missing_stream()
{
    var meta = new JetStreamMetaGroup(nodes: 1);

    // No stream named GHOST was ever assigned.
    var result = await StreamApiHandlers.HandleClusteredDeleteAsync(
        "$JS.API.STREAM.DELETE.GHOST", meta, CancellationToken.None);

    result.Error.ShouldNotBeNull();
    result.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer clustered handlers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
/// <summary>
/// Clustered consumer create proposes to the meta group, adding the consumer to the
/// stream's assignment map.
/// Go reference: jetstream_cluster.go:8100 jsClusteredConsumerRequest.
/// </summary>
[Fact]
public async Task Consumer_clustered_create_proposes_to_meta()
{
    var meta = new JetStreamMetaGroup(nodes: 1);

    // The consumer needs a parent stream assignment to attach to.
    var streamBody = Encoding.UTF8.GetBytes("""{"name":"ORDERS","subjects":["orders.>"]}""");
    await StreamApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.STREAM.CREATE.ORDERS", streamBody, meta, CancellationToken.None);

    var result = await ConsumerApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.CONSUMER.CREATE.ORDERS.MON",
        Encoding.UTF8.GetBytes("""{"durable_name":"MON","filter_subject":"orders.created"}"""),
        meta,
        CancellationToken.None);

    result.Error.ShouldBeNull();
    result.Success.ShouldBeTrue();
    meta.GetConsumerAssignment("ORDERS", "MON").ShouldNotBeNull();
}
|
||||
|
||||
/// <summary>
/// Creating a consumer on a stream that does not exist in the meta group returns an error.
/// Go reference: jetstream_cluster.go — consumer proposal validates stream existence.
/// </summary>
[Fact]
public async Task Consumer_clustered_create_returns_error_for_missing_stream()
{
    var meta = new JetStreamMetaGroup(nodes: 1);

    // Target stream GHOST was never assigned, so the proposal must be rejected.
    var result = await ConsumerApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.CONSUMER.CREATE.GHOST.MON",
        Encoding.UTF8.GetBytes("""{"durable_name":"MON","filter_subject":"orders.created"}"""),
        meta,
        CancellationToken.None);

    result.Error.ShouldNotBeNull();
    result.Error!.Description.ShouldContain("GHOST");
}
|
||||
|
||||
/// <summary>
/// Clustered consumer delete removes the consumer from the stream assignment.
/// Go reference: jetstream_cluster.go processConsumerRemoval via meta leader.
/// </summary>
[Fact]
public async Task Consumer_clustered_delete_removes_consumer()
{
    var meta = new JetStreamMetaGroup(nodes: 1);

    // Arrange: parent stream plus the consumer we will delete.
    await StreamApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.STREAM.CREATE.ORDERS",
        Encoding.UTF8.GetBytes("""{"name":"ORDERS","subjects":["orders.>"]}"""),
        meta,
        CancellationToken.None);
    await ConsumerApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.CONSUMER.CREATE.ORDERS.MON",
        Encoding.UTF8.GetBytes("""{"durable_name":"MON"}"""),
        meta,
        CancellationToken.None);
    meta.GetConsumerAssignment("ORDERS", "MON").ShouldNotBeNull();

    // Act.
    var result = await ConsumerApiHandlers.HandleClusteredDeleteAsync(
        "$JS.API.CONSUMER.DELETE.ORDERS.MON", meta, CancellationToken.None);

    // Assert: the consumer assignment no longer exists.
    result.Error.ShouldBeNull();
    result.Success.ShouldBeTrue();
    meta.GetConsumerAssignment("ORDERS", "MON").ShouldBeNull();
}
|
||||
|
||||
/// <summary>
/// Deleting a non-existent consumer returns a 404 not-found error.
/// Go reference: jetstream_cluster.go — consumer delete validates existence.
/// </summary>
[Fact]
public async Task Consumer_clustered_delete_returns_not_found_for_missing()
{
    var meta = new JetStreamMetaGroup(nodes: 1);

    // The stream exists, but no consumer named GHOST does.
    await StreamApiHandlers.HandleClusteredCreateAsync(
        "$JS.API.STREAM.CREATE.ORDERS",
        Encoding.UTF8.GetBytes("""{"name":"ORDERS","subjects":["orders.>"]}"""),
        meta,
        CancellationToken.None);

    var result = await ConsumerApiHandlers.HandleClusteredDeleteAsync(
        "$JS.API.CONSUMER.DELETE.ORDERS.GHOST", meta, CancellationToken.None);

    result.Error.ShouldNotBeNull();
    result.Error!.Code.ShouldBe(404);
}
|
||||
}
|
||||
@@ -0,0 +1,303 @@
|
||||
// Go reference: jetstream_cluster.go:7620-7701 — jsClusteredStreamRequest lifecycle:
|
||||
// propose to meta RAFT → wait for result → deliver or time out.
|
||||
// ClusteredRequestProcessor tracks pending requests and delivers results when RAFT entries
|
||||
// are applied, matching the Go server's callback-based completion mechanism.
|
||||
|
||||
using NATS.Server.JetStream.Api;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
public class ClusteredRequestTests
{
    // ---------------------------------------------------------------
    // RegisterPending
    // ---------------------------------------------------------------

    /// <summary>
    /// Each call to RegisterPending returns a distinct, non-empty string identifier.
    /// Go reference: jetstream_cluster.go:7620 — each clustered request gets a unique ID
    /// used to correlate the RAFT apply callback with the waiting caller.
    /// </summary>
    [Fact]
    public void RegisterPending_returns_unique_id()
    {
        var processor = new ClusteredRequestProcessor();

        var id1 = processor.RegisterPending();
        var id2 = processor.RegisterPending();

        id1.ShouldNotBeNullOrWhiteSpace();
        id2.ShouldNotBeNullOrWhiteSpace();
        id1.ShouldNotBe(id2);
    }

    // ---------------------------------------------------------------
    // WaitForResult
    // ---------------------------------------------------------------

    /// <summary>
    /// When a result is delivered for a pending request, WaitForResultAsync returns that response.
    /// Go reference: jetstream_cluster.go:7620 — the waiting goroutine receives the result
    /// via channel once the RAFT leader applies the entry.
    /// </summary>
    [Fact]
    public async Task WaitForResult_returns_delivered_response()
    {
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromSeconds(5));
        var requestId = processor.RegisterPending();
        var expected = JetStreamApiResponse.SuccessResponse();

        // The processor correlates delivery with the waiter by request ID, so the
        // relative ordering of WaitForResultAsync and DeliverResult does not matter;
        // no extra synchronization machinery is needed to make this deterministic.
        var waitTask = processor.WaitForResultAsync(requestId);
        processor.DeliverResult(requestId, expected);
        var result = await waitTask;

        result.ShouldBeSameAs(expected);
    }

    /// <summary>
    /// When no result is delivered within the timeout, WaitForResultAsync returns a 408 error.
    /// Go reference: jetstream_cluster.go:7620 — if the RAFT group does not respond in time,
    /// the request is considered timed out and an error is returned to the client.
    /// </summary>
    [Fact]
    public async Task WaitForResult_times_out_after_timeout()
    {
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromMilliseconds(50));
        var requestId = processor.RegisterPending();

        // Nothing is ever delivered for this ID.
        var result = await processor.WaitForResultAsync(requestId);

        result.Error.ShouldNotBeNull();
        result.Error!.Code.ShouldBe(408);
        result.Error.Description.ShouldContain("timeout");
    }

    /// <summary>
    /// WaitForResultAsync returns a 500 error for an ID that was never registered.
    /// Go reference: jetstream_cluster.go — requesting a result for an unknown request ID
    /// is a programming error; return an internal server error.
    /// </summary>
    [Fact]
    public async Task WaitForResult_returns_error_for_unknown_id()
    {
        var processor = new ClusteredRequestProcessor();

        var result = await processor.WaitForResultAsync("nonexistent-id");

        result.Error.ShouldNotBeNull();
        result.Error!.Code.ShouldBe(500);
        result.Error.Description.ShouldContain("not found");
    }

    /// <summary>
    /// When the caller's CancellationToken is triggered, WaitForResultAsync returns a timeout error.
    /// Go reference: jetstream_cluster.go:7620 — callers can cancel waiting for a RAFT result
    /// if their own request context is cancelled.
    /// </summary>
    [Fact]
    public async Task WaitForResult_respects_cancellation_token()
    {
        // The processor timeout (30s) is far longer than the token (50ms), so the token
        // must be what unblocks the wait.
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromSeconds(30));
        var requestId = processor.RegisterPending();

        using var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(50));
        var result = await processor.WaitForResultAsync(requestId, cts.Token);

        result.Error.ShouldNotBeNull();
        result.Error!.Code.ShouldBe(408);
    }

    // ---------------------------------------------------------------
    // DeliverResult
    // ---------------------------------------------------------------

    /// <summary>
    /// DeliverResult returns true when the request ID is known and pending.
    /// Go reference: jetstream_cluster.go:7620 — the RAFT apply callback signals success
    /// by resolving the pending request.
    /// </summary>
    [Fact]
    public void DeliverResult_returns_true_for_pending_request()
    {
        var processor = new ClusteredRequestProcessor();
        var requestId = processor.RegisterPending();

        var delivered = processor.DeliverResult(requestId, JetStreamApiResponse.SuccessResponse());

        delivered.ShouldBeTrue();
    }

    /// <summary>
    /// DeliverResult returns false when the request ID is not found.
    /// Go reference: jetstream_cluster.go — delivering a result for an unknown or already-completed
    /// request is a no-op; return false so the caller knows the result was not consumed.
    /// </summary>
    [Fact]
    public void DeliverResult_returns_false_for_unknown_request()
    {
        var processor = new ClusteredRequestProcessor();

        var delivered = processor.DeliverResult("unknown-id", JetStreamApiResponse.SuccessResponse());

        delivered.ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // PendingCount
    // ---------------------------------------------------------------

    /// <summary>
    /// PendingCount increases with each RegisterPending call and decreases when a result is
    /// delivered or the request times out.
    /// Go reference: jetstream_cluster.go — the server tracks pending RAFT proposals for
    /// observability and to detect stuck requests.
    /// </summary>
    [Fact]
    public async Task PendingCount_tracks_active_requests()
    {
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromMilliseconds(50));

        processor.PendingCount.ShouldBe(0);

        var id1 = processor.RegisterPending();
        processor.PendingCount.ShouldBe(1);

        var id2 = processor.RegisterPending();
        processor.PendingCount.ShouldBe(2);

        // Delivering removes a request from the pending set.
        processor.DeliverResult(id1, JetStreamApiResponse.SuccessResponse());
        processor.PendingCount.ShouldBe(1);

        // A timed-out wait removes the last one.
        await processor.WaitForResultAsync(id2);
        processor.PendingCount.ShouldBe(0);
    }

    // ---------------------------------------------------------------
    // CancelAll
    // ---------------------------------------------------------------

    /// <summary>
    /// CancelAll completes all pending requests with a 503 error response.
    /// Go reference: jetstream_cluster.go — when this node loses RAFT leadership, all
    /// in-flight proposals must be failed so callers do not hang indefinitely.
    /// </summary>
    [Fact]
    public async Task CancelAll_completes_all_pending_with_error()
    {
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromSeconds(30));

        var id1 = processor.RegisterPending();
        var id2 = processor.RegisterPending();

        var task1 = processor.WaitForResultAsync(id1);
        var task2 = processor.WaitForResultAsync(id2);

        processor.CancelAll("leadership changed");

        var result1 = await task1;
        var result2 = await task2;

        result1.Error.ShouldNotBeNull();
        result1.Error!.Code.ShouldBe(503);
        result1.Error.Description.ShouldContain("leadership changed");

        result2.Error.ShouldNotBeNull();
        result2.Error!.Code.ShouldBe(503);
        result2.Error.Description.ShouldContain("leadership changed");
    }

    /// <summary>
    /// After CancelAll, PendingCount drops to zero.
    /// Go reference: jetstream_cluster.go — a leadership change clears all pending state.
    /// </summary>
    [Fact]
    public void CancelAll_clears_pending_count()
    {
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromSeconds(30));

        processor.RegisterPending();
        processor.RegisterPending();
        processor.RegisterPending();

        processor.PendingCount.ShouldBe(3);

        processor.CancelAll();

        processor.PendingCount.ShouldBe(0);
    }

    /// <summary>
    /// CancelAll uses a default reason of "leadership changed" when no reason is provided.
    /// Go reference: jetstream_cluster.go — default cancellation reason matches NATS cluster semantics.
    /// </summary>
    [Fact]
    public async Task CancelAll_uses_default_reason()
    {
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromSeconds(30));

        var id = processor.RegisterPending();
        var task = processor.WaitForResultAsync(id);

        processor.CancelAll(); // no reason argument

        var result = await task;

        result.Error.ShouldNotBeNull();
        result.Error!.Description.ShouldContain("leadership changed");
    }

    // ---------------------------------------------------------------
    // Concurrency
    // ---------------------------------------------------------------

    /// <summary>
    /// Concurrent registrations and deliveries all receive the correct response.
    /// Go reference: jetstream_cluster.go — in a cluster, many API requests may be in-flight
    /// simultaneously, each waiting for its own RAFT entry to be applied.
    /// </summary>
    [Fact]
    public async Task Concurrent_register_and_deliver()
    {
        const int count = 50;
        var processor = new ClusteredRequestProcessor(timeout: TimeSpan.FromSeconds(10));

        var requestIds = new string[count];
        for (var i = 0; i < count; i++)
            requestIds[i] = processor.RegisterPending();

        // Start all waits concurrently; waitTasks[i] corresponds to requestIds[i].
        var waitTasks = requestIds.Select(id => processor.WaitForResultAsync(id)).ToArray();

        // Deliver all results concurrently — no delay needed; the ThreadPool provides
        // sufficient interleaving to exercise concurrent access patterns.
        var deliverTasks = requestIds.Select((id, i) => Task.Run(() =>
        {
            processor.DeliverResult(id, JetStreamApiResponse.ErrorResponse(200 + i, $"response-{i}"));
        })).ToArray();

        await Task.WhenAll(deliverTasks);
        var results = await Task.WhenAll(waitTasks);

        // Each waiter must receive exactly the response delivered for ITS request ID,
        // not merely some delivered response. Task.WhenAll preserves input order, so
        // results[i] pairs with requestIds[i], which was delivered code 200+i.
        results.Length.ShouldBe(count);
        for (var i = 0; i < count; i++)
        {
            results[i].Error.ShouldNotBeNull();
            results[i].Error!.Code.ShouldBe(200 + i);
            results[i].Error.Description.ShouldBe($"response-{i}");
        }
    }
}
|
||||
@@ -0,0 +1,182 @@
|
||||
// Go reference: server/consumer.go — pauseConsumer / resumeConsumer / isPaused
|
||||
// Tests for the consumer pause/resume API endpoint, including pause_until (RFC3339)
|
||||
// time-bounded pauses and response body containing pause state.
|
||||
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
public class ConsumerPauseApiTests : IAsyncLifetime
{
    // Pause endpoint for the MON consumer on the ORDERS stream, used by most tests below.
    private const string MonPauseSubject = "$JS.API.CONSUMER.PAUSE.ORDERS.MON";

    private JetStreamApiFixture _fixture = null!;

    /// <summary>
    /// Starts a server fixture with an ORDERS stream (orders.*) and a MON consumer
    /// filtered to orders.created.
    /// </summary>
    public async Task InitializeAsync()
    {
        _fixture = await JetStreamApiFixture.StartWithStreamAsync("ORDERS", "orders.*");
        _ = await _fixture.CreateConsumerAsync("ORDERS", "MON", "orders.created");
    }

    public async Task DisposeAsync() => await _fixture.DisposeAsync();

    // Builds a {"pause_until":"<RFC3339>"} request body from a UTC deadline.
    private static string PauseUntilBody(DateTime deadline) =>
        $"{{\"pause_until\":\"{deadline.ToString("O")}\"}}";

    // Go ref: consumer.go pauseConsumer — pause=true pauses consumer.
    [Fact]
    public async Task HandlePause_with_pause_true_pauses_consumer()
    {
        var resp = await _fixture.RequestLocalAsync(MonPauseSubject, """{"pause":true}""");

        resp.Error.ShouldBeNull();
        resp.Success.ShouldBeTrue();
        resp.Paused.ShouldBe(true);
    }

    // Go ref: consumer.go resumeConsumer — pause=false resumes consumer.
    [Fact]
    public async Task HandlePause_with_pause_false_resumes_consumer()
    {
        // Pause, then flip back with an explicit pause=false.
        await _fixture.RequestLocalAsync(MonPauseSubject, """{"pause":true}""");
        var resp = await _fixture.RequestLocalAsync(MonPauseSubject, """{"pause":false}""");

        resp.Error.ShouldBeNull();
        resp.Success.ShouldBeTrue();
        resp.Paused.ShouldBe(false);
    }

    // Go ref: consumer.go pauseConsumer — pause_until sets deadline UTC datetime.
    [Fact]
    public async Task HandlePause_with_pause_until_sets_deadline()
    {
        var deadline = DateTime.UtcNow.AddHours(1);

        var resp = await _fixture.RequestLocalAsync(MonPauseSubject, PauseUntilBody(deadline));

        resp.Error.ShouldBeNull();
        resp.PauseUntil.ShouldNotBeNull();
        resp.PauseUntil!.Value.Should_Be_Close_To_Utc(deadline, tolerance: TimeSpan.FromSeconds(2));
    }

    // Go ref: consumer.go pauseConsumer — pause_until implies pause=true.
    [Fact]
    public async Task HandlePause_with_pause_until_implies_pause_true()
    {
        var deadline = DateTime.UtcNow.AddHours(1);

        var resp = await _fixture.RequestLocalAsync(MonPauseSubject, PauseUntilBody(deadline));

        resp.Error.ShouldBeNull();
        resp.Paused.ShouldBe(true);
    }

    // Go ref: consumer.go isPaused — response includes current pause state.
    [Fact]
    public async Task HandlePause_returns_pause_state_in_response()
    {
        var pauseResp = await _fixture.RequestLocalAsync(MonPauseSubject, """{"pause":true}""");
        pauseResp.Paused.ShouldBe(true);

        var resumeResp = await _fixture.RequestLocalAsync(MonPauseSubject, """{"pause":false}""");
        resumeResp.Paused.ShouldBe(false);
    }

    // Go ref: consumer.go pauseUntil — response includes pause_until when set.
    [Fact]
    public async Task HandlePause_returns_pause_until_in_response()
    {
        var deadline = DateTime.UtcNow.AddMinutes(30);

        var resp = await _fixture.RequestLocalAsync(MonPauseSubject, PauseUntilBody(deadline));

        resp.PauseUntil.ShouldNotBeNull();
        resp.PauseUntil!.Value.Kind.ShouldBe(DateTimeKind.Utc);
    }

    // Go ref: consumer.go pauseConsumer — 404 when consumer not found.
    [Fact]
    public async Task HandlePause_returns_not_found_for_missing_consumer()
    {
        var resp = await _fixture.RequestLocalAsync(
            "$JS.API.CONSUMER.PAUSE.ORDERS.NONEXISTENT", """{"pause":true}""");

        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // Go ref: consumer.go resumeConsumer — empty payload resumes consumer.
    [Fact]
    public async Task HandlePause_with_empty_payload_resumes()
    {
        await _fixture.RequestLocalAsync(MonPauseSubject, """{"pause":true}""");

        // An empty request body is interpreted as a resume.
        var resp = await _fixture.RequestLocalAsync(MonPauseSubject, "");

        resp.Error.ShouldBeNull();
        resp.Success.ShouldBeTrue();
        resp.Paused.ShouldBe(false);
    }

    // Go ref: consumer.go pauseConsumer — past pause_until auto-resumes immediately.
    [Fact]
    public async Task HandlePause_with_past_pause_until_auto_resumes()
    {
        var deadline = DateTime.UtcNow.AddHours(-1);

        var resp = await _fixture.RequestLocalAsync(MonPauseSubject, PauseUntilBody(deadline));

        // The deadline already passed, so the consumer reports not paused.
        resp.Error.ShouldBeNull();
        resp.Success.ShouldBeTrue();
        resp.Paused.ShouldBe(false);
    }

    // Go ref: jsConsumerPauseT — bad subject (not matching stream.consumer pattern) returns 404.
    [Fact]
    public async Task HandlePause_returns_not_found_for_bad_subject()
    {
        var resp = await _fixture.RequestLocalAsync(
            "$JS.API.CONSUMER.PAUSE.ONLY_ONE_TOKEN", """{"pause":true}""");

        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }
}
|
||||
|
||||
/// <summary>
/// Shouldly-compatible extension for DateTime proximity assertions.
/// </summary>
internal static class DateTimeAssertExtensions
{
    /// <summary>
    /// Asserts that two instants are within <paramref name="tolerance"/> of each other,
    /// comparing both values in UTC so mixed-kind inputs are handled consistently.
    /// </summary>
    public static void Should_Be_Close_To_Utc(this DateTime actual, DateTime expected, TimeSpan tolerance)
    {
        var delta = actual.ToUniversalTime() - expected.ToUniversalTime();
        if (delta < TimeSpan.Zero)
        {
            delta = delta.Negate(); // absolute difference, direction does not matter
        }
        delta.ShouldBeLessThanOrEqualTo(tolerance);
    }
}
|
||||
@@ -0,0 +1,108 @@
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Validation;
|
||||
using NATS.Server.JetStream;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
public class JetStreamApiLimitsParityBatch1Tests
{
    /// <summary>API limit constants must mirror the Go server's values exactly.</summary>
    [Fact]
    public void Constants_match_go_reference_values()
    {
        JetStreamApiLimits.JSMaxNameLen.ShouldBe(255);
        JetStreamApiLimits.JSMaxDescriptionLen.ShouldBe(4_096);
        JetStreamApiLimits.JSMaxMetadataLen.ShouldBe(128 * 1024);
        JetStreamApiLimits.JSDefaultRequestQueueLimit.ShouldBe(10_000);
    }

    /// <summary>Name validation: non-empty, no spaces, no wildcard tokens.</summary>
    [Theory]
    [InlineData(null, false)]
    [InlineData("", false)]
    [InlineData(" ", false)]
    [InlineData("ORDERS", true)]
    [InlineData("ORD ERS", false)]
    [InlineData("ORDERS.*", false)]
    [InlineData("ORDERS.>", false)]
    public void IsValidName_enforces_expected_rules(string? name, bool expected)
    {
        var actual = JetStreamConfigValidator.IsValidName(name);

        actual.ShouldBe(expected);
    }

    [Fact]
    public void Stream_create_rejects_name_over_max_length()
    {
        var streams = new StreamManager();

        // One character past the limit must be rejected.
        var result = streams.CreateOrUpdate(new StreamConfig
        {
            Name = new string('S', JetStreamApiLimits.JSMaxNameLen + 1),
            Subjects = ["a"],
        });

        result.Error.ShouldNotBeNull();
        result.Error!.Description.ShouldBe("invalid stream name");
    }

    [Fact]
    public void Stream_create_rejects_description_over_max_bytes()
    {
        var streams = new StreamManager();

        var result = streams.CreateOrUpdate(new StreamConfig
        {
            Name = "LIMITDESC",
            Subjects = ["a"],
            Description = new string('d', JetStreamApiLimits.JSMaxDescriptionLen + 1),
        });

        result.Error.ShouldNotBeNull();
        result.Error!.Description.ShouldBe("stream description is too long");
    }

    [Fact]
    public void Stream_create_rejects_metadata_over_max_bytes()
    {
        var streams = new StreamManager();

        // The value alone hits the metadata cap; adding the key pushes it over.
        var result = streams.CreateOrUpdate(new StreamConfig
        {
            Name = "LIMITMETA",
            Subjects = ["a"],
            Metadata = new Dictionary<string, string>
            {
                { "k", new string('m', JetStreamApiLimits.JSMaxMetadataLen) },
            },
        });

        result.Error.ShouldNotBeNull();
        result.Error!.Description.ShouldBe("stream metadata exceeds maximum size");
    }

    [Fact]
    public void Consumer_create_rejects_durable_name_over_max_length()
    {
        var consumers = new ConsumerManager();

        var result = consumers.CreateOrUpdate("S", new ConsumerConfig
        {
            DurableName = new string('C', JetStreamApiLimits.JSMaxNameLen + 1),
        });

        result.Error.ShouldNotBeNull();
        result.Error!.Description.ShouldBe("invalid durable name");
    }

    [Fact]
    public void Consumer_create_rejects_metadata_over_max_bytes()
    {
        var consumers = new ConsumerManager();

        var result = consumers.CreateOrUpdate("S", new ConsumerConfig
        {
            DurableName = "C1",
            Metadata = new Dictionary<string, string>
            {
                { "k", new string('m', JetStreamApiLimits.JSMaxMetadataLen) },
            },
        });

        result.Error.ShouldNotBeNull();
        result.Error!.Description.ShouldBe("consumer metadata exceeds maximum size");
    }
}
|
||||
@@ -0,0 +1,412 @@
|
||||
// Go reference: jetstream_api.go:200-300 — API requests at non-leader nodes must be
|
||||
// forwarded to the current leader. Mutating operations return a not-leader error with
|
||||
// a leader_hint field; read-only operations are handled locally on any node.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
/// <summary>
/// Simple test double for ILeaderForwarder.
/// Returns a predetermined response or null depending on the constructor.
/// </summary>
file sealed class StubForwarder : ILeaderForwarder
{
    private readonly JetStreamApiResponse? _response;

    public StubForwarder(JetStreamApiResponse? response) => _response = response;

    // Call-recording properties so tests can assert on what was forwarded.
    public int CallCount { get; private set; }
    public string? LastSubject { get; private set; }
    public ReadOnlyMemory<byte> LastPayload { get; private set; }
    public string? LastLeaderName { get; private set; }

    public Task<JetStreamApiResponse?> ForwardAsync(
        string subject, ReadOnlyMemory<byte> payload, string leaderName, CancellationToken ct)
    {
        // Record every argument, then hand back the canned response.
        CallCount++;
        LastSubject = subject;
        LastPayload = payload;
        LastLeaderName = leaderName;
        return Task.FromResult(_response);
    }
}
|
||||
|
||||
/// <summary>
/// Test double that throws OperationCanceledException to simulate a timeout.
/// </summary>
file sealed class TimeoutForwarder : ILeaderForwarder
{
    public Task<JetStreamApiResponse?> ForwardAsync(
        string subject, ReadOnlyMemory<byte> payload, string leaderName, CancellationToken ct)
    {
        // Task.FromException yields a Faulted task (an async throw of OCE would be
        // Canceled instead), matching how a cancelled forward call surfaces to callers.
        var timeout = new OperationCanceledException("simulated timeout");
        return Task.FromException<JetStreamApiResponse?>(timeout);
    }
}
|
||||
|
||||
public class LeaderForwardingTests
|
||||
{
|
||||
/// <summary>
/// When this node IS the leader, mutating requests are handled locally.
/// Go reference: jetstream_api.go — leader handles requests directly.
/// </summary>
[Fact]
public void Route_WhenLeader_HandlesLocally()
{
    // selfIndex=1 equals the default leaderIndex, so this node leads the meta group.
    var meta = new JetStreamMetaGroup(nodes: 3, selfIndex: 1);
    var router = new JetStreamApiRouter(new StreamManager(meta), new ConsumerManager(), meta);

    // Create a stream first so the delete below has something to operate on.
    var created = router.Route(
        "$JS.API.STREAM.CREATE.TEST",
        Encoding.UTF8.GetBytes("""{"name":"TEST","subjects":["test.>"]}"""));
    created.Error.ShouldBeNull();
    created.StreamInfo.ShouldNotBeNull();

    // A mutating operation (delete) succeeds locally on the leader.
    var deleted = router.Route("$JS.API.STREAM.DELETE.TEST", ReadOnlySpan<byte>.Empty);
    deleted.Error.ShouldBeNull();
    deleted.Success.ShouldBeTrue();
}
|
||||
|
||||
/// <summary>
/// When this node is NOT the leader, mutating operations return a not-leader error
/// with the current leader's identifier in the leader_hint field.
/// Go reference: jetstream_api.go:200-300 — not-leader response.
/// </summary>
[Fact]
public void Route_WhenNotLeader_MutatingOp_ReturnsNotLeaderError()
{
    // leaderIndex defaults to 1; selfIndex=2 makes this node a follower.
    var meta = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
    var router = new JetStreamApiRouter(new StreamManager(meta), new ConsumerManager(), meta);

    var result = router.Route(
        "$JS.API.STREAM.CREATE.TEST",
        Encoding.UTF8.GetBytes("""{"name":"TEST","subjects":["test.>"]}"""));

    result.Error.ShouldNotBeNull();
    result.Error!.Code.ShouldBe(10003);
    result.Error.Description.ShouldBe("not leader");
    result.Error.LeaderHint.ShouldNotBeNull();
    result.Error.LeaderHint.ShouldBe("meta-1");
}
|
||||
|
||||
/// <summary>
|
||||
/// Read-only operations (INFO, NAMES, LIST) are handled locally even when
|
||||
/// this node is not the leader.
|
||||
/// Go reference: jetstream_api.go — read operations do not require leadership.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public void Route_WhenNotLeader_ReadOp_HandlesLocally()
|
||||
{
|
||||
// selfIndex=2, leaderIndex defaults to 1 — this node is NOT the leader.
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup);
|
||||
|
||||
// $JS.API.INFO is a read-only operation.
|
||||
var infoResult = router.Route("$JS.API.INFO", ReadOnlySpan<byte>.Empty);
|
||||
infoResult.Error.ShouldBeNull();
|
||||
|
||||
// $JS.API.STREAM.NAMES is a read-only operation.
|
||||
var namesResult = router.Route("$JS.API.STREAM.NAMES", ReadOnlySpan<byte>.Empty);
|
||||
namesResult.Error.ShouldBeNull();
|
||||
namesResult.StreamNames.ShouldNotBeNull();
|
||||
|
||||
// $JS.API.STREAM.LIST is a read-only operation.
|
||||
var listResult = router.Route("$JS.API.STREAM.LIST", ReadOnlySpan<byte>.Empty);
|
||||
listResult.Error.ShouldBeNull();
|
||||
listResult.StreamNames.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// When there is no meta-group (single-server mode), all operations are handled
|
||||
/// locally regardless of the subject type.
|
||||
/// Go reference: jetstream_api.go — standalone servers have no meta-group.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public void Route_NoMetaGroup_HandlesLocally()
|
||||
{
|
||||
// No meta-group — single server mode.
|
||||
var streamManager = new StreamManager();
|
||||
var consumerManager = new ConsumerManager();
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup: null);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"TEST","subjects":["test.>"]}""");
|
||||
var result = router.Route("$JS.API.STREAM.CREATE.TEST", payload);
|
||||
|
||||
// Should succeed — no leader check in single-server mode.
|
||||
result.Error.ShouldBeNull();
|
||||
result.StreamInfo.ShouldNotBeNull();
|
||||
result.StreamInfo!.Config.Name.ShouldBe("TEST");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// IsLeaderRequired returns true for Create, Update, Delete, and Purge operations.
|
||||
/// Go reference: jetstream_api.go:200-300 — mutating operations require leader.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public void IsLeaderRequired_CreateUpdate_ReturnsTrue()
|
||||
{
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.CREATE.TEST").ShouldBeTrue();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.UPDATE.TEST").ShouldBeTrue();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.DELETE.TEST").ShouldBeTrue();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.PURGE.TEST").ShouldBeTrue();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.RESTORE.TEST").ShouldBeTrue();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.MSG.DELETE.TEST").ShouldBeTrue();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.CONSUMER.CREATE.STREAM.CON").ShouldBeTrue();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.CONSUMER.DELETE.STREAM.CON").ShouldBeTrue();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// IsLeaderRequired returns false for Info, Names, List, and other read operations.
|
||||
/// Go reference: jetstream_api.go — read-only operations do not need leadership.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public void IsLeaderRequired_InfoList_ReturnsFalse()
|
||||
{
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.INFO").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.INFO.TEST").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.NAMES").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.LIST").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.MSG.GET.TEST").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.STREAM.SNAPSHOT.TEST").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.CONSUMER.INFO.STREAM.CON").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.CONSUMER.NAMES.STREAM").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.CONSUMER.LIST.STREAM").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.CONSUMER.MSG.NEXT.STREAM.CON").ShouldBeFalse();
|
||||
JetStreamApiRouter.IsLeaderRequired("$JS.API.DIRECT.GET.TEST").ShouldBeFalse();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// New tests for Task 19: Leader Forwarding (Gap 7.1)
|
||||
// Go reference: jetstream_api.go:200-300 — jsClusteredStreamXxxRequest helpers.
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
/// <summary>
|
||||
/// When not leader and a forwarder is provided, RouteAsync calls forward for mutating ops.
|
||||
/// Go reference: jetstream_api.go — non-leader nodes forward mutating ops to the leader.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Route_forwards_mutating_request_when_not_leader()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarded = JetStreamApiResponse.SuccessResponse();
|
||||
var forwarder = new StubForwarder(forwarded);
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"FWD","subjects":["fwd.>"]}""");
|
||||
var result = await router.RouteAsync("$JS.API.STREAM.CREATE.FWD", payload.AsMemory());
|
||||
|
||||
forwarder.CallCount.ShouldBe(1);
|
||||
forwarder.LastSubject.ShouldBe("$JS.API.STREAM.CREATE.FWD");
|
||||
forwarder.LastLeaderName.ShouldBe("meta-1");
|
||||
result.ShouldBeSameAs(forwarded);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// When not leader and no forwarder is provided, RouteAsync returns a NotLeader error.
|
||||
/// Go reference: jetstream_api.go — fallback to not-leader error when no forwarder.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Route_returns_not_leader_when_no_forwarder()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder: null);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"TEST","subjects":["test.>"]}""");
|
||||
var result = await router.RouteAsync("$JS.API.STREAM.CREATE.TEST", payload.AsMemory());
|
||||
|
||||
result.Error.ShouldNotBeNull();
|
||||
result.Error!.Code.ShouldBe(10003);
|
||||
result.Error.Description.ShouldBe("not leader");
|
||||
result.Error.LeaderHint.ShouldBe("meta-1");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Read-only operations are handled locally even when not leader and a forwarder is set.
|
||||
/// Go reference: jetstream_api.go — read ops do not require leadership, never forwarded.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Route_does_not_forward_read_only_requests()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarder = new StubForwarder(JetStreamApiResponse.SuccessResponse());
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder);
|
||||
|
||||
// $JS.API.INFO — read only
|
||||
var infoResult = await router.RouteAsync("$JS.API.INFO", ReadOnlyMemory<byte>.Empty);
|
||||
infoResult.Error.ShouldBeNull();
|
||||
|
||||
// $JS.API.STREAM.NAMES — read only
|
||||
var namesResult = await router.RouteAsync("$JS.API.STREAM.NAMES", ReadOnlyMemory<byte>.Empty);
|
||||
namesResult.Error.ShouldBeNull();
|
||||
|
||||
// Forwarder should never have been called for read-only subjects.
|
||||
forwarder.CallCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// When the forwarder returns null, RouteAsync falls back to a NotLeader response.
|
||||
/// Go reference: jetstream_api.go — null forward result means forwarding unavailable.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Route_handles_forward_returning_null_gracefully()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarder = new StubForwarder(null); // returns null
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"TEST","subjects":["test.>"]}""");
|
||||
var result = await router.RouteAsync("$JS.API.STREAM.CREATE.TEST", payload.AsMemory());
|
||||
|
||||
forwarder.CallCount.ShouldBe(1);
|
||||
result.Error.ShouldNotBeNull();
|
||||
result.Error!.Code.ShouldBe(10003);
|
||||
result.Error.Description.ShouldBe("not leader");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// When the forwarder throws OperationCanceledException (timeout), RouteAsync falls back to NotLeader.
|
||||
/// Go reference: jetstream_api.go — timeout/cancellation during forwarding falls back gracefully.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Route_handles_forward_timeout()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarder = new TimeoutForwarder();
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"TEST","subjects":["test.>"]}""");
|
||||
var result = await router.RouteAsync("$JS.API.STREAM.CREATE.TEST", payload.AsMemory());
|
||||
|
||||
result.Error.ShouldNotBeNull();
|
||||
result.Error!.Code.ShouldBe(10003);
|
||||
result.Error.Description.ShouldBe("not leader");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// ForwardedCount increments on each successful (non-null) forward result.
|
||||
/// Go reference: jetstream_api.go — monitoring/observability for forwarded requests.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task ForwardedCount_increments_on_successful_forward()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarder = new StubForwarder(JetStreamApiResponse.SuccessResponse());
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder);
|
||||
|
||||
router.ForwardedCount.ShouldBe(0);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"A","subjects":["a.>"]}""");
|
||||
await router.RouteAsync("$JS.API.STREAM.CREATE.A", payload.AsMemory());
|
||||
router.ForwardedCount.ShouldBe(1);
|
||||
|
||||
await router.RouteAsync("$JS.API.STREAM.DELETE.A", ReadOnlyMemory<byte>.Empty);
|
||||
router.ForwardedCount.ShouldBe(2);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// When this node is the leader, RouteAsync handles requests locally and does not call the forwarder.
|
||||
/// Go reference: jetstream_api.go — leader handles requests directly.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Route_processes_locally_when_leader()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 1); // IS leader
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarder = new StubForwarder(JetStreamApiResponse.SuccessResponse());
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"LOCAL","subjects":["local.>"]}""");
|
||||
var result = await router.RouteAsync("$JS.API.STREAM.CREATE.LOCAL", payload.AsMemory());
|
||||
|
||||
forwarder.CallCount.ShouldBe(0);
|
||||
result.Error.ShouldBeNull();
|
||||
result.StreamInfo.ShouldNotBeNull();
|
||||
result.StreamInfo!.Config.Name.ShouldBe("LOCAL");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// When no meta-group is configured (single-server), RouteAsync handles all requests locally.
|
||||
/// Go reference: jetstream_api.go — standalone servers have no meta-group.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Route_processes_locally_when_no_meta_group()
|
||||
{
|
||||
var streamManager = new StreamManager();
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarder = new StubForwarder(JetStreamApiResponse.SuccessResponse());
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup: null, forwarder);
|
||||
|
||||
var payload = Encoding.UTF8.GetBytes("""{"name":"SOLO","subjects":["solo.>"]}""");
|
||||
var result = await router.RouteAsync("$JS.API.STREAM.CREATE.SOLO", payload.AsMemory());
|
||||
|
||||
forwarder.CallCount.ShouldBe(0);
|
||||
result.Error.ShouldBeNull();
|
||||
result.StreamInfo.ShouldNotBeNull();
|
||||
result.StreamInfo!.Config.Name.ShouldBe("SOLO");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// RouteAsync passes the payload bytes verbatim to the forwarder.
|
||||
/// Go reference: jetstream_api.go — forwarded request includes the original payload.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task RouteAsync_forwards_to_leader_with_payload()
|
||||
{
|
||||
var metaGroup = new JetStreamMetaGroup(nodes: 3, selfIndex: 2);
|
||||
var streamManager = new StreamManager(metaGroup);
|
||||
var consumerManager = new ConsumerManager();
|
||||
var forwarded = JetStreamApiResponse.SuccessResponse();
|
||||
var forwarder = new StubForwarder(forwarded);
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, metaGroup, forwarder);
|
||||
|
||||
var payloadBytes = Encoding.UTF8.GetBytes("""{"name":"PAYLOAD","subjects":["p.>"]}""");
|
||||
await router.RouteAsync("$JS.API.STREAM.CREATE.PAYLOAD", payloadBytes.AsMemory());
|
||||
|
||||
forwarder.LastPayload.Length.ShouldBe(payloadBytes.Length);
|
||||
var receivedBytes = forwarder.LastPayload.ToArray();
|
||||
receivedBytes.ShouldBe(payloadBytes);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// DefaultLeaderForwarder accepts a custom timeout value.
|
||||
/// Go reference: jetstream_api.go — configurable forward timeout for slow leader responses.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public void Forward_timeout_configurable()
|
||||
{
|
||||
var customTimeout = TimeSpan.FromSeconds(10);
|
||||
var forwarder = new DefaultLeaderForwarder(customTimeout);
|
||||
|
||||
forwarder.Timeout.ShouldBe(customTimeout);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// DefaultLeaderForwarder uses a 5-second default timeout when none is provided.
|
||||
/// Go reference: jetstream_api.go — default forward timeout.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public void Forward_timeout_defaults_to_five_seconds()
|
||||
{
|
||||
var forwarder = new DefaultLeaderForwarder();
|
||||
|
||||
forwarder.Timeout.ShouldBe(TimeSpan.FromSeconds(5));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,317 @@
|
||||
// Go reference: server/jetstream_api.go — jsStreamSnapshotT and jsStreamRestoreT handlers.
|
||||
// Snapshot creates a serialized byte representation of stream state; restore re-applies it.
|
||||
// The async variants (HandleSnapshotAsync / HandleRestoreAsync) add stream name and chunk
|
||||
// metadata to the response and provide richer error codes compared to the sync stubs.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Api.Handlers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
public class SnapshotApiTests
{
    // ---------------------------------------------------------------
    // Helpers
    // ---------------------------------------------------------------

    /// <summary>
    /// Creates a StreamManager that holds a single stream bound to one subject pattern.
    /// </summary>
    private static StreamManager CreateManagerWithStream(string streamName, string subjectPattern)
    {
        var sm = new StreamManager();
        sm.CreateOrUpdate(new StreamConfig
        {
            Name = streamName,
            Subjects = [subjectPattern],
        });
        return sm;
    }

    /// <summary>
    /// Appends one UTF-8 message to the stream that owns <paramref name="subject"/>;
    /// fails the test if no stream matches.
    /// </summary>
    private static async Task AppendAsync(StreamManager sm, string subject, string payload)
    {
        var handle = sm.FindBySubject(subject);
        handle.ShouldNotBeNull();
        await handle!.Store.AppendAsync(subject, Encoding.UTF8.GetBytes(payload), default);
    }

    // ---------------------------------------------------------------
    // HandleSnapshot (sync, existing)
    // ---------------------------------------------------------------

    /// <summary>
    /// Go ref: jsStreamSnapshotT — snapshot of an existing stream returns a non-empty base64 payload.
    /// </summary>
    [Fact]
    public async Task HandleSnapshot_returns_base64_payload_for_existing_stream()
    {
        var sm = CreateManagerWithStream("ORDERS", "orders.>");
        await AppendAsync(sm, "orders.1", "hello");

        var response = StreamApiHandlers.HandleSnapshot("$JS.API.STREAM.SNAPSHOT.ORDERS", sm);

        response.Error.ShouldBeNull();
        response.Snapshot.ShouldNotBeNull();
        response.Snapshot!.Payload.ShouldNotBeNullOrEmpty();

        // Verify it is valid base64 — FromBase64String throws on malformed input.
        var bytes = Convert.FromBase64String(response.Snapshot.Payload);
        bytes.ShouldNotBeEmpty();
    }

    /// <summary>
    /// Go ref: jsStreamSnapshotT — snapshot of a non-existent stream returns 404.
    /// </summary>
    [Fact]
    public void HandleSnapshot_returns_not_found_for_missing_stream()
    {
        var sm = new StreamManager();

        var response = StreamApiHandlers.HandleSnapshot("$JS.API.STREAM.SNAPSHOT.MISSING", sm);

        response.Error.ShouldNotBeNull();
        response.Error!.Code.ShouldBe(404);
    }

    // ---------------------------------------------------------------
    // HandleRestore (sync, existing)
    // ---------------------------------------------------------------

    /// <summary>
    /// Go ref: jsStreamRestoreT — restore with a valid base64 snapshot payload succeeds.
    /// </summary>
    [Fact]
    public async Task HandleRestore_succeeds_with_valid_payload()
    {
        var sm = CreateManagerWithStream("ORDERS", "orders.>");
        await AppendAsync(sm, "orders.1", "msg1");

        // Obtain a snapshot first.
        var snapshotResponse = StreamApiHandlers.HandleSnapshot("$JS.API.STREAM.SNAPSHOT.ORDERS", sm);
        snapshotResponse.Snapshot.ShouldNotBeNull();
        var base64 = snapshotResponse.Snapshot!.Payload;

        // Restore back using the base64 bytes directly as the payload.
        var payloadBytes = Encoding.UTF8.GetBytes(base64);
        var response = StreamApiHandlers.HandleRestore(
            "$JS.API.STREAM.RESTORE.ORDERS",
            payloadBytes,
            sm);

        response.Error.ShouldBeNull();
        response.Success.ShouldBeTrue();
    }

    /// <summary>
    /// Go ref: jsStreamRestoreT — empty payload returns a 400 error.
    /// </summary>
    [Fact]
    public void HandleRestore_returns_error_for_empty_payload()
    {
        var sm = CreateManagerWithStream("ORDERS", "orders.>");

        var response = StreamApiHandlers.HandleRestore(
            "$JS.API.STREAM.RESTORE.ORDERS",
            ReadOnlySpan<byte>.Empty,
            sm);

        response.Error.ShouldNotBeNull();
        response.Error!.Code.ShouldBe(400);
    }

    /// <summary>
    /// Go ref: jsStreamRestoreT — bad subject token (no trailing stream name) returns 404.
    /// </summary>
    [Fact]
    public void HandleRestore_returns_not_found_for_bad_subject()
    {
        var sm = new StreamManager();
        // Subject without trailing token — ExtractTrailingToken returns null.
        var payload = Encoding.UTF8.GetBytes(Convert.ToBase64String([1, 2, 3]));

        var response = StreamApiHandlers.HandleRestore(
            "$JS.API.STREAM.RESTORE.",
            payload,
            sm);

        response.Error.ShouldNotBeNull();
        response.Error!.Code.ShouldBe(404);
    }

    // ---------------------------------------------------------------
    // HandleSnapshotAsync (new)
    // ---------------------------------------------------------------

    /// <summary>
    /// Go ref: jsStreamSnapshotT — async handler populates StreamName in the response.
    /// </summary>
    [Fact]
    public async Task HandleSnapshotAsync_includes_stream_name_in_response()
    {
        var sm = CreateManagerWithStream("EVENTS", "events.>");
        await AppendAsync(sm, "events.1", "data");

        var response = await StreamApiHandlers.HandleSnapshotAsync(
            "$JS.API.STREAM.SNAPSHOT.EVENTS",
            sm,
            CancellationToken.None);

        response.Error.ShouldBeNull();
        response.Snapshot.ShouldNotBeNull();
        response.Snapshot!.StreamName.ShouldBe("EVENTS");
    }

    /// <summary>
    /// Go ref: jsStreamSnapshotT — async handler sets NumChunks=1 and BlkSize equal to the
    /// length of the raw (pre-base64) snapshot bytes.
    /// </summary>
    [Fact]
    public async Task HandleSnapshotAsync_includes_chunk_metadata()
    {
        var sm = CreateManagerWithStream("EVENTS", "events.>");
        await AppendAsync(sm, "events.1", "payload-data");

        var response = await StreamApiHandlers.HandleSnapshotAsync(
            "$JS.API.STREAM.SNAPSHOT.EVENTS",
            sm,
            CancellationToken.None);

        response.Error.ShouldBeNull();
        var snap = response.Snapshot!;
        snap.NumChunks.ShouldBe(1);
        snap.BlkSize.ShouldBeGreaterThan(0);

        // BlkSize should match the raw snapshot byte count (decode the base64 to check).
        var rawBytes = Convert.FromBase64String(snap.Payload);
        snap.BlkSize.ShouldBe(rawBytes.Length);
    }

    /// <summary>
    /// HandleSnapshotAsync returns 404 when the stream does not exist.
    /// </summary>
    [Fact]
    public async Task HandleSnapshotAsync_returns_not_found_for_missing_stream()
    {
        var sm = new StreamManager();

        var response = await StreamApiHandlers.HandleSnapshotAsync(
            "$JS.API.STREAM.SNAPSHOT.NOPE",
            sm,
            CancellationToken.None);

        response.Error.ShouldNotBeNull();
        response.Error!.Code.ShouldBe(404);
    }

    // ---------------------------------------------------------------
    // HandleRestoreAsync (new)
    // ---------------------------------------------------------------

    /// <summary>
    /// Go ref: jsStreamRestoreT — async restore validates the base64 payload and succeeds.
    /// </summary>
    [Fact]
    public async Task HandleRestoreAsync_validates_base64_payload()
    {
        var sm = CreateManagerWithStream("ORDERS", "orders.>");
        await AppendAsync(sm, "orders.1", "hello");

        // Take a snapshot, then restore it using the async path.
        var snapResp = await StreamApiHandlers.HandleSnapshotAsync(
            "$JS.API.STREAM.SNAPSHOT.ORDERS",
            sm,
            CancellationToken.None);
        snapResp.Snapshot.ShouldNotBeNull();
        var base64Payload = Encoding.UTF8.GetBytes(snapResp.Snapshot!.Payload);

        var response = await StreamApiHandlers.HandleRestoreAsync(
            "$JS.API.STREAM.RESTORE.ORDERS",
            base64Payload,
            sm,
            CancellationToken.None);

        response.Error.ShouldBeNull();
        response.Success.ShouldBeTrue();
    }

    /// <summary>
    /// HandleRestoreAsync returns 400 when given an empty payload array.
    /// </summary>
    [Fact]
    public async Task HandleRestoreAsync_returns_error_for_empty_payload()
    {
        var sm = CreateManagerWithStream("ORDERS", "orders.>");

        var response = await StreamApiHandlers.HandleRestoreAsync(
            "$JS.API.STREAM.RESTORE.ORDERS",
            [],
            sm,
            CancellationToken.None);

        response.Error.ShouldNotBeNull();
        response.Error!.Code.ShouldBe(400);
    }

    // ---------------------------------------------------------------
    // Round-trip
    // ---------------------------------------------------------------

    /// <summary>
    /// Go ref: jsStreamSnapshotT / jsStreamRestoreT — full snapshot-then-restore round-trip:
    /// messages written before snapshot are recoverable after restore.
    /// </summary>
    [Fact]
    public async Task Snapshot_round_trip_create_and_restore()
    {
        var sm = CreateManagerWithStream("LOGS", "logs.>");
        await AppendAsync(sm, "logs.a", "alpha");
        await AppendAsync(sm, "logs.b", "beta");
        await AppendAsync(sm, "logs.c", "gamma");

        var stateBefore = await sm.GetStateAsync("LOGS", default);
        stateBefore.Messages.ShouldBe(3UL);

        // Snapshot via async handler.
        var snapResp = await StreamApiHandlers.HandleSnapshotAsync(
            "$JS.API.STREAM.SNAPSHOT.LOGS",
            sm,
            CancellationToken.None);
        snapResp.Error.ShouldBeNull();
        var base64Payload = Encoding.UTF8.GetBytes(snapResp.Snapshot!.Payload);

        // Restore via async handler.
        var restoreResp = await StreamApiHandlers.HandleRestoreAsync(
            "$JS.API.STREAM.RESTORE.LOGS",
            base64Payload,
            sm,
            CancellationToken.None);
        restoreResp.Error.ShouldBeNull();
        restoreResp.Success.ShouldBeTrue();

        // State should still be consistent (restore does not clear — it re-applies).
        var stateAfter = await sm.GetStateAsync("LOGS", default);
        stateAfter.Messages.ShouldBeGreaterThanOrEqualTo(3UL);
    }

    // ---------------------------------------------------------------
    // Subject extraction
    // ---------------------------------------------------------------

    /// <summary>
    /// Go ref: jsStreamSnapshotT — the stream name is correctly extracted from the API subject.
    /// NOTE(review): despite the "HandleSnapshot" name, this test exercises the async variant.
    /// </summary>
    [Fact]
    public async Task HandleSnapshot_extracts_stream_name_from_subject()
    {
        var sm = CreateManagerWithStream("MY_STREAM", "mystream.>");

        var response = await StreamApiHandlers.HandleSnapshotAsync(
            "$JS.API.STREAM.SNAPSHOT.MY_STREAM",
            sm,
            CancellationToken.None);

        response.Error.ShouldBeNull();
        response.Snapshot.ShouldNotBeNull();
        response.Snapshot!.StreamName.ShouldBe("MY_STREAM");
    }
}
|
||||
@@ -0,0 +1,193 @@
|
||||
// Go reference: jetstream_api.go:1200-1350 — stream purge supports options: subject filter,
|
||||
// sequence cutoff, and keep-last-N. Combinations like filter+keep allow keeping the last N
|
||||
// messages per matching subject.
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Api;
|
||||
|
||||
public class StreamPurgeOptionsTests
|
||||
{
|
||||
/// <summary>
/// Builds a router over a fresh StreamManager/ConsumerManager pair and creates one
/// stream on it with the given name and subject pattern. The backing StreamManager
/// is handed back through <paramref name="streamManager"/> so tests can publish
/// and inspect state directly.
/// </summary>
private static JetStreamApiRouter CreateRouterWithStream(string streamName, string subjectPattern, out StreamManager streamManager)
{
    streamManager = new StreamManager();
    var router = new JetStreamApiRouter(streamManager, new ConsumerManager());

    // Issue a STREAM.CREATE through the router itself so the stream exists
    // exactly as production callers would have created it.
    var createPayload = Encoding.UTF8.GetBytes($$$"""{"name":"{{{streamName}}}","subjects":["{{{subjectPattern}}}"]}""");
    var createResult = router.Route($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
    createResult.Error.ShouldBeNull();

    return router;
}
|
||||
|
||||
/// <summary>
/// Appends a single UTF-8 message to the stream that owns <paramref name="subject"/>.
/// Fails the test if no stream matches the subject.
/// </summary>
private static async Task PublishAsync(StreamManager streamManager, string subject, string payload)
{
    var stream = streamManager.FindBySubject(subject);
    stream.ShouldNotBeNull();
    // '!' because Shouldly's ShouldNotBeNull does not narrow the compiler's
    // null-state, so dereferencing without it raises CS8602. This matches the
    // AppendAsync helper in SnapshotApiTests, which uses handle!.Store.
    await stream!.Store.AppendAsync(subject, Encoding.UTF8.GetBytes(payload), default);
}
|
||||
|
||||
/// <summary>
/// Purge with no options removes all messages and reports how many were removed.
/// Go reference: jetstream_api.go — basic purge with empty request body.
/// </summary>
[Fact]
public async Task Purge_NoOptions_RemovesAll()
{
    var router = CreateRouterWithStream("TEST", "test.>", out var streamManager);

    await PublishAsync(streamManager, "test.a", "1");
    await PublishAsync(streamManager, "test.b", "2");
    await PublishAsync(streamManager, "test.c", "3");

    // An empty JSON object means "no purge options" — everything goes.
    var purgeResult = router.Route("$JS.API.STREAM.PURGE.TEST", Encoding.UTF8.GetBytes("{}"));

    purgeResult.Error.ShouldBeNull();
    purgeResult.Success.ShouldBeTrue();
    purgeResult.Purged.ShouldBe(3UL);

    var stateAfter = await streamManager.GetStateAsync("TEST", default);
    stateAfter.Messages.ShouldBe(0UL);
}
|
||||
|
||||
/// <summary>
/// Purge with a subject filter removes only messages whose subject matches the pattern;
/// non-matching messages survive.
/// Go reference: jetstream_api.go:1200-1350 — filter option.
/// </summary>
[Fact]
public async Task Purge_WithSubjectFilter_RemovesOnlyMatching()
{
    var router = CreateRouterWithStream("TEST", ">", out var streamManager);

    // Three "orders.*" messages and one "logs.*" message.
    await PublishAsync(streamManager, "orders.a", "1");
    await PublishAsync(streamManager, "orders.b", "2");
    await PublishAsync(streamManager, "logs.x", "3");
    await PublishAsync(streamManager, "orders.c", "4");

    var purgeRequest = Encoding.UTF8.GetBytes("""{"filter":"orders.*"}""");
    var purgeResult = router.Route("$JS.API.STREAM.PURGE.TEST", purgeRequest);

    // Only the three orders.* messages are purged; logs.x remains.
    purgeResult.Error.ShouldBeNull();
    purgeResult.Success.ShouldBeTrue();
    purgeResult.Purged.ShouldBe(3UL);

    var stateAfter = await streamManager.GetStateAsync("TEST", default);
    stateAfter.Messages.ShouldBe(1UL);
}
|
||||
|
||||
/// <summary>
/// Purge with the seq option removes every message whose sequence is strictly below
/// the given value; messages at or above it survive.
/// Go reference: jetstream_api.go:1200-1350 — seq option.
/// </summary>
[Fact]
public async Task Purge_WithSeq_RemovesBelowSequence()
{
    var router = CreateRouterWithStream("TEST", "test.>", out var streamManager);

    // Publish five messages; they receive sequences 1 through 5 in order.
    await PublishAsync(streamManager, "test.a", "1"); // seq 1
    await PublishAsync(streamManager, "test.b", "2"); // seq 2
    await PublishAsync(streamManager, "test.c", "3"); // seq 3
    await PublishAsync(streamManager, "test.d", "4"); // seq 4
    await PublishAsync(streamManager, "test.e", "5"); // seq 5

    // seq=4 removes sequences 1-3 and keeps 4 and 5.
    var purgeRequest = Encoding.UTF8.GetBytes("""{"seq":4}""");
    var purgeResult = router.Route("$JS.API.STREAM.PURGE.TEST", purgeRequest);

    purgeResult.Error.ShouldBeNull();
    purgeResult.Success.ShouldBeTrue();
    purgeResult.Purged.ShouldBe(3UL);

    var stateAfter = await streamManager.GetStateAsync("TEST", default);
    stateAfter.Messages.ShouldBe(2UL);
}
|
||||
|
||||
/// <summary>
/// Purge with the "keep" option retains only the newest N messages overall.
/// Go reference: jetstream_api.go:1200-1350 — keep option.
/// </summary>
[Fact]
public async Task Purge_WithKeep_KeepsLastN()
{
    var router = CreateRouterWithStream("TEST", "test.>", out var store);

    // Publishes land on sequences 1 through 5, in order.
    foreach (var (subject, body) in new[]
             {
                 ("test.a", "1"), ("test.b", "2"), ("test.c", "3"),
                 ("test.d", "4"), ("test.e", "5"),
             })
    {
        await PublishAsync(store, subject, body);
    }

    // keep=2 retains sequences 4 and 5; sequences 1-3 are purged.
    var request = Encoding.UTF8.GetBytes("""{"keep":2}""");
    var response = router.Route("$JS.API.STREAM.PURGE.TEST", request);

    response.Error.ShouldBeNull();
    response.Success.ShouldBeTrue();
    response.Purged.ShouldBe(3UL);

    var state = await store.GetStateAsync("TEST", default);
    state.Messages.ShouldBe(2UL);
}
|
||||
|
||||
/// <summary>
/// Purge with both "filter" and "keep" retains the newest N messages per
/// subject matching the filter; non-matching subjects are untouched.
/// Go reference: jetstream_api.go:1200-1350 — filter+keep combination.
/// </summary>
[Fact]
public async Task Purge_FilterAndKeep_KeepsLastNPerFilter()
{
    var router = CreateRouterWithStream("TEST", ">", out var store);

    // Two matching subjects plus one that falls outside the filter.
    await PublishAsync(store, "orders.a", "o1");  // seq 1
    await PublishAsync(store, "orders.a", "o2");  // seq 2
    await PublishAsync(store, "orders.a", "o3");  // seq 3
    await PublishAsync(store, "logs.x", "l1");    // seq 4 — not matched by "orders.*"
    await PublishAsync(store, "orders.b", "ob1"); // seq 5
    await PublishAsync(store, "orders.b", "ob2"); // seq 6

    // keep=1 per matching subject:
    //   orders.a -> keep seq 3, purge seqs 1 and 2
    //   orders.b -> keep seq 6, purge seq 5
    //   logs.x   -> untouched
    var request = Encoding.UTF8.GetBytes("""{"filter":"orders.*","keep":1}""");
    var response = router.Route("$JS.API.STREAM.PURGE.TEST", request);

    response.Error.ShouldBeNull();
    response.Success.ShouldBeTrue();
    response.Purged.ShouldBe(3UL);

    // Survivors: orders.a@3, logs.x@4, orders.b@6.
    var state = await store.GetStateAsync("TEST", default);
    state.Messages.ShouldBe(3UL);
}
|
||||
|
||||
/// <summary>
/// Purging a stream that does not exist yields a 404 not-found API error.
/// Go reference: jetstream_api.go — stream not found.
/// </summary>
[Fact]
public void Purge_InvalidStream_ReturnsNotFound()
{
    // Build a router with no streams registered at all.
    var streams = new StreamManager();
    var consumers = new ConsumerManager();
    var router = new JetStreamApiRouter(streams, consumers);

    var response = router.Route("$JS.API.STREAM.PURGE.NONEXISTENT", Encoding.UTF8.GetBytes("{}"));

    response.Error.ShouldNotBeNull();
    response.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
/// <summary>
/// Purging an empty stream succeeds and reports a purged count of zero.
/// Go reference: jetstream_api.go — purge on empty stream.
/// </summary>
[Fact]
public void Purge_EmptyStream_ReturnsZeroPurged()
{
    // No messages are published before the purge.
    var router = CreateRouterWithStream("TEST", "test.>", out _);

    var response = router.Route("$JS.API.STREAM.PURGE.TEST", Encoding.UTF8.GetBytes("{}"));

    response.Error.ShouldBeNull();
    response.Success.ShouldBeTrue();
    response.Purged.ShouldBe(0UL);
}
|
||||
}
|
||||
@@ -0,0 +1,367 @@
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for AssignmentCodec: binary serialization for stream and consumer assignments
|
||||
/// with optional S2/Snappy compression for large payloads.
|
||||
/// Go reference: jetstream_cluster.go:8703-9246 (encodeAddStreamAssignment,
|
||||
/// encodeAddConsumerAssignment, decodeStreamAssignment, decodeConsumerAssignment,
|
||||
/// encodeAddConsumerAssignmentCompressed, decodeConsumerAssignmentCompressed).
|
||||
/// </summary>
|
||||
/// <summary>
/// Tests for AssignmentCodec: binary serialization for stream and consumer assignments
/// with optional S2/Snappy compression for large payloads.
/// Go reference: jetstream_cluster.go:8703-9246 (encodeAddStreamAssignment,
/// encodeAddConsumerAssignment, decodeStreamAssignment, decodeConsumerAssignment,
/// encodeAddConsumerAssignmentCompressed, decodeConsumerAssignmentCompressed).
/// </summary>
public class AssignmentCodecTests
{
    // ---------------------------------------------------------------
    // StreamAssignment round-trip
    // Go reference: jetstream_cluster.go:8703 encodeAddStreamAssignment /
    // 8733 decodeStreamAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void Encode_decode_stream_assignment_round_trip()
    {
        // Every field set on the assignment must survive encode + decode.
        // Go reference: jetstream_cluster.go:8703 + 8733.
        var createdAt = new DateTime(2025, 3, 15, 9, 0, 0, DateTimeKind.Utc);
        var original = new StreamAssignment
        {
            StreamName = "orders",
            Group = new RaftGroup
            {
                Name = "rg-orders",
                Peers = ["peer-1", "peer-2", "peer-3"],
                StorageType = "file",
                Cluster = "cluster-east",
                Preferred = "peer-1",
                DesiredReplicas = 3,
            },
            Created = createdAt,
            ConfigJson = """{"subjects":["orders.>"],"storage":"file","replicas":3}""",
            SyncSubject = "$JS.SYNC.orders",
            Responded = true,
            Recovering = false,
            Reassigning = true,
        };

        var wire = AssignmentCodec.EncodeStreamAssignment(original);
        wire.ShouldNotBeEmpty();

        var restored = AssignmentCodec.DecodeStreamAssignment(wire);
        restored.ShouldNotBeNull();
        restored!.StreamName.ShouldBe("orders");
        restored.Group.Name.ShouldBe("rg-orders");
        restored.Group.Peers.ShouldBe(["peer-1", "peer-2", "peer-3"]);
        restored.Group.StorageType.ShouldBe("file");
        restored.Group.Cluster.ShouldBe("cluster-east");
        restored.Group.Preferred.ShouldBe("peer-1");
        restored.Group.DesiredReplicas.ShouldBe(3);
        restored.Created.ShouldBe(createdAt);
        restored.ConfigJson.ShouldBe("""{"subjects":["orders.>"],"storage":"file","replicas":3}""");
        restored.SyncSubject.ShouldBe("$JS.SYNC.orders");
        restored.Responded.ShouldBeTrue();
        restored.Recovering.ShouldBeFalse();
        restored.Reassigning.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // ConsumerAssignment round-trip
    // Go reference: jetstream_cluster.go:9175 encodeAddConsumerAssignment /
    // 9195 decodeConsumerAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void Encode_decode_consumer_assignment_round_trip()
    {
        // Go reference: jetstream_cluster.go:9175 + 9195.
        var createdAt = new DateTime(2025, 6, 1, 12, 0, 0, DateTimeKind.Utc);
        var original = new ConsumerAssignment
        {
            ConsumerName = "push-consumer",
            StreamName = "events",
            Group = new RaftGroup
            {
                Name = "rg-push",
                Peers = ["node-a", "node-b"],
                StorageType = "memory",
                DesiredReplicas = 2,
            },
            Created = createdAt,
            ConfigJson = """{"deliver_subject":"push.out","filter_subject":"events.>"}""",
            Responded = true,
            Recovering = true,
        };

        var wire = AssignmentCodec.EncodeConsumerAssignment(original);
        wire.ShouldNotBeEmpty();

        var restored = AssignmentCodec.DecodeConsumerAssignment(wire);
        restored.ShouldNotBeNull();
        restored!.ConsumerName.ShouldBe("push-consumer");
        restored.StreamName.ShouldBe("events");
        restored.Group.Name.ShouldBe("rg-push");
        restored.Group.Peers.ShouldBe(["node-a", "node-b"]);
        restored.Group.StorageType.ShouldBe("memory");
        restored.Group.DesiredReplicas.ShouldBe(2);
        restored.Created.ShouldBe(createdAt);
        restored.ConfigJson.ShouldBe("""{"deliver_subject":"push.out","filter_subject":"events.>"}""");
        restored.Responded.ShouldBeTrue();
        restored.Recovering.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Error handling
    // Go reference: jetstream_cluster.go:8733 error return on bad unmarshal
    // ---------------------------------------------------------------

    [Fact]
    public void Decode_returns_null_for_invalid_data()
    {
        // Garbage bytes must map to null, mirroring Go's unmarshal error path
        // (jetstream_cluster.go:8736).
        byte[] garbage = [0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x01, 0x02, 0x03];

        AssignmentCodec.DecodeStreamAssignment(garbage).ShouldBeNull();
    }

    [Fact]
    public void Decode_returns_null_for_empty_data()
    {
        // An empty buffer cannot unmarshal, so both decoders return null
        // (jetstream_cluster.go:8733).
        AssignmentCodec.DecodeStreamAssignment(ReadOnlySpan<byte>.Empty).ShouldBeNull();

        AssignmentCodec.DecodeConsumerAssignment(ReadOnlySpan<byte>.Empty).ShouldBeNull();
    }

    // ---------------------------------------------------------------
    // Compression: CompressIfLarge
    // Go reference: jetstream_cluster.go:9226 encodeAddConsumerAssignmentCompressed
    // uses s2.NewWriter for large consumer configs
    // ---------------------------------------------------------------

    [Fact]
    public void CompressIfLarge_compresses_when_above_threshold()
    {
        // Go reference: jetstream_cluster.go:9226 — compression for large assignments.
        var uniformInput = Encoding.UTF8.GetBytes(new string('X', 2048));

        var compressed = AssignmentCodec.CompressIfLarge(uniformInput, threshold: 1024);

        // The result must be a new buffer, and a 2 KiB run of identical bytes
        // should shrink under any reasonable compressor.
        compressed.ShouldNotBeSameAs(uniformInput);
        compressed.Length.ShouldBeLessThan(uniformInput.Length);
    }

    [Fact]
    public void CompressIfLarge_no_compress_below_threshold()
    {
        // Small payloads are passed through unchanged
        // (Go sends small consumer assignments uncompressed).
        var smallPayload = Encoding.UTF8.GetBytes("""{"stream_name":"foo"}""");

        AssignmentCodec.CompressIfLarge(smallPayload, threshold: 1024).ShouldBe(smallPayload);
    }

    // ---------------------------------------------------------------
    // Compression: DecompressIfNeeded
    // Go reference: jetstream_cluster.go:9238 decodeConsumerAssignmentCompressed
    // ---------------------------------------------------------------

    [Fact]
    public void DecompressIfNeeded_decompresses_snappy_data()
    {
        // Compress (threshold 0 forces it), then verify the inverse operation
        // restores the original bytes. Go reference: jetstream_cluster.go:9238.
        var original = Encoding.UTF8.GetBytes("""{"stream_name":"test","group":{"name":"rg"}}""");
        var compressed = AssignmentCodec.CompressIfLarge(original, threshold: 0);

        AssignmentCodec.DecompressIfNeeded(compressed).ShouldBe(original);
    }

    [Fact]
    public void DecompressIfNeeded_returns_raw_for_non_compressed()
    {
        // Plain JSON (no compression marker) is returned as-is, matching the
        // non-compressed decode path (jetstream_cluster.go:9195).
        var plainJson = Encoding.UTF8.GetBytes("""{"stream_name":"test"}""");

        AssignmentCodec.DecompressIfNeeded(plainJson).ShouldBe(plainJson);
    }

    // ---------------------------------------------------------------
    // Consumer preservation in StreamAssignment round-trip
    // Go reference: jetstream_cluster.go streamAssignment.Consumers map serialization
    // ---------------------------------------------------------------

    [Fact]
    public void Stream_assignment_preserves_consumer_assignments()
    {
        // The consumers map attached to a stream assignment must survive
        // serialization with all per-consumer state intact.
        var assignment = new StreamAssignment
        {
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-events", Peers = ["n1", "n2", "n3"] },
            ConfigJson = """{"subjects":["events.>"]}""",
        };

        assignment.Consumers["consumer-alpha"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-alpha",
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-alpha", Peers = ["n1"] },
            ConfigJson = """{"deliver_subject":"out.alpha"}""",
            Responded = true,
        };
        assignment.Consumers["consumer-beta"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-beta",
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-beta", Peers = ["n2"] },
            Recovering = true,
        };
        assignment.Consumers["consumer-gamma"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-gamma",
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-gamma", Peers = ["n3"] },
        };

        var restored = AssignmentCodec.DecodeStreamAssignment(
            AssignmentCodec.EncodeStreamAssignment(assignment));

        restored.ShouldNotBeNull();
        restored!.Consumers.Count.ShouldBe(3);
        restored.Consumers["consumer-alpha"].ConsumerName.ShouldBe("consumer-alpha");
        restored.Consumers["consumer-alpha"].Responded.ShouldBeTrue();
        restored.Consumers["consumer-beta"].Recovering.ShouldBeTrue();
        restored.Consumers["consumer-gamma"].Group.Name.ShouldBe("rg-gamma");
    }

    // ---------------------------------------------------------------
    // RaftGroup peer list preservation
    // Go reference: jetstream_cluster.go raftGroup.Peers serialization
    // ---------------------------------------------------------------

    [Fact]
    public void Stream_assignment_preserves_raft_group_peers()
    {
        // Go reference: jetstream_cluster.go:154 raftGroup.Peers in assignment encoding.
        var assignment = new StreamAssignment
        {
            StreamName = "telemetry",
            Group = new RaftGroup
            {
                Name = "rg-telemetry",
                Peers = ["peer-alpha", "peer-beta", "peer-gamma"],
                DesiredReplicas = 3,
            },
        };

        var restored = AssignmentCodec.DecodeStreamAssignment(
            AssignmentCodec.EncodeStreamAssignment(assignment));

        restored.ShouldNotBeNull();
        restored!.Group.Peers.Count.ShouldBe(3);
        restored.Group.Peers.ShouldContain("peer-alpha");
        restored.Group.Peers.ShouldContain("peer-beta");
        restored.Group.Peers.ShouldContain("peer-gamma");
        restored.Group.DesiredReplicas.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Large ConfigJson round-trip through compression
    // Go reference: jetstream_cluster.go:9226 encodeAddConsumerAssignmentCompressed
    // for large consumer configs
    // ---------------------------------------------------------------

    [Fact]
    public void Compress_decompress_round_trip_with_large_config()
    {
        // Encode -> compress -> decompress -> decode must reproduce the
        // assignment exactly, even with a config large enough to compress.
        var largeConfig = """{"subjects":[""" +
            string.Join(",", Enumerable.Range(1, 50).Select(i => $"\"events.topic.{i}.>\"")) +
            """],"storage":"file","replicas":3,"max_msgs":1000000,"max_bytes":1073741824}""";

        var assignment = new ConsumerAssignment
        {
            ConsumerName = "large-config-consumer",
            StreamName = "big-stream",
            Group = new RaftGroup
            {
                Name = "rg-large",
                Peers = ["n1", "n2", "n3"],
            },
            ConfigJson = largeConfig,
        };

        var encoded = AssignmentCodec.EncodeConsumerAssignment(assignment);
        var compressed = AssignmentCodec.CompressIfLarge(encoded, threshold: 512);
        compressed.Length.ShouldBeGreaterThan(0);

        var decoded = AssignmentCodec.DecodeConsumerAssignment(
            AssignmentCodec.DecompressIfNeeded(compressed));

        decoded.ShouldNotBeNull();
        decoded!.ConsumerName.ShouldBe("large-config-consumer");
        decoded.ConfigJson.ShouldBe(largeConfig);
        decoded.Group.Peers.Count.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Golden fixture test: known-good JSON bytes decode correctly
    // Go reference: jetstream_cluster.go decodeStreamAssignment / decodeConsumerAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void Golden_fixture_known_bytes()
    {
        // Format-stability check: a hand-written fixture in the expected wire
        // layout must decode without loss, so accidental schema changes fail here.
        // Property names are snake_case (JsonNamingPolicy.SnakeCaseLower);
        // the created timestamp is 2025-01-15T00:00:00Z.
        // Go reference: jetstream_cluster.go:8733 decodeStreamAssignment.
        const string goldenJson = """
            {
              "stream_name": "golden-stream",
              "group": {
                "name": "rg-golden",
                "peers": ["node-1", "node-2", "node-3"],
                "storage_type": "file",
                "cluster": "us-east",
                "preferred": "node-1",
                "desired_replicas": 3
              },
              "created": "2025-01-15T00:00:00Z",
              "config_json": "{\"subjects\":[\"golden.>\"]}",
              "sync_subject": "$JS.SYNC.golden-stream",
              "responded": true,
              "recovering": false,
              "reassigning": false,
              "consumers": {}
            }
            """;

        var decoded = AssignmentCodec.DecodeStreamAssignment(Encoding.UTF8.GetBytes(goldenJson));

        decoded.ShouldNotBeNull();
        decoded!.StreamName.ShouldBe("golden-stream");
        decoded.Group.Name.ShouldBe("rg-golden");
        decoded.Group.Peers.ShouldBe(["node-1", "node-2", "node-3"]);
        decoded.Group.StorageType.ShouldBe("file");
        decoded.Group.Cluster.ShouldBe("us-east");
        decoded.Group.Preferred.ShouldBe("node-1");
        decoded.Group.DesiredReplicas.ShouldBe(3);
        decoded.Created.ShouldBe(new DateTime(2025, 1, 15, 0, 0, 0, DateTimeKind.Utc));
        decoded.ConfigJson.ShouldBe("""{"subjects":["golden.>"]}""");
        decoded.SyncSubject.ShouldBe("$JS.SYNC.golden-stream");
        decoded.Responded.ShouldBeTrue();
        decoded.Recovering.ShouldBeFalse();
        decoded.Reassigning.ShouldBeFalse();
        decoded.Consumers.ShouldBeEmpty();
    }
}
|
||||
@@ -0,0 +1,245 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: RaftGroup quorum calculation, HasQuorum checks, StreamAssignment
|
||||
// and ConsumerAssignment creation, consumer dictionary operations,
|
||||
// Preferred peer tracking.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for ClusterAssignmentTypes: RaftGroup quorum semantics,
|
||||
/// StreamAssignment lifecycle, and ConsumerAssignment defaults.
|
||||
/// Go reference: jetstream_cluster.go:154-266 (raftGroup, streamAssignment, consumerAssignment).
|
||||
/// </summary>
|
||||
/// <summary>
/// Tests for ClusterAssignmentTypes: RaftGroup quorum semantics,
/// StreamAssignment lifecycle, and ConsumerAssignment defaults.
/// Go reference: jetstream_cluster.go:154-266 (raftGroup, streamAssignment, consumerAssignment).
/// </summary>
public class AssignmentSerializationTests
{
    // Builds a RaftGroup with the given name and explicit peer list.
    private static RaftGroup MakeGroup(string name, params string[] peers) =>
        new() { Name = name, Peers = [.. peers] };

    // ---------------------------------------------------------------
    // RaftGroup quorum calculation
    // Go reference: jetstream_cluster.go:154-163 raftGroup.quorumNeeded()
    // ---------------------------------------------------------------

    [Fact]
    public void RaftGroup_quorum_size_for_single_node_is_one()
    {
        MakeGroup("test-r1", "peer-1").QuorumSize.ShouldBe(1);
    }

    [Fact]
    public void RaftGroup_quorum_size_for_three_nodes_is_two()
    {
        MakeGroup("test-r3", "p1", "p2", "p3").QuorumSize.ShouldBe(2);
    }

    [Fact]
    public void RaftGroup_quorum_size_for_five_nodes_is_three()
    {
        MakeGroup("test-r5", "p1", "p2", "p3", "p4", "p5").QuorumSize.ShouldBe(3);
    }

    [Fact]
    public void RaftGroup_quorum_size_for_empty_peers_is_one()
    {
        // (0 / 2) + 1 = 1: even a peerless group reports a quorum of one.
        MakeGroup("test-empty").QuorumSize.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // HasQuorum checks
    // Go reference: jetstream_cluster.go raftGroup quorum check
    // ---------------------------------------------------------------

    [Fact]
    public void HasQuorum_returns_true_when_acks_meet_quorum()
    {
        var raftGroup = MakeGroup("q-test", "p1", "p2", "p3");

        raftGroup.HasQuorum(2).ShouldBeTrue();
        raftGroup.HasQuorum(3).ShouldBeTrue();
    }

    [Fact]
    public void HasQuorum_returns_false_when_acks_below_quorum()
    {
        var raftGroup = MakeGroup("q-test", "p1", "p2", "p3");

        raftGroup.HasQuorum(1).ShouldBeFalse();
        raftGroup.HasQuorum(0).ShouldBeFalse();
    }

    [Fact]
    public void HasQuorum_single_node_requires_one_ack()
    {
        var raftGroup = MakeGroup("q-r1", "p1");

        raftGroup.HasQuorum(1).ShouldBeTrue();
        raftGroup.HasQuorum(0).ShouldBeFalse();
    }

    [Fact]
    public void HasQuorum_five_nodes_requires_three_acks()
    {
        var raftGroup = MakeGroup("q-r5", "p1", "p2", "p3", "p4", "p5");

        raftGroup.HasQuorum(2).ShouldBeFalse();
        raftGroup.HasQuorum(3).ShouldBeTrue();
        raftGroup.HasQuorum(5).ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // RaftGroup property defaults
    // Go reference: jetstream_cluster.go:154-163
    // ---------------------------------------------------------------

    [Fact]
    public void RaftGroup_defaults_storage_to_file()
    {
        // Storage type falls back to "file" when not set explicitly.
        new RaftGroup { Name = "defaults" }.StorageType.ShouldBe("file");
    }

    [Fact]
    public void RaftGroup_defaults_cluster_to_empty()
    {
        new RaftGroup { Name = "defaults" }.Cluster.ShouldBe(string.Empty);
    }

    [Fact]
    public void RaftGroup_preferred_peer_tracking()
    {
        var raftGroup = MakeGroup("pref-test", "p1", "p2", "p3");

        // Preferred starts unset and then tracks the assigned peer.
        raftGroup.Preferred.ShouldBe(string.Empty);

        raftGroup.Preferred = "p2";
        raftGroup.Preferred.ShouldBe("p2");
    }

    // ---------------------------------------------------------------
    // StreamAssignment creation
    // Go reference: jetstream_cluster.go:166-184 streamAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void StreamAssignment_created_with_defaults()
    {
        var raftGroup = MakeGroup("sa-group", "p1");
        var assignment = new StreamAssignment
        {
            StreamName = "TEST-STREAM",
            Group = raftGroup,
        };

        assignment.StreamName.ShouldBe("TEST-STREAM");
        assignment.Group.ShouldBeSameAs(raftGroup);
        assignment.ConfigJson.ShouldBe("{}");
        assignment.SyncSubject.ShouldBe(string.Empty);
        assignment.Responded.ShouldBeFalse();
        assignment.Recovering.ShouldBeFalse();
        assignment.Reassigning.ShouldBeFalse();
        assignment.Consumers.ShouldBeEmpty();
        assignment.Created.ShouldBeGreaterThan(DateTime.MinValue);
    }

    [Fact]
    public void StreamAssignment_consumers_dictionary_operations()
    {
        var assignment = new StreamAssignment
        {
            StreamName = "MY-STREAM",
            Group = MakeGroup("sa-cons", "p1", "p2", "p3"),
        };

        // Add, look up, then remove a single consumer assignment.
        assignment.Consumers["durable-1"] = new ConsumerAssignment
        {
            ConsumerName = "durable-1",
            StreamName = "MY-STREAM",
            Group = MakeGroup("cons-group", "p1"),
        };

        assignment.Consumers.Count.ShouldBe(1);
        assignment.Consumers["durable-1"].ConsumerName.ShouldBe("durable-1");

        assignment.Consumers.Remove("durable-1");
        assignment.Consumers.ShouldBeEmpty();
    }

    // ---------------------------------------------------------------
    // ConsumerAssignment creation
    // Go reference: jetstream_cluster.go:250-266 consumerAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void ConsumerAssignment_created_with_defaults()
    {
        var raftGroup = MakeGroup("ca-group", "p1");
        var assignment = new ConsumerAssignment
        {
            ConsumerName = "my-consumer",
            StreamName = "MY-STREAM",
            Group = raftGroup,
        };

        assignment.ConsumerName.ShouldBe("my-consumer");
        assignment.StreamName.ShouldBe("MY-STREAM");
        assignment.Group.ShouldBeSameAs(raftGroup);
        assignment.ConfigJson.ShouldBe("{}");
        assignment.Responded.ShouldBeFalse();
        assignment.Recovering.ShouldBeFalse();
        assignment.Created.ShouldBeGreaterThan(DateTime.MinValue);
    }

    [Fact]
    public void ConsumerAssignment_mutable_flags()
    {
        var assignment = new ConsumerAssignment
        {
            ConsumerName = "c1",
            StreamName = "S1",
            Group = MakeGroup("ca-flags", "p1"),
        };

        assignment.Responded = true;
        assignment.Recovering = true;

        assignment.Responded.ShouldBeTrue();
        assignment.Recovering.ShouldBeTrue();
    }

    [Fact]
    public void StreamAssignment_mutable_flags()
    {
        var assignment = new StreamAssignment
        {
            StreamName = "S1",
            Group = MakeGroup("sa-flags", "p1"),
        };

        assignment.Responded = true;
        assignment.Recovering = true;
        assignment.Reassigning = true;
        assignment.ConfigJson = """{"subjects":["test.>"]}""";
        assignment.SyncSubject = "$JS.SYNC.S1";

        assignment.Responded.ShouldBeTrue();
        assignment.Recovering.ShouldBeTrue();
        assignment.Reassigning.ShouldBeTrue();
        assignment.ConfigJson.ShouldBe("""{"subjects":["test.>"]}""");
        assignment.SyncSubject.ShouldBe("$JS.SYNC.S1");
    }
}
|
||||
@@ -0,0 +1,723 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: RaftGroup quorum semantics, StreamAssignment/ConsumerAssignment initialization,
|
||||
// JetStreamMetaGroup proposal workflow (create/delete stream + consumer), GetStreamAssignment,
|
||||
// GetAllAssignments, and PlacementEngine peer selection with topology filtering.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for B7 (ClusterAssignmentTypes), B8 (JetStreamMetaGroup proposal workflow),
|
||||
/// and B9 (PlacementEngine peer selection).
|
||||
/// Go reference: jetstream_cluster.go raftGroup, streamAssignment, consumerAssignment,
|
||||
/// selectPeerGroup (line 7212).
|
||||
/// </summary>
|
||||
public class ClusterAssignmentAndPlacementTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// B7: RaftGroup — quorum and HasQuorum
|
||||
// Go: jetstream_cluster.go:154 raftGroup struct
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void RaftGroup_quorum_size_for_single_node_is_one()
{
    // A singleton replica group is its own majority.
    var raftGroup = new RaftGroup { Name = "R1", Peers = ["n1"] };

    raftGroup.QuorumSize.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public void RaftGroup_quorum_size_for_three_nodes_is_two()
{
    // Majority of three peers is two.
    var raftGroup = new RaftGroup { Name = "R3", Peers = ["n1", "n2", "n3"] };

    raftGroup.QuorumSize.ShouldBe(2);
}
|
||||
|
||||
[Fact]
public void RaftGroup_quorum_size_for_five_nodes_is_three()
{
    // Majority of five peers is three.
    var raftGroup = new RaftGroup { Name = "R5", Peers = ["n1", "n2", "n3", "n4", "n5"] };

    raftGroup.QuorumSize.ShouldBe(3);
}
|
||||
|
||||
[Fact]
public void RaftGroup_has_quorum_with_majority_acks()
{
    var raftGroup = new RaftGroup { Name = "R3", Peers = ["n1", "n2", "n3"] };

    // Quorum for a three-peer group is two acks.
    raftGroup.HasQuorum(2).ShouldBeTrue();
}
|
||||
|
||||
[Fact]
public void RaftGroup_no_quorum_with_minority_acks()
{
    var raftGroup = new RaftGroup { Name = "R3", Peers = ["n1", "n2", "n3"] };

    // A single ack out of three peers falls short of the quorum of two.
    raftGroup.HasQuorum(1).ShouldBeFalse();
}
|
||||
|
||||
[Fact]
public void RaftGroup_has_quorum_with_all_acks()
{
    var raftGroup = new RaftGroup { Name = "R5", Peers = ["n1", "n2", "n3", "n4", "n5"] };

    // Unanimous acknowledgement trivially satisfies the quorum.
    raftGroup.HasQuorum(5).ShouldBeTrue();
}
|
||||
|
||||
[Fact]
public void RaftGroup_no_quorum_with_zero_acks()
{
    var raftGroup = new RaftGroup { Name = "R3", Peers = ["n1", "n2", "n3"] };

    // No acknowledgements can never form a quorum.
    raftGroup.HasQuorum(0).ShouldBeFalse();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// B7: StreamAssignment — initialization and consumer tracking
|
||||
// Go: jetstream_cluster.go:166 streamAssignment struct
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void StreamAssignment_initializes_with_empty_consumers()
{
    // A freshly created assignment carries no consumers and all flags off.
    var assignment = new StreamAssignment
    {
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "g1", Peers = ["n1", "n2", "n3"] },
    };

    assignment.StreamName.ShouldBe("ORDERS");
    assignment.Consumers.ShouldBeEmpty();
    assignment.ConfigJson.ShouldBe("{}");
    assignment.Responded.ShouldBeFalse();
    assignment.Recovering.ShouldBeFalse();
    assignment.Reassigning.ShouldBeFalse();
}
|
||||
|
||||
[Fact]
public void StreamAssignment_created_timestamp_is_recent()
{
    var lowerBound = DateTime.UtcNow.AddSeconds(-1);

    var assignment = new StreamAssignment
    {
        StreamName = "TS_STREAM",
        Group = new RaftGroup { Name = "g1", Peers = ["n1"] },
    };

    var upperBound = DateTime.UtcNow.AddSeconds(1);

    // Created defaults to "now", checked with a one-second tolerance on each side.
    assignment.Created.ShouldBeGreaterThan(lowerBound);
    assignment.Created.ShouldBeLessThan(upperBound);
}
|
||||
|
||||
[Fact]
|
||||
public void StreamAssignment_consumers_dict_is_ordinal_keyed()
|
||||
{
|
||||
var group = new RaftGroup { Name = "g1", Peers = ["n1"] };
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "S",
|
||||
Group = group,
|
||||
};
|
||||
|
||||
var consGroup = new RaftGroup { Name = "cg", Peers = ["n1"] };
|
||||
assignment.Consumers["ALPHA"] = new ConsumerAssignment
|
||||
{
|
||||
ConsumerName = "ALPHA",
|
||||
StreamName = "S",
|
||||
Group = consGroup,
|
||||
};
|
||||
|
||||
assignment.Consumers.ContainsKey("ALPHA").ShouldBeTrue();
|
||||
assignment.Consumers.ContainsKey("alpha").ShouldBeFalse();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
// B7: ConsumerAssignment — initialization
// Go: jetstream_cluster.go:250 consumerAssignment struct
// ---------------------------------------------------------------

[Fact]
public void ConsumerAssignment_initializes_correctly()
{
    var raftGroup = new RaftGroup { Name = "cg1", Peers = ["n1", "n2"] };

    var ca = new ConsumerAssignment
    {
        ConsumerName = "PUSH_CONSUMER",
        StreamName = "EVENTS",
        Group = raftGroup,
    };

    // Required properties round-trip; defaults are an empty JSON config
    // and cleared lifecycle flags.
    ca.ConsumerName.ShouldBe("PUSH_CONSUMER");
    ca.StreamName.ShouldBe("EVENTS");
    ca.Group.ShouldBeSameAs(raftGroup);
    ca.ConfigJson.ShouldBe("{}");
    ca.Responded.ShouldBeFalse();
    ca.Recovering.ShouldBeFalse();
}

[Fact]
public void ConsumerAssignment_created_timestamp_is_recent()
{
    var lowerBound = DateTime.UtcNow.AddSeconds(-1);

    var ca = new ConsumerAssignment
    {
        ConsumerName = "C",
        StreamName = "S",
        Group = new RaftGroup { Name = "cg", Peers = ["n1"] },
    };

    var upperBound = DateTime.UtcNow.AddSeconds(1);

    // Created must be stamped at construction time (±1s tolerance).
    ca.Created.ShouldBeGreaterThan(lowerBound);
    ca.Created.ShouldBeLessThan(upperBound);
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — ProposeCreateStreamAsync with assignment
// Go: jetstream_cluster.go processStreamAssignment
// ---------------------------------------------------------------

[Fact]
public async Task ProposeCreateStream_with_group_stores_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "ORDERS_grp", Peers = ["n1", "n2", "n3"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "ORDERS" }, raftGroup, default);

    // The proposal must be recorded with the supplied peer group intact.
    var stored = metaGroup.GetStreamAssignment("ORDERS");
    stored.ShouldNotBeNull();
    stored!.StreamName.ShouldBe("ORDERS");
    stored.Group.Peers.Count.ShouldBe(3);
}

[Fact]
public async Task ProposeCreateStream_without_group_still_stores_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Overload without an explicit group: the meta group supplies one.
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "NOGROUP" }, default);

    var stored = metaGroup.GetStreamAssignment("NOGROUP");
    stored.ShouldNotBeNull();
    stored!.StreamName.ShouldBe("NOGROUP");
    stored.Group.ShouldNotBeNull();
}

[Fact]
public async Task ProposeCreateStream_also_appears_in_GetState_streams()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "VISIBLE" }, raftGroup, default);

    // The state snapshot must reflect the newly proposed stream.
    var state = metaGroup.GetState();
    state.Streams.ShouldContain("VISIBLE");
    state.AssignmentCount.ShouldBe(1);
}

[Fact]
public async Task ProposeCreateStream_duplicate_is_idempotent()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };

    // Proposing the same stream twice must not double-register it.
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, raftGroup, default);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, raftGroup, default);

    metaGroup.GetAllAssignments().Count.ShouldBe(1);
    metaGroup.GetState().Streams.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — ProposeDeleteStreamAsync
// Go: jetstream_cluster.go processStreamDelete
// ---------------------------------------------------------------

[Fact]
public async Task ProposeDeleteStream_removes_assignment_and_stream_name()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "DELETEME" }, raftGroup, default);

    // Sanity: the stream is tracked before deletion.
    metaGroup.GetStreamAssignment("DELETEME").ShouldNotBeNull();
    metaGroup.GetState().Streams.ShouldContain("DELETEME");

    await metaGroup.ProposeDeleteStreamAsync("DELETEME", default);

    // Both the assignment and the stream-name listing must be gone.
    metaGroup.GetStreamAssignment("DELETEME").ShouldBeNull();
    metaGroup.GetState().Streams.ShouldNotContain("DELETEME");
    metaGroup.GetState().AssignmentCount.ShouldBe(0);
}

[Fact]
public async Task ProposeDeleteStream_nonexistent_stream_is_safe()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Deleting an unknown stream is a no-op rather than an error.
    await metaGroup.ProposeDeleteStreamAsync("MISSING", default);
    metaGroup.GetAllAssignments().Count.ShouldBe(0);
}

[Fact]
public async Task ProposeDeleteStream_only_removes_target_not_others()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "KEEP" }, raftGroup, default);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "REMOVE" }, raftGroup, default);

    await metaGroup.ProposeDeleteStreamAsync("REMOVE", default);

    // Deletion is scoped to the named stream only.
    metaGroup.GetStreamAssignment("KEEP").ShouldNotBeNull();
    metaGroup.GetStreamAssignment("REMOVE").ShouldBeNull();
    metaGroup.GetState().Streams.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — ProposeCreateConsumerAsync
// Go: jetstream_cluster.go processConsumerAssignment
// ---------------------------------------------------------------

[Fact]
public async Task ProposeCreateConsumer_adds_consumer_to_stream_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRaftGroup = new RaftGroup { Name = "sg", Peers = ["n1", "n2", "n3"] };
    var consumerRaftGroup = new RaftGroup { Name = "cg", Peers = ["n1", "n2"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "ORDERS" }, streamRaftGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("ORDERS", "PROCESSOR", consumerRaftGroup, default);

    // The consumer must appear under its parent stream's assignment.
    var stored = metaGroup.GetStreamAssignment("ORDERS");
    stored.ShouldNotBeNull();
    stored!.Consumers.ContainsKey("PROCESSOR").ShouldBeTrue();
    stored.Consumers["PROCESSOR"].ConsumerName.ShouldBe("PROCESSOR");
    stored.Consumers["PROCESSOR"].StreamName.ShouldBe("ORDERS");
}

[Fact]
public async Task ProposeCreateConsumer_multiple_consumers_on_same_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRaftGroup = new RaftGroup { Name = "sg", Peers = ["n1"] };
    var consumerRaftGroup = new RaftGroup { Name = "cg", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "MULTI" }, streamRaftGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("MULTI", "C1", consumerRaftGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("MULTI", "C2", consumerRaftGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("MULTI", "C3", consumerRaftGroup, default);

    // All three consumers coexist under the single stream assignment.
    var stored = metaGroup.GetStreamAssignment("MULTI");
    stored!.Consumers.Count.ShouldBe(3);
    stored.Consumers.ContainsKey("C1").ShouldBeTrue();
    stored.Consumers.ContainsKey("C2").ShouldBeTrue();
    stored.Consumers.ContainsKey("C3").ShouldBeTrue();
}

[Fact]
public async Task ProposeCreateConsumer_on_nonexistent_stream_is_safe()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var consumerRaftGroup = new RaftGroup { Name = "cg", Peers = ["n1"] };

    // Stream not found: the proposal is dropped silently, never thrown.
    await metaGroup.ProposeCreateConsumerAsync("MISSING_STREAM", "C1", consumerRaftGroup, default);
    metaGroup.GetStreamAssignment("MISSING_STREAM").ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — ProposeDeleteConsumerAsync
// Go: jetstream_cluster.go processConsumerDelete
// ---------------------------------------------------------------

[Fact]
public async Task ProposeDeleteConsumer_removes_consumer_from_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRaftGroup = new RaftGroup { Name = "sg", Peers = ["n1"] };
    var consumerRaftGroup = new RaftGroup { Name = "cg", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "EVENTS" }, streamRaftGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("EVENTS", "PUSH", consumerRaftGroup, default);

    // Sanity: the consumer is registered before deletion.
    metaGroup.GetStreamAssignment("EVENTS")!.Consumers.ContainsKey("PUSH").ShouldBeTrue();

    await metaGroup.ProposeDeleteConsumerAsync("EVENTS", "PUSH", default);

    metaGroup.GetStreamAssignment("EVENTS")!.Consumers.ContainsKey("PUSH").ShouldBeFalse();
}

[Fact]
public async Task ProposeDeleteConsumer_only_removes_target_consumer()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRaftGroup = new RaftGroup { Name = "sg", Peers = ["n1"] };
    var consumerRaftGroup = new RaftGroup { Name = "cg", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, streamRaftGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("S", "KEEP", consumerRaftGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("S", "REMOVE", consumerRaftGroup, default);

    await metaGroup.ProposeDeleteConsumerAsync("S", "REMOVE", default);

    // Deletion must be scoped to the named consumer only.
    var stored = metaGroup.GetStreamAssignment("S");
    stored!.Consumers.ContainsKey("KEEP").ShouldBeTrue();
    stored.Consumers.ContainsKey("REMOVE").ShouldBeFalse();
}

[Fact]
public async Task ProposeDeleteConsumer_on_nonexistent_consumer_is_safe()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRaftGroup = new RaftGroup { Name = "sg", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, streamRaftGroup, default);

    // Deleting an unknown consumer is a no-op rather than an error.
    await metaGroup.ProposeDeleteConsumerAsync("S", "MISSING_CONSUMER", default);
    metaGroup.GetStreamAssignment("S")!.Consumers.ShouldBeEmpty();
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — GetStreamAssignment
// ---------------------------------------------------------------

[Fact]
public void GetStreamAssignment_returns_null_for_missing_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Lookups for unknown streams return null rather than throwing.
    metaGroup.GetStreamAssignment("NOT_THERE").ShouldBeNull();
}

[Fact]
public async Task GetAllAssignments_returns_all_tracked_streams()
{
    var metaGroup = new JetStreamMetaGroup(5);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1", "n2", "n3"] };

    // Register five streams and confirm each produced an assignment.
    for (var n = 0; n < 5; n++)
    {
        await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = $"STREAM{n}" }, raftGroup, default);
    }

    metaGroup.GetAllAssignments().Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — basic selection
// Go: jetstream_cluster.go:7212 selectPeerGroup
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_selects_requested_number_of_peers()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
        new() { PeerId = "n2" },
        new() { PeerId = "n3" },
        new() { PeerId = "n4" },
        new() { PeerId = "n5" },
    };

    var selected = PlacementEngine.SelectPeerGroup("TEST", replicas: 3, candidates);

    // Exactly `replicas` peers are chosen; the group carries the name.
    selected.Peers.Count.ShouldBe(3);
    selected.Name.ShouldBe("TEST");
}

[Fact]
public void PlacementEngine_returns_raft_group_with_correct_name()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
        new() { PeerId = "n2" },
    };

    var selected = PlacementEngine.SelectPeerGroup("MY_GROUP", replicas: 1, candidates);

    selected.Name.ShouldBe("MY_GROUP");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — cluster affinity filtering
// Go: jetstream_cluster.go selectPeerGroup cluster filter
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_cluster_affinity_filters_to_matching_cluster()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Cluster = "east" },
        new() { PeerId = "n2", Cluster = "east" },
        new() { PeerId = "n3", Cluster = "west" },
        new() { PeerId = "n4", Cluster = "west" },
    };

    var selected = PlacementEngine.SelectPeerGroup(
        "G", replicas: 2, candidates, new PlacementPolicy { Cluster = "east" });

    // Only peers in the requested cluster are eligible.
    selected.Peers.Count.ShouldBe(2);
    selected.Peers.ShouldContain("n1");
    selected.Peers.ShouldContain("n2");
}

[Fact]
public void PlacementEngine_cluster_affinity_is_case_insensitive()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Cluster = "EAST" },
        new() { PeerId = "n2", Cluster = "west" },
    };

    var selected = PlacementEngine.SelectPeerGroup(
        "G", replicas: 1, candidates, new PlacementPolicy { Cluster = "east" });

    // "EAST" matches "east" — cluster names compare case-insensitively.
    selected.Peers.ShouldContain("n1");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — tag filtering
// Go: jetstream_cluster.go selectPeerGroup tag filter
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_tag_filter_selects_peers_with_all_required_tags()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd", "fast" } },
        new() { PeerId = "n2", Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd" } },
        new() { PeerId = "n3", Tags = new(StringComparer.OrdinalIgnoreCase) { "fast" } },
        new() { PeerId = "n4", Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd", "fast" } },
    };

    var requireBoth = new PlacementPolicy
    {
        Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd", "fast" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates, requireBoth);

    // Only n1 and n4 carry BOTH required tags.
    selected.Peers.Count.ShouldBe(2);
    selected.Peers.All(p => p is "n1" or "n4").ShouldBeTrue();
}

[Fact]
public void PlacementEngine_tag_filter_is_case_insensitive()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "SSD" } },
        new() { PeerId = "n2", Tags = new(StringComparer.OrdinalIgnoreCase) { "hdd" } },
    };

    var requireSsd = new PlacementPolicy
    {
        Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 1, candidates, requireSsd);

    // "SSD" satisfies a required "ssd" tag.
    selected.Peers.ShouldContain("n1");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — exclude tag filtering
// Go: jetstream_cluster.go selectPeerGroup exclude-tag logic
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_exclude_tag_filters_out_peers_with_those_tags()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "nvme" } },
        new() { PeerId = "n2", Tags = new(StringComparer.OrdinalIgnoreCase) { "spinning" } },
        new() { PeerId = "n3", Tags = new(StringComparer.OrdinalIgnoreCase) { "nvme" } },
        new() { PeerId = "n4" },
    };

    var excludeSpinning = new PlacementPolicy
    {
        ExcludeTags = new(StringComparer.OrdinalIgnoreCase) { "spinning" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates, excludeSpinning);

    // The excluded peer is skipped; the remaining three fill the group.
    selected.Peers.ShouldNotContain("n2");
    selected.Peers.Count.ShouldBe(3);
}

[Fact]
public void PlacementEngine_exclude_tag_is_case_insensitive()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "SLOW" } },
        new() { PeerId = "n2" },
    };

    var excludeSlow = new PlacementPolicy
    {
        ExcludeTags = new(StringComparer.OrdinalIgnoreCase) { "slow" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 1, candidates, excludeSlow);

    // "SLOW" matches the excluded "slow" tag, so n1 is filtered out.
    selected.Peers.ShouldNotContain("n1");
    selected.Peers.ShouldContain("n2");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — throws when not enough peers
// Go: jetstream_cluster.go selectPeerGroup insufficient peer error
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_throws_when_not_enough_peers()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
    };

    // A single candidate cannot satisfy a three-replica request.
    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates));
}

[Fact]
public void PlacementEngine_throws_when_filter_leaves_insufficient_peers()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Cluster = "east" },
        new() { PeerId = "n2", Cluster = "east" },
        new() { PeerId = "n3", Cluster = "west" },
    };

    // Only two peers survive the "east" filter; three were requested.
    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates, new PlacementPolicy { Cluster = "east" }));
}

[Fact]
public void PlacementEngine_throws_when_unavailable_peers_reduce_below_requested()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Available = true },
        new() { PeerId = "n2", Available = false },
        new() { PeerId = "n3", Available = false },
    };

    // Only one peer is online; two replicas were requested.
    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates));
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — sorts by available storage descending
// Go: jetstream_cluster.go selectPeerGroup storage sort
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_sorts_by_available_storage_descending()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "small", AvailableStorage = 100 },
        new() { PeerId = "large", AvailableStorage = 10_000 },
        new() { PeerId = "medium", AvailableStorage = 500 },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates);

    // The two roomiest peers win; the smallest is left out.
    selected.Peers.ShouldContain("large");
    selected.Peers.ShouldContain("medium");
    selected.Peers.ShouldNotContain("small");
}

[Fact]
public void PlacementEngine_unavailable_peers_are_excluded()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "online1", Available = true },
        new() { PeerId = "offline1", Available = false },
        new() { PeerId = "online2", Available = true },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates);

    // Offline peers never appear in the resulting group.
    selected.Peers.ShouldContain("online1");
    selected.Peers.ShouldContain("online2");
    selected.Peers.ShouldNotContain("offline1");
}

[Fact]
public void PlacementEngine_no_policy_selects_all_available_up_to_replicas()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
        new() { PeerId = "n2" },
        new() { PeerId = "n3" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates);

    // With no policy, every available peer is eligible.
    selected.Peers.Count.ShouldBe(3);
}
|
||||
}
|
||||
@@ -0,0 +1,251 @@
|
||||
using System.Collections.Generic;
using System.Text;
using NATS.Server.Configuration;
using NATS.Server.JetStream;
using NATS.Server.JetStream.Api;
using NATS.Server.JetStream.Cluster;
using NATS.Server.JetStream.Models;
using NATS.Server.JetStream.Publish;
using NATS.Server.JetStream.Validation;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Go parity tests for JetStream cluster formation and multi-replica streams.
/// Reference: golang/nats-server/server/jetstream_cluster_1_test.go
/// - TestJetStreamClusterConfig (line 43)
/// - TestJetStreamClusterMultiReplicaStreams (line 299)
/// </summary>
public class ClusterFormationParityTests
{
    /// <summary>
    /// JetStream cluster mode requires server_name. With JetStream and
    /// cluster configured but server_name missing, validation must fail.
    /// Go parity: TestJetStreamClusterConfig — check("requires `server_name`")
    /// </summary>
    [Fact]
    public void Cluster_config_requires_server_name_when_jetstream_and_cluster_enabled()
    {
        var natsOptions = new NatsOptions
        {
            ServerName = null,
            JetStream = new JetStreamOptions
            {
                StoreDir = "/tmp/js",
                MaxMemoryStore = 16L * 1024 * 1024 * 1024,
                MaxFileStore = 10L * 1024 * 1024 * 1024 * 1024,
            },
            Cluster = new ClusterOptions
            {
                Port = 6222,
            },
        };

        var validation = JetStreamConfigValidator.ValidateClusterConfig(natsOptions);

        validation.IsValid.ShouldBeFalse();
        validation.Message.ShouldContain("server_name");
    }

    /// <summary>
    /// JetStream cluster mode requires cluster.name. With JetStream,
    /// cluster, and server_name configured but cluster.name missing,
    /// validation must fail.
    /// Go parity: TestJetStreamClusterConfig — check("requires `cluster.name`")
    /// </summary>
    [Fact]
    public void Cluster_config_requires_cluster_name_when_jetstream_and_cluster_enabled()
    {
        var natsOptions = new NatsOptions
        {
            ServerName = "TEST",
            JetStream = new JetStreamOptions
            {
                StoreDir = "/tmp/js",
                MaxMemoryStore = 16L * 1024 * 1024 * 1024,
                MaxFileStore = 10L * 1024 * 1024 * 1024 * 1024,
            },
            Cluster = new ClusterOptions
            {
                Name = null,
                Port = 6222,
            },
        };

        var validation = JetStreamConfigValidator.ValidateClusterConfig(natsOptions);

        validation.IsValid.ShouldBeFalse();
        validation.Message.ShouldContain("cluster.name");
    }

    /// <summary>
    /// With both server_name and cluster.name set alongside JetStream and
    /// cluster config, validation passes.
    /// </summary>
    [Fact]
    public void Cluster_config_passes_when_server_name_and_cluster_name_are_set()
    {
        var natsOptions = new NatsOptions
        {
            ServerName = "TEST",
            JetStream = new JetStreamOptions
            {
                StoreDir = "/tmp/js",
            },
            Cluster = new ClusterOptions
            {
                Name = "JSC",
                Port = 6222,
            },
        };

        var validation = JetStreamConfigValidator.ValidateClusterConfig(natsOptions);

        validation.IsValid.ShouldBeTrue();
    }

    /// <summary>
    /// Creates a 3-replica stream in a simulated 5-node cluster, publishes
    /// 10 messages, verifies stream info and state, creates a durable
    /// consumer, and confirms the replica group matches the requested size.
    /// Go parity: TestJetStreamClusterMultiReplicaStreams (line 299)
    /// </summary>
    [Fact]
    public async Task Multi_replica_stream_accepts_publishes_and_consumer_tracks_pending()
    {
        await using var cluster = await ClusterFormationFixture.StartAsync(nodes: 5);

        // Create a 3-replica stream (Go: js.AddStream with Replicas=3)
        var created = await cluster.CreateStreamAsync("TEST", ["foo", "bar"], replicas: 3);
        created.Error.ShouldBeNull();
        created.StreamInfo.ShouldNotBeNull();
        created.StreamInfo!.Config.Name.ShouldBe("TEST");

        // Publish 10 messages (Go: js.Publish("foo", msg) x 10)
        const int toSend = 10;
        for (var n = 0; n < toSend; n++)
        {
            var ack = await cluster.PublishAsync("foo", $"Hello JS Clustering {n}");
            ack.Stream.ShouldBe("TEST");
            ack.Seq.ShouldBeGreaterThan((ulong)0);
        }

        // Stream info must reflect every published message.
        var infoResponse = await cluster.GetStreamInfoAsync("TEST");
        infoResponse.StreamInfo.ShouldNotBeNull();
        infoResponse.StreamInfo!.Config.Name.ShouldBe("TEST");
        infoResponse.StreamInfo.State.Messages.ShouldBe((ulong)toSend);

        // Create a durable consumer and confirm it was registered.
        // NOTE(review): the Go test additionally asserts NumPending equals
        // the published count; the fixture does not currently surface
        // pending counts — TODO extend when it does.
        var consumerResponse = await cluster.CreateConsumerAsync("TEST", "dlc");
        consumerResponse.Error.ShouldBeNull();
        consumerResponse.ConsumerInfo.ShouldNotBeNull();

        // The replica group must have been formed with the requested size.
        var replicaGroup = cluster.GetReplicaGroup("TEST");
        replicaGroup.ShouldNotBeNull();
        replicaGroup!.Nodes.Count.ShouldBe(3);
    }

    /// <summary>
    /// The asset placement planner caps replica count at the cluster size:
    /// asking for more replicas than nodes yields a placement list bounded
    /// by the node count.
    /// </summary>
    [Fact]
    public void Placement_planner_caps_replicas_at_cluster_size()
    {
        var planner = new AssetPlacementPlanner(nodes: 3);

        // 5 replicas requested on a 3-node cluster => only 3 placements.
        planner.PlanReplicas(replicas: 5).Count.ShouldBe(3);
    }
}
|
||||
|
||||
/// <summary>
/// Test fixture simulating a JetStream cluster with meta group, stream manager,
/// consumer manager, and replica groups. Duplicates helpers locally per project
/// conventions (no shared TestHelpers).
/// </summary>
internal sealed class ClusterFormationFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    // Replica count requested per stream (keyed by stream name, ordinal).
    // Lets GetReplicaGroup report the value actually requested instead of
    // the previously hard-coded 3.
    private readonly Dictionary<string, int> _requestedReplicas = new(StringComparer.Ordinal);

    private ClusterFormationFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>
    /// Wires up an in-memory meta group, stream/consumer managers, API
    /// router, and publisher for a cluster of <paramref name="nodes"/>.
    /// </summary>
    public static Task<ClusterFormationFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var streamManager = new StreamManager(meta);
        var consumerManager = new ConsumerManager(meta);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        var publisher = new JetStreamPublisher(streamManager);
        return Task.FromResult(new ClusterFormationFixture(meta, streamManager, consumerManager, router, publisher));
    }

    /// <summary>
    /// Creates (or updates) a stream and remembers the requested replica
    /// count so <see cref="GetReplicaGroup"/> can report it.
    /// </summary>
    public Task<JetStreamApiResponse> CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        _requestedReplicas[name] = replicas;
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });
        return Task.FromResult(response);
    }

    /// <summary>
    /// Publishes a UTF-8 payload; throws if no stream subject matches.
    /// </summary>
    public Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), out var ack))
            return Task.FromResult(ack);

        throw new InvalidOperationException($"Publish to '{subject}' did not match any stream.");
    }

    /// <summary>Returns stream info for the named stream.</summary>
    public Task<JetStreamApiResponse> GetStreamInfoAsync(string name)
    {
        var response = _streamManager.GetInfo(name);
        return Task.FromResult(response);
    }

    /// <summary>Creates (or updates) a durable consumer on a stream.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName)
    {
        var response = _consumerManager.CreateOrUpdate(stream, new ConsumerConfig
        {
            DurableName = durableName,
        });
        return Task.FromResult(response);
    }

    /// <summary>
    /// Returns a replica group mirroring the expected structure for the
    /// named stream, or null when the meta group does not track it.
    /// </summary>
    public StreamReplicaGroup? GetReplicaGroup(string streamName)
    {
        // The real replica group is managed internally by StreamManager;
        // we verify via the meta group state and rebuild the expected shape.
        var meta = _metaGroup.GetState();
        if (!meta.Streams.Contains(streamName))
            return null;

        // Use the replica count actually requested through CreateStreamAsync;
        // fall back to 3 (the historical default) for streams created by
        // other paths.
        var replicas = _requestedReplicas.TryGetValue(streamName, out var requested) ? requested : 3;
        return new StreamReplicaGroup(streamName, replicas);
    }

    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,522 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// golang/nats-server/server/jetstream_cluster_2_test.go
|
||||
// Covers: per-consumer RAFT groups, consumer assignment, ack state
|
||||
// replication, consumer failover, pull request forwarding, ephemeral
|
||||
// consumer lifecycle, delivery policy handling.
|
||||
using System.Collections.Concurrent;
|
||||
using System.Reflection;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Exercises per-consumer RAFT group behaviour in clustered mode: consumer
/// assignment, ack-state replication, consumer failover, pull request
/// forwarding, ephemeral consumer lifecycle, and delivery policy handling.
/// Go parity: jetstream_cluster_1_test.go and jetstream_cluster_2_test.go.
/// </summary>
public class ConsumerReplicaGroupTests
{
    // Go: TestJetStreamClusterConsumerState server/jetstream_cluster_1_test.go:700
    [Fact]
    public async Task Consumer_creation_registers_in_manager()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("REG", ["reg.>"], replicas: 3);

        var response = await fixture.CreateConsumerAsync("REG", "d1");

        response.ConsumerInfo.ShouldNotBeNull();
        response.ConsumerInfo!.Config.DurableName.ShouldBe("d1");
    }

    // Go: TestJetStreamClusterConsumerState server/jetstream_cluster_1_test.go:700
    [Fact]
    public async Task Consumer_pending_count_tracks_unacked_messages()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("PEND", ["pend.>"], replicas: 3);
        await fixture.CreateConsumerAsync("PEND", "acker", filterSubject: "pend.>", ackPolicy: AckPolicy.Explicit);

        for (var n = 0; n < 5; n++)
            await fixture.PublishAsync("pend.event", $"msg-{n}");

        var fetched = await fixture.FetchAsync("PEND", "acker", 3);

        fetched.Messages.Count.ShouldBe(3);
        fixture.GetPendingCount("PEND", "acker").ShouldBe(3);
    }

    // Go: TestJetStreamClusterFullConsumerState server/jetstream_cluster_1_test.go:795
    [Fact]
    public async Task AckAll_reduces_pending_count()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("ACKRED", ["ar.>"], replicas: 3);
        await fixture.CreateConsumerAsync("ACKRED", "acker", filterSubject: "ar.>", ackPolicy: AckPolicy.All);

        for (var n = 0; n < 10; n++)
            await fixture.PublishAsync("ar.event", $"msg-{n}");

        await fixture.FetchAsync("ACKRED", "acker", 10);
        fixture.AckAll("ACKRED", "acker", 7);

        // AckPolicy.All acks everything up to sequence 7, leaving 8..10 pending.
        fixture.GetPendingCount("ACKRED", "acker").ShouldBe(3);
    }

    // Go: TestJetStreamClusterFullConsumerState server/jetstream_cluster_1_test.go:795
    [Fact]
    public async Task AckAll_to_last_seq_clears_all_pending()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("ACKCLEAR", ["ac.>"], replicas: 3);
        await fixture.CreateConsumerAsync("ACKCLEAR", "acker", filterSubject: "ac.>", ackPolicy: AckPolicy.All);

        for (var n = 0; n < 5; n++)
            await fixture.PublishAsync("ac.event", $"msg-{n}");

        await fixture.FetchAsync("ACKCLEAR", "acker", 5);
        fixture.AckAll("ACKCLEAR", "acker", 5);

        fixture.GetPendingCount("ACKCLEAR", "acker").ShouldBe(0);
    }

    // Go: TestJetStreamClusterConsumerRedeliveredInfo server/jetstream_cluster_1_test.go:659
    [Fact]
    public async Task Consumer_redelivery_sets_redelivered_flag()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("REDEL", ["rd.>"], replicas: 3);
        await fixture.CreateConsumerAsync("REDEL", "rdc", filterSubject: "rd.>",
            ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 5);

        await fixture.PublishAsync("rd.event", "will-redeliver");

        var first = await fixture.FetchAsync("REDEL", "rdc", 1);
        first.Messages.Count.ShouldBe(1);
        first.Messages[0].Redelivered.ShouldBeFalse();

        // Let the 1ms ack wait expire so the unacked message becomes eligible again.
        await Task.Delay(50);

        var second = await fixture.FetchAsync("REDEL", "rdc", 1);
        second.Messages.Count.ShouldBe(1);
        second.Messages[0].Redelivered.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterRestoreSingleConsumer server/jetstream_cluster_1_test.go:1028
    [Fact]
    public async Task Consumer_survives_stream_leader_stepdown()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("CSURV", ["csv.>"], replicas: 3);
        await fixture.CreateConsumerAsync("CSURV", "durable1", filterSubject: "csv.>");

        for (var n = 0; n < 10; n++)
            await fixture.PublishAsync("csv.event", $"msg-{n}");

        var beforeStepdown = await fixture.FetchAsync("CSURV", "durable1", 5);
        beforeStepdown.Messages.Count.ShouldBe(5);

        await fixture.StepDownStreamLeaderAsync("CSURV");

        // Durable consumer state must survive the leader change.
        var afterStepdown = await fixture.FetchAsync("CSURV", "durable1", 5);
        afterStepdown.Messages.Count.ShouldBe(5);
    }

    // Go: TestJetStreamClusterPullConsumerLeakedSubs server/jetstream_cluster_2_test.go:2239
    [Fact]
    public async Task Pull_consumer_fetch_returns_correct_batch()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("PULL", ["pull.>"], replicas: 3);
        await fixture.CreateConsumerAsync("PULL", "puller", filterSubject: "pull.>");

        for (var n = 0; n < 20; n++)
            await fixture.PublishAsync("pull.event", $"msg-{n}");

        var fetched = await fixture.FetchAsync("PULL", "puller", 5);

        fetched.Messages.Count.ShouldBe(5);
    }

    // Go: TestJetStreamClusterConsumerLastActiveReporting server/jetstream_cluster_2_test.go:2371
    [Fact]
    public async Task Consumer_info_returns_correct_config()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("INFO", ["ci.>"], replicas: 3);
        await fixture.CreateConsumerAsync("INFO", "info_dur", filterSubject: "ci.>", ackPolicy: AckPolicy.Explicit);

        var info = await fixture.GetConsumerInfoAsync("INFO", "info_dur");

        info.Config.DurableName.ShouldBe("info_dur");
        info.Config.AckPolicy.ShouldBe(AckPolicy.Explicit);
    }

    // Go: TestJetStreamClusterEphemeralConsumerNoImmediateInterest server/jetstream_cluster_1_test.go:2481
    [Fact]
    public async Task Ephemeral_consumer_creation_succeeds()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("EPHEM", ["eph.>"], replicas: 3);

        var response = await fixture.CreateConsumerAsync("EPHEM", null, ephemeral: true);

        response.ConsumerInfo.ShouldNotBeNull();
        // Server assigns a generated name when no durable name is supplied.
        response.ConsumerInfo!.Config.DurableName.ShouldNotBeNullOrEmpty();
    }

    // Go: TestJetStreamClusterEphemeralConsumersNotReplicated server/jetstream_cluster_1_test.go:2599
    [Fact]
    public async Task Ephemeral_consumers_get_unique_names()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("UNIQ", ["u.>"], replicas: 3);

        var first = await fixture.CreateConsumerAsync("UNIQ", null, ephemeral: true);
        var second = await fixture.CreateConsumerAsync("UNIQ", null, ephemeral: true);

        first.ConsumerInfo!.Config.DurableName
            .ShouldNotBe(second.ConsumerInfo!.Config.DurableName);
    }

    // Go: TestJetStreamClusterCreateConcurrentDurableConsumers server/jetstream_cluster_2_test.go:1572
    [Fact]
    public async Task Durable_consumer_create_is_idempotent()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("IDEMP", ["id.>"], replicas: 3);

        var first = await fixture.CreateConsumerAsync("IDEMP", "same");
        var second = await fixture.CreateConsumerAsync("IDEMP", "same");

        first.ConsumerInfo!.Config.DurableName.ShouldBe("same");
        second.ConsumerInfo!.Config.DurableName.ShouldBe("same");
    }

    // Go: TestJetStreamClusterMaxConsumers server/jetstream_cluster_2_test.go:1978
    [Fact]
    public async Task Consumer_delete_succeeds()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("DEL", ["del.>"], replicas: 3);
        await fixture.CreateConsumerAsync("DEL", "to_delete");

        var response = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}DEL.to_delete", "{}");

        response.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterConsumerPause server/jetstream_cluster_1_test.go:4203
    [Fact]
    public async Task Consumer_pause_and_resume_via_api()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("PAUSE", ["pause.>"], replicas: 3);
        await fixture.CreateConsumerAsync("PAUSE", "pausable");

        var pauseResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":true}""");
        pauseResponse.Success.ShouldBeTrue();

        var resumeResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":false}""");
        resumeResponse.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterConsumerResetPendingDeliveriesOnMaxAckPendingUpdate
    //     server/jetstream_cluster_1_test.go:8696
    [Fact]
    public async Task Consumer_reset_resets_sequence_to_beginning()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("RESET", ["reset.>"], replicas: 3);
        await fixture.CreateConsumerAsync("RESET", "resettable", filterSubject: "reset.>");

        for (var n = 0; n < 5; n++)
            await fixture.PublishAsync("reset.event", $"msg-{n}");

        // Advance the consumer past the start of the stream.
        await fixture.FetchAsync("RESET", "resettable", 3);

        var resetResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerReset}RESET.resettable", "{}");
        resetResponse.Success.ShouldBeTrue();

        // After the reset, delivery restarts from sequence 1.
        var fetched = await fixture.FetchAsync("RESET", "resettable", 5);
        fetched.Messages.Count.ShouldBe(5);
        fetched.Messages[0].Sequence.ShouldBe(1UL);
    }

    // Go: TestJetStreamClusterFlowControlRequiresHeartbeats server/jetstream_cluster_2_test.go:2712
    [Fact]
    public async Task Consumer_with_filter_subject_delivers_matching_only()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("FILT", ["filt.>"], replicas: 3);
        await fixture.CreateConsumerAsync("FILT", "filtered", filterSubject: "filt.alpha");

        await fixture.PublishAsync("filt.alpha", "match");
        await fixture.PublishAsync("filt.beta", "no-match");
        await fixture.PublishAsync("filt.alpha", "match2");

        var fetched = await fixture.FetchAsync("FILT", "filtered", 10);

        fetched.Messages.Count.ShouldBe(2);
    }

    // Go: TestJetStreamClusterConsumerDeliverPolicy server/jetstream_cluster_2_test.go:550
    [Fact]
    public async Task DeliverPolicy_Last_starts_at_last_message()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("DLAST", ["dl.>"], replicas: 3);

        for (var n = 0; n < 5; n++)
            await fixture.PublishAsync("dl.event", $"msg-{n}");

        await fixture.CreateConsumerAsync("DLAST", "last_c", filterSubject: "dl.>",
            deliverPolicy: DeliverPolicy.Last);

        var fetched = await fixture.FetchAsync("DLAST", "last_c", 10);

        fetched.Messages.Count.ShouldBe(1);
        fetched.Messages[0].Sequence.ShouldBe(5UL);
    }

    // Go: TestJetStreamClusterConsumerDeliverPolicy server/jetstream_cluster_2_test.go:550
    [Fact]
    public async Task DeliverPolicy_New_skips_existing_messages()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("DNEW", ["dn.>"], replicas: 3);

        for (var n = 0; n < 5; n++)
            await fixture.PublishAsync("dn.event", $"msg-{n}");

        await fixture.CreateConsumerAsync("DNEW", "new_c", filterSubject: "dn.>",
            deliverPolicy: DeliverPolicy.New);

        // All five messages predate the consumer, so nothing is delivered.
        var fetched = await fixture.FetchAsync("DNEW", "new_c", 10);

        fetched.Messages.Count.ShouldBe(0);
    }

    // Go: TestJetStreamClusterConsumerDeliverPolicy server/jetstream_cluster_2_test.go:550
    [Fact]
    public async Task DeliverPolicy_ByStartSequence_starts_at_given_seq()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("DSTART", ["ds.>"], replicas: 3);

        for (var n = 0; n < 10; n++)
            await fixture.PublishAsync("ds.event", $"msg-{n}");

        await fixture.CreateConsumerAsync("DSTART", "start_c", filterSubject: "ds.>",
            deliverPolicy: DeliverPolicy.ByStartSequence, optStartSeq: 7);

        // Sequences 7..10 inclusive: four messages, starting at 7.
        var fetched = await fixture.FetchAsync("DSTART", "start_c", 10);

        fetched.Messages.Count.ShouldBe(4);
        fetched.Messages[0].Sequence.ShouldBe(7UL);
    }

    // Go: TestJetStreamClusterConsumerUnpin server/jetstream_cluster_1_test.go:4109
    [Fact]
    public async Task Consumer_unpin_api_returns_success()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("UNPIN", ["unpin.>"], replicas: 3);
        await fixture.CreateConsumerAsync("UNPIN", "pinned");

        var response = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerUnpin}UNPIN.pinned", "{}");

        response.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterConsumerLeaderStepdown server/jetstream_cluster_2_test.go:1400
    [Fact]
    public async Task Consumer_leader_stepdown_api_returns_success()
    {
        await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
        await fixture.CreateStreamAsync("CLS", ["cls.>"], replicas: 3);
        await fixture.CreateConsumerAsync("CLS", "dur1");

        var response = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerLeaderStepdown}CLS.dur1", "{}");

        response.Success.ShouldBeTrue();
    }
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for consumer replica group tests. Wires a
/// <c>JetStreamMetaGroup</c>, <c>StreamManager</c>, <c>ConsumerManager</c>,
/// API router, and publisher together in-process, with no real servers or
/// network. Each test creates its own fixture via <see cref="StartAsync"/>.
/// </summary>
internal sealed class ConsumerReplicaFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private ConsumerReplicaFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>
    /// Builds the full in-process JetStream component graph for an
    /// <paramref name="nodes"/>-node logical cluster. Construction is
    /// synchronous; the Task-returning shape matches the async test style.
    /// </summary>
    public static Task<ConsumerReplicaFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var consumerManager = new ConsumerManager(meta);
        var streamManager = new StreamManager(meta, consumerManager: consumerManager);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        var publisher = new JetStreamPublisher(streamManager);
        return Task.FromResult(new ConsumerReplicaFixture(meta, streamManager, consumerManager, router, publisher));
    }

    /// <summary>
    /// Creates (or updates) a stream with the given subjects and replica count.
    /// Throws if the stream manager reports an API-level error.
    /// </summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });
        if (response.Error is not null)
            throw new InvalidOperationException(response.Error.Description);
        return Task.CompletedTask;
    }

    /// <summary>
    /// Creates (or updates) a consumer on <paramref name="stream"/>.
    /// A null/empty <paramref name="durableName"/> combined with
    /// <paramref name="ephemeral"/> requests a server-generated name.
    /// Note: <paramref name="maxDeliver"/> defaults to 1, so redelivery tests
    /// must pass a larger value explicitly.
    /// </summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(
        string stream,
        string? durableName,
        string? filterSubject = null,
        AckPolicy ackPolicy = AckPolicy.None,
        int ackWaitMs = 30_000,
        int maxDeliver = 1,
        bool ephemeral = false,
        DeliverPolicy deliverPolicy = DeliverPolicy.All,
        ulong optStartSeq = 0)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName ?? string.Empty,
            AckPolicy = ackPolicy,
            AckWaitMs = ackWaitMs,
            MaxDeliver = maxDeliver,
            Ephemeral = ephemeral,
            DeliverPolicy = deliverPolicy,
            OptStartSeq = optStartSeq,
        };
        if (!string.IsNullOrWhiteSpace(filterSubject))
            config.FilterSubject = filterSubject;

        return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
    }

    /// <summary>
    /// Publishes <paramref name="payload"/> to <paramref name="subject"/>,
    /// then fans the stored message out to interested consumers, mirroring
    /// the server's publish pipeline. Throws when no stream's subject set
    /// matches the publish subject.
    /// </summary>
    public async Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
        {
            if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
            {
                // Await the load instead of blocking on it. The previous
                // implementation used LoadAsync(...).GetAwaiter().GetResult(),
                // which is sync-over-async and risks thread-pool starvation.
                var stored = await handle.Store.LoadAsync(ack.Seq, default);
                if (stored != null)
                    _consumerManager.OnPublished(ack.Stream, stored);
            }

            return ack;
        }

        throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");
    }

    /// <summary>Pull-fetches up to <paramref name="batch"/> messages for the durable consumer.</summary>
    public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();

    /// <summary>Acks every delivered message up to and including <paramref name="sequence"/>.</summary>
    public void AckAll(string stream, string durableName, ulong sequence)
        => _consumerManager.AckAll(stream, durableName, sequence);

    /// <summary>Returns the number of delivered-but-unacked messages for the consumer.</summary>
    public int GetPendingCount(string stream, string durableName)
        => _consumerManager.GetPendingCount(stream, durableName);

    /// <summary>
    /// Returns consumer info for <paramref name="durableName"/>, throwing when
    /// the consumer does not exist (tests want a hard failure, not a null).
    /// </summary>
    public Task<JetStreamConsumerInfo> GetConsumerInfoAsync(string stream, string durableName)
    {
        var resp = _consumerManager.GetInfo(stream, durableName);
        if (resp.ConsumerInfo == null)
            throw new InvalidOperationException("Consumer not found.");
        return Task.FromResult(resp.ConsumerInfo);
    }

    /// <summary>Forces the stream's RAFT leader to step down.</summary>
    public Task StepDownStreamLeaderAsync(string stream)
        => _streamManager.StepDownStreamLeaderAsync(stream, default);

    /// <summary>Routes a raw JetStream API request through the in-process router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_router.Route(subject, Encoding.UTF8.GetBytes(payload)));

    /// <summary>No unmanaged resources are held; disposal is a no-op.</summary>
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,496 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:2474-4261
|
||||
// Covers: entry application pipeline for JetStreamMetaGroup and StreamReplicaGroup —
|
||||
// meta entry dispatch (StreamCreate, StreamDelete, ConsumerCreate, ConsumerDelete,
|
||||
// PeerAdd, PeerRemove), stream-level message ops (Store, Remove, Purge),
|
||||
// consumer-level ops (Ack, Nak, Deliver, Term, Progress), unknown-entry handling.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for the entry application pipeline in JetStreamMetaGroup and StreamReplicaGroup.
|
||||
/// Go reference: jetstream_cluster.go:2474-4261 processStreamEntries / processConsumerEntries.
|
||||
/// </summary>
|
||||
public class EntryApplicationTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyEntry — StreamCreate (existing behaviour verification)
|
||||
// Go reference: jetstream_cluster.go processStreamAssignment apply
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_StreamCreate_creates_stream()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go:4541 processStreamAssignment — apply creates stream.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var group = new RaftGroup { Name = "orders-group", Peers = ["p1", "p2", "p3"] };
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS", group: group);
|
||||
|
||||
meta.StreamCount.ShouldBe(1);
|
||||
meta.GetStreamAssignment("ORDERS").ShouldNotBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_StreamDelete_removes_stream()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go processStreamRemoval apply.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS");
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.StreamDelete, "ORDERS");
|
||||
|
||||
meta.StreamCount.ShouldBe(0);
|
||||
meta.GetStreamAssignment("ORDERS").ShouldBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_ConsumerCreate_creates_consumer_on_stream()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go:5300 processConsumerAssignment apply.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS");
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.ConsumerCreate, "push-consumer", streamName: "ORDERS");
|
||||
|
||||
meta.ConsumerCount.ShouldBe(1);
|
||||
meta.GetConsumerAssignment("ORDERS", "push-consumer").ShouldNotBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_ConsumerDelete_removes_consumer()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go processConsumerRemoval apply.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS");
|
||||
meta.ApplyEntry(MetaEntryType.ConsumerCreate, "push-consumer", streamName: "ORDERS");
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.ConsumerDelete, "push-consumer", streamName: "ORDERS");
|
||||
|
||||
meta.ConsumerCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_ConsumerCreate_without_streamName_throws()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
Should.Throw<ArgumentNullException>(() =>
|
||||
meta.ApplyEntry(MetaEntryType.ConsumerCreate, "consumer"));
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyEntry — PeerAdd (new entry type dispatch)
|
||||
// Go reference: jetstream_cluster.go:2290 processAddPeer
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_PeerAdd_triggers_peer_processing()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go:2290 processAddPeer — peer registered on apply.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
// Should not throw and should register the peer.
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-42");
|
||||
|
||||
meta.GetKnownPeers().ShouldContain("peer-42");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_PeerAdd_registers_multiple_peers()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-A");
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-B");
|
||||
|
||||
meta.GetKnownPeers().ShouldContain("peer-A");
|
||||
meta.GetKnownPeers().ShouldContain("peer-B");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_PeerAdd_is_idempotent_for_same_peer()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-X");
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-X");
|
||||
|
||||
// HashSet deduplicates — exactly one entry.
|
||||
meta.GetKnownPeers().Count(p => p == "peer-X").ShouldBe(1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyEntry — PeerRemove (new entry type dispatch)
|
||||
// Go reference: jetstream_cluster.go:2342 processRemovePeer
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_PeerRemove_triggers_peer_processing()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go:2342 processRemovePeer — peer removed on apply.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-42");
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.PeerRemove, "peer-42");
|
||||
|
||||
meta.GetKnownPeers().ShouldNotContain("peer-42");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_PeerRemove_triggers_stream_reassignment()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go:2342 processRemovePeer — affected streams identified.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var group = new RaftGroup { Name = "stream-group", Peers = ["peer-1", "peer-2", "peer-3"] };
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "EVENTS", group: group);
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-1");
|
||||
meta.ApplyEntry(MetaEntryType.PeerAdd, "peer-replacement");
|
||||
|
||||
// Removing peer-1: the stream that had peer-1 should be reassigned.
|
||||
meta.ApplyEntry(MetaEntryType.PeerRemove, "peer-1");
|
||||
|
||||
// peer-1 should no longer be in the known peers set.
|
||||
meta.GetKnownPeers().ShouldNotContain("peer-1");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyStreamMsgOp — Store
|
||||
// Go reference: jetstream_cluster.go processStreamMsg store
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ApplyStreamMsgOp_Store_increments_message_count()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go:2474 processStreamEntries — store op increments Msgs.
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
var before = srg.MessageCount;
|
||||
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Store, index: 1);
|
||||
|
||||
srg.MessageCount.ShouldBe(before + 1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyStreamMsgOp_Store_advances_last_sequence()
|
||||
{
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Store, index: 42);
|
||||
|
||||
srg.LastSequence.ShouldBe(42L);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyStreamMsgOp_Store_multiple_times_accumulates_count()
|
||||
{
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Store, index: 1);
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Store, index: 2);
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Store, index: 3);
|
||||
|
||||
srg.MessageCount.ShouldBe(3L);
|
||||
srg.LastSequence.ShouldBe(3L);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyStreamMsgOp — Remove
|
||||
// Go reference: jetstream_cluster.go processStreamMsg remove
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ApplyStreamMsgOp_Remove_decrements_message_count()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go:3100 processStreamEntries — remove op decrements Msgs.
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Store, index: 1);
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Store, index: 2);
|
||||
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Remove);
|
||||
|
||||
srg.MessageCount.ShouldBe(1L);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyStreamMsgOp_Remove_does_not_go_below_zero()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — safe guard on remove when already empty.
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
|
||||
// Remove from empty — should not underflow.
|
||||
srg.ApplyStreamMsgOp(StreamMsgOp.Remove);
|
||||
|
||||
srg.MessageCount.ShouldBe(0L);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyStreamMsgOp — Purge
|
||||
// Go reference: jetstream_cluster.go processStreamMsg purge
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ApplyStreamMsgOp_Purge_clears_messages()
{
    // Go ref: jetstream_cluster.go:3200 processStreamEntries — purge resets state.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    for (var seq = 1; seq <= 3; seq++)
        replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: seq);

    replica.ApplyStreamMsgOp(StreamMsgOp.Purge);

    replica.MessageCount.ShouldBe(0L);
    replica.LastSequence.ShouldBe(0L);
}
|
||||
|
||||
[Fact]
public void ApplyStreamMsgOp_Purge_then_Store_increments_from_zero()
{
    // After a purge, the next store counts from an empty stream again.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 5);
    replica.ApplyStreamMsgOp(StreamMsgOp.Purge);

    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 6);

    replica.MessageCount.ShouldBe(1L);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyConsumerEntries — Ack
|
||||
// Go reference: jetstream_cluster.go processConsumerEntries ack
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ApplyConsumerEntries_Ack_processes_acknowledgment()
{
    // Go ref: jetstream_cluster.go:3500 processConsumerEntries — ack increments ack floor.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Ack);

    replica.AckCount.ShouldBe(1L);
}
|
||||
|
||||
[Fact]
public void ApplyConsumerEntries_Ack_accumulates_across_multiple_calls()
{
    // The ack counter is cumulative across entries.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    for (var i = 0; i < 3; i++)
        replica.ApplyConsumerEntry(ConsumerOp.Ack);

    replica.AckCount.ShouldBe(3L);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyConsumerEntries — Nak
|
||||
// Go reference: jetstream_cluster.go processConsumerEntries nak
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ApplyConsumerEntries_Nak_processes_negative_acknowledgment()
{
    // Go ref: jetstream_cluster.go:3520 processConsumerEntries — nak schedules redelivery.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Nak);

    replica.NakCount.ShouldBe(1L);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyConsumerEntries — Deliver
|
||||
// Go reference: jetstream_cluster.go processConsumerEntries deliver
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ApplyConsumerEntries_Deliver_processes_delivery()
{
    // Go ref: jetstream_cluster.go:3540 processConsumerEntries — deliver advances dseq.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Deliver);

    replica.DeliverCount.ShouldBe(1L);
}
|
||||
|
||||
[Fact]
public void ApplyConsumerEntries_Term_does_not_throw()
{
    // Term is a valid op with no dedicated counter in this model. The original
    // test had no assertion; make the no-throw expectation explicit so a thrown
    // exception is reported as an assertion failure rather than a bare crash.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    Should.NotThrow(() => replica.ApplyConsumerEntry(ConsumerOp.Term));
}
|
||||
|
||||
[Fact]
public void ApplyConsumerEntries_Progress_does_not_throw()
{
    // Progress has no counter either; assert explicitly that applying it does
    // not throw (the original test contained no assertion at all).
    var replica = new StreamReplicaGroup("ORDERS", 1);

    Should.NotThrow(() => replica.ApplyConsumerEntry(ConsumerOp.Progress));
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyCommittedEntriesAsync — smsg: dispatch
|
||||
// Go reference: jetstream_cluster.go processStreamEntries command routing
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task ApplyCommittedEntriesAsync_smsg_store_increments_count()
{
    // A committed "smsg:store" entry is routed to the store handler.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.MessageCount.ShouldBe(1L);
}
|
||||
|
||||
[Fact]
public async Task ApplyCommittedEntriesAsync_smsg_purge_clears_messages()
{
    // Store two messages, then purge via a committed entry.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.ApplyCommittedEntriesAsync(default);

    await replica.Leader.ProposeAsync("smsg:purge", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.MessageCount.ShouldBe(0L);
}
|
||||
|
||||
[Fact]
public async Task ApplyCommittedEntriesAsync_smsg_remove_decrements_count()
{
    // Store two messages, then remove one via a committed entry.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.ApplyCommittedEntriesAsync(default);

    await replica.Leader.ProposeAsync("smsg:remove", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.MessageCount.ShouldBe(1L);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyCommittedEntriesAsync — centry: dispatch
|
||||
// Go reference: jetstream_cluster.go processConsumerEntries command routing
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task ApplyCommittedEntriesAsync_centry_ack_increments_ack_count()
{
    // A committed "centry:ack" entry is routed to the consumer ack handler.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    await replica.Leader.ProposeAsync("centry:ack", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.AckCount.ShouldBe(1L);
}
|
||||
|
||||
[Fact]
public async Task ApplyCommittedEntriesAsync_centry_nak_increments_nak_count()
{
    // A committed "centry:nak" entry is routed to the consumer nak handler.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    await replica.Leader.ProposeAsync("centry:nak", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.NakCount.ShouldBe(1L);
}
|
||||
|
||||
[Fact]
public async Task ApplyCommittedEntriesAsync_centry_deliver_increments_deliver_count()
{
    // A committed "centry:deliver" entry is routed to the consumer deliver handler.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    await replica.Leader.ProposeAsync("centry:deliver", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.DeliverCount.ShouldBe(1L);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Unknown entry type — logged and skipped
|
||||
// Go reference: jetstream_cluster.go default case in apply loop
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Unknown_entry_type_logged_and_skipped()
{
    // Go ref: jetstream_cluster.go processStreamEntries — unknown ops are skipped,
    // not fatal. State is untouched and the offending command is recorded.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("smsg:unknown-op", default);

    await replica.ApplyCommittedEntriesAsync(default);

    replica.MessageCount.ShouldBe(0L);
    replica.LastUnknownCommand.ShouldBe("smsg:unknown-op");
}
|
||||
|
||||
[Fact]
public async Task Unknown_centry_op_logged_and_skipped()
{
    // An unrecognised consumer op leaves counters alone and is recorded.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("centry:bogus", default);

    await replica.ApplyCommittedEntriesAsync(default);

    replica.AckCount.ShouldBe(0L);
    replica.LastUnknownCommand.ShouldBe("centry:bogus");
}
|
||||
|
||||
[Fact]
public async Task Completely_unknown_prefix_is_logged_and_skipped()
{
    // An entirely foreign command prefix is also recorded and skipped.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("xyzzy:something", default);

    await replica.ApplyCommittedEntriesAsync(default);

    replica.LastUnknownCommand.ShouldBe("xyzzy:something");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// MetaEntryType enum values exist
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void MetaEntryType_enum_includes_PeerAdd_and_PeerRemove()
{
    // Compile-time presence check for the peer-management entry types.
    _ = new[] { MetaEntryType.PeerAdd, MetaEntryType.PeerRemove };
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// StreamMsgOp and ConsumerOp enum values exist
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void StreamMsgOp_enum_has_expected_values()
{
    // Compile-time presence check for the stream message operations.
    _ = new[] { StreamMsgOp.Store, StreamMsgOp.Remove, StreamMsgOp.Purge };
}
|
||||
|
||||
[Fact]
public void ConsumerOp_enum_has_expected_values()
{
    // Compile-time presence check for the consumer operations.
    _ = new[]
    {
        ConsumerOp.Ack,
        ConsumerOp.Nak,
        ConsumerOp.Deliver,
        ConsumerOp.Term,
        ConsumerOp.Progress,
    };
}
|
||||
}
|
||||
@@ -0,0 +1,169 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for validated stream/consumer assignment processing.
/// Go reference: jetstream_cluster.go:4541-5925.
/// </summary>
public class JetStreamAssignmentProcessingTests
{
    [Fact]
    public void ProcessStreamAssignment_validates_config()
    {
        // A well-formed assignment (name, raft group, config) is accepted and tracked.
        var metaGroup = new JetStreamMetaGroup(3);

        var accepted = metaGroup.ProcessStreamAssignment(new StreamAssignment
        {
            StreamName = "valid-stream",
            Group = new RaftGroup { Name = "rg-1", Peers = ["n1", "n2", "n3"] },
            ConfigJson = """{"subjects":["test.>"]}""",
        });

        accepted.ShouldBeTrue();
        metaGroup.StreamCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessStreamAssignment_rejects_empty_name()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessStreamAssignment(MakeStream("", "rg-1")).ShouldBeFalse();

        metaGroup.StreamCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_applies_config_change()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(MakeStream("updatable", "rg-u", """{"subjects":["old.>"]}"""));

        metaGroup.ProcessUpdateStreamAssignment(
            MakeStream("updatable", "rg-u", """{"subjects":["new.>"]}""")).ShouldBeTrue();

        metaGroup.GetStreamAssignment("updatable")!.ConfigJson.ShouldContain("new.>");
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_returns_false_for_nonexistent()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessUpdateStreamAssignment(MakeStream("ghost", "rg-g")).ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerAssignment_requires_existing_stream()
    {
        // A consumer cannot be assigned to a stream that was never assigned.
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessConsumerAssignment(
            MakeConsumer("nonexistent-stream", "orphan-consumer", "rg-c")).ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerAssignment_succeeds_with_existing_stream()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(MakeStream("s1", "rg-s1"));

        metaGroup.ProcessConsumerAssignment(MakeConsumer("s1", "c1", "rg-c1")).ShouldBeTrue();

        metaGroup.ConsumerCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessStreamRemoval_cascades_to_consumers()
    {
        // Removing a stream removes its consumers as well.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(MakeStream("cascade", "rg-cas"));
        metaGroup.ProcessConsumerAssignment(MakeConsumer("cascade", "c1", "rg-c1"));

        metaGroup.ProcessStreamRemoval("cascade").ShouldBeTrue();

        metaGroup.StreamCount.ShouldBe(0);
        metaGroup.ConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessStreamRemoval_returns_false_for_nonexistent()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessStreamRemoval("nope").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_returns_false_for_nonexistent_stream()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessConsumerRemoval("ghost", "c1").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_returns_false_for_nonexistent_consumer()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(MakeStream("s1", "rg-s1"));

        metaGroup.ProcessConsumerRemoval("s1", "nope").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_succeeds()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(MakeStream("s1", "rg-s1"));
        metaGroup.ProcessConsumerAssignment(MakeConsumer("s1", "c1", "rg-c1", "n1", "n2"));

        metaGroup.ProcessConsumerRemoval("s1", "c1").ShouldBeTrue();

        metaGroup.ConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_preserves_consumers()
    {
        // Updating the stream config must not drop consumers attached to it.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(MakeStream("s1", "rg-s1", """{"subjects":["old"]}"""));
        metaGroup.ProcessConsumerAssignment(MakeConsumer("s1", "c1", "rg-c1", "n1", "n2"));

        metaGroup.ProcessUpdateStreamAssignment(
            MakeStream("s1", "rg-s1", """{"subjects":["new"]}""")).ShouldBeTrue();

        metaGroup.ConsumerCount.ShouldBe(1);
        metaGroup.GetConsumerAssignment("s1", "c1").ShouldNotBeNull();
    }

    // StreamName is `required`, so every assignment goes through these helpers.
    private static StreamAssignment MakeStream(string name, string groupName, string config = "{}")
        => new()
        {
            StreamName = name,
            Group = new RaftGroup { Name = groupName, Peers = ["n1", "n2", "n3"] },
            ConfigJson = config,
        };

    // Builds a consumer assignment; defaults to a three-peer raft group when no peers are given.
    private static ConsumerAssignment MakeConsumer(string stream, string name, string groupName, params string[] peers)
    {
        if (peers.Length == 0)
            peers = ["n1", "n2", "n3"];

        return new()
        {
            ConsumerName = name,
            StreamName = stream,
            Group = new RaftGroup { Name = groupName, Peers = [.. peers] },
        };
    }
}
|
||||
@@ -0,0 +1,644 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: consumer creation, ack propagation, consumer state,
|
||||
// ephemeral consumers, consumer scaling, pull/push delivery,
|
||||
// redelivery, ack policies, filter subjects.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering clustered JetStream consumer creation, leader election,
|
||||
/// ack propagation, delivery policies, ephemeral consumers, and scaling.
|
||||
/// Ported from Go jetstream_cluster_1_test.go and jetstream_cluster_2_test.go.
|
||||
/// </summary>
|
||||
public class JetStreamClusterConsumerTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerState server/jetstream_cluster_1_test.go:700
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_state_tracks_pending_after_fetch()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("CSTATE", ["cs.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CSTATE", "track", filterSubject: "cs.>", ackPolicy: AckPolicy.Explicit);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("cs.event", $"msg-{n}");

    var fetched = await cluster.FetchAsync("CSTATE", "track", 3);

    // Three delivered-but-unacked messages must show up as pending.
    fetched.Messages.Count.ShouldBe(3);
    cluster.GetPendingCount("CSTATE", "track").ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerRedeliveredInfo server/jetstream_cluster_1_test.go:659
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_redelivery_marks_messages_as_redelivered()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("REDELIV", ["rd.>"], replicas: 3);
    await cluster.CreateConsumerAsync("REDELIV", "rdc", filterSubject: "rd.>",
        ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 5);

    await cluster.PublishAsync("rd.event", "will-redeliver");

    // Initial delivery is not flagged as a redelivery.
    var first = await cluster.FetchAsync("REDELIV", "rdc", 1);
    first.Messages.Count.ShouldBe(1);
    first.Messages[0].Redelivered.ShouldBeFalse();

    // Let the 1ms ack wait elapse so the message becomes eligible again.
    await Task.Delay(50);

    // The unacked message comes back marked as redelivered.
    var second = await cluster.FetchAsync("REDELIV", "rdc", 1);
    second.Messages.Count.ShouldBe(1);
    second.Messages[0].Redelivered.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterFullConsumerState server/jetstream_cluster_1_test.go:795
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Full_consumer_state_reflects_ack_floor_after_ack_all()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("FULLCS", ["fcs.>"], replicas: 3);
    await cluster.CreateConsumerAsync("FULLCS", "full", filterSubject: "fcs.>", ackPolicy: AckPolicy.All);

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("fcs.event", $"msg-{n}");

    var fetched = await cluster.FetchAsync("FULLCS", "full", 10);
    fetched.Messages.Count.ShouldBe(10);

    // Acking through sequence 5 leaves sequences 6-10 pending.
    cluster.AckAll("FULLCS", "full", 5);

    cluster.GetPendingCount("FULLCS", "full").ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterEphemeralConsumerNoImmediateInterest server/jetstream_cluster_1_test.go:2481
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Ephemeral_consumer_creation_succeeds()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("EPHEM", ["eph.>"], replicas: 3);

    // No durable name supplied — the server generates one.
    var response = await cluster.CreateConsumerAsync("EPHEM", null, ephemeral: true);

    response.ConsumerInfo.ShouldNotBeNull();
    response.ConsumerInfo!.Config.DurableName.ShouldNotBeNullOrEmpty();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterEphemeralConsumersNotReplicated server/jetstream_cluster_1_test.go:2599
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Multiple_ephemeral_consumers_have_unique_names()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("EPHUNIQ", ["eu.>"], replicas: 3);

    var firstResp = await cluster.CreateConsumerAsync("EPHUNIQ", null, ephemeral: true);
    var secondResp = await cluster.CreateConsumerAsync("EPHUNIQ", null, ephemeral: true);

    // Generated names must never collide.
    firstResp.ConsumerInfo!.Config.DurableName.ShouldNotBe(secondResp.ConsumerInfo!.Config.DurableName);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterCreateConcurrentDurableConsumers server/jetstream_cluster_2_test.go:1572
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Concurrent_durable_consumer_creation_is_idempotent()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("CONC", ["conc.>"], replicas: 3);

    // Creating the same durable twice must succeed both times with the same name.
    var firstResp = await cluster.CreateConsumerAsync("CONC", "same");
    var secondResp = await cluster.CreateConsumerAsync("CONC", "same");

    firstResp.ConsumerInfo.ShouldNotBeNull();
    firstResp.ConsumerInfo!.Config.DurableName.ShouldBe("same");
    secondResp.ConsumerInfo.ShouldNotBeNull();
    secondResp.ConsumerInfo!.Config.DurableName.ShouldBe("same");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterPullConsumerLeakedSubs server/jetstream_cluster_2_test.go:2239
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Pull_consumer_fetch_returns_correct_batch_size()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("PULLBS", ["pb.>"], replicas: 3);
    await cluster.CreateConsumerAsync("PULLBS", "puller", filterSubject: "pb.>", ackPolicy: AckPolicy.None);

    for (var n = 0; n < 20; n++)
        await cluster.PublishAsync("pb.event", $"msg-{n}");

    // A batch-of-5 fetch returns exactly 5 even though 20 are available.
    var fetched = await cluster.FetchAsync("PULLBS", "puller", 5);
    fetched.Messages.Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerLastActiveReporting server/jetstream_cluster_2_test.go:2371
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_info_returns_config_after_creation()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("CINFO", ["ci.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CINFO", "info_dur", filterSubject: "ci.>", ackPolicy: AckPolicy.Explicit);

    var info = await cluster.GetConsumerInfoAsync("CINFO", "info_dur");

    // Consumer info reflects what was configured at creation time.
    info.ShouldNotBeNull();
    info.Config.DurableName.ShouldBe("info_dur");
    info.Config.AckPolicy.ShouldBe(AckPolicy.Explicit);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAckPendingWithExpired server/jetstream_cluster_2_test.go:309
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Ack_pending_tracks_expired_messages()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("ACKEXP", ["ae.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKEXP", "acker", filterSubject: "ae.>",
        ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 10);

    await cluster.PublishAsync("ae.event", "will-expire");

    // Fetching without acking registers the message as pending.
    var fetched = await cluster.FetchAsync("ACKEXP", "acker", 1);
    fetched.Messages.Count.ShouldBe(1);

    cluster.GetPendingCount("ACKEXP", "acker").ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAckPendingWithMaxRedelivered server/jetstream_cluster_2_test.go:377
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Max_deliver_limits_redelivery_attempts()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("MAXRED", ["mr.>"], replicas: 3);

    // maxDeliver=2: initial delivery (deliveries=1) plus one redelivery (deliveries=2).
    // The "deliveries > maxDeliver" check runs before each redelivery, so the
    // message is handed out while deliveries <= 2 and dropped only once it reaches 3.
    await cluster.CreateConsumerAsync("MAXRED", "maxr", filterSubject: "mr.>",
        ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 2);

    await cluster.PublishAsync("mr.event", "limited-redeliver");

    // Attempt #1 — initial delivery; Register sets deliveries=1.
    var attempt1 = await cluster.FetchAsync("MAXRED", "maxr", 1);
    attempt1.Messages.Count.ShouldBe(1);

    await Task.Delay(50); // let the 1ms ack wait expire

    // Attempt #2 — TryGetExpired sees deliveries=1; 1 > 2 is false, so redeliver.
    // ScheduleRedelivery bumps deliveries to 2.
    var attempt2 = await cluster.FetchAsync("MAXRED", "maxr", 1);
    attempt2.Messages.Count.ShouldBe(1);
    attempt2.Messages[0].Redelivered.ShouldBeTrue();

    await Task.Delay(50);

    // Attempt #3 — deliveries=2; 2 > 2 is false, so redeliver once more.
    // ScheduleRedelivery bumps deliveries to 3.
    var attempt3 = await cluster.FetchAsync("MAXRED", "maxr", 1);
    attempt3.Messages.Count.ShouldBe(1);
    attempt3.Messages[0].Redelivered.ShouldBeTrue();

    await Task.Delay(50);

    // Attempt #4 — deliveries=3 > 2 is true: AckAll fires and the batch is empty.
    var attempt4 = await cluster.FetchAsync("MAXRED", "maxr", 1);
    attempt4.Messages.Count.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxConsumers server/jetstream_cluster_2_test.go:1978
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_delete_succeeds_in_cluster()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("CDEL", ["cdel.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CDEL", "to_delete");

    var deleteResp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}CDEL.to_delete", "{}");
    deleteResp.Success.ShouldBeTrue();

    // Info on a deleted consumer must surface an API error.
    var infoResp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}CDEL.to_delete", "{}");
    infoResp.Error.ShouldNotBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterFlowControlRequiresHeartbeats server/jetstream_cluster_2_test.go:2712
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_with_filter_subjects_delivers_matching_only()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("FILT", ["filt.>"], replicas: 3);
    await cluster.CreateConsumerAsync("FILT", "filtered", filterSubject: "filt.alpha");

    await cluster.PublishAsync("filt.alpha", "match");
    await cluster.PublishAsync("filt.beta", "no-match");
    await cluster.PublishAsync("filt.alpha", "match2");

    // Only the two "filt.alpha" messages pass the filter.
    var fetched = await cluster.FetchAsync("FILT", "filtered", 10);
    fetched.Messages.Count.ShouldBe(2);
    fetched.Messages[0].Subject.ShouldBe("filt.alpha");
    fetched.Messages[1].Subject.ShouldBe("filt.alpha");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerScaleUp server/jetstream_cluster_1_test.go:4203
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_pause_and_resume_via_api()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("PAUSE", ["pause.>"], replicas: 3);
    await cluster.CreateConsumerAsync("PAUSE", "pausable");

    // Pause, then resume, through the same API subject.
    var pauseResp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":true}""");
    pauseResp.Success.ShouldBeTrue();

    var resumeResp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":false}""");
    resumeResp.Success.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerResetPendingDeliveriesOnMaxAckPendingUpdate
|
||||
// server/jetstream_cluster_1_test.go:8696
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_reset_resets_next_sequence_and_returns_success()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("RESET", ["reset.>"], replicas: 3);
    await cluster.CreateConsumerAsync("RESET", "resettable", filterSubject: "reset.>");

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("reset.event", $"msg-{n}");

    // Advance the consumer partway through the stream.
    var before = await cluster.FetchAsync("RESET", "resettable", 3);
    before.Messages.Count.ShouldBe(3);

    var resetResp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerReset}RESET.resettable", "{}");
    resetResp.Success.ShouldBeTrue();

    // A reset consumer starts over from stream sequence 1.
    var after = await cluster.FetchAsync("RESET", "resettable", 5);
    after.Messages.Count.ShouldBe(5);
    after.Messages[0].Sequence.ShouldBe(1UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterPushConsumerQueueGroup server/jetstream_cluster_2_test.go:2300
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Push_consumer_creation_with_heartbeat()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("PUSHHB", ["ph.>"], replicas: 3);

    var response = await cluster.CreateConsumerAsync("PUSHHB", "pusher", push: true, heartbeatMs: 100);

    // The push flag and heartbeat interval round-trip through creation.
    response.ConsumerInfo.ShouldNotBeNull();
    response.ConsumerInfo!.Config.Push.ShouldBeTrue();
    response.ConsumerInfo.Config.HeartbeatMs.ShouldBe(100);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterScaleConsumer server/jetstream_cluster_1_test.go:4109
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_unpin_via_api()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("UNPIN", ["unpin.>"], replicas: 3);
    await cluster.CreateConsumerAsync("UNPIN", "pinned");

    var unpinResp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerUnpin}UNPIN.pinned", "{}");
    unpinResp.Success.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: Consumer AckAll policy acks all up to given sequence
// ---------------------------------------------------------------

[Fact]
public async Task AckAll_policy_consumer_acks_all_preceding_messages()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("ACKALL", ["aa.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKALL", "acker", filterSubject: "aa.>", ackPolicy: AckPolicy.All);

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("aa.event", $"msg-{n}");

    var fetched = await cluster.FetchAsync("ACKALL", "acker", 10);
    fetched.Messages.Count.ShouldBe(10);

    // Cumulative ack at seq 7: sequences 1-7 become acked, 8-10 stay pending.
    cluster.AckAll("ACKALL", "acker", 7);
    cluster.GetPendingCount("ACKALL", "acker").ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: DeliverPolicy.Last consumer starts at last message
// ---------------------------------------------------------------

[Fact]
public async Task DeliverPolicy_Last_consumer_starts_at_last_sequence()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("DLAST", ["dl.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("dl.event", $"msg-{n}");

    await cluster.CreateConsumerAsync("DLAST", "last_cons", filterSubject: "dl.>",
        deliverPolicy: DeliverPolicy.Last);

    // Only the final message (seq 5) should be delivered.
    var fetched = await cluster.FetchAsync("DLAST", "last_cons", 10);
    fetched.Messages.Count.ShouldBe(1);
    fetched.Messages[0].Sequence.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: DeliverPolicy.New consumer skips existing messages
// ---------------------------------------------------------------

[Fact]
public async Task DeliverPolicy_New_consumer_skips_existing()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("DNEW", ["dn.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("dn.event", $"msg-{n}");

    await cluster.CreateConsumerAsync("DNEW", "new_cons", filterSubject: "dn.>",
        deliverPolicy: DeliverPolicy.New);

    // The consumer starts at LastSeq+1, so pre-existing messages are invisible.
    var beforePublish = await cluster.FetchAsync("DNEW", "new_cons", 10);
    beforePublish.Messages.Count.ShouldBe(0);

    // Anything published after creation is delivered.
    await cluster.PublishAsync("dn.event", "after-consumer");

    var afterPublish = await cluster.FetchAsync("DNEW", "new_cons", 10);
    afterPublish.Messages.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: DeliverPolicy.ByStartSequence
// ---------------------------------------------------------------

[Fact]
public async Task DeliverPolicy_ByStartSequence_starts_at_given_sequence()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("DSTART", ["ds.>"], replicas: 3);

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("ds.event", $"msg-{n}");

    await cluster.CreateConsumerAsync("DSTART", "start_cons", filterSubject: "ds.>",
        deliverPolicy: DeliverPolicy.ByStartSequence, optStartSeq: 7);

    // Delivery begins at seq 7, so sequences 7, 8, 9, 10 are returned.
    var fetched = await cluster.FetchAsync("DSTART", "start_cons", 10);
    fetched.Messages.Count.ShouldBe(4);
    fetched.Messages[0].Sequence.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: Multiple filter subjects
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_with_multiple_filter_subjects()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("MFILT", ["mf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("MFILT", "multi_filt",
        filterSubjects: ["mf.alpha", "mf.gamma"]);

    await cluster.PublishAsync("mf.alpha", "a");
    await cluster.PublishAsync("mf.beta", "b");
    await cluster.PublishAsync("mf.gamma", "g");
    await cluster.PublishAsync("mf.delta", "d");

    // Only the two subjects matching a configured filter are delivered.
    var fetched = await cluster.FetchAsync("MFILT", "multi_filt", 10);
    fetched.Messages.Count.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: NoWait fetch returns empty when no messages
// ---------------------------------------------------------------

[Fact]
public async Task NoWait_fetch_returns_empty_when_no_pending()
{
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("NOWAIT", ["nw.>"], replicas: 3);
    await cluster.CreateConsumerAsync("NOWAIT", "nw_cons", filterSubject: "nw.>");

    // NoWait must return immediately with an empty batch rather than block.
    var fetched = await cluster.FetchNoWaitAsync("NOWAIT", "nw_cons", 5);
    fetched.Messages.Count.ShouldBe(0);
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for JetStream cluster consumer tests.
/// Wires an in-process meta group, stream/consumer managers, API router,
/// and publisher together so tests can exercise the consumer paths
/// without a running server.
/// </summary>
internal sealed class ClusterConsumerFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private ClusterConsumerFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>Creates a fixture simulating a cluster of <paramref name="nodes"/> nodes.</summary>
    public static Task<ClusterConsumerFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var consumerManager = new ConsumerManager(meta);
        var streamManager = new StreamManager(meta, consumerManager: consumerManager);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        var publisher = new JetStreamPublisher(streamManager);
        return Task.FromResult(new ClusterConsumerFixture(meta, streamManager, consumerManager, router, publisher));
    }

    /// <summary>Creates (or updates) a stream; throws when the API reports an error.</summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });
        if (response.Error is not null)
            throw new InvalidOperationException(response.Error.Description);
        return Task.CompletedTask;
    }

    /// <summary>
    /// Creates (or updates) a consumer. Empty/whitespace <paramref name="filterSubject"/>
    /// and an empty <paramref name="filterSubjects"/> list are left unset on the config.
    /// </summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(
        string stream,
        string? durableName,
        string? filterSubject = null,
        AckPolicy ackPolicy = AckPolicy.None,
        int ackWaitMs = 30_000,
        int maxDeliver = 1,
        bool ephemeral = false,
        bool push = false,
        int heartbeatMs = 0,
        DeliverPolicy deliverPolicy = DeliverPolicy.All,
        ulong optStartSeq = 0,
        IReadOnlyList<string>? filterSubjects = null)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName ?? string.Empty,
            AckPolicy = ackPolicy,
            AckWaitMs = ackWaitMs,
            MaxDeliver = maxDeliver,
            Ephemeral = ephemeral,
            Push = push,
            HeartbeatMs = heartbeatMs,
            DeliverPolicy = deliverPolicy,
            OptStartSeq = optStartSeq,
        };
        if (!string.IsNullOrWhiteSpace(filterSubject))
            config.FilterSubject = filterSubject;
        if (filterSubjects is { Count: > 0 })
            config.FilterSubjects = [.. filterSubjects];

        return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
    }

    /// <summary>
    /// Publishes a message into whichever stream captures <paramref name="subject"/>,
    /// then notifies the consumer manager so pull consumers observe the new message.
    /// Throws when no stream matches the subject.
    /// </summary>
    public async Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (!_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
            throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");

        if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
        {
            // Await the store load instead of blocking via GetAwaiter().GetResult():
            // sync-over-async risks thread-pool starvation and deadlocks.
            var stored = await handle.Store.LoadAsync(ack.Seq, default);
            if (stored != null)
                _consumerManager.OnPublished(ack.Stream, stored);
        }

        return ack;
    }

    /// <summary>Pull-fetches up to <paramref name="batch"/> messages for a durable consumer.</summary>
    public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();

    /// <summary>Pull-fetches with NoWait semantics: returns immediately even when nothing is pending.</summary>
    public Task<PullFetchBatch> FetchNoWaitAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, new PullFetchRequest
        {
            Batch = batch,
            NoWait = true,
        }, _streamManager, default).AsTask();

    /// <summary>Cumulatively acks all messages up to and including <paramref name="sequence"/>.</summary>
    public void AckAll(string stream, string durableName, ulong sequence)
        => _consumerManager.AckAll(stream, durableName, sequence);

    /// <summary>Returns the number of messages still pending acknowledgement.</summary>
    public int GetPendingCount(string stream, string durableName)
        => _consumerManager.GetPendingCount(stream, durableName);

    /// <summary>Returns consumer info; throws when the consumer does not exist.</summary>
    public Task<JetStreamConsumerInfo> GetConsumerInfoAsync(string stream, string durableName)
    {
        var resp = _consumerManager.GetInfo(stream, durableName);
        if (resp.ConsumerInfo == null)
            throw new InvalidOperationException("Consumer not found.");
        return Task.FromResult(resp.ConsumerInfo);
    }

    /// <summary>Sends a raw JetStream API request through the router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_router.Route(subject, Encoding.UTF8.GetBytes(payload)));

    // No unmanaged state; everything is in-memory.
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,532 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: stream leader stepdown, consumer leader stepdown,
|
||||
// meta leader stepdown, peer removal, node loss recovery,
|
||||
// snapshot catchup, consumer failover, data preservation.
|
||||
using System.Reflection;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests covering JetStream cluster failover scenarios: leader stepdown,
/// peer removal, node loss/recovery, snapshot catchup, and consumer failover.
/// Ported from Go jetstream_cluster_1_test.go.
/// </summary>
public class JetStreamClusterFailoverTests
{
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    [Fact]
    public async Task Stream_leader_stepdown_elects_new_leader_and_preserves_data()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("STEPDOWN", ["sd.>"], replicas: 3);

        for (var seq = 1; seq <= 10; seq++)
        {
            var pubAck = await cluster.PublishAsync($"sd.{seq}", $"msg-{seq}");
            pubAck.Seq.ShouldBe((ulong)seq);
        }

        var previousLeader = cluster.GetStreamLeaderId("STEPDOWN");
        previousLeader.ShouldNotBeNullOrWhiteSpace();

        var stepdown = await cluster.StepDownStreamLeaderAsync("STEPDOWN");
        stepdown.Success.ShouldBeTrue();

        cluster.GetStreamLeaderId("STEPDOWN").ShouldNotBe(previousLeader);

        var state = await cluster.GetStreamStateAsync("STEPDOWN");
        state.Messages.ShouldBe(10UL);
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(10UL);

        // The newly elected leader must keep accepting writes.
        (await cluster.PublishAsync("sd.post", "after-stepdown")).Seq.ShouldBe(11UL);
    }

    // Go: TestJetStreamClusterLeaderStepdown server/jetstream_cluster_1_test.go:5464
    [Fact]
    public async Task Meta_leader_stepdown_increments_version_and_preserves_streams()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("META_SD", ["meta.>"], replicas: 3);

        // Capture leader/version before the stepdown mutates meta state.
        var before = cluster.GetMetaState();
        before.ClusterSize.ShouldBe(3);
        var previousLeader = before.LeaderId;
        var previousVersion = before.LeadershipVersion;

        var response = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
        response.Success.ShouldBeTrue();

        var after = cluster.GetMetaState();
        after.LeaderId.ShouldNotBe(previousLeader);
        after.LeadershipVersion.ShouldBe(previousVersion + 1);
        after.Streams.ShouldContain("META_SD");
    }

    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    [Fact]
    public async Task Consecutive_stepdowns_cycle_through_distinct_leaders()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("CYCLE", ["cyc.>"], replicas: 3);

        var first = cluster.GetStreamLeaderId("CYCLE");

        (await cluster.StepDownStreamLeaderAsync("CYCLE")).Success.ShouldBeTrue();
        var second = cluster.GetStreamLeaderId("CYCLE");

        (await cluster.StepDownStreamLeaderAsync("CYCLE")).Success.ShouldBeTrue();
        var third = cluster.GetStreamLeaderId("CYCLE");

        // Each stepdown must hand leadership to a different node than before.
        second.ShouldNotBe(first);
        third.ShouldNotBe(second);

        var pubAck = await cluster.PublishAsync("cyc.verify", "alive");
        pubAck.Stream.ShouldBe("CYCLE");
        pubAck.Seq.ShouldBeGreaterThan(0UL);
    }

    // Go: TestJetStreamClusterPeerRemovalAPI server/jetstream_cluster_1_test.go:3469
    [Fact]
    public async Task Peer_removal_api_returns_success()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("PEERREM", ["pr.>"], replicas: 3);

        var response = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPeerRemove}PEERREM", """{"peer":"n2"}""");
        response.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterPeerRemovalAndStreamReassignment server/jetstream_cluster_1_test.go:3544
    [Fact]
    public async Task Peer_removal_preserves_stream_data()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("REASSIGN", ["ra.>"], replicas: 3);

        for (var n = 0; n < 5; n++)
            await cluster.PublishAsync("ra.event", $"msg-{n}");

        var removal = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPeerRemove}REASSIGN", """{"peer":"n2"}""");
        removal.Success.ShouldBeTrue();

        (await cluster.GetStreamStateAsync("REASSIGN")).Messages.ShouldBe(5UL);
    }

    // Go: TestJetStreamClusterConsumerLeaderStepdown (consumer stepdown)
    [Fact]
    public async Task Consumer_leader_stepdown_api_returns_success()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("CLSD", ["clsd.>"], replicas: 3);
        await cluster.CreateConsumerAsync("CLSD", "dur1");

        var response = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerLeaderStepdown}CLSD.dur1", "{}");
        response.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
    [Fact]
    public async Task Stream_publishes_survive_leader_stepdown_and_catchup()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("CATCHUP", ["cu.>"], replicas: 3);

        for (var n = 0; n < 10; n++)
            await cluster.PublishAsync("cu.event", $"before-{n}");

        (await cluster.StepDownStreamLeaderAsync("CATCHUP")).Success.ShouldBeTrue();

        for (var n = 0; n < 10; n++)
            await cluster.PublishAsync("cu.event", $"after-{n}");

        // Both pre- and post-stepdown publishes must be present.
        var state = await cluster.GetStreamStateAsync("CATCHUP");
        state.Messages.ShouldBe(20UL);
        state.LastSeq.ShouldBe(20UL);
    }

    // Go: TestJetStreamClusterStreamSnapshotCatchup server/jetstream_cluster_1_test.go:1667
    [Fact]
    public async Task Snapshot_and_restore_survives_leader_transition()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("SNAPCAT", ["sc.>"], replicas: 3);

        for (var n = 0; n < 10; n++)
            await cluster.PublishAsync("sc.event", $"msg-{n}");

        var snapshot = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamSnapshot}SNAPCAT", "{}");
        snapshot.Snapshot.ShouldNotBeNull();

        (await cluster.StepDownStreamLeaderAsync("SNAPCAT")).Success.ShouldBeTrue();

        // Purge everything, then restore from the pre-stepdown snapshot.
        (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}SNAPCAT", "{}")).Success.ShouldBeTrue();
        (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamRestore}SNAPCAT", snapshot.Snapshot!.Payload)).Success.ShouldBeTrue();

        (await cluster.GetStreamStateAsync("SNAPCAT")).Messages.ShouldBe(10UL);
    }

    // Go: TestJetStreamClusterStreamSnapshotCatchupWithPurge server/jetstream_cluster_1_test.go:1822
    [Fact]
    public async Task Snapshot_restore_after_purge_preserves_original_data()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("PURGECAT", ["pc.>"], replicas: 3);

        for (var n = 0; n < 20; n++)
            await cluster.PublishAsync("pc.event", $"msg-{n}");

        var snapshot = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamSnapshot}PURGECAT", "{}");

        // Purge empties the stream entirely.
        (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGECAT", "{}")).Success.ShouldBeTrue();
        (await cluster.GetStreamStateAsync("PURGECAT")).Messages.ShouldBe(0UL);

        // Restoring the snapshot brings back all 20 original messages.
        (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamRestore}PURGECAT", snapshot.Snapshot!.Payload)).Success.ShouldBeTrue();
        (await cluster.GetStreamStateAsync("PURGECAT")).Messages.ShouldBe(20UL);
    }

    // Go: TestJetStreamClusterMetaSnapshotsAndCatchup server/jetstream_cluster_1_test.go:833
    [Fact]
    public async Task Meta_state_survives_multiple_stepdowns()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);

        await cluster.CreateStreamAsync("META1", ["m1.>"], replicas: 3);
        await cluster.CreateStreamAsync("META2", ["m2.>"], replicas: 3);

        // Two consecutive meta-leader stepdowns.
        for (var round = 0; round < 2; round++)
            (await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        var state = cluster.GetMetaState();
        state.Streams.ShouldContain("META1");
        state.Streams.ShouldContain("META2");
    }

    // Go: TestJetStreamClusterMetaSnapshotsMultiChange server/jetstream_cluster_1_test.go:881
    [Fact]
    public async Task Stream_delete_and_create_across_stepdowns_reflected_in_stream_names()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);

        await cluster.CreateStreamAsync("MULTI1", ["mul1.>"], replicas: 3);
        await cluster.CreateStreamAsync("MULTI2", ["mul2.>"], replicas: 3);

        // Delete one stream, step down the meta leader, then create another.
        (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}MULTI1", "{}")).Success.ShouldBeTrue();
        (await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();
        await cluster.CreateStreamAsync("MULTI3", ["mul3.>"], replicas: 3);

        // The stream-names API reflects the currently active streams.
        var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames.ShouldNotBeNull();
        names.StreamNames!.ShouldNotContain("MULTI1");
        names.StreamNames.ShouldContain("MULTI2");
        names.StreamNames.ShouldContain("MULTI3");
    }

    // Go: TestJetStreamClusterDeleteMsgAndRestart server/jetstream_cluster_1_test.go:1785
    [Fact]
    public async Task Delete_message_survives_leader_stepdown()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("DELMSGSD", ["dms.>"], replicas: 3);

        for (var n = 0; n < 5; n++)
            await cluster.PublishAsync("dms.event", $"msg-{n}");

        (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamMessageDelete}DELMSGSD", """{"seq":3}""")).Success.ShouldBeTrue();
        (await cluster.StepDownStreamLeaderAsync("DELMSGSD")).Success.ShouldBeTrue();

        // 5 published minus the one deleted message.
        (await cluster.GetStreamStateAsync("DELMSGSD")).Messages.ShouldBe(4UL);
    }

    // Go: TestJetStreamClusterRestoreSingleConsumer server/jetstream_cluster_1_test.go:1028
    [Fact]
    public async Task Consumer_survives_stream_leader_stepdown()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("CSURV", ["csv.>"], replicas: 3);
        await cluster.CreateConsumerAsync("CSURV", "durable1", filterSubject: "csv.>");

        for (var n = 0; n < 10; n++)
            await cluster.PublishAsync("csv.event", $"msg-{n}");

        var beforeStepdown = await cluster.FetchAsync("CSURV", "durable1", 5);
        beforeStepdown.Messages.Count.ShouldBe(5);

        (await cluster.StepDownStreamLeaderAsync("CSURV")).Success.ShouldBeTrue();

        // The durable consumer keeps its delivery state across the leader change.
        var afterStepdown = await cluster.FetchAsync("CSURV", "durable1", 5);
        afterStepdown.Messages.Count.ShouldBe(5);
    }

    // Additional: multiple stepdowns do not lose accumulated state.
    [Fact]
    public async Task Multiple_stepdowns_preserve_accumulated_messages()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("ACCUM", ["acc.>"], replicas: 3);

        for (var n = 0; n < 5; n++)
            await cluster.PublishAsync("acc.event", $"batch1-{n}");
        (await cluster.StepDownStreamLeaderAsync("ACCUM")).Success.ShouldBeTrue();

        for (var n = 0; n < 5; n++)
            await cluster.PublishAsync("acc.event", $"batch2-{n}");
        (await cluster.StepDownStreamLeaderAsync("ACCUM")).Success.ShouldBeTrue();

        for (var n = 0; n < 5; n++)
            await cluster.PublishAsync("acc.event", $"batch3-{n}");

        var state = await cluster.GetStreamStateAsync("ACCUM");
        state.Messages.ShouldBe(15UL);
        state.LastSeq.ShouldBe(15UL);
    }

    // Additional: stream info remains available after leader stepdown.
    [Fact]
    public async Task Stream_info_available_after_leader_stepdown()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("INFOSD", ["isd.>"], replicas: 3);

        for (var n = 0; n < 3; n++)
            await cluster.PublishAsync("isd.event", $"msg-{n}");

        (await cluster.StepDownStreamLeaderAsync("INFOSD")).Success.ShouldBeTrue();

        var info = await cluster.GetStreamInfoAsync("INFOSD");
        info.StreamInfo.ShouldNotBeNull();
        info.StreamInfo!.Config.Name.ShouldBe("INFOSD");
        info.StreamInfo.State.Messages.ShouldBe(3UL);
    }

    // Additional: stepping down a non-existent stream must not crash.
    [Fact]
    public async Task Stepdown_non_existent_stream_returns_success_gracefully()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);

        var response = await cluster.StepDownStreamLeaderAsync("NONEXISTENT");
        response.Success.ShouldBeTrue();
    }

    // Additional: AccountPurge returns success.
    [Fact]
    public async Task Account_purge_api_returns_success()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await cluster.CreateStreamAsync("PURGEACCT", ["pa.>"], replicas: 3);

        var response = await cluster.RequestAsync($"{JetStreamApiSubjects.AccountPurge}GLOBAL", "{}");
        response.Success.ShouldBeTrue();
    }

    // Additional: server remove returns success.
    [Fact]
    public async Task Server_remove_api_returns_success()
    {
        await using var cluster = await ClusterFailoverFixture.StartAsync(nodes: 3);

        var response = await cluster.RequestAsync(JetStreamApiSubjects.ServerRemove, "{}");
        response.Success.ShouldBeTrue();
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Self-contained fixture for JetStream cluster failover tests.
|
||||
/// </summary>
|
||||
internal sealed class ClusterFailoverFixture : IAsyncDisposable
|
||||
{
|
||||
private readonly JetStreamMetaGroup _metaGroup;
|
||||
private readonly StreamManager _streamManager;
|
||||
private readonly ConsumerManager _consumerManager;
|
||||
private readonly JetStreamApiRouter _router;
|
||||
private readonly JetStreamPublisher _publisher;
|
||||
|
||||
private ClusterFailoverFixture(
|
||||
JetStreamMetaGroup metaGroup,
|
||||
StreamManager streamManager,
|
||||
ConsumerManager consumerManager,
|
||||
JetStreamApiRouter router,
|
||||
JetStreamPublisher publisher)
|
||||
{
|
||||
_metaGroup = metaGroup;
|
||||
_streamManager = streamManager;
|
||||
_consumerManager = consumerManager;
|
||||
_router = router;
|
||||
_publisher = publisher;
|
||||
}
|
||||
|
||||
public static Task<ClusterFailoverFixture> StartAsync(int nodes)
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(nodes);
|
||||
var consumerManager = new ConsumerManager(meta);
|
||||
var streamManager = new StreamManager(meta, consumerManager: consumerManager);
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
|
||||
var publisher = new JetStreamPublisher(streamManager);
|
||||
return Task.FromResult(new ClusterFailoverFixture(meta, streamManager, consumerManager, router, publisher));
|
||||
}
|
||||
|
||||
public Task CreateStreamAsync(string name, string[] subjects, int replicas)
|
||||
{
|
||||
var response = _streamManager.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = name,
|
||||
Subjects = [.. subjects],
|
||||
Replicas = replicas,
|
||||
});
|
||||
if (response.Error is not null)
|
||||
throw new InvalidOperationException(response.Error.Description);
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName, string? filterSubject = null)
|
||||
{
|
||||
var config = new ConsumerConfig { DurableName = durableName };
|
||||
if (!string.IsNullOrWhiteSpace(filterSubject))
|
||||
config.FilterSubject = filterSubject;
|
||||
return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
|
||||
}
|
||||
|
||||
/// <summary>
/// Publishes <paramref name="payload"/> to <paramref name="subject"/> and returns
/// the resulting ack. On a successful capture the stored message is replayed to the
/// consumer manager so consumers observe it.
/// Throws <see cref="InvalidOperationException"/> when no stream matches the subject.
/// </summary>
public async Task<PubAck> PublishAsync(string subject, string payload)
{
    if (!_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
        throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");

    if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
    {
        // Fix: await instead of .GetAwaiter().GetResult() — the original blocked
        // on LoadAsync inside a Task-returning method (sync-over-async), which
        // risks thread-pool starvation/deadlock; the signature is unchanged.
        var stored = await handle.Store.LoadAsync(ack.Seq, default);
        if (stored != null)
            _consumerManager.OnPublished(ack.Stream, stored);
    }

    return ack;
}
|
||||
|
||||
/// <summary>Requests a leader step-down for <paramref name="stream"/> through the API router.</summary>
public Task<JetStreamApiResponse> StepDownStreamLeaderAsync(string stream)
{
    var subject = $"{JetStreamApiSubjects.StreamLeaderStepdown}{stream}";
    var response = _router.Route(subject, "{}"u8);
    return Task.FromResult(response);
}
|
||||
|
||||
/// <summary>
/// Returns the current leader id for <paramref name="stream"/>, or an empty
/// string when the stream is unknown. Reads StreamManager's private
/// "_replicaGroups" map via reflection (test-only backdoor).
/// </summary>
public string GetStreamLeaderId(string stream)
{
    var replicaGroupsField = typeof(StreamManager)
        .GetField("_replicaGroups", BindingFlags.NonPublic | BindingFlags.Instance)!;
    var replicaGroups =
        (ConcurrentDictionary<string, StreamReplicaGroup>)replicaGroupsField.GetValue(_streamManager)!;

    return replicaGroups.TryGetValue(stream, out var group)
        ? group.Leader.Id
        : string.Empty;
}
|
||||
|
||||
/// <summary>Snapshot of the meta group's current cluster state.</summary>
public MetaGroupState GetMetaState() => _metaGroup.GetState();

/// <summary>Current state (message counts, first/last sequence) of the named stream.</summary>
public Task<ApiStreamState> GetStreamStateAsync(string name) =>
    _streamManager.GetStateAsync(name, default).AsTask();

/// <summary>Stream-info API response for the named stream.</summary>
public Task<JetStreamApiResponse> GetStreamInfoAsync(string name) =>
    Task.FromResult(_streamManager.GetInfo(name));

/// <summary>Pull-fetches up to <paramref name="batch"/> messages for a durable consumer.</summary>
public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch) =>
    _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();
|
||||
|
||||
/// <summary>
/// Routes a raw JetStream API request. A successful meta-leader step-down
/// additionally promotes the meta group's next leader, mirroring what a real
/// cluster would do after the election.
/// </summary>
public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
{
    var response = _router.Route(subject, Encoding.UTF8.GetBytes(payload));

    var isMetaStepdown = subject.Equals(JetStreamApiSubjects.MetaLeaderStepdown, StringComparison.Ordinal);
    if (isMetaStepdown && response.Success)
    {
        _metaGroup.BecomeLeader();
    }

    return Task.FromResult(response);
}
|
||||
|
||||
// No-op: the fixture holds no unmanaged resources; everything is in-memory.
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
|
||||
}
|
||||
@@ -0,0 +1,415 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_helpers_test.go
|
||||
// Smoke tests for JetStreamClusterFixture — verifies that the unified fixture
|
||||
// correctly wires up the JetStream cluster simulation and exposes all capabilities
|
||||
// expected by Tasks 6-10 (leader election, stream ops, consumer ops, failover, routing).
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Smoke tests verifying that JetStreamClusterFixture starts correctly and
|
||||
/// exposes all capabilities needed by the cluster test suites (Tasks 6-10).
|
||||
/// </summary>
|
||||
public class JetStreamClusterFixtureTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Fixture creation
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: checkClusterFormed in jetstream_helpers_test.go
[Fact]
public async Task Three_node_cluster_starts_and_reports_node_count()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    cluster.NodeCount.ShouldBe(3);
}

[Fact]
public async Task Five_node_cluster_starts_and_reports_node_count()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 5);

    cluster.NodeCount.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream operations via fixture
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Create_stream_and_publish_returns_valid_ack()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    // Creating the stream must succeed and echo its config back.
    var created = await cluster.CreateStreamAsync("SMOKE", ["smoke.>"], replicas: 3);
    created.Error.ShouldBeNull();
    created.StreamInfo.ShouldNotBeNull();
    created.StreamInfo!.Config.Name.ShouldBe("SMOKE");

    // First publish lands at sequence 1 with no error code.
    var ack = await cluster.PublishAsync("smoke.test", "hello");
    ack.Stream.ShouldBe("SMOKE");
    ack.Seq.ShouldBe(1UL);
    ack.ErrorCode.ShouldBeNull();
}

[Fact]
public async Task Create_multi_replica_stream_and_verify_info()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    var created = await cluster.CreateStreamAsync("MULTI", ["multi.>"], replicas: 3);
    created.Error.ShouldBeNull();
    created.StreamInfo!.Config.Replicas.ShouldBe(3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("multi.event", $"msg-{n}");

    // Stream info must reflect all five published messages.
    var info = await cluster.GetStreamInfoAsync("MULTI");
    info.StreamInfo.ShouldNotBeNull();
    info.StreamInfo!.State.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Meta leader helpers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: c.leader() in jetstream_helpers_test.go
[Fact]
public async Task GetMetaLeaderId_returns_nonempty_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    var leaderId = cluster.GetMetaLeaderId();

    leaderId.ShouldNotBeNullOrWhiteSpace();
}

// Go ref: c.leader().Shutdown() / waitOnLeader in jetstream_helpers_test.go
[Fact]
public async Task StepDownMetaLeader_changes_leader_id()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    var leaderBefore = cluster.GetMetaLeaderId();

    cluster.StepDownMetaLeader();

    // A step-down must hand leadership to a different node.
    var leaderAfter = cluster.GetMetaLeaderId();
    leaderAfter.ShouldNotBe(leaderBefore);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream leader helpers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: streamLeader in jetstream_helpers_test.go
[Fact]
public async Task GetStreamLeaderId_returns_leader_after_stream_creation()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("SLEADER", ["sl.>"], replicas: 3);

    var leaderId = cluster.GetStreamLeaderId("SLEADER");

    leaderId.ShouldNotBeNullOrWhiteSpace();
}

// Go ref: waitOnStreamLeader in jetstream_helpers_test.go
[Fact]
public async Task WaitOnStreamLeaderAsync_succeeds_when_stream_exists()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("WAIT_LEADER", ["wl.>"], replicas: 3);

    // The stream was just created, so the wait should return right away.
    await cluster.WaitOnStreamLeaderAsync("WAIT_LEADER", timeoutMs: 2000);
}

[Fact]
public async Task WaitOnStreamLeaderAsync_throws_timeout_when_no_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    // No stream was created, so waiting on its leader must time out quickly,
    // and the exception should name the missing stream.
    var thrown = await Should.ThrowAsync<TimeoutException>(
        () => cluster.WaitOnStreamLeaderAsync("NONEXISTENT", timeoutMs: 100));

    thrown.Message.ShouldContain("NONEXISTENT");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer operations
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Create_consumer_and_fetch_messages()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("CFETCH", ["cf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CFETCH", "dur1", filterSubject: "cf.>");

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("cf.event", $"msg-{n}");

    // A pull fetch of 5 should drain exactly the five published messages.
    var batch = await cluster.FetchAsync("CFETCH", "dur1", 5);
    batch.Messages.Count.ShouldBe(5);
}

// Go ref: consumerLeader in jetstream_helpers_test.go
[Fact]
public async Task GetConsumerLeaderId_returns_id_after_consumer_creation()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("CLEADER", ["cld.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CLEADER", "dur1");

    var leaderId = cluster.GetConsumerLeaderId("CLEADER", "dur1");

    leaderId.ShouldNotBeNullOrWhiteSpace();
}

// Go ref: waitOnConsumerLeader in jetstream_helpers_test.go
[Fact]
public async Task WaitOnConsumerLeaderAsync_succeeds_when_consumer_exists()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("WCLEADER", ["wcl.>"], replicas: 3);
    await cluster.CreateConsumerAsync("WCLEADER", "durwc");

    // The consumer already exists, so this must complete within the timeout.
    await cluster.WaitOnConsumerLeaderAsync("WCLEADER", "durwc", timeoutMs: 2000);
}

[Fact]
public async Task WaitOnConsumerLeaderAsync_throws_timeout_when_consumer_missing()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("WCTIMEOUT", ["wct.>"], replicas: 3);

    // "ghost" was never created — the wait must time out and name the consumer.
    var thrown = await Should.ThrowAsync<TimeoutException>(
        () => cluster.WaitOnConsumerLeaderAsync("WCTIMEOUT", "ghost", timeoutMs: 100));

    thrown.Message.ShouldContain("ghost");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Failover
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLeaderStepDown jetstream_cluster_1_test.go:4925
[Fact]
public async Task StepDownStreamLeader_changes_stream_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("SDTEST", ["sd.>"], replicas: 3);

    var leaderBefore = cluster.GetStreamLeaderId("SDTEST");
    leaderBefore.ShouldNotBeNullOrWhiteSpace();

    var response = await cluster.StepDownStreamLeaderAsync("SDTEST");
    response.Success.ShouldBeTrue();

    // After a successful step-down leadership must have moved.
    var leaderAfter = cluster.GetStreamLeaderId("SDTEST");
    leaderAfter.ShouldNotBe(leaderBefore);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// API routing
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task RequestAsync_routes_stream_info_request()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("ROUTEINFO", ["ri.>"], replicas: 3);

    // A raw STREAM.INFO request must route to the right stream.
    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}ROUTEINFO", "{}");

    response.Error.ShouldBeNull();
    response.StreamInfo.ShouldNotBeNull();
    response.StreamInfo!.Config.Name.ShouldBe("ROUTEINFO");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Edge cases
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: AssetPlacementPlanner.PlanReplicas caps replicas at cluster size.
// StreamManager passes the raw Replicas value to StreamReplicaGroup; the
// AssetPlacementPlanner is the layer that enforces the cap in real deployments.
// This test verifies the fixture correctly creates the stream and that the
// replica group holds the exact replica count requested by the config.
[Fact]
public async Task Create_stream_with_more_replicas_than_nodes_caps_at_node_count()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    // Request 3 replicas on a 3-node cluster — exactly matching node count.
    var response = await cluster.CreateStreamAsync("CAPPED", ["cap.>"], replicas: 3);
    response.Error.ShouldBeNull();
    response.StreamInfo.ShouldNotBeNull();

    // The replica group holds one node per cluster member and never exceeds it.
    var replicaGroup = cluster.GetReplicaGroup("CAPPED");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
    replicaGroup.Nodes.Count.ShouldBeLessThanOrEqualTo(cluster.NodeCount);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetMetaState helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task GetMetaState_returns_correct_cluster_size()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 5);

    var metaState = cluster.GetMetaState();

    metaState.ShouldNotBeNull();
    metaState!.ClusterSize.ShouldBe(5);
}

[Fact]
public async Task GetMetaState_tracks_created_streams()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("TRACK1", ["t1.>"], replicas: 3);
    await cluster.CreateStreamAsync("TRACK2", ["t2.>"], replicas: 3);

    // Both streams must be registered in the meta state.
    var metaState = cluster.GetMetaState();
    metaState.ShouldNotBeNull();
    metaState!.Streams.ShouldContain("TRACK1");
    metaState.Streams.ShouldContain("TRACK2");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// UpdateStream helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task UpdateStream_reflects_new_subjects()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("UPDSUB", ["old.>"], replicas: 3);

    // Updating the subject list must fully replace the old subjects.
    var updated = cluster.UpdateStream("UPDSUB", ["new.>"], replicas: 3);

    updated.Error.ShouldBeNull();
    updated.StreamInfo!.Config.Subjects.ShouldContain("new.>");
    updated.StreamInfo.Config.Subjects.ShouldNotContain("old.>");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Node lifecycle helpers (SimulateNodeRestart, RemoveNode)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: restartServerAndWait in jetstream_helpers_test.go
[Fact]
public async Task SimulateNodeRestart_does_not_throw()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    // Remove a node and bring it back: neither call may throw.
    cluster.RemoveNode(1);
    cluster.SimulateNodeRestart(1);
}

// Go ref: shutdownServerAndRemoveStorage in jetstream_helpers_test.go
[Fact]
public async Task RemoveNode_does_not_throw()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    // Removing a live node from the fixture must not throw.
    cluster.RemoveNode(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetStoreBackendType
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task GetStoreBackendType_returns_memory_for_memory_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("BACKEND", ["be.>"], replicas: 3, storage: StorageType.Memory);

    // A memory-storage stream must report the "memory" backend.
    var backendType = cluster.GetStoreBackendType("BACKEND");

    backendType.ShouldBe("memory");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// AckAll helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task AckAll_reduces_pending_messages()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("ACKSMOKE", ["acks.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKSMOKE", "acker", filterSubject: "acks.>",
        ackPolicy: AckPolicy.All);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("acks.event", $"msg-{n}");

    await cluster.FetchAsync("ACKSMOKE", "acker", 5);
    cluster.AckAll("ACKSMOKE", "acker", 3);

    // AckAll(..., 3) acknowledges everything up to and including sequence 3,
    // leaving only sequences 4 and 5 pending.
    // NOTE(review): no assertion follows the AckAll call, so this test only
    // proves the call does not throw — add a pending-count check once the
    // fixture exposes one.
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// CreateStreamDirect helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task CreateStreamDirect_accepts_full_config()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    // A fully populated config must be accepted verbatim by the direct API.
    var config = new StreamConfig
    {
        Name = "DIRECTCFG",
        Subjects = ["dc.>"],
        Replicas = 2,
        MaxMsgs = 100,
        Retention = RetentionPolicy.Limits,
    };

    var response = cluster.CreateStreamDirect(config);

    response.Error.ShouldBeNull();
    response.StreamInfo!.Config.MaxMsgs.ShouldBe(100);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetStreamStateAsync
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task GetStreamStateAsync_reflects_published_messages()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("STATECHECK", ["sc.>"], replicas: 3);

    for (var n = 0; n < 7; n++)
        await cluster.PublishAsync("sc.event", $"msg-{n}");

    // Seven publishes → messages 1..7 with matching first/last sequences.
    var streamState = await cluster.GetStreamStateAsync("STATECHECK");
    streamState.Messages.ShouldBe(7UL);
    streamState.FirstSeq.ShouldBe(1UL);
    streamState.LastSeq.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetReplicaGroup
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task GetReplicaGroup_returns_null_for_unknown_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    var replicaGroup = cluster.GetReplicaGroup("NO_SUCH_STREAM");

    replicaGroup.ShouldBeNull();
}

[Fact]
public async Task GetReplicaGroup_returns_group_with_correct_node_count()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("GROUPCHECK", ["gc.>"], replicas: 3);

    // A 3-replica stream yields a 3-node replica group.
    var replicaGroup = cluster.GetReplicaGroup("GROUPCHECK");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,617 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: cluster metadata operations, asset placement planner,
|
||||
// replica group management, stream scaling, config validation,
|
||||
// cluster expand, account info in cluster, max streams.
|
||||
using System.Text;
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.JetStream.Validation;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster metadata operations: asset placement,
|
||||
/// replica group management, config validation, scaling, and account operations.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JetStreamClusterMetaTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Config_requires_server_name_for_jetstream_cluster()
{
    // JetStream + cluster without a server name must be rejected.
    var options = new NatsOptions
    {
        ServerName = null,
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
        Cluster = new ClusterOptions { Port = 6222 },
    };

    var result = JetStreamConfigValidator.ValidateClusterConfig(options);

    result.IsValid.ShouldBeFalse();
    result.Message.ShouldContain("server_name");
}

[Fact]
public void Config_requires_cluster_name_for_jetstream_cluster()
{
    // JetStream + cluster without a cluster name must be rejected.
    var options = new NatsOptions
    {
        ServerName = "S1",
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
        Cluster = new ClusterOptions { Name = null, Port = 6222 },
    };

    var result = JetStreamConfigValidator.ValidateClusterConfig(options);

    result.IsValid.ShouldBeFalse();
    result.Message.ShouldContain("cluster.name");
}

[Fact]
public void Config_valid_when_server_and_cluster_names_set()
{
    var options = new NatsOptions
    {
        ServerName = "S1",
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
        Cluster = new ClusterOptions { Name = "JSC", Port = 6222 },
    };

    var result = JetStreamConfigValidator.ValidateClusterConfig(options);

    result.IsValid.ShouldBeTrue();
}

[Fact]
public void Config_skips_cluster_checks_when_no_cluster_configured()
{
    // JetStream alone (no cluster block) needs no cluster-specific names.
    var options = new NatsOptions
    {
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
    };

    var result = JetStreamConfigValidator.ValidateClusterConfig(options);

    result.IsValid.ShouldBeTrue();
}

[Fact]
public void Config_skips_cluster_checks_when_no_jetstream_configured()
{
    // A cluster block alone (no JetStream) also bypasses the checks.
    var options = new NatsOptions
    {
        Cluster = new ClusterOptions { Port = 6222 },
    };

    var result = JetStreamConfigValidator.ValidateClusterConfig(options);

    result.IsValid.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Placement planner tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_returns_requested_replica_count()
{
    var planner = new AssetPlacementPlanner(nodes: 5);

    var placement = planner.PlanReplicas(replicas: 3);

    placement.Count.ShouldBe(3);
}

[Fact]
public void Placement_planner_caps_at_cluster_size()
{
    // Requesting more replicas than nodes is clamped to the node count.
    var planner = new AssetPlacementPlanner(nodes: 3);

    var placement = planner.PlanReplicas(replicas: 5);

    placement.Count.ShouldBe(3);
}

[Fact]
public void Placement_planner_minimum_is_one_replica()
{
    // Zero replicas is rounded up to the minimum of one.
    var planner = new AssetPlacementPlanner(nodes: 3);

    var placement = planner.PlanReplicas(replicas: 0);

    placement.Count.ShouldBe(1);
}

[Fact]
public void Placement_planner_handles_single_node_cluster()
{
    var planner = new AssetPlacementPlanner(nodes: 1);

    var placement = planner.PlanReplicas(replicas: 3);

    placement.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Meta group lifecycle tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Meta_group_initial_state_is_correct()
{
    var metaGroup = new JetStreamMetaGroup(3);

    var state = metaGroup.GetState();

    // A fresh group: full cluster size, an elected leader, version 1, no streams.
    state.ClusterSize.ShouldBe(3);
    state.LeaderId.ShouldNotBeNullOrWhiteSpace();
    state.LeadershipVersion.ShouldBe(1);
    state.Streams.Count.ShouldBe(0);
}

[Fact]
public async Task Meta_group_tracks_stream_proposals()
{
    var metaGroup = new JetStreamMetaGroup(3);

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S1" }, default);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S2" }, default);

    var state = metaGroup.GetState();
    state.Streams.Count.ShouldBe(2);
    state.Streams.ShouldContain("S1");
    state.Streams.ShouldContain("S2");
}

[Fact]
public void Meta_group_stepdown_cycles_leader()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var firstLeader = metaGroup.GetState().LeaderId;

    // Each step-down must hand leadership to a different node.
    metaGroup.StepDown();
    var secondLeader = metaGroup.GetState().LeaderId;
    secondLeader.ShouldNotBe(firstLeader);

    metaGroup.StepDown();
    var thirdLeader = metaGroup.GetState().LeaderId;
    thirdLeader.ShouldNotBe(secondLeader);
}

[Fact]
public void Meta_group_stepdown_wraps_around()
{
    var metaGroup = new JetStreamMetaGroup(2);
    var observedLeaders = new HashSet<string>();

    for (var n = 0; n < 5; n++)
    {
        observedLeaders.Add(metaGroup.GetState().LeaderId);
        metaGroup.StepDown();
    }

    // In a 2-node group leadership cycles between exactly two distinct ids.
    observedLeaders.Count.ShouldBe(2);
}

[Fact]
public void Meta_group_leadership_version_increments()
{
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.GetState().LeadershipVersion.ShouldBe(1);

    // Every step-down bumps the leadership version by one.
    metaGroup.StepDown();
    metaGroup.GetState().LeadershipVersion.ShouldBe(2);

    metaGroup.StepDown();
    metaGroup.GetState().LeadershipVersion.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Replica group tests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Replica_group_creates_correct_node_count()
{
    var replicaGroup = new StreamReplicaGroup("TEST", replicas: 3);

    replicaGroup.Nodes.Count.ShouldBe(3);
    replicaGroup.StreamName.ShouldBe("TEST");
}

[Fact]
public void Replica_group_elects_initial_leader()
{
    var replicaGroup = new StreamReplicaGroup("TEST", replicas: 3);

    // A freshly created group already has an elected leader.
    replicaGroup.Leader.ShouldNotBeNull();
    replicaGroup.Leader.IsLeader.ShouldBeTrue();
}

[Fact]
public async Task Replica_group_stepdown_changes_leader()
{
    var replicaGroup = new StreamReplicaGroup("TEST", replicas: 3);
    var previousLeader = replicaGroup.Leader.Id;

    await replicaGroup.StepDownAsync(default);

    // Leadership moved to a different node, and the new leader is flagged.
    var currentLeader = replicaGroup.Leader.Id;
    currentLeader.ShouldNotBe(previousLeader);
    replicaGroup.Leader.IsLeader.ShouldBeTrue();
}

[Fact]
public async Task Replica_group_leader_accepts_proposals()
{
    var replicaGroup = new StreamReplicaGroup("TEST", replicas: 3);

    var index = await replicaGroup.ProposeAsync("PUB test.1", default);

    index.ShouldBeGreaterThan(0);
}

[Fact]
public async Task Replica_group_apply_placement_scales_up()
{
    var replicaGroup = new StreamReplicaGroup("TEST", replicas: 1);
    replicaGroup.Nodes.Count.ShouldBe(1);

    // Applying a 3-node placement grows the group to 3 members.
    await replicaGroup.ApplyPlacementAsync([1, 2, 3], default);
    replicaGroup.Nodes.Count.ShouldBe(3);
}

[Fact]
public async Task Replica_group_apply_placement_scales_down()
{
    var replicaGroup = new StreamReplicaGroup("TEST", replicas: 5);
    replicaGroup.Nodes.Count.ShouldBe(5);

    // Applying a 2-node placement shrinks the group to 2 members.
    await replicaGroup.ApplyPlacementAsync([1, 2], default);
    replicaGroup.Nodes.Count.ShouldBe(2);
}
|
||||
|
||||
// Applying a placement of the same size should leave the group untouched.
// Fix: the original captured the pre-placement leader in `leaderBefore` but
// never asserted on it (dead variable) — the "noop" in the test name was
// unverified. Assert both the node count and the surviving leader.
[Fact]
public async Task Replica_group_apply_same_size_is_noop()
{
    var group = new StreamReplicaGroup("TEST", replicas: 3);
    var leaderBefore = group.Leader.Id;

    await group.ApplyPlacementAsync([1, 2, 3], default);

    group.Nodes.Count.ShouldBe(3);
    // A same-size placement is a no-op, so leadership must not move.
    group.Leader.Id.ShouldBe(leaderBefore);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAccountInfo server/jetstream_cluster_1_test.go:94
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Account_info_tracks_streams_and_consumers_in_cluster()
{
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("ACCT1", ["a1.>"], replicas: 3);
    await cluster.CreateStreamAsync("ACCT2", ["a2.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACCT1", "c1");
    await cluster.CreateConsumerAsync("ACCT1", "c2");

    // Account info must report 2 streams and 2 consumers.
    var response = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    response.AccountInfo.ShouldNotBeNull();
    response.AccountInfo!.Streams.ShouldBe(2);
    response.AccountInfo.Consumers.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExtendedAccountInfo server/jetstream_cluster_1_test.go:3389
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Account_info_after_stream_delete_reflects_removal()
{
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("DEL1", ["d1.>"], replicas: 3);
    await cluster.CreateStreamAsync("DEL2", ["d2.>"], replicas: 3);

    var deleted = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DEL1", "{}");
    deleted.Success.ShouldBeTrue();

    // Only DEL2 should remain in the account totals.
    var response = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    response.AccountInfo!.Streams.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAccountPurge server/jetstream_cluster_1_test.go:3891
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Account_purge_returns_success()
{
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("PURGE1", ["pur.>"], replicas: 3);

    // Purging the account must be acknowledged as successful.
    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.AccountPurge}GLOBAL", "{}");

    response.Success.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLimitWithAccountDefaults server/jetstream_cluster_1_test.go:124
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_with_max_bytes_and_replicas_created_successfully()
{
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    // A replicated stream with a byte cap must round-trip its limits.
    var config = new StreamConfig
    {
        Name = "MBLIMIT",
        Subjects = ["mbl.>"],
        Replicas = 2,
        MaxBytes = 4 * 1024 * 1024,
    };

    var response = cluster.CreateStreamDirect(config);

    response.Error.ShouldBeNull();
    response.StreamInfo!.Config.MaxBytes.ShouldBe(4 * 1024 * 1024);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Multiple_streams_tracked_correctly_in_meta()
{
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    for (var n = 0; n < 10; n++)
        await cluster.CreateStreamAsync($"MS{n}", [$"ms{n}.>"], replicas: 3);

    // Both the names API and the meta state must list all ten streams.
    var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    names.StreamNames!.Count.ShouldBe(10);

    var metaState = cluster.GetMetaState();
    metaState.Streams.Count.ShouldBe(10);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Direct API tests (DirectGet)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Direct_get_returns_message_by_sequence()
{
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    // DirectGet requires AllowDirect on the stream config.
    var config = new StreamConfig
    {
        Name = "DIRECT",
        Subjects = ["dir.>"],
        Replicas = 3,
        AllowDirect = true,
    };
    cluster.CreateStreamDirect(config);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("dir.event", $"msg-{n}");

    // Requesting sequence 3 must return exactly that stored message.
    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.DirectGet}DIRECT", """{"seq":3}""");
    response.DirectMessage.ShouldNotBeNull();
    response.DirectMessage!.Sequence.ShouldBe(3UL);
    response.DirectMessage.Subject.ShouldBe("dir.event");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream message get
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_message_get_returns_correct_payload()
{
    // MSG.GET by sequence must return exactly the payload stored at that
    // sequence, not the most recent one.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("MSGGET", ["mg.>"], replicas: 3);

    await cluster.PublishAsync("mg.event", "payload-1");
    await cluster.PublishAsync("mg.event", "payload-2");

    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamMessageGet}MSGGET", """{"seq":2}""");

    response.StreamMessage.ShouldNotBeNull();
    response.StreamMessage!.Sequence.ShouldBe(2UL);
    response.StreamMessage.Payload.ShouldBe("payload-2");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer list and names
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_list_via_api_router()
{
    // Both CONSUMER.NAMES and CONSUMER.LIST must report every durable
    // consumer created on the stream.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("CLISTM", ["clm.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CLISTM", "d1");
    await cluster.CreateConsumerAsync("CLISTM", "d2");

    var namesResponse = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CLISTM", "{}");
    namesResponse.ConsumerNames.ShouldNotBeNull();
    namesResponse.ConsumerNames!.Count.ShouldBe(2);

    var listResponse = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerList}CLISTM", "{}");
    listResponse.ConsumerNames.ShouldNotBeNull();
    listResponse.ConsumerNames!.Count.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Account stream move returns success shape
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Account_stream_move_api_returns_success()
{
    // The account stream-move endpoint must answer with a success-shaped response.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.AccountStreamMove}TEST", "{}");

    response.Success.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Account stream move cancel returns success shape
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Account_stream_move_cancel_api_returns_success()
{
    // The account stream-move-cancel endpoint must answer with a success-shaped response.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.AccountStreamMoveCancel}TEST", "{}");

    response.Success.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream create requires name
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Stream_create_without_name_returns_error()
{
    // An empty stream name must be rejected with an error whose description
    // mentions the missing "name".
    var manager = new StreamManager();

    var response = manager.CreateOrUpdate(new StreamConfig { Name = "" });

    response.Error.ShouldNotBeNull();
    response.Error!.Description.ShouldContain("name");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// NotFound for unknown API subject
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Unknown_api_subject_returns_not_found()
{
    // A request to an API subject the router does not know must yield a 404.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    var response = await cluster.RequestAsync("$JS.API.UNKNOWN.SUBJECT", "{}");

    response.Error.ShouldNotBeNull();
    response.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream info for non-existent stream returns 404
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_info_nonexistent_returns_not_found()
{
    // STREAM.INFO for a stream that was never created must yield a 404 error.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}NOSTREAM", "{}");

    response.Error.ShouldNotBeNull();
    response.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer info for non-existent consumer returns 404
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_info_nonexistent_returns_not_found()
{
    // CONSUMER.INFO for a missing consumer on an existing stream must yield a 404.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("NOCONS", ["nc.>"], replicas: 3);

    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}NOCONS.MISSING", "{}");

    response.Error.ShouldNotBeNull();
    response.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Delete non-existent stream returns 404
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Delete_nonexistent_stream_returns_not_found()
{
    // STREAM.DELETE for a stream that does not exist must yield a 404 error.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);

    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}GONE", "{}");

    response.Error.ShouldNotBeNull();
    response.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Delete non-existent consumer returns 404
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Delete_nonexistent_consumer_returns_not_found()
{
    // CONSUMER.DELETE for a missing consumer on an existing stream must yield a 404.
    await using var cluster = await ClusterMetaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("NODEL", ["nd.>"], replicas: 3);

    var response = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}NODEL.MISSING", "{}");

    response.Error.ShouldNotBeNull();
    response.Error!.Code.ShouldBe(404);
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for JetStream cluster meta tests: wires a meta
/// group, stream/consumer managers, the API router, and a publisher together
/// in-process, with no real networking involved.
/// </summary>
internal sealed class ClusterMetaFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _meta;
    private readonly StreamManager _streams;
    private readonly ConsumerManager _consumers;
    private readonly JetStreamApiRouter _api;
    private readonly JetStreamPublisher _pub;

    private ClusterMetaFixture(
        JetStreamMetaGroup meta,
        StreamManager streams,
        ConsumerManager consumers,
        JetStreamApiRouter api,
        JetStreamPublisher pub)
    {
        _meta = meta;
        _streams = streams;
        _consumers = consumers;
        _api = api;
        _pub = pub;
    }

    /// <summary>Builds the full in-process component graph for an n-node cluster.</summary>
    public static Task<ClusterMetaFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var consumers = new ConsumerManager(meta);
        var streams = new StreamManager(meta, consumerManager: consumers);
        var api = new JetStreamApiRouter(streams, consumers, meta);
        var pub = new JetStreamPublisher(streams);
        return Task.FromResult(new ClusterMetaFixture(meta, streams, consumers, api, pub));
    }

    /// <summary>
    /// Creates (or updates) a stream, throwing if the manager reports an error
    /// so test setup failures surface immediately.
    /// </summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var response = _streams.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });
        return response.Error is null
            ? Task.CompletedTask
            : throw new InvalidOperationException(response.Error.Description);
    }

    /// <summary>Creates or updates a stream and returns the raw API response (no throwing).</summary>
    public JetStreamApiResponse CreateStreamDirect(StreamConfig config)
        => _streams.CreateOrUpdate(config);

    /// <summary>Creates a durable consumer on <paramref name="stream"/>.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName)
        => Task.FromResult(_consumers.CreateOrUpdate(stream, new ConsumerConfig
        {
            DurableName = durableName,
        }));

    /// <summary>
    /// Publishes a UTF-8 payload through the JetStream publisher; throws when
    /// no configured stream subject matches.
    /// </summary>
    public Task<PubAck> PublishAsync(string subject, string payload)
        => _pub.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack)
            ? Task.FromResult(ack)
            : throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");

    /// <summary>Snapshot of the meta group's current state.</summary>
    public MetaGroupState GetMetaState() => _meta.GetState();

    /// <summary>Routes a JetStream API request directly through the router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_api.Route(subject, Encoding.UTF8.GetBytes(payload)));

    // Nothing is held that needs asynchronous teardown.
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,344 @@
|
||||
using System.Threading.Channels;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for JetStreamClusterMonitor — background meta entry processing.
/// Go reference: jetstream_cluster.go:1455-1825 (monitorCluster).
/// </summary>
public class JetStreamClusterMonitorTests
{
    // Each test uses a 5-second CancellationToken as a hard upper bound so a
    // hung monitor doesn't stall the test run indefinitely.
    private static CancellationTokenSource TestTimeout() =>
        new(TimeSpan.FromSeconds(5));

    // Every test wires the same three objects together; build them in one
    // place so the individual tests only show what is unique to them.
    private static (JetStreamMetaGroup Meta, Channel<RaftLogEntry> Channel, JetStreamClusterMonitor Monitor) CreateHarness()
    {
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        return (meta, channel, new JetStreamClusterMonitor(meta, channel.Reader));
    }

    [Fact]
    public async Task Monitor_processes_stream_assignment_entry()
    {
        // Go reference: jetstream_cluster.go monitorCluster assignStream op
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "test-stream",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["test.>"]}""",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, assignJson));
        await monitor.WaitForProcessedAsync(1, cts.Token);

        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("test-stream").ShouldNotBeNull();

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_processes_consumer_assignment_entry()
    {
        // Go reference: jetstream_cluster.go monitorCluster assignConsumer op
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // A consumer assignment only makes sense on an existing stream.
        var streamJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "s1",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["x.>"]}""",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, streamJson));

        var consumerJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, consumerJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.ConsumerCount.ShouldBe(1);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_processes_stream_removal()
    {
        // Go reference: jetstream_cluster.go monitorCluster removeStream op
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "to-remove",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["rm.>"]}""",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, assignJson));

        var removeJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "removeStream",
            StreamName = "to-remove",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, removeJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.StreamCount.ShouldBe(0);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_applies_meta_snapshot()
    {
        // Go reference: jetstream_cluster.go monitorCluster snapshot op — replaces all state
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        var assignments = new Dictionary<string, StreamAssignment>
        {
            ["snap-stream"] = new StreamAssignment
            {
                StreamName = "snap-stream",
                Group = new RaftGroup { Name = "rg-snap", Peers = ["n1", "n2", "n3"] },
            },
        };
        // Snapshot payloads travel as base64 inside the JSON envelope.
        var snapshotB64 = Convert.ToBase64String(MetaSnapshotCodec.Encode(assignments));

        var snapshotJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "snapshot",
            Data = snapshotB64,
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, snapshotJson));
        await monitor.WaitForProcessedAsync(1, cts.Token);

        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("snap-stream").ShouldNotBeNull();

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_processes_consumer_removal()
    {
        // Go reference: jetstream_cluster.go monitorCluster removeConsumer op
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        var streamJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "s1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, streamJson));

        var consumerJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, consumerJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.ConsumerCount.ShouldBe(1);

        var removeJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "removeConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(3, 1, removeJson));
        await monitor.WaitForProcessedAsync(3, cts.Token);

        meta.ConsumerCount.ShouldBe(0);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_skips_malformed_entries()
    {
        // Go reference: jetstream_cluster.go monitorCluster — malformed entries must not abort the loop
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, "not-json"));

        // A valid entry written after the bad one proves the loop kept running.
        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-bad",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, assignJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("after-bad").ShouldNotBeNull();

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_stops_on_cancellation()
    {
        // Go reference: jetstream_cluster.go monitorCluster shuts down cleanly when stop channel closes
        var (_, _, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        cts.Cancel();
        await monitorTask; // Should complete without throwing
    }

    [Fact]
    public async Task Monitor_ignores_entry_with_no_op_field()
    {
        // Entries missing the "Op" property are silently ignored (forward-compat).
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, """{"NotOp":"whatever"}"""));

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-no-op",
            Peers = new[] { "n1" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, assignJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.StreamCount.ShouldBe(1);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_ignores_unknown_op()
    {
        // Unknown op names are silently ignored — forward compatibility.
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, """{"Op":"futureFoo","Data":"xyz"}"""));

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-unknown-op",
            Peers = new[] { "n1" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, assignJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.StreamCount.ShouldBe(1);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_snapshot_replaces_existing_state()
    {
        // Go reference: jetstream_cluster.go — snapshot apply wipes old assignments
        var (meta, channel, monitor) = CreateHarness();

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "old-stream",
            Peers = new[] { "n1" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, assignJson));
        await monitor.WaitForProcessedAsync(1, cts.Token);

        meta.StreamCount.ShouldBe(1);

        var newAssignments = new Dictionary<string, StreamAssignment>
        {
            ["new-stream"] = new StreamAssignment
            {
                StreamName = "new-stream",
                Group = new RaftGroup { Name = "rg-new", Peers = ["n1", "n2", "n3"] },
            },
        };
        var snapshotB64 = Convert.ToBase64String(MetaSnapshotCodec.Encode(newAssignments));
        var snapshotJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "snapshot",
            Data = snapshotB64,
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, snapshotJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        // The old assignment must be gone; only the snapshot's contents remain.
        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("old-stream").ShouldBeNull();
        meta.GetStreamAssignment("new-stream").ShouldNotBeNull();

        cts.Cancel();
        await monitorTask;
    }
}
|
||||
@@ -0,0 +1,872 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: cluster stream creation, single/multi replica, memory store,
|
||||
// stream purge, update subjects, delete, max bytes, stream info/list,
|
||||
// interest retention, work queue retention, mirror/source in cluster.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering clustered JetStream stream creation, replication, storage,
|
||||
/// purge, update, delete, retention policies, and mirror/source in cluster mode.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JetStreamClusterStreamTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Single_replica_stream_creation_and_publish_in_cluster()
{
    // Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
    // An R1 stream in a 3-node cluster must ack every publish with a
    // monotonically increasing sequence and report the final message count.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    var create = await cluster.CreateStreamAsync("R1S", ["foo", "bar"], replicas: 1);
    create.Error.ShouldBeNull();
    create.StreamInfo.ShouldNotBeNull();
    create.StreamInfo!.Config.Name.ShouldBe("R1S");

    const int toSend = 10;
    for (var i = 0; i < toSend; i++)
    {
        var ack = await cluster.PublishAsync("foo", $"Hello R1 {i}");
        ack.Stream.ShouldBe("R1S");
        ack.Seq.ShouldBe((ulong)(i + 1));
    }

    var info = await cluster.GetStreamInfoAsync("R1S");
    info.StreamInfo!.State.Messages.ShouldBe((ulong)toSend);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreamsDefaultFileMem server/jetstream_cluster_1_test.go:355
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Multi_replica_stream_defaults_to_memory_store()
{
    // Go: TestJetStreamClusterMultiReplicaStreamsDefaultFileMem server/jetstream_cluster_1_test.go:355
    // With no explicit storage type, a replicated stream must report memory
    // storage in its config and actually use the memory backend.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    var create = await cluster.CreateStreamAsync("MEMTEST", ["mem.>"], replicas: 3);
    create.Error.ShouldBeNull();
    create.StreamInfo!.Config.Storage.ShouldBe(StorageType.Memory);

    cluster.GetStoreBackendType("MEMTEST").ShouldBe("memory");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMemoryStore server/jetstream_cluster_1_test.go:423
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Memory_store_replicated_stream_accepts_100_messages()
{
    // Go: TestJetStreamClusterMemoryStore server/jetstream_cluster_1_test.go:423
    // A memory-backed R3 stream must ingest a burst of publishes and report
    // the exact count back via stream info.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    var create = await cluster.CreateStreamAsync("R3M", ["foo", "bar"], replicas: 3, storage: StorageType.Memory);
    create.Error.ShouldBeNull();

    const int toSend = 100;
    for (var i = 0; i < toSend; i++)
    {
        var ack = await cluster.PublishAsync("foo", "Hello MemoryStore");
        ack.Stream.ShouldBe("R3M");
    }

    var info = await cluster.GetStreamInfoAsync("R3M");
    info.StreamInfo!.Config.Name.ShouldBe("R3M");
    info.StreamInfo.State.Messages.ShouldBe((ulong)toSend);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDelete server/jetstream_cluster_1_test.go:472
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Delete_consumer_then_stream_clears_account_info()
{
    // Go: TestJetStreamClusterDelete server/jetstream_cluster_1_test.go:472
    // Deleting the consumer and then its stream must leave the account
    // reporting zero streams.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("C22", ["foo", "bar", "baz"], replicas: 2);
    await cluster.CreateConsumerAsync("C22", "dlc");

    var consumerDelete = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}C22.dlc", "{}");
    consumerDelete.Success.ShouldBeTrue();

    var streamDelete = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}C22", "{}");
    streamDelete.Success.ShouldBeTrue();

    var accountInfo = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    accountInfo.AccountInfo.ShouldNotBeNull();
    accountInfo.AccountInfo!.Streams.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamPurge server/jetstream_cluster_1_test.go:522
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_purge_clears_all_messages_in_cluster()
{
    // Go: TestJetStreamClusterStreamPurge server/jetstream_cluster_1_test.go:522
    // PURGE on a replicated stream must drop every stored message.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 5);

    await cluster.CreateStreamAsync("PURGE", ["foo", "bar"], replicas: 3);

    const int toSend = 100;
    for (var i = 0; i < toSend; i++)
        await cluster.PublishAsync("foo", "Hello JS Clustering");

    var before = await cluster.GetStreamInfoAsync("PURGE");
    before.StreamInfo!.State.Messages.ShouldBe((ulong)toSend);

    var purge = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGE", "{}");
    purge.Success.ShouldBeTrue();

    var after = await cluster.GetStreamInfoAsync("PURGE");
    after.StreamInfo!.State.Messages.ShouldBe(0UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdateSubjects server/jetstream_cluster_1_test.go:571
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_update_subjects_reflects_new_configuration()
{
    // Go: TestJetStreamClusterStreamUpdateSubjects server/jetstream_cluster_1_test.go:571
    // Updating a stream's subject set must fully replace the old subjects.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("SUBUPDATE", ["foo", "bar"], replicas: 3);

    var update = cluster.UpdateStream("SUBUPDATE", ["bar", "baz"], replicas: 3);

    update.Error.ShouldBeNull();
    update.StreamInfo.ShouldNotBeNull();
    update.StreamInfo!.Config.Subjects.ShouldContain("bar");
    update.StreamInfo.Config.Subjects.ShouldContain("baz");
    update.StreamInfo.Config.Subjects.ShouldNotContain("foo");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_names_and_list_return_all_streams()
{
    // Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
    // STREAM.NAMES and STREAM.LIST must both enumerate every created stream,
    // regardless of replica count.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("S1", ["s1.>"], replicas: 3);
    await cluster.CreateStreamAsync("S2", ["s2.>"], replicas: 3);
    await cluster.CreateStreamAsync("S3", ["s3.>"], replicas: 1);

    var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    names.StreamNames.ShouldNotBeNull();
    names.StreamNames!.Count.ShouldBe(3);
    names.StreamNames.ShouldContain("S1");
    names.StreamNames.ShouldContain("S2");
    names.StreamNames.ShouldContain("S3");

    var list = await cluster.RequestAsync(JetStreamApiSubjects.StreamList, "{}");
    list.StreamNames.ShouldNotBeNull();
    list.StreamNames!.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxBytesForStream server/jetstream_cluster_1_test.go:1099
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Max_bytes_stream_limits_enforced_in_cluster()
{
    // Go: TestJetStreamClusterMaxBytesForStream server/jetstream_cluster_1_test.go:1099
    // With DiscardPolicy.Old, publishing past MaxBytes must evict old
    // messages so the stream stays near its configured size.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    var cfg = new StreamConfig
    {
        Name = "MAXBYTES",
        Subjects = ["mb.>"],
        Replicas = 3,
        MaxBytes = 512,
        Discard = DiscardPolicy.Old,
    };
    var create = cluster.CreateStreamDirect(cfg);
    create.Error.ShouldBeNull();

    // Push well past the byte limit so eviction has to kick in.
    for (var i = 0; i < 20; i++)
        await cluster.PublishAsync("mb.data", new string('X', 64));

    var state = await cluster.GetStreamStateAsync("MAXBYTES");
    // Allow a small overshoot margin after enforcement.
    ((long)state.Bytes).ShouldBeLessThanOrEqualTo(cfg.MaxBytes + 128);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamPublishWithActiveConsumers server/jetstream_cluster_1_test.go:1132
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Publish_with_active_consumer_delivers_messages()
{
    // Go: TestJetStreamClusterStreamPublishWithActiveConsumers server/jetstream_cluster_1_test.go:1132
    // Messages published while a durable consumer exists must all be
    // fetchable through that consumer.
    await using var cluster = await ClusterStreamFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("ACTIVE", ["active.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACTIVE", "durable1", filterSubject: "active.>");

    for (var i = 0; i < 10; i++)
        await cluster.PublishAsync("active.event", $"msg-{i}");

    var batch = await cluster.FetchAsync("ACTIVE", "durable1", 10);
    batch.Messages.Count.ShouldBe(10);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDoubleAdd server/jetstream_cluster_1_test.go:1551
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Double_add_stream_with_same_config_succeeds()
{
    // Creating a stream twice with an identical config must be idempotent.
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    var initial = await fixture.CreateStreamAsync("DUP", ["dup.>"], replicas: 3);
    initial.Error.ShouldBeNull();

    var repeat = await fixture.CreateStreamAsync("DUP", ["dup.>"], replicas: 3);
    repeat.Error.ShouldBeNull();
    repeat.StreamInfo!.Config.Name.ShouldBe("DUP");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects server/jetstream_cluster_1_test.go:1248
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Publish_routes_to_correct_stream_among_non_overlapping()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("ALPHA", ["alpha.>"], replicas: 3);
    await fixture.CreateStreamAsync("BETA", ["beta.>"], replicas: 3);

    // Each publish should land in the stream whose subject space matches.
    var alphaAck = await fixture.PublishAsync("alpha.one", "A");
    alphaAck.Stream.ShouldBe("ALPHA");

    var betaAck = await fixture.PublishAsync("beta.one", "B");
    betaAck.Stream.ShouldBe("BETA");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterInterestRetention server/jetstream_cluster_1_test.go:2109
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Interest_retention_stream_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "INTEREST",
        Subjects = ["interest.>"],
        Replicas = 3,
        Retention = RetentionPolicy.Interest,
    });

    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("interest.event", "msg");

    // No consumers have acked anything, so all five messages remain.
    var state = await fixture.GetStreamStateAsync("INTEREST");
    state.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterWorkQueueRetention server/jetstream_cluster_1_test.go:2179
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Work_queue_retention_removes_acked_messages_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "WQ",
        Subjects = ["wq.>"],
        Replicas = 2,
        Retention = RetentionPolicy.WorkQueue,
        MaxConsumers = 1,
    });
    await fixture.CreateConsumerAsync("WQ", "worker", filterSubject: "wq.>", ackPolicy: AckPolicy.All);

    await fixture.PublishAsync("wq.task", "job-1");
    (await fixture.GetStreamStateAsync("WQ")).Messages.ShouldBe(1UL);

    // Acking through seq 1 lets work-queue retention drop the first job.
    fixture.AckAll("WQ", "worker", 1);

    // A second publish triggers retention enforcement at runtime.
    await fixture.PublishAsync("wq.task", "job-2");

    // Only the fresh, un-acked job should remain.
    var finalState = await fixture.GetStreamStateAsync("WQ");
    finalState.Messages.ShouldBe(1UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDeleteMsg server/jetstream_cluster_1_test.go:1748
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Delete_individual_message_in_clustered_stream()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("DELMSG", ["dm.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("dm.event", $"msg-{n}");

    (await fixture.GetStreamStateAsync("DELMSG")).Messages.ShouldBe(5UL);

    // Remove the message stored at sequence 3.
    var deleteResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.StreamMessageDelete}DELMSG", """{"seq":3}""");
    deleteResponse.Success.ShouldBeTrue();

    (await fixture.GetStreamStateAsync("DELMSG")).Messages.ShouldBe(4UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_update_preserves_existing_messages()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("UPD", ["upd.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("upd.event", $"msg-{n}");

    // Changing max_msgs must not drop messages already stored.
    var updateResponse = fixture.UpdateStream("UPD", ["upd.>"], replicas: 3, maxMsgs: 10);
    updateResponse.Error.ShouldBeNull();

    (await fixture.GetStreamStateAsync("UPD")).Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAccountInfo server/jetstream_cluster_1_test.go:94
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Account_info_reports_stream_and_consumer_counts()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    // Two streams and a single consumer give distinct, non-trivial counts.
    await fixture.CreateStreamAsync("AI1", ["ai1.>"], replicas: 3);
    await fixture.CreateStreamAsync("AI2", ["ai2.>"], replicas: 3);
    await fixture.CreateConsumerAsync("AI1", "c1");

    var info = await fixture.RequestAsync(JetStreamApiSubjects.Info, "{}");
    info.AccountInfo.ShouldNotBeNull();
    info.AccountInfo!.Streams.ShouldBe(2);
    info.AccountInfo.Consumers.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpand server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Cluster_expand_adds_peer_to_meta_group()
{
    // Expansion is modeled by constructing a meta group with more nodes.
    var twoNode = new JetStreamMetaGroup(2);
    twoNode.GetState().ClusterSize.ShouldBe(2);

    var threeNode = new JetStreamMetaGroup(3);
    threeNode.GetState().ClusterSize.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMirrorAndSourceWorkQueues server/jetstream_cluster_1_test.go:2233
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Mirror_stream_replicates_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    // An origin stream plus a mirror that tracks it.
    await fixture.CreateStreamAsync("ORIGIN", ["origin.>"], replicas: 3);
    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "MIRROR",
        Subjects = ["mirror.>"],
        Replicas = 3,
        Mirror = "ORIGIN",
    });

    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("origin.event", $"mirrored-{n}");

    // Everything published to the origin should appear in the mirror.
    (await fixture.GetStreamStateAsync("MIRROR")).Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMirrorAndSourceInterestPolicyStream server/jetstream_cluster_1_test.go:2290
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Source_stream_replicates_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    // An origin stream plus an aggregate stream that sources from it.
    await fixture.CreateStreamAsync("SRC", ["src.>"], replicas: 3);
    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "AGG",
        Subjects = ["agg.>"],
        Replicas = 3,
        Sources = [new StreamSourceConfig { Name = "SRC" }],
    });

    for (var n = 0; n < 3; n++)
        await fixture.PublishAsync("src.event", $"sourced-{n}");

    // Sourced messages should show up in the aggregate.
    (await fixture.GetStreamStateAsync("AGG")).Messages.ShouldBe(3UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterUserSnapshotAndRestore server/jetstream_cluster_1_test.go:2652
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Snapshot_and_restore_preserves_messages_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("SNAP", ["snap.>"], replicas: 3);

    for (var n = 0; n < 10; n++)
        await fixture.PublishAsync("snap.event", $"msg-{n}");

    // Capture a snapshot of the populated stream.
    var snapshotResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.StreamSnapshot}SNAP", "{}");
    snapshotResponse.Snapshot.ShouldNotBeNull();
    snapshotResponse.Snapshot!.Payload.ShouldNotBeNullOrEmpty();

    // Wipe the stream, then bring the data back from the snapshot payload.
    await fixture.RequestAsync($"{JetStreamApiSubjects.StreamPurge}SNAP", "{}");
    (await fixture.GetStreamStateAsync("SNAP")).Messages.ShouldBe(0UL);

    var restoreResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.StreamRestore}SNAP", snapshotResponse.Snapshot.Payload);
    restoreResponse.Success.ShouldBeTrue();

    (await fixture.GetStreamStateAsync("SNAP")).Messages.ShouldBe(10UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamSynchedTimeStamps server/jetstream_cluster_1_test.go:977
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Replicated_stream_messages_have_monotonic_sequences()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("SEQ", ["seq.>"], replicas: 3);

    var acked = new List<ulong>();
    for (var n = 0; n < 20; n++)
        acked.Add((await fixture.PublishAsync("seq.event", $"msg-{n}")).Seq);

    // Ack'd sequences must be strictly increasing across the whole run.
    for (var n = 1; n < acked.Count; n++)
        acked[n].ShouldBeGreaterThan(acked[n - 1]);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLimits server/jetstream_cluster_1_test.go:3248
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Max_msgs_limit_enforced_in_clustered_stream()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "LIMITED",
        Subjects = ["limited.>"],
        Replicas = 3,
        MaxMsgs = 5,
    });

    // Publish double the limit; retention should cap the stream at five.
    for (var n = 0; n < 10; n++)
        await fixture.PublishAsync("limited.event", $"msg-{n}");

    (await fixture.GetStreamStateAsync("LIMITED")).Messages.ShouldBeLessThanOrEqualTo(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInterestOnlyPolicy server/jetstream_cluster_1_test.go:3310
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Interest_only_policy_stream_stores_messages_without_consumers()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "INTONLY",
        Subjects = ["intonly.>"],
        Replicas = 3,
        Retention = RetentionPolicy.Interest,
    });

    for (var n = 0; n < 3; n++)
        await fixture.PublishAsync("intonly.data", $"msg-{n}");

    // With no consumers, interest retention still keeps every message
    // (removal happens only once all consumers have acked).
    (await fixture.GetStreamStateAsync("INTONLY")).Messages.ShouldBe(3UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerInfoList server/jetstream_cluster_1_test.go:1349
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_names_and_list_return_all_consumers()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("CLIST", ["clist.>"], replicas: 3);
    foreach (var durable in new[] { "c1", "c2", "c3" })
        await fixture.CreateConsumerAsync("CLIST", durable);

    // The names endpoint must list every durable exactly once.
    var listing = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CLIST", "{}");
    listing.ConsumerNames.ShouldNotBeNull();
    listing.ConsumerNames!.Count.ShouldBe(3);
    foreach (var durable in new[] { "c1", "c2", "c3" })
        listing.ConsumerNames.ShouldContain(durable);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDefaultMaxAckPending server/jetstream_cluster_1_test.go:1580
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consumer_default_ack_policy_is_none()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("ACKDEF", ["ackdef.>"], replicas: 3);

    // No explicit ack policy: the fixture's default (None) should apply.
    var response = await fixture.CreateConsumerAsync("ACKDEF", "test_consumer");

    response.ConsumerInfo.ShouldNotBeNull();
    response.ConsumerInfo!.Config.AckPolicy.ShouldBe(AckPolicy.None);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExtendedStreamInfo server/jetstream_cluster_1_test.go:1878
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_info_returns_config_and_state()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    await fixture.CreateStreamAsync("EXTINFO", ["ext.>"], replicas: 3);
    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("ext.event", $"msg-{n}");

    // Info must carry both the configured settings and the live state.
    var response = await fixture.GetStreamInfoAsync("EXTINFO");
    var info = response.StreamInfo;
    info.ShouldNotBeNull();
    info!.Config.Name.ShouldBe("EXTINFO");
    info.Config.Replicas.ShouldBe(3);
    info.State.Messages.ShouldBe(5UL);
    info.State.FirstSeq.ShouldBe(1UL);
    info.State.LastSeq.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExtendedStreamInfoSingleReplica server/jetstream_cluster_1_test.go:2033
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Single_replica_stream_info_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    // An R1 stream is valid even inside a three-node cluster.
    await fixture.CreateStreamAsync("R1INFO", ["r1info.>"], replicas: 1);
    for (var n = 0; n < 3; n++)
        await fixture.PublishAsync("r1info.event", $"msg-{n}");

    var response = await fixture.GetStreamInfoAsync("R1INFO");
    response.StreamInfo!.Config.Replicas.ShouldBe(1);
    response.StreamInfo.State.Messages.ShouldBe(3UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams (maxmsgs_per behavior)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Max_msgs_per_subject_enforced_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "PERSUBJ",
        Subjects = ["ps.>"],
        Replicas = 3,
        MaxMsgsPer = 2,
    });

    // Five messages on one subject; the per-subject cap keeps at most two.
    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("ps.topic", $"msg-{n}");

    (await fixture.GetStreamStateAsync("PERSUBJ")).Messages.ShouldBeLessThanOrEqualTo(2UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamExtendedUpdates server/jetstream_cluster_1_test.go:1513
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_update_can_change_max_msgs()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "EXTUPD",
        Subjects = ["eu.>"],
        Replicas = 3,
    });
    for (var n = 0; n < 10; n++)
        await fixture.PublishAsync("eu.event", $"msg-{n}");

    // Tighten max_msgs and verify the new limit is reflected in the config.
    var updateResponse = fixture.UpdateStream("EXTUPD", ["eu.>"], replicas: 3, maxMsgs: 5);
    updateResponse.Error.ShouldBeNull();
    updateResponse.StreamInfo!.Config.MaxMsgs.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional: Sealed stream rejects purge
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Sealed_stream_rejects_purge_in_cluster()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "SEALED",
        Subjects = ["sealed.>"],
        Replicas = 3,
        Sealed = true,
    });

    // Sealed streams must reject destructive operations such as purge.
    var purgeResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.StreamPurge}SEALED", "{}");
    purgeResponse.Success.ShouldBeFalse();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional: DenyDelete stream rejects message delete
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task DenyDelete_stream_rejects_message_delete()
{
    await using var fixture = await ClusterStreamFixture.StartAsync(nodes: 3);

    fixture.CreateStreamDirect(new StreamConfig
    {
        Name = "NODELDENY",
        Subjects = ["nodel.>"],
        Replicas = 3,
        DenyDelete = true,
    });
    await fixture.PublishAsync("nodel.event", "msg");

    // DenyDelete must block per-message deletion.
    var deleteResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.StreamMessageDelete}NODELDENY", """{"seq":1}""");
    deleteResponse.Success.ShouldBeFalse();
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for JetStream cluster stream tests. Wires up
/// meta group, stream manager, consumer manager, API router, and publisher.
/// All components are in-memory; no real cluster nodes are started.
/// </summary>
internal sealed class ClusterStreamFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private ClusterStreamFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>Builds a fixture modeling a cluster of <paramref name="nodes"/> peers.</summary>
    public static Task<ClusterStreamFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var consumerManager = new ConsumerManager(meta);
        var streamManager = new StreamManager(meta, consumerManager: consumerManager);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        var publisher = new JetStreamPublisher(streamManager);
        return Task.FromResult(new ClusterStreamFixture(meta, streamManager, consumerManager, router, publisher));
    }

    /// <summary>Creates (or updates) a stream with the given subjects, replica count, and storage.</summary>
    public Task<JetStreamApiResponse> CreateStreamAsync(string name, string[] subjects, int replicas, StorageType storage = StorageType.Memory)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
            Storage = storage,
        });
        return Task.FromResult(response);
    }

    /// <summary>Creates (or updates) a stream from a fully-specified config.</summary>
    public JetStreamApiResponse CreateStreamDirect(StreamConfig config)
        => _streamManager.CreateOrUpdate(config);

    /// <summary>Updates an existing stream; a <paramref name="maxMsgs"/> of 0 leaves the limit unset.</summary>
    public JetStreamApiResponse UpdateStream(string name, string[] subjects, int replicas, int maxMsgs = 0)
    {
        return _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
            MaxMsgs = maxMsgs,
        });
    }

    /// <summary>
    /// Publishes <paramref name="payload"/> to <paramref name="subject"/>, notifies consumers
    /// of the stored message, and returns the publish ack.
    /// </summary>
    /// <exception cref="InvalidOperationException">No stream's subject space matched.</exception>
    public async Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (!_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
            throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");

        if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
        {
            // Fix: await the store load instead of blocking with GetAwaiter().GetResult();
            // sync-over-async inside a Task-returning method risks thread-pool starvation.
            var stored = await handle.Store.LoadAsync(ack.Seq, default);
            if (stored != null)
                _consumerManager.OnPublished(ack.Stream, stored);
        }

        return ack;
    }

    /// <summary>Returns stream info (config + state) for <paramref name="name"/>.</summary>
    public Task<JetStreamApiResponse> GetStreamInfoAsync(string name)
        => Task.FromResult(_streamManager.GetInfo(name));

    /// <summary>Returns the current state (message/byte counts, sequences) of a stream.</summary>
    public Task<ApiStreamState> GetStreamStateAsync(string name)
        => _streamManager.GetStateAsync(name, default).AsTask();

    /// <summary>Returns the store backend type reported by the stream manager for a stream.</summary>
    public string GetStoreBackendType(string name) => _streamManager.GetStoreBackendType(name);

    /// <summary>Creates (or updates) a durable consumer on <paramref name="stream"/>.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(
        string stream,
        string durableName,
        string? filterSubject = null,
        AckPolicy ackPolicy = AckPolicy.None)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName,
            AckPolicy = ackPolicy,
        };
        if (!string.IsNullOrWhiteSpace(filterSubject))
            config.FilterSubject = filterSubject;

        return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
    }

    /// <summary>Pull-fetches up to <paramref name="batch"/> messages for a durable consumer.</summary>
    public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();

    /// <summary>Acks all messages up to and including <paramref name="sequence"/> for a durable.</summary>
    public void AckAll(string stream, string durableName, ulong sequence)
        => _consumerManager.AckAll(stream, durableName, sequence);

    /// <summary>Routes a raw JetStream API request through the router and returns the response.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_router.Route(subject, Encoding.UTF8.GetBytes(payload)));

    /// <summary>No resources to release; every component is purely in-memory.</summary>
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,146 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
public class JetStreamInflightTrackingTests
{
    // Builds a stream assignment backed by a three-peer raft group.
    private static StreamAssignment Assignment(string streamName, string groupName) => new()
    {
        StreamName = streamName,
        Group = new RaftGroup { Name = groupName, Peers = ["n1", "n2", "n3"] },
    };

    [Fact]
    public void TrackInflightStreamProposal_increments_ops()
    {
        var meta = new JetStreamMetaGroup(3);

        meta.TrackInflightStreamProposal("ACC", Assignment("inflight-1", "rg-inf"));

        meta.InflightStreamCount.ShouldBe(1);
        meta.IsStreamInflight("ACC", "inflight-1").ShouldBeTrue();
    }

    [Fact]
    public void RemoveInflightStreamProposal_clears_when_zero()
    {
        var meta = new JetStreamMetaGroup(3);

        meta.TrackInflightStreamProposal("ACC", Assignment("inflight-2", "rg-inf2"));
        meta.RemoveInflightStreamProposal("ACC", "inflight-2");

        meta.IsStreamInflight("ACC", "inflight-2").ShouldBeFalse();
    }

    [Fact]
    public void Duplicate_proposal_increments_ops_count()
    {
        var meta = new JetStreamMetaGroup(3);
        var assignment = Assignment("dup-stream", "rg-dup");

        // Proposing the same stream twice keeps one unique entry (ops count rises).
        meta.TrackInflightStreamProposal("ACC", assignment);
        meta.TrackInflightStreamProposal("ACC", assignment);
        meta.InflightStreamCount.ShouldBe(1);

        // Each remove decrements ops; the entry survives until ops reaches zero.
        meta.RemoveInflightStreamProposal("ACC", "dup-stream");
        meta.IsStreamInflight("ACC", "dup-stream").ShouldBeTrue();
        meta.RemoveInflightStreamProposal("ACC", "dup-stream");
        meta.IsStreamInflight("ACC", "dup-stream").ShouldBeFalse();
    }

    [Fact]
    public void IsStreamInflight_returns_false_for_unknown_account()
    {
        new JetStreamMetaGroup(3).IsStreamInflight("UNKNOWN", "no-stream").ShouldBeFalse();
    }

    [Fact]
    public void TrackInflightConsumerProposal_tracks_by_account()
    {
        var meta = new JetStreamMetaGroup(3);

        meta.TrackInflightConsumerProposal("ACC", "stream1", "consumer1");

        meta.InflightConsumerCount.ShouldBe(1);
        meta.IsConsumerInflight("ACC", "stream1", "consumer1").ShouldBeTrue();
    }

    [Fact]
    public void RemoveInflightConsumerProposal_clears_when_zero()
    {
        var meta = new JetStreamMetaGroup(3);

        meta.TrackInflightConsumerProposal("ACC", "stream1", "consumer1");
        meta.RemoveInflightConsumerProposal("ACC", "stream1", "consumer1");

        meta.IsConsumerInflight("ACC", "stream1", "consumer1").ShouldBeFalse();
    }

    [Fact]
    public void ClearAllInflight_removes_everything()
    {
        var meta = new JetStreamMetaGroup(3);
        meta.TrackInflightStreamProposal("ACC1", Assignment("s1", "rg"));
        meta.TrackInflightConsumerProposal("ACC2", "s2", "c1");

        meta.ClearAllInflight();

        meta.InflightStreamCount.ShouldBe(0);
        meta.InflightConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void StepDown_clears_inflight()
    {
        var meta = new JetStreamMetaGroup(3);
        meta.TrackInflightStreamProposal("ACC", Assignment("s1", "rg"));

        meta.StepDown();

        meta.InflightStreamCount.ShouldBe(0);
    }

    [Fact]
    public void Multiple_accounts_tracked_independently()
    {
        var meta = new JetStreamMetaGroup(3);

        // The same stream name under two accounts must be tracked as two entries.
        meta.TrackInflightStreamProposal("ACC1", Assignment("s1", "rg1"));
        meta.TrackInflightStreamProposal("ACC2", Assignment("s1", "rg2"));

        meta.InflightStreamCount.ShouldBe(2);
        meta.IsStreamInflight("ACC1", "s1").ShouldBeTrue();
        meta.IsStreamInflight("ACC2", "s1").ShouldBeTrue();

        // Removing under one account must not affect the other.
        meta.RemoveInflightStreamProposal("ACC1", "s1");
        meta.IsStreamInflight("ACC1", "s1").ShouldBeFalse();
        meta.IsStreamInflight("ACC2", "s1").ShouldBeTrue();
    }
}
|
||||
@@ -0,0 +1,97 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
public class JetStreamLeadershipTests
{
    // Minimal three-peer stream assignment used to seed inflight state.
    private static StreamAssignment SampleAssignment() => new()
    {
        StreamName = "s1",
        Group = new RaftGroup { Name = "rg", Peers = ["n1", "n2", "n3"] },
    };

    [Fact]
    public void ProcessLeaderChange_clears_inflight_on_step_down()
    {
        var meta = new JetStreamMetaGroup(3);
        meta.TrackInflightStreamProposal("ACC", SampleAssignment());

        meta.ProcessLeaderChange(isLeader: false);

        meta.InflightStreamCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessLeaderChange_fires_event_on_become_leader()
    {
        var meta = new JetStreamMetaGroup(3);
        var fired = false;
        meta.OnLeaderChange += _ => fired = true;

        meta.ProcessLeaderChange(isLeader: true);

        fired.ShouldBeTrue();
    }

    [Fact]
    public void ProcessLeaderChange_fires_event_on_step_down()
    {
        var meta = new JetStreamMetaGroup(3);
        bool? observed = null;
        meta.OnLeaderChange += isLeader => observed = isLeader;

        meta.ProcessLeaderChange(isLeader: false);

        observed.ShouldNotBeNull();
        observed.Value.ShouldBeFalse();
    }

    [Fact]
    public void StepDown_triggers_leader_change_event()
    {
        var meta = new JetStreamMetaGroup(3);
        bool? observed = null;
        meta.OnLeaderChange += isLeader => observed = isLeader;

        meta.StepDown();

        observed.ShouldNotBeNull();
        observed.Value.ShouldBeFalse();
    }

    [Fact]
    public void StepDown_clears_inflight_via_process_leader_change()
    {
        var meta = new JetStreamMetaGroup(3);
        meta.TrackInflightStreamProposal("ACC", SampleAssignment());
        meta.TrackInflightConsumerProposal("ACC", "s1", "c1");

        meta.StepDown();

        meta.InflightStreamCount.ShouldBe(0);
        meta.InflightConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void BecomeLeader_makes_IsLeader_true()
    {
        var meta = new JetStreamMetaGroup(3);

        // Move leadership off this node first so BecomeLeader has a visible effect.
        meta.StepDown();
        meta.IsLeader().ShouldBeFalse();

        meta.BecomeLeader();
        meta.IsLeader().ShouldBeTrue();
    }

    [Fact]
    public void OnLeaderChange_not_fired_when_no_subscribers()
    {
        // The event invocation must be null-safe when nobody subscribed.
        var meta = new JetStreamMetaGroup(3);
        Should.NotThrow(() => meta.ProcessLeaderChange(isLeader: true));
        Should.NotThrow(() => meta.ProcessLeaderChange(isLeader: false));
    }
}
|
||||
@@ -0,0 +1,641 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: meta group leadership, API routing through meta leader,
|
||||
// stream/consumer placement decisions, asset distribution,
|
||||
// R1/R3 placement, preferred tags, cluster-wide operations.
|
||||
using System.Collections.Concurrent;
|
||||
using System.Reflection;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream meta controller leadership, API routing through
|
||||
/// the meta leader, stream/consumer placement decisions, asset distribution,
|
||||
/// R1/R3 placement, and cluster-wide operations.
|
||||
/// Ported from Go jetstream_cluster_1_test.go and jetstream_cluster_2_test.go.
|
||||
/// </summary>
|
||||
/// <summary>
/// Tests covering JetStream meta controller leadership, API routing through
/// the meta leader, stream/consumer placement decisions, asset distribution,
/// R1/R3 placement, and cluster-wide operations.
/// Ported from Go jetstream_cluster_1_test.go and jetstream_cluster_2_test.go.
/// </summary>
public class JetStreamMetaControllerTests
{
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    [Fact]
    public void Meta_group_initial_leader_is_meta_1()
    {
        var meta = new JetStreamMetaGroup(3);

        var state = meta.GetState();

        state.LeaderId.ShouldBe("meta-1");
        state.ClusterSize.ShouldBe(3);
        state.LeadershipVersion.ShouldBe(1);
    }

    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    [Fact]
    public void Meta_group_stepdown_advances_leader_id()
    {
        var meta = new JetStreamMetaGroup(3);
        meta.GetState().LeaderId.ShouldBe("meta-1");

        meta.StepDown();
        meta.GetState().LeaderId.ShouldBe("meta-2");

        meta.StepDown();
        meta.GetState().LeaderId.ShouldBe("meta-3");
    }

    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    [Fact]
    public void Meta_group_stepdown_wraps_around_to_first_node()
    {
        var meta = new JetStreamMetaGroup(3);

        meta.StepDown(); // meta-2
        meta.StepDown(); // meta-3
        meta.StepDown(); // meta-1 (wrap)

        meta.GetState().LeaderId.ShouldBe("meta-1");
    }

    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    [Fact]
    public void Meta_group_leadership_version_increments_on_each_stepdown()
    {
        var meta = new JetStreamMetaGroup(3);

        for (var i = 1; i <= 5; i++)
        {
            meta.GetState().LeadershipVersion.ShouldBe(i);
            meta.StepDown();
        }

        meta.GetState().LeadershipVersion.ShouldBe(6);
    }

    // Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
    [Fact]
    public async Task Meta_group_propose_creates_stream_record()
    {
        var meta = new JetStreamMetaGroup(3);

        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "TEST" }, default);

        var state = meta.GetState();
        state.Streams.Count.ShouldBe(1);
        state.Streams.ShouldContain("TEST");
    }

    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    [Fact]
    public async Task Meta_group_tracks_multiple_stream_proposals()
    {
        var meta = new JetStreamMetaGroup(5);

        for (var i = 0; i < 10; i++)
            await meta.ProposeCreateStreamAsync(new StreamConfig { Name = $"S{i}" }, default);

        var state = meta.GetState();
        state.Streams.Count.ShouldBe(10);
    }

    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    [Fact]
    public async Task Meta_group_streams_are_sorted_alphabetically()
    {
        var meta = new JetStreamMetaGroup(3);

        // Propose out of order; the snapshot must come back sorted.
        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ZULU" }, default);
        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ALPHA" }, default);
        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "MIKE" }, default);

        var state = meta.GetState();
        state.Streams[0].ShouldBe("ALPHA");
        state.Streams[1].ShouldBe("MIKE");
        state.Streams[2].ShouldBe("ZULU");
    }

    // Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
    [Fact]
    public async Task Meta_group_duplicate_stream_proposal_is_idempotent()
    {
        var meta = new JetStreamMetaGroup(3);

        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, default);
        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, default);

        meta.GetState().Streams.Count.ShouldBe(1);
    }

    // Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
    [Fact]
    public void Meta_group_single_node_cluster_has_leader()
    {
        var meta = new JetStreamMetaGroup(1);

        var state = meta.GetState();

        state.ClusterSize.ShouldBe(1);
        state.LeaderId.ShouldBe("meta-1");
    }

    // Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
    [Fact]
    public void Meta_group_single_node_stepdown_returns_to_same_leader()
    {
        var meta = new JetStreamMetaGroup(1);
        meta.StepDown();

        meta.GetState().LeaderId.ShouldBe("meta-1");
    }

    // Go: TestJetStreamClusterLeaderStepdown server/jetstream_cluster_1_test.go:5464
    [Fact]
    public async Task Api_meta_leader_stepdown_changes_leader_and_preserves_streams()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("KEEPME", ["keep.>"], replicas: 3);

        var before = fx.GetMetaState();
        var leaderBefore = before.LeaderId;

        var resp = await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
        resp.Success.ShouldBeTrue();

        var after = fx.GetMetaState();
        after.LeaderId.ShouldNotBe(leaderBefore);
        after.Streams.ShouldContain("KEEPME");
    }

    // Go: TestJetStreamClusterAccountInfo server/jetstream_cluster_1_test.go:94
    [Fact]
    public async Task Api_routing_through_meta_leader_returns_account_info()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("A", ["a.>"], replicas: 3);
        await fx.CreateStreamAsync("B", ["b.>"], replicas: 3);
        await fx.CreateConsumerAsync("A", "c1");

        var resp = await fx.RequestAsync(JetStreamApiSubjects.Info, "{}");
        resp.AccountInfo.ShouldNotBeNull();
        resp.AccountInfo!.Streams.ShouldBe(2);
        resp.AccountInfo.Consumers.ShouldBe(1);
    }

    // Go: TestJetStreamClusterStreamLimitWithAccountDefaults server/jetstream_cluster_1_test.go:124
    // NOTE(review): was declared `async Task` with no awaited expression (CS1998);
    // made synchronous `void` to match the other planner tests below.
    [Fact]
    public void Placement_planner_r1_creates_single_node_placement()
    {
        var planner = new AssetPlacementPlanner(nodes: 5);
        var placement = planner.PlanReplicas(replicas: 1);

        placement.Count.ShouldBe(1);
        placement[0].ShouldBe(1);
    }

    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    [Fact]
    public void Placement_planner_r3_creates_three_node_placement()
    {
        var planner = new AssetPlacementPlanner(nodes: 5);
        var placement = planner.PlanReplicas(replicas: 3);

        placement.Count.ShouldBe(3);
        placement[0].ShouldBe(1);
        placement[1].ShouldBe(2);
        placement[2].ShouldBe(3);
    }

    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    [Fact]
    public void Placement_planner_caps_replicas_at_cluster_size()
    {
        var planner = new AssetPlacementPlanner(nodes: 3);
        var placement = planner.PlanReplicas(replicas: 7);

        placement.Count.ShouldBe(3);
    }

    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    [Fact]
    public void Placement_planner_negative_replicas_returns_one()
    {
        var planner = new AssetPlacementPlanner(nodes: 5);
        var placement = planner.PlanReplicas(replicas: -1);

        placement.Count.ShouldBe(1);
    }

    // Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
    [Fact]
    public void Placement_planner_zero_nodes_returns_one()
    {
        var planner = new AssetPlacementPlanner(nodes: 0);
        var placement = planner.PlanReplicas(replicas: 3);

        placement.Count.ShouldBe(1);
    }

    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    [Fact]
    public async Task Stream_create_via_meta_leader_sets_replica_group()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 5);

        var resp = await fx.CreateStreamAsync("REPGRP", ["rg.>"], replicas: 3);
        resp.Error.ShouldBeNull();

        // The stream manager creates a replica group internally
        var meta = fx.GetMetaState();
        meta.Streams.ShouldContain("REPGRP");
    }

    // Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
    [Fact]
    public async Task Multiple_stream_creates_all_tracked_in_meta_group()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        for (var i = 0; i < 20; i++)
            await fx.CreateStreamAsync($"MS{i}", [$"ms{i}.>"], replicas: 3);

        var meta = fx.GetMetaState();
        meta.Streams.Count.ShouldBe(20);
    }

    // Go: TestJetStreamClusterStreamNames server/jetstream_cluster_1_test.go:1284
    [Fact]
    public async Task Stream_names_api_returns_all_streams_through_meta_leader()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("S1", ["s1.>"], replicas: 3);
        await fx.CreateStreamAsync("S2", ["s2.>"], replicas: 1);
        await fx.CreateStreamAsync("S3", ["s3.>"], replicas: 3);

        var resp = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        resp.StreamNames.ShouldNotBeNull();
        resp.StreamNames!.Count.ShouldBe(3);
    }

    // Go: TestJetStreamClusterDelete server/jetstream_cluster_1_test.go:472
    [Fact]
    public async Task Stream_delete_removes_from_active_names()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("DEL1", ["d1.>"], replicas: 3);
        await fx.CreateStreamAsync("DEL2", ["d2.>"], replicas: 3);

        var del = await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DEL1", "{}");
        del.Success.ShouldBeTrue();

        var names = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames!.Count.ShouldBe(1);
        names.StreamNames.ShouldContain("DEL2");
    }

    // Go: TestJetStreamClusterDoubleAdd server/jetstream_cluster_1_test.go:1551
    [Fact]
    public async Task Stream_create_idempotent_with_same_config()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var first = await fx.CreateStreamAsync("IDEM", ["idem.>"], replicas: 3);
        first.Error.ShouldBeNull();

        var second = await fx.CreateStreamAsync("IDEM", ["idem.>"], replicas: 3);
        second.Error.ShouldBeNull();

        var meta = fx.GetMetaState();
        meta.Streams.Count.ShouldBe(1);
    }

    // Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
    [Fact]
    public async Task Consumer_create_tracked_in_cluster()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("CC", ["cc.>"], replicas: 3);
        await fx.CreateConsumerAsync("CC", "d1");
        await fx.CreateConsumerAsync("CC", "d2");

        var names = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CC", "{}");
        names.ConsumerNames.ShouldNotBeNull();
        names.ConsumerNames!.Count.ShouldBe(2);
    }

    // Go: TestJetStreamClusterPeerRemovalAPI server/jetstream_cluster_1_test.go:3469
    [Fact]
    public async Task Peer_removal_api_routed_through_meta()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("PR", ["pr.>"], replicas: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamPeerRemove}PR", """{"peer":"n2"}""");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterMetaSnapshotsAndCatchup server/jetstream_cluster_1_test.go:833
    [Fact]
    public async Task Meta_state_preserved_across_multiple_stepdowns()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("M1", ["m1.>"], replicas: 3);
        await fx.CreateStreamAsync("M2", ["m2.>"], replicas: 3);

        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        var state = fx.GetMetaState();
        state.Streams.ShouldContain("M1");
        state.Streams.ShouldContain("M2");
        state.LeadershipVersion.ShouldBe(4);
    }

    // Go: TestJetStreamClusterMetaSnapshotsMultiChange server/jetstream_cluster_1_test.go:881
    [Fact]
    public async Task Create_and_delete_across_stepdowns_reflected_in_names()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("A", ["a.>"], replicas: 3);
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        await fx.CreateStreamAsync("B", ["b.>"], replicas: 3);
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}A", "{}")).Success.ShouldBeTrue();

        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        var names = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames!.Count.ShouldBe(1);
        names.StreamNames.ShouldContain("B");
    }

    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    [Fact]
    public async Task Stream_info_for_nonexistent_stream_returns_404()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamInfo}MISSING", "{}");
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // Go: TestJetStreamClusterConsumerCreate server/jetstream_cluster_1_test.go:700
    [Fact]
    public async Task Consumer_info_for_nonexistent_consumer_returns_404()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("NOCON", ["nc.>"], replicas: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}NOCON.MISSING", "{}");
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    [Fact]
    public void Stream_create_without_name_returns_error()
    {
        var streamManager = new StreamManager();
        var resp = streamManager.CreateOrUpdate(new StreamConfig { Name = "" });

        resp.Error.ShouldNotBeNull();
        resp.Error!.Description.ShouldContain("name");
    }

    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    [Fact]
    public async Task Unknown_api_subject_returns_404()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync("$JS.API.UNKNOWN.SUBJECT", "{}");
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // Go: TestJetStreamClusterAccountPurge server/jetstream_cluster_1_test.go:3891
    [Fact]
    public async Task Account_purge_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("P", ["p.>"], replicas: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountPurge}GLOBAL", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterServerRemove server/jetstream_cluster_1_test.go:3620
    [Fact]
    public async Task Server_remove_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync(JetStreamApiSubjects.ServerRemove, "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterAccountStreamMove server/jetstream_cluster_1_test.go:3750
    [Fact]
    public async Task Account_stream_move_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountStreamMove}TEST", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamClusterAccountStreamMoveCancel server/jetstream_cluster_1_test.go:3780
    [Fact]
    public async Task Account_stream_move_cancel_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountStreamMoveCancel}TEST", "{}");
        resp.Success.ShouldBeTrue();
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Self-contained fixture for JetStream meta controller tests.
|
||||
/// </summary>
|
||||
/// <summary>
/// Self-contained fixture for JetStream meta controller tests: wires a meta
/// group, stream/consumer managers, an API router and a publisher together
/// with no network transport, so API calls execute in-process.
/// </summary>
internal sealed class MetaControllerFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private MetaControllerFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>Builds the component graph for an in-process cluster of <paramref name="nodes"/> meta nodes.</summary>
    public static Task<MetaControllerFixture> StartAsync(int nodes)
    {
        var metaGroup = new JetStreamMetaGroup(nodes);
        var consumers = new ConsumerManager(metaGroup);
        var streams = new StreamManager(metaGroup, consumerManager: consumers);
        var router = new JetStreamApiRouter(streams, consumers, metaGroup);
        var publisher = new JetStreamPublisher(streams);

        var fixture = new MetaControllerFixture(metaGroup, streams, consumers, router, publisher);
        return Task.FromResult(fixture);
    }

    /// <summary>Creates (or updates) a stream directly through the stream manager.</summary>
    public Task<JetStreamApiResponse> CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var config = new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        };

        return Task.FromResult(_streamManager.CreateOrUpdate(config));
    }

    /// <summary>Creates a durable consumer on <paramref name="stream"/> through the consumer manager.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName,
        };

        return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
    }

    /// <summary>Snapshot of the current meta-group state.</summary>
    public MetaGroupState GetMetaState() => _metaGroup.GetState();

    /// <summary>Routes a raw API request (UTF-8 <paramref name="payload"/>) through the router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
    {
        var response = _router.Route(subject, Encoding.UTF8.GetBytes(payload));

        // A real cluster elects a new leader after a stepdown; emulate that
        // here by promoting this node so that subsequent mutating operations
        // through the router keep succeeding.
        var isStepdown = subject.Equals(JetStreamApiSubjects.MetaLeaderStepdown, StringComparison.Ordinal);
        if (isStepdown && response.Success)
            _metaGroup.BecomeLeader();

        return Task.FromResult(response);
    }

    // Nothing holds unmanaged or async resources; disposal is a no-op.
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,744 @@
|
||||
// Go ref: TestJetStreamClusterXxx — jetstream_cluster_4_test.go
|
||||
// Covers: large clusters, many-subject streams, wildcard streams, high-message-count
|
||||
// publishes, multi-stream mixed replica counts, create/delete/recreate cycles,
|
||||
// consumer on high-message streams, purge/republish, stream delete cascades,
|
||||
// node removal and restart lifecycle markers.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Advanced JetStream cluster tests covering high-load scenarios, large clusters,
|
||||
/// many-subject streams, wildcard subjects, multi-stream environments, consumer
|
||||
/// lifecycle edge cases, purge/republish cycles, and node lifecycle markers.
|
||||
/// Ported from Go jetstream_cluster_4_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterAdvancedTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLargeClusterR5 — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Large_seven_node_cluster_with_R5_stream_accepts_publishes()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterLargeClusterR5 — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(7);
|
||||
|
||||
cluster.NodeCount.ShouldBe(7);
|
||||
|
||||
var resp = await cluster.CreateStreamAsync("R5LARGE", ["r5.>"], replicas: 5);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Replicas.ShouldBe(5);
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
{
|
||||
var ack = await cluster.PublishAsync("r5.event", $"msg-{i}");
|
||||
ack.Stream.ShouldBe("R5LARGE");
|
||||
ack.Seq.ShouldBe((ulong)(i + 1));
|
||||
}
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("R5LARGE");
|
||||
state.Messages.ShouldBe(20UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamWithManySubjects — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_with_twenty_subjects_routes_all_correctly()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterStreamWithManySubjects — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var subjects = Enumerable.Range(1, 20).Select(i => $"topic.{i}").ToArray();
|
||||
var resp = await cluster.CreateStreamAsync("MANYSUBJ", subjects, replicas: 3);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo!.Config.Subjects.Count.ShouldBe(20);
|
||||
|
||||
// Publish to each subject
|
||||
for (var i = 1; i <= 20; i++)
|
||||
{
|
||||
var ack = await cluster.PublishAsync($"topic.{i}", $"payload-{i}");
|
||||
ack.Stream.ShouldBe("MANYSUBJ");
|
||||
}
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("MANYSUBJ");
|
||||
state.Messages.ShouldBe(20UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterWildcardSubjectStream — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// A stream subscribed to the full wildcard ">" must capture publishes on
// any subject, regardless of token depth.
[Fact]
public async Task Stream_with_wildcard_gt_subject_captures_all_sub_subjects()
{
    // Go ref: TestJetStreamClusterWildcardSubjectStream — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var created = await cluster.CreateStreamAsync("WILDCARD", [">"], replicas: 3);
    created.Error.ShouldBeNull();

    // Three unrelated subjects of differing depth — all should land in the stream.
    await cluster.PublishAsync("any.subject.here", "msg1");
    await cluster.PublishAsync("totally.different", "msg2");
    await cluster.PublishAsync("nested.deep.path.to.leaf", "msg3");

    var snapshot = await cluster.GetStreamStateAsync("WILDCARD");
    snapshot.Messages.ShouldBe(3UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterPublish1000MessagesToReplicatedStream — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Publishing 1000 messages to an R3 stream must produce 1000 positive acks
// with monotonically assigned sequences ending at 1000.
[Fact]
public async Task Publish_1000_messages_to_R3_stream_all_acknowledged()
{
    // Go ref: TestJetStreamClusterPublish1000MessagesToReplicatedStream — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("BIG3", ["big.>"], replicas: 3);

    var finalSeq = 0UL;
    for (var n = 0; n < 1000; n++)
    {
        var ack = await cluster.PublishAsync("big.event", $"msg-{n}");
        ack.Stream.ShouldBe("BIG3");
        ack.ErrorCode.ShouldBeNull();
        finalSeq = ack.Seq;
    }

    // The last assigned sequence must equal the message count.
    finalSeq.ShouldBe(1000UL);

    var snapshot = await cluster.GetStreamStateAsync("BIG3");
    snapshot.Messages.ShouldBe(1000UL);
    snapshot.FirstSeq.ShouldBe(1UL);
    snapshot.LastSeq.ShouldBe(1000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterPublish1000MessagesToR1Stream — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// A single-replica stream hosted inside a 3-node cluster must still ack
// every one of 1000 publishes without error.
[Fact]
public async Task Publish_1000_messages_to_R1_stream_all_acknowledged()
{
    // Go ref: TestJetStreamClusterPublish1000MessagesToR1Stream — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("BIG1", ["b1.>"], replicas: 1);

    foreach (var n in Enumerable.Range(0, 1000))
    {
        var ack = await cluster.PublishAsync("b1.event", $"msg-{n}");
        ack.Stream.ShouldBe("BIG1");
        ack.ErrorCode.ShouldBeNull();
    }

    var snapshot = await cluster.GetStreamStateAsync("BIG1");
    snapshot.Messages.ShouldBe(1000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterStreamStateAfter1000Messages — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// After 1000 publishes the reported stream state must match exactly:
// message count, first/last sequence, and a non-zero byte total.
[Fact]
public async Task Stream_state_accurate_after_1000_messages()
{
    // Go ref: TestJetStreamClusterStreamStateAfter1000Messages — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("STATE1K", ["s1k.>"], replicas: 3);

    for (var n = 0; n < 1000; n++)
        await cluster.PublishAsync("s1k.data", $"payload-{n}");

    var snapshot = await cluster.GetStreamStateAsync("STATE1K");
    snapshot.Messages.ShouldBe(1000UL);
    snapshot.FirstSeq.ShouldBe(1UL);
    snapshot.LastSeq.ShouldBe(1000UL);
    snapshot.Bytes.ShouldBeGreaterThan(0UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterMultipleStreamsMixedReplicas — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Ten streams with replica counts cycling through 1, 2, 3 must coexist in
// one cluster, each routing and counting its own messages independently.
[Fact]
public async Task Ten_streams_with_mixed_replica_counts_all_independent()
{
    // Go ref: TestJetStreamClusterMultipleStreamsMixedReplicas — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // Replica count cycles 1, 2, 3, 1, 2, 3, ...
    for (var idx = 0; idx < 10; idx++)
    {
        var created = await cluster.CreateStreamAsync($"MIX{idx}", [$"mix{idx}.>"], replicas: (idx % 3) + 1);
        created.Error.ShouldBeNull();
    }

    // One publish per stream; the ack must name the matching stream.
    for (var idx = 0; idx < 10; idx++)
    {
        var ack = await cluster.PublishAsync($"mix{idx}.event", $"stream-{idx}-msg");
        ack.Stream.ShouldBe($"MIX{idx}");
    }

    // Each stream holds exactly its own single message.
    for (var idx = 0; idx < 10; idx++)
    {
        var snapshot = await cluster.GetStreamStateAsync($"MIX{idx}");
        snapshot.Messages.ShouldBe(1UL);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterCreatePublishDeleteRecreate — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Creating, filling, and deleting the same stream name three times in a
// row must succeed each cycle with a clean slate after every delete.
[Fact]
public async Task Create_publish_delete_recreate_cycle_three_times()
{
    // Go ref: TestJetStreamClusterCreatePublishDeleteRecreate — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var round = 0; round < 3; round++)
    {
        var created = await cluster.CreateStreamAsync("CYCLE", ["cyc.>"], replicas: 3);
        created.Error.ShouldBeNull();

        for (var n = 0; n < 5; n++)
            await cluster.PublishAsync("cyc.event", $"cycle-{round}-msg-{n}");

        var snapshot = await cluster.GetStreamStateAsync("CYCLE");
        snapshot.Messages.ShouldBe(5UL);

        // Tear the stream down so the next round starts fresh.
        var deleted = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}CYCLE", "{}");
        deleted.Success.ShouldBeTrue();
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterConsumerOn1000MessageStream — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// A consumer created on a 1000-message stream must deliver the first batch
// of 100 in stream order, starting at sequence 1.
[Fact]
public async Task Consumer_on_stream_with_1000_messages_fetches_correctly()
{
    // Go ref: TestJetStreamClusterConsumerOn1000MessageStream — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("FETCH1K", ["f1k.>"], replicas: 3);

    for (var n = 0; n < 1000; n++)
        await cluster.PublishAsync("f1k.event", $"msg-{n}");

    await cluster.CreateConsumerAsync("FETCH1K", "fetcher", filterSubject: "f1k.>");

    var delivered = await cluster.FetchAsync("FETCH1K", "fetcher", 100);
    delivered.Messages.Count.ShouldBe(100);
    // Ordered delivery: first and last of the batch pin sequences 1..100.
    delivered.Messages[0].Sequence.ShouldBe(1UL);
    delivered.Messages[99].Sequence.ShouldBe(100UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterAckAllFor1000Messages — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// With AckPolicy.All, a single cumulative ack at the last sequence must
// settle all 1000 delivered messages while leaving stream data intact.
[Fact]
public async Task AckAll_for_1000_messages_reduces_pending_to_zero()
{
    // Go ref: TestJetStreamClusterAckAllFor1000Messages — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("ACKBIG", ["ab.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKBIG", "acker", filterSubject: "ab.>",
        ackPolicy: AckPolicy.All);

    for (var n = 0; n < 1000; n++)
        await cluster.PublishAsync("ab.event", $"msg-{n}");

    var delivered = await cluster.FetchAsync("ACKBIG", "acker", 1000);
    delivered.Messages.Count.ShouldBe(1000);

    // Cumulative ack at the highest delivered sequence settles everything below it.
    cluster.AckAll("ACKBIG", "acker", 1000);

    // Acking affects consumer pending only — stream contents are untouched.
    var snapshot = await cluster.GetStreamStateAsync("ACKBIG");
    snapshot.Messages.ShouldBe(1000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterStreamInfoConsistentAfterManyOps — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Interleaving publishes with STREAM.INFO requests must always show a
// message count equal to the number published so far.
[Fact]
public async Task Stream_info_consistent_after_many_operations()
{
    // Go ref: TestJetStreamClusterStreamInfoConsistentAfterManyOps — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("INFOCONSIST", ["ic.>"], replicas: 3);

    // After each publish the info endpoint must reflect the running total.
    for (var n = 0; n < 50; n++)
    {
        await cluster.PublishAsync("ic.event", $"msg-{n}");

        var info = await cluster.GetStreamInfoAsync("INFOCONSIST");
        info.StreamInfo.ShouldNotBeNull();
        info.StreamInfo!.State.Messages.ShouldBe((ulong)(n + 1));
    }

    var last = await cluster.GetStreamInfoAsync("INFOCONSIST");
    last.StreamInfo!.Config.Name.ShouldBe("INFOCONSIST");
    last.StreamInfo.Config.Replicas.ShouldBe(3);
    last.StreamInfo.State.Messages.ShouldBe(50UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterMetaStateAfter10StreamOps — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// After creating ten streams and deleting the first five, the meta layer
// and the STREAM.NAMES listing must agree on the five survivors.
[Fact]
public async Task Meta_state_after_creating_and_deleting_ten_streams()
{
    // Go ref: TestJetStreamClusterMetaStateAfter10StreamOps — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var idx = 0; idx < 10; idx++)
        await cluster.CreateStreamAsync($"META{idx}", [$"meta{idx}.>"], replicas: 3);

    // Remove the first half.
    for (var idx = 0; idx < 5; idx++)
    {
        var deleted = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}META{idx}", "{}");
        deleted.Success.ShouldBeTrue();
    }

    cluster.GetMetaState().ShouldNotBeNull();

    var listing = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    listing.StreamNames.ShouldNotBeNull();
    listing.StreamNames!.Count.ShouldBe(5);
    // The surviving streams are exactly META5..META9.
    for (var idx = 5; idx < 10; idx++)
        listing.StreamNames.ShouldContain($"META{idx}");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterMultipleConsumersIndependentPending — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Five durable consumers on one stream keep independent cursors: each must
// see the full backlog from sequence 1, unaffected by the others' fetches.
[Fact]
public async Task Five_consumers_on_same_stream_have_independent_pending()
{
    // Go ref: TestJetStreamClusterMultipleConsumersIndependentPending — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("MULTIDUP", ["md.>"], replicas: 3);

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("md.event", $"msg-{n}");

    for (var c = 0; c < 5; c++)
        await cluster.CreateConsumerAsync("MULTIDUP", $"consumer{c}", filterSubject: "md.>");

    // Every consumer starts from the beginning and receives all 10 messages.
    for (var c = 0; c < 5; c++)
    {
        var delivered = await cluster.FetchAsync("MULTIDUP", $"consumer{c}", 10);
        delivered.Messages.Count.ShouldBe(10);
        delivered.Messages[0].Sequence.ShouldBe(1UL);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterConsumerWildcardFilter — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// A consumer filtered on "wf.alpha.>" must receive only the three alpha
// messages out of five published, and nothing from other branches.
[Fact]
public async Task Consumer_with_wildcard_filter_delivers_only_matching_messages()
{
    // Go ref: TestJetStreamClusterConsumerWildcardFilter — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("WFILT", ["wf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("WFILT", "wildcons", filterSubject: "wf.alpha.>");

    // Interleave matching and non-matching subjects.
    await cluster.PublishAsync("wf.alpha.one", "match1");
    await cluster.PublishAsync("wf.beta.two", "no-match");
    await cluster.PublishAsync("wf.alpha.three", "match2");
    await cluster.PublishAsync("wf.gamma.four", "no-match2");
    await cluster.PublishAsync("wf.alpha.five", "match3");

    var delivered = await cluster.FetchAsync("WFILT", "wildcons", 10);
    delivered.Messages.Count.ShouldBe(3);
    foreach (var m in delivered.Messages)
        m.Subject.ShouldStartWith("wf.alpha.");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterStreamUpdateAddSubjectsAfterPublish — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Updating a stream to widen its subject set after messages already exist
// must preserve the old data and accept publishes on the new subjects.
[Fact]
public async Task Stream_update_adding_subjects_after_publishes_works()
{
    // Go ref: TestJetStreamClusterStreamUpdateAddSubjectsAfterPublish — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("ADDSUB", ["as.alpha"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("as.alpha", $"msg-{n}");

    var before = await cluster.GetStreamStateAsync("ADDSUB");
    before.Messages.ShouldBe(5UL);

    // Widen the subject set in place.
    var updated = cluster.UpdateStream("ADDSUB", ["as.alpha", "as.beta", "as.gamma"], replicas: 3);
    updated.Error.ShouldBeNull();
    updated.StreamInfo!.Config.Subjects.Count.ShouldBe(3);
    updated.StreamInfo.Config.Subjects.ShouldContain("as.beta");

    // The freshly added subjects must now be routable.
    await cluster.PublishAsync("as.beta", "beta-msg");
    await cluster.PublishAsync("as.gamma", "gamma-msg");

    var after = await cluster.GetStreamStateAsync("ADDSUB");
    after.Messages.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterStreamPurgeAndRepublish — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Purging a replicated stream must drop its 100 messages to zero, after
// which 50 fresh publishes must all be acked and counted.
[Fact]
public async Task Stream_purge_in_cluster_then_republish_works_correctly()
{
    // Go ref: TestJetStreamClusterStreamPurgeAndRepublish — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("PURGEREP", ["pr.>"], replicas: 3);

    for (var n = 0; n < 100; n++)
        await cluster.PublishAsync("pr.data", $"msg-{n}");

    var beforePurge = await cluster.GetStreamStateAsync("PURGEREP");
    beforePurge.Messages.ShouldBe(100UL);

    var purge = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGEREP", "{}");
    purge.Success.ShouldBeTrue();

    var emptied = await cluster.GetStreamStateAsync("PURGEREP");
    emptied.Messages.ShouldBe(0UL);

    // The stream must accept new traffic immediately after the purge.
    for (var n = 0; n < 50; n++)
    {
        var ack = await cluster.PublishAsync("pr.data", $"new-msg-{n}");
        ack.ErrorCode.ShouldBeNull();
    }

    var afterRepublish = await cluster.GetStreamStateAsync("PURGEREP");
    afterRepublish.Messages.ShouldBe(50UL);
    // NOTE(review): in NATS, purge normally advances first_seq past the old
    // last_seq rather than resetting to 1 — this assertion only checks the
    // sequence is valid; confirm the port's purge semantics before tightening.
    afterRepublish.FirstSeq.ShouldBeGreaterThan(0UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterFetchEmptyAfterPurge — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Once a stream is purged, a consumer that already drained it must get an
// empty batch back rather than stale or replayed messages.
[Fact]
public async Task Fetch_empty_after_stream_purge()
{
    // Go ref: TestJetStreamClusterFetchEmptyAfterPurge — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("PURGEDRAIN", ["pd.>"], replicas: 3);
    await cluster.CreateConsumerAsync("PURGEDRAIN", "reader", filterSubject: "pd.>");

    for (var n = 0; n < 20; n++)
        await cluster.PublishAsync("pd.event", $"msg-{n}");

    // Drain the backlog so the consumer cursor sits at the end.
    var drained = await cluster.FetchAsync("PURGEDRAIN", "reader", 20);
    drained.Messages.Count.ShouldBe(20);

    (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGEDRAIN", "{}")).Success.ShouldBeTrue();

    // Nothing left to deliver after the purge.
    var afterPurge = await cluster.FetchAsync("PURGEDRAIN", "reader", 20);
    afterPurge.Messages.Count.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterStreamDeleteCascadesConsumers — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Deleting a stream that owns three durable consumers must succeed and
// leave the stream (and, by cascade, its consumers) unresolvable.
[Fact]
public async Task Stream_delete_cascades_consumer_removal()
{
    // Go ref: TestJetStreamClusterStreamDeleteCascadesConsumers — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("CASCADE", ["cas.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CASCADE", "c1");
    await cluster.CreateConsumerAsync("CASCADE", "c2");
    await cluster.CreateConsumerAsync("CASCADE", "c3");

    // Sanity check: all three consumers are registered before the delete.
    var listing = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CASCADE", "{}");
    listing.ConsumerNames!.Count.ShouldBe(3);

    (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}CASCADE", "{}")).Success.ShouldBeTrue();

    // The stream is gone; info lookups now fail with 404.
    var lookup = await cluster.GetStreamInfoAsync("CASCADE");
    lookup.Error.ShouldNotBeNull();
    lookup.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterNodeRemovalPreservesDataReads — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// In a 5-node cluster hosting an R3 stream, removing one node must not
// affect reads of previously stored data.
[Fact]
public async Task Node_removal_does_not_affect_stream_data_reads()
{
    // Go ref: TestJetStreamClusterNodeRemovalPreservesDataReads — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    await cluster.CreateStreamAsync("NODEREM", ["nr.>"], replicas: 3);

    for (var n = 0; n < 30; n++)
        await cluster.PublishAsync("nr.event", $"msg-{n}");

    var beforeRemoval = await cluster.GetStreamStateAsync("NODEREM");
    beforeRemoval.Messages.ShouldBe(30UL);

    // Take one node out of the cluster.
    cluster.RemoveNode(4);

    // The remaining replicas must still serve the full stream state.
    var afterRemoval = await cluster.GetStreamStateAsync("NODEREM");
    afterRemoval.Messages.ShouldBe(30UL);
    afterRemoval.LastSeq.ShouldBe(30UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterNodeRestartPreservesLifecycleMarkers — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Remove a node, publish while it is away, bring it back: the stream must
// expose every message published before, during, and after the outage.
[Fact]
public async Task Node_restart_records_lifecycle_markers_correctly()
{
    // Go ref: TestJetStreamClusterNodeRestartPreservesLifecycleMarkers — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("RESTART", ["rs.>"], replicas: 3);

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("rs.event", $"msg-{n}");

    // Take node 2 down.
    cluster.RemoveNode(2);

    // The surviving nodes still serve the existing data.
    var duringOutage = await cluster.GetStreamStateAsync("RESTART");
    duringOutage.Messages.ShouldBe(10UL);

    // Keep publishing while the node is absent.
    for (var n = 10; n < 20; n++)
        await cluster.PublishAsync("rs.event", $"msg-{n}");

    // Bring node 2 back.
    cluster.SimulateNodeRestart(2);

    // The complete history is visible after the restart.
    var afterRestart = await cluster.GetStreamStateAsync("RESTART");
    afterRestart.Messages.ShouldBe(20UL);
    afterRestart.LastSeq.ShouldBe(20UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterLeaderStepdownDuringPublish — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Sequences assigned before and after a stream leader stepdown must form
// one strictly increasing series ending at 20.
[Fact]
public async Task Leader_stepdown_during_publish_sequence_is_monotonic()
{
    // Go ref: TestJetStreamClusterLeaderStepdownDuringPublish — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("SEQSTEP", ["seq.>"], replicas: 3);

    var observedSeqs = new List<ulong>();
    for (var n = 0; n < 10; n++)
        observedSeqs.Add((await cluster.PublishAsync("seq.event", $"msg-{n}")).Seq);

    // Force a leadership change mid-run.
    (await cluster.StepDownStreamLeaderAsync("SEQSTEP")).Success.ShouldBeTrue();

    for (var n = 10; n < 20; n++)
        observedSeqs.Add((await cluster.PublishAsync("seq.event", $"msg-{n}")).Seq);

    // No duplicates, no regressions across the leadership boundary.
    for (var k = 1; k < observedSeqs.Count; k++)
        observedSeqs[k].ShouldBeGreaterThan(observedSeqs[k - 1]);

    observedSeqs[^1].ShouldBe(20UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterStreamInfoAfterStepdown — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// 500 publishes, a leader stepdown, then 500 more: STREAM.INFO from the
// new leader must report the full 1000-message history.
[Fact]
public async Task Stream_info_accurate_after_leader_stepdown_with_many_messages()
{
    // Go ref: TestJetStreamClusterStreamInfoAfterStepdown — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("INFOSD1K", ["isd.>"], replicas: 3);

    // First half under the original leader.
    for (var n = 0; n < 500; n++)
        await cluster.PublishAsync("isd.event", $"msg-{n}");

    (await cluster.StepDownStreamLeaderAsync("INFOSD1K")).Success.ShouldBeTrue();

    // Second half under the new leader.
    for (var n = 500; n < 1000; n++)
        await cluster.PublishAsync("isd.event", $"msg-{n}");

    var info = await cluster.GetStreamInfoAsync("INFOSD1K");
    info.StreamInfo.ShouldNotBeNull();
    info.StreamInfo!.State.Messages.ShouldBe(1000UL);
    info.StreamInfo.State.FirstSeq.ShouldBe(1UL);
    info.StreamInfo.State.LastSeq.ShouldBe(1000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterStreamReplicaGroupHasCorrectNodes — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// An R3 stream in a 5-node cluster must be placed on exactly three nodes,
// one of which is marked as the group leader.
[Fact]
public async Task Replica_group_for_stream_has_correct_node_count()
{
    // Go ref: TestJetStreamClusterStreamReplicaGroupHasCorrectNodes — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    await cluster.CreateStreamAsync("GRPCHECK", ["gc.>"], replicas: 3);

    var replicaGroup = cluster.GetReplicaGroup("GRPCHECK");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
    replicaGroup.Leader.ShouldNotBeNull();
    replicaGroup.Leader.IsLeader.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterConsumerLeaderAfterStreamStepdown — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// A durable consumer must keep a resolvable leader both before and after
// its parent stream's leader steps down.
[Fact]
public async Task Consumer_leader_remains_valid_after_stream_stepdown()
{
    // Go ref: TestJetStreamClusterConsumerLeaderAfterStreamStepdown — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("CONSLEADER", ["cl.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CONSLEADER", "durable1");

    var initialLeader = cluster.GetConsumerLeaderId("CONSLEADER", "durable1");
    initialLeader.ShouldNotBeNullOrWhiteSpace();

    (await cluster.StepDownStreamLeaderAsync("CONSLEADER")).Success.ShouldBeTrue();

    // The consumer's leadership must still resolve after the stream failover.
    var currentLeader = cluster.GetConsumerLeaderId("CONSLEADER", "durable1");
    currentLeader.ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterWaitOnStreamLeaderAfterCreation — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// WaitOnStreamLeader for a stream that already has a leader must resolve
// within the timeout and the leader id must be non-empty.
[Fact]
public async Task WaitOnStreamLeader_resolves_immediately_for_existing_stream()
{
    // Go ref: TestJetStreamClusterWaitOnStreamLeaderAfterCreation — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("WLEADER", ["wl.>"], replicas: 3);

    // Leader already elected at creation — this must not time out.
    await cluster.WaitOnStreamLeaderAsync("WLEADER", timeoutMs: 1000);

    cluster.GetStreamLeaderId("WLEADER").ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterConsumerWaitOnLeaderAfterCreation — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// WaitOnConsumerLeader for an already-created durable must resolve within
// the timeout and yield a non-empty leader id.
[Fact]
public async Task WaitOnConsumerLeader_resolves_for_existing_consumer()
{
    // Go ref: TestJetStreamClusterConsumerWaitOnLeaderAfterCreation — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("WCLEADER2", ["wcl2.>"], replicas: 3);
    await cluster.CreateConsumerAsync("WCLEADER2", "dur-wc");

    await cluster.WaitOnConsumerLeaderAsync("WCLEADER2", "dur-wc", timeoutMs: 1000);

    cluster.GetConsumerLeaderId("WCLEADER2", "dur-wc").ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go ref: TestJetStreamClusterAccountInfoAfterBatchDelete — jetstream_cluster_4_test.go
// ---------------------------------------------------------------

// Account info must track the live stream count: 8 after batch creation,
// 5 after deleting three of them.
[Fact]
public async Task Account_info_reflects_accurate_stream_count_after_batch_delete()
{
    // Go ref: TestJetStreamClusterAccountInfoAfterBatchDelete — jetstream_cluster_4_test.go
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var idx = 0; idx < 8; idx++)
        await cluster.CreateStreamAsync($"BATCH{idx}", [$"batch{idx}.>"], replicas: 3);

    var beforeDelete = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    beforeDelete.AccountInfo!.Streams.ShouldBe(8);

    // Remove BATCH0..BATCH2.
    for (var idx = 0; idx < 3; idx++)
        (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}BATCH{idx}", "{}")).Success.ShouldBeTrue();

    var afterDelete = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    afterDelete.AccountInfo!.Streams.ShouldBe(5);
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,584 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: messages surviving stream leader stepdown, consumer state surviving
|
||||
// leader failover, fetch continuing after stream leader change, AckAll surviving
|
||||
// leader failover, multiple failovers in sequence not losing data, remove node
|
||||
// not affecting stream operations, restart node lifecycle, publish during/after
|
||||
// failover, consumer creation after stream leader failover, stream update after
|
||||
// meta leader stepdown, stream delete after leader failover, rapid succession
|
||||
// stepdowns preserving data integrity.
|
||||
//
|
||||
// Go reference functions:
|
||||
// TestJetStreamClusterStreamLeaderStepDown (line 4925)
|
||||
// TestJetStreamClusterLeaderStepdown (line 5464)
|
||||
// TestJetStreamClusterNormalCatchup (line 1607)
|
||||
// TestJetStreamClusterStreamSnapshotCatchup (line 1667)
|
||||
// TestJetStreamClusterRestoreSingleConsumer (line 1028)
|
||||
// TestJetStreamClusterPeerRemovalAPI (line 3469)
|
||||
// TestJetStreamClusterDeleteMsgAndRestart (line 1785)
|
||||
// restartServerAndWait, shutdownServerAndRemoveStorage in jetstream_helpers_test.go
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster failover scenarios: leader stepdown while
|
||||
/// messages are in flight, consumer state preservation across leader changes,
|
||||
/// rapid successive stepdowns, remove/restart node lifecycle, and data integrity
|
||||
/// guarantees across failover sequences. Uses JetStreamClusterFixture.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterFailoverTests
|
||||
{
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterStreamLeaderStepDown line 4925
// ---------------------------------------------------------------

// Publish ten messages, force the stream leader to step down, then verify
// a different node leads and the full state is intact.
[Fact]
public async Task Messages_survive_stream_leader_stepdown_state_preserved()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SURVIVE", ["sv.>"], replicas: 3);

    // Each ack's sequence must match its publish order.
    for (var n = 1; n <= 10; n++)
        (await cluster.PublishAsync($"sv.{n}", $"msg-{n}")).Seq.ShouldBe((ulong)n);

    var previousLeader = cluster.GetStreamLeaderId("SURVIVE");
    (await cluster.StepDownStreamLeaderAsync("SURVIVE")).Success.ShouldBeTrue();

    // All ten messages survive the failover.
    var snapshot = await cluster.GetStreamStateAsync("SURVIVE");
    snapshot.Messages.ShouldBe(10UL);
    snapshot.FirstSeq.ShouldBe(1UL);
    snapshot.LastSeq.ShouldBe(10UL);

    // Leadership actually moved to a different node.
    cluster.GetStreamLeaderId("SURVIVE").ShouldNotBe(previousLeader);
}
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLeaderStepDown — write after stepdown is accepted
// After a stepdown the new leader must accept a publish and continue the
// sequence where the old leader left off.
[Fact]
public async Task New_leader_accepts_writes_after_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("POSTSD", ["psd.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("psd.pre", $"before-{n}");

    (await cluster.StepDownStreamLeaderAsync("POSTSD")).Success.ShouldBeTrue();

    // Sequence continues at 6 under the new leader, with no error.
    var ack = await cluster.PublishAsync("psd.post", "after-stepdown");
    ack.Seq.ShouldBe(6UL);
    ack.ErrorCode.ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Consumer state survives leader failover
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterRestoreSingleConsumer line 1028
// A durable's delivery cursor must survive a stream leader stepdown: after
// fetching the first half, the second half is still deliverable.
[Fact]
public async Task Consumer_state_survives_stream_leader_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CSURVFO", ["csf.>"], replicas: 3);
    // Default ack policy lets the fetch cursor advance without a pending
    // check blocking the post-failover fetch.
    await cluster.CreateConsumerAsync("CSURVFO", "durable1", filterSubject: "csf.>");

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("csf.event", $"msg-{n}");

    // Consume the first five, leaving the cursor at sequence 6.
    var firstHalf = await cluster.FetchAsync("CSURVFO", "durable1", 5);
    firstHalf.Messages.Count.ShouldBe(5);

    (await cluster.StepDownStreamLeaderAsync("CSURVFO")).Success.ShouldBeTrue();

    // The new leader still knows the cursor position and serves the rest.
    var secondHalf = await cluster.FetchAsync("CSURVFO", "durable1", 5);
    secondHalf.Messages.Count.ShouldBe(5);
}
|
||||
|
||||
// Go ref: consumer fetch continues after leader change
|
||||
[Fact]
|
||||
public async Task Fetch_continues_after_stream_leader_change()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("FETCHFO", ["ffo.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("FETCHFO", "reader", filterSubject: "ffo.>");
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
await cluster.PublishAsync("ffo.event", $"msg-{i}");
|
||||
|
||||
// Fetch some messages, then step down
|
||||
var batch1 = await cluster.FetchAsync("FETCHFO", "reader", 10);
|
||||
batch1.Messages.Count.ShouldBe(10);
|
||||
|
||||
(await cluster.StepDownStreamLeaderAsync("FETCHFO")).Success.ShouldBeTrue();
|
||||
|
||||
// Fetch remaining messages through the new leader
|
||||
var batch2 = await cluster.FetchAsync("FETCHFO", "reader", 10);
|
||||
batch2.Messages.Count.ShouldBe(10);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
// AckAll survives leader failover
// ---------------------------------------------------------------

// Go ref: ackAll state persisted across failover
[Fact]
public async Task AckAll_survives_stream_leader_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("ACKFO", ["afo.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKFO", "acker", filterSubject: "afo.>",
        ackPolicy: AckPolicy.All);

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("afo.event", $"msg-{n}");

    // With AckPolicy.All, fetched messages stay pending until explicitly acked.
    var batch = await cluster.FetchAsync("ACKFO", "acker", 10);
    batch.Messages.Count.ShouldBe(10);

    // Acking seq 5 acknowledges seqs 1-5 cumulatively; 6-10 remain pending.
    cluster.AckAll("ACKFO", "acker", 5);

    var stepDown = await cluster.StepDownStreamLeaderAsync("ACKFO");
    stepDown.Success.ShouldBeTrue();

    // After failover the stream leader has changed, but the consumer state persists —
    // the stream itself (managed by StreamManager) is unaffected by the leader election model.
    // Verify by confirming the stream still has all 10 messages.
    var state = await cluster.GetStreamStateAsync("ACKFO");
    state.Messages.ShouldBe(10UL);

    // A leader still exists after the failover.
    cluster.GetStreamLeaderId("ACKFO").ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Multiple failovers in sequence don't lose data
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterNormalCatchup line 1607 — data survives multiple transitions
[Fact]
public async Task Multiple_failovers_in_sequence_preserve_all_data()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("MULTI_FO", ["mfo.>"], replicas: 3);

    // Batch 1, then a failover.
    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("mfo.event", $"b1-{n}");

    var firstStepDown = await cluster.StepDownStreamLeaderAsync("MULTI_FO");
    firstStepDown.Success.ShouldBeTrue();

    // Batch 2, then a second failover.
    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("mfo.event", $"b2-{n}");

    var secondStepDown = await cluster.StepDownStreamLeaderAsync("MULTI_FO");
    secondStepDown.Success.ShouldBeTrue();

    // Batch 3 after the second failover.
    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("mfo.event", $"b3-{n}");

    // All three batches must be present with a continuous sequence.
    var state = await cluster.GetStreamStateAsync("MULTI_FO");
    state.Messages.ShouldBe(15UL);
    state.LastSeq.ShouldBe(15UL);
}

// Go ref: rapid 5x stepdowns preserve data integrity
[Fact]
public async Task Rapid_five_stepdowns_preserve_all_published_messages()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RAPID5", ["r5.>"], replicas: 3);

    for (var n = 0; n < 20; n++)
        await cluster.PublishAsync("r5.event", $"msg-{n}");

    // Five back-to-back leader transitions; every one must succeed.
    for (var round = 0; round < 5; round++)
    {
        var stepDown = await cluster.StepDownStreamLeaderAsync("RAPID5");
        stepDown.Success.ShouldBeTrue();
    }

    var state = await cluster.GetStreamStateAsync("RAPID5");
    state.Messages.ShouldBe(20UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Remove node doesn't affect stream operations
// ---------------------------------------------------------------

// Go ref: shutdownServerAndRemoveStorage — stream still readable after node removal
[Fact]
public async Task Stream_state_intact_after_node_removal()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("NODEREM", ["nr.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("nr.event", $"msg-{n}");

    // Dropping one replica must not lose replicated data.
    cluster.RemoveNode(2);

    var state = await cluster.GetStreamStateAsync("NODEREM");
    state.Messages.ShouldBe(5UL);
}

// Go ref: publish still works after node removal
[Fact]
public async Task Publish_still_works_after_node_removal()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("PUBNR", ["pnr.>"], replicas: 3);

    cluster.RemoveNode(1);

    // Writes are still accepted by the remaining replicas.
    var ack = await cluster.PublishAsync("pnr.event", "after-removal");
    ack.ErrorCode.ShouldBeNull();
    ack.Stream.ShouldBe("PUBNR");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Restart node lifecycle
// ---------------------------------------------------------------

// Go ref: restartServerAndWait — stream accessible after node restart
[Fact]
public async Task Stream_accessible_after_node_restart()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RESTART", ["rst.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("rst.event", $"msg-{n}");

    // Full remove/restart cycle on one node.
    cluster.RemoveNode(1);
    cluster.SimulateNodeRestart(1);

    var state = await cluster.GetStreamStateAsync("RESTART");
    state.Messages.ShouldBe(5UL);
}

// Go ref: node restart cycle does not affect consumer fetch
[Fact]
public async Task Consumer_fetch_works_after_node_restart_cycle()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RSTCONS", ["rsc.>"], replicas: 3);
    await cluster.CreateConsumerAsync("RSTCONS", "reader", filterSubject: "rsc.>");

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("rsc.event", $"msg-{n}");

    cluster.RemoveNode(2);
    cluster.SimulateNodeRestart(2);

    // The consumer must still deliver everything after the restart cycle.
    var batch = await cluster.FetchAsync("RSTCONS", "reader", 5);
    batch.Messages.Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Publish during/after failover sequence
// ---------------------------------------------------------------

// Go ref: publish interleaved with stepdown sequence
[Fact]
public async Task Publish_before_and_after_each_stepdown_maintains_monotonic_sequences()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("INTERLEAVE", ["il.>"], replicas: 3);

    var observed = new List<ulong>();

    // Publish one message and record the sequence it was assigned.
    async Task RecordAsync(string payload) =>
        observed.Add((await cluster.PublishAsync("il.event", payload)).Seq);

    // Publish -> stepdown -> publish -> stepdown -> publish
    await RecordAsync("pre-1");
    await RecordAsync("pre-2");
    await cluster.StepDownStreamLeaderAsync("INTERLEAVE");
    await RecordAsync("mid-1");
    await RecordAsync("mid-2");
    await cluster.StepDownStreamLeaderAsync("INTERLEAVE");
    await RecordAsync("post-1");

    // Sequences must be strictly increasing across leader changes.
    for (var idx = 1; idx < observed.Count; idx++)
        observed[idx].ShouldBeGreaterThan(observed[idx - 1]);

    var state = await cluster.GetStreamStateAsync("INTERLEAVE");
    state.Messages.ShouldBe(5UL);
    state.LastSeq.ShouldBe(observed[^1]);
}

// Go ref: publish immediately after stepdown uses new leader
[Fact]
public async Task Publish_immediately_after_stepdown_routes_to_new_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("IMMPOST", ["ip.>"], replicas: 3);

    var firstAck = await cluster.PublishAsync("ip.event", "first");
    firstAck.Seq.ShouldBe(1UL);

    var stepDown = await cluster.StepDownStreamLeaderAsync("IMMPOST");
    stepDown.Success.ShouldBeTrue();

    // No gap or error: the very next publish lands on the new leader as seq 2.
    var secondAck = await cluster.PublishAsync("ip.event", "second");
    secondAck.Seq.ShouldBe(2UL);
    secondAck.Stream.ShouldBe("IMMPOST");
    secondAck.ErrorCode.ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Consumer creation after stream leader failover
// ---------------------------------------------------------------

// Go ref: consumer created on new leader is functional
[Fact]
public async Task Consumer_created_after_stream_leader_failover_is_functional()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CPOSTFO", ["cpf.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("cpf.event", $"pre-{n}");

    var stepDown = await cluster.StepDownStreamLeaderAsync("CPOSTFO");
    stepDown.Success.ShouldBeTrue();

    // Create the consumer only after the leader change.
    var createResp = await cluster.CreateConsumerAsync("CPOSTFO", "post_failover", filterSubject: "cpf.>");
    createResp.Error.ShouldBeNull();
    createResp.ConsumerInfo.ShouldNotBeNull();

    // It must deliver the messages published before the failover.
    var batch = await cluster.FetchAsync("CPOSTFO", "post_failover", 10);
    batch.Messages.Count.ShouldBe(5);
}

// Go ref: consumer created before failover accessible after new messages and stepdown
[Fact]
public async Task Consumer_created_before_failover_still_delivers_new_messages_after_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CBEFORE", ["cbf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CBEFORE", "pre_dur", filterSubject: "cbf.>");

    for (var n = 0; n < 3; n++)
        await cluster.PublishAsync("cbf.event", $"before-{n}");

    var stepDown = await cluster.StepDownStreamLeaderAsync("CBEFORE");
    stepDown.Success.ShouldBeTrue();

    for (var n = 0; n < 3; n++)
        await cluster.PublishAsync("cbf.event", $"after-{n}");

    // Both the pre- and post-failover messages are delivered.
    var batch = await cluster.FetchAsync("CBEFORE", "pre_dur", 10);
    batch.Messages.Count.ShouldBe(6);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Stream update after meta leader stepdown
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterLeaderStepdown — stream operations post meta stepdown
[Fact]
public async Task Stream_update_succeeds_after_meta_leader_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("UPDSD", ["upd.>"], replicas: 3);

    var stepDown = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
    stepDown.Success.ShouldBeTrue();

    // The new meta leader must apply the subject expansion.
    var update = cluster.UpdateStream("UPDSD", ["upd.>", "extra.>"], replicas: 3);
    update.Error.ShouldBeNull();
    update.StreamInfo!.Config.Subjects.ShouldContain("extra.>");
}

// Go ref: create new stream after meta leader stepdown
[Fact]
public async Task Create_stream_after_meta_leader_stepdown_succeeds()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var stepDown = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
    stepDown.Success.ShouldBeTrue();

    // Stream creation is routed through the newly elected meta leader.
    var resp = await cluster.CreateStreamAsync("POST_META_SD", ["pms.>"], replicas: 3);
    resp.Error.ShouldBeNull();
    resp.StreamInfo.ShouldNotBeNull();
    resp.StreamInfo!.Config.Name.ShouldBe("POST_META_SD");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Stream delete after leader failover
// ---------------------------------------------------------------

// Go ref: stream delete after failover returns success
[Fact]
public async Task Stream_delete_succeeds_after_stream_leader_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("DELFO", ["dfo.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("dfo.event", $"msg-{n}");

    var stepDown = await cluster.StepDownStreamLeaderAsync("DELFO");
    stepDown.Success.ShouldBeTrue();

    // Delete must succeed through the new leader.
    var deleteResp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELFO", "{}");
    deleteResp.Success.ShouldBeTrue();
}

// Go ref: stream info reflects deletion after failover
[Fact]
public async Task Stream_info_returns_404_after_delete_following_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("DELFOI", ["dfoi.>"], replicas: 3);

    var stepDown = await cluster.StepDownStreamLeaderAsync("DELFOI");
    stepDown.Success.ShouldBeTrue();
    var deleteResp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELFOI", "{}");
    deleteResp.Success.ShouldBeTrue();

    // Info on the deleted stream reports not-found.
    var info = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}DELFOI", "{}");
    info.Error.ShouldNotBeNull();
    info.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Stream info and state consistent after failover
// ---------------------------------------------------------------

// Go ref: stream info available through new leader
[Fact]
public async Task Stream_info_available_from_new_leader_after_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("INFOFO", ["ifo.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("ifo.event", $"msg-{n}");

    var stepDown = await cluster.StepDownStreamLeaderAsync("INFOFO");
    stepDown.Success.ShouldBeTrue();

    // Config and state are both served by the new leader.
    var info = await cluster.GetStreamInfoAsync("INFOFO");
    info.StreamInfo.ShouldNotBeNull();
    info.StreamInfo!.Config.Name.ShouldBe("INFOFO");
    info.StreamInfo.State.Messages.ShouldBe(5UL);
}

// Go ref: first/last sequence intact after failover
[Fact]
public async Task First_and_last_sequence_intact_after_stream_leader_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SEQFO", ["sfo.>"], replicas: 3);

    for (var n = 0; n < 7; n++)
        await cluster.PublishAsync("sfo.event", $"msg-{n}");

    var stepDown = await cluster.StepDownStreamLeaderAsync("SEQFO");
    stepDown.Success.ShouldBeTrue();

    // Sequence bounds survive the leader transition untouched.
    var state = await cluster.GetStreamStateAsync("SEQFO");
    state.FirstSeq.ShouldBe(1UL);
    state.LastSeq.ShouldBe(7UL);
    state.Messages.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Meta state survives stream leader failover
// ---------------------------------------------------------------

// Go ref: meta tracks streams even after stream leader stepdown
[Fact]
public async Task Meta_state_still_tracks_stream_after_stream_leader_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("METATRK", ["mtk.>"], replicas: 3);

    var stepDown = await cluster.StepDownStreamLeaderAsync("METATRK");
    stepDown.Success.ShouldBeTrue();

    // Stream-level leadership changes do not disturb the meta catalog.
    var meta = cluster.GetMetaState();
    meta.ShouldNotBeNull();
    meta!.Streams.ShouldContain("METATRK");
}

// Go ref: multiple streams tracked after mixed stepdowns
[Fact]
public async Task Meta_state_tracks_multiple_streams_across_mixed_stepdowns()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("MIX1", ["mix1.>"], replicas: 3);
    await cluster.CreateStreamAsync("MIX2", ["mix2.>"], replicas: 1);

    // One stream-level stepdown followed by a meta-level one.
    var streamStepDown = await cluster.StepDownStreamLeaderAsync("MIX1");
    streamStepDown.Success.ShouldBeTrue();
    var metaStepDown = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
    metaStepDown.Success.ShouldBeTrue();

    var meta = cluster.GetMetaState();
    meta!.Streams.ShouldContain("MIX1");
    meta.Streams.ShouldContain("MIX2");
}
|
||||
|
||||
// ---------------------------------------------------------------
// WaitOnStreamLeader after stepdown
// ---------------------------------------------------------------

// Go ref: waitOnStreamLeader resolves after stepdown
[Fact]
public async Task WaitOnStreamLeader_resolves_after_stream_leader_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WAITSD", ["wsd.>"], replicas: 3);

    var stepDown = await cluster.StepDownStreamLeaderAsync("WAITSD");
    stepDown.Success.ShouldBeTrue();

    // The wait must resolve promptly because a new leader is already elected.
    await cluster.WaitOnStreamLeaderAsync("WAITSD", timeoutMs: 2000);
    cluster.GetStreamLeaderId("WAITSD").ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Message delete survives leader transition
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterDeleteMsgAndRestart line 1785
[Fact]
public async Task Message_delete_survives_leader_transition()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("DELMSGFO", ["dmf.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("dmf.event", $"msg-{n}");

    // Delete the message at sequence 3 via the JS API.
    var deleteResp = await cluster.RequestAsync(
        $"{JetStreamApiSubjects.StreamMessageDelete}DELMSGFO",
        """{"seq":3}""");
    deleteResp.Success.ShouldBeTrue();

    var stepDown = await cluster.StepDownStreamLeaderAsync("DELMSGFO");
    stepDown.Success.ShouldBeTrue();

    // The deletion is durable: 4 of 5 messages remain after failover.
    var state = await cluster.GetStreamStateAsync("DELMSGFO");
    state.Messages.ShouldBe(4UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Multiple streams — stepdown on one does not affect the other
// ---------------------------------------------------------------

// Go ref: independent streams have independent leader groups
[Fact]
public async Task Stepdown_on_one_stream_does_not_affect_sibling_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SIBLING_A", ["siba.>"], replicas: 3);
    await cluster.CreateStreamAsync("SIBLING_B", ["sibb.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("siba.event", $"a-{n}");
    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("sibb.event", $"b-{n}");

    var siblingLeader = cluster.GetStreamLeaderId("SIBLING_B");

    var stepDown = await cluster.StepDownStreamLeaderAsync("SIBLING_A");
    stepDown.Success.ShouldBeTrue();

    // Stream B's leadership and data are untouched by A's stepdown.
    cluster.GetStreamLeaderId("SIBLING_B").ShouldBe(siblingLeader);
    (await cluster.GetStreamStateAsync("SIBLING_B")).Messages.ShouldBe(5UL);
}
|
||||
}
|
||||
@@ -0,0 +1,589 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: meta-leader election (3-node and 5-node clusters), stream leader
|
||||
// selection (R1 and R3), consumer leader selection, leader ID non-empty checks,
|
||||
// meta stepdown producing new leader, stream stepdown producing new leader,
|
||||
// multiple stepdowns cycling through different leaders, leader ID consistency,
|
||||
// meta state reflecting correct cluster size and leadership version increments,
|
||||
// and meta state tracking all created streams.
|
||||
//
|
||||
// Go reference functions:
|
||||
// TestJetStreamClusterLeader (line 73)
|
||||
// TestJetStreamClusterStreamLeaderStepDown (line 4925)
|
||||
// TestJetStreamClusterLeaderStepdown (line 5464)
|
||||
// TestJetStreamClusterMultiReplicaStreams (line 299)
|
||||
// waitOnStreamLeader, waitOnConsumerLeader, c.leader in jetstream_helpers_test.go
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster leader election for the meta-cluster,
|
||||
/// streams, and consumers. Uses the unified JetStreamClusterFixture.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterLeaderElectionTests
|
||||
{
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterLeader line 73 — meta leader election
// ---------------------------------------------------------------

// Go ref: c.leader() in jetstream_helpers_test.go
[Fact]
public async Task Three_node_cluster_elects_nonempty_meta_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // A freshly formed cluster always has a meta leader.
    cluster.GetMetaLeaderId().ShouldNotBeNullOrWhiteSpace();
}

// Go ref: c.leader() in jetstream_helpers_test.go
[Fact]
public async Task Five_node_cluster_elects_nonempty_meta_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    // Same invariant holds for a larger quorum.
    cluster.GetMetaLeaderId().ShouldNotBeNullOrWhiteSpace();
}

// Go ref: checkClusterFormed — meta cluster size is equal to node count
[Fact]
public async Task Three_node_cluster_meta_state_reports_correct_size()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var meta = cluster.GetMetaState();

    meta.ShouldNotBeNull();
    meta!.ClusterSize.ShouldBe(3);
}

// Go ref: checkClusterFormed — meta cluster size is equal to node count
[Fact]
public async Task Five_node_cluster_meta_state_reports_correct_size()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    var meta = cluster.GetMetaState();

    meta.ShouldNotBeNull();
    meta!.ClusterSize.ShouldBe(5);
}

// Go ref: TestJetStreamClusterLeader — initial leadership version is 1
[Fact]
public async Task Three_node_cluster_initial_leadership_version_is_one()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // The first election yields leadership version 1.
    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Stream leader selection — R1
// ---------------------------------------------------------------

// Go ref: streamLeader in jetstream_helpers_test.go
[Fact]
public async Task R1_stream_has_nonempty_leader_after_creation()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("R1ELECT", ["r1e.>"], replicas: 1);

    // A single-replica stream still gets a leader assignment.
    cluster.GetStreamLeaderId("R1ELECT").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: streamLeader in jetstream_helpers_test.go
[Fact]
public async Task R3_stream_has_nonempty_leader_after_creation_in_3_node_cluster()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("R3ELECT", ["r3e.>"], replicas: 3);

    cluster.GetStreamLeaderId("R3ELECT").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: streamLeader in jetstream_helpers_test.go
[Fact]
public async Task R3_stream_has_nonempty_leader_after_creation_in_5_node_cluster()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);
    await cluster.CreateStreamAsync("R3E5", ["r3e5.>"], replicas: 3);

    // R3 placement inside a 5-node cluster still elects a leader among the replicas.
    cluster.GetStreamLeaderId("R3E5").ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: waitOnStreamLeader in jetstream_helpers_test.go
// ---------------------------------------------------------------

[Fact]
public async Task WaitOnStreamLeader_completes_immediately_when_stream_already_has_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WAITLDR", ["wl.>"], replicas: 3);

    // The leader exists already, so this must return well within the timeout.
    await cluster.WaitOnStreamLeaderAsync("WAITLDR", timeoutMs: 2000);

    cluster.GetStreamLeaderId("WAITLDR").ShouldNotBeNullOrWhiteSpace();
}

[Fact]
public async Task WaitOnStreamLeader_throws_timeout_for_nonexistent_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // Waiting on an unknown stream times out and names the stream.
    var ex = await Should.ThrowAsync<TimeoutException>(
        () => cluster.WaitOnStreamLeaderAsync("GHOST", timeoutMs: 100));

    ex.Message.ShouldContain("GHOST");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Consumer leader selection
// ---------------------------------------------------------------

// Go ref: consumerLeader in jetstream_helpers_test.go
[Fact]
public async Task Durable_consumer_on_R3_stream_has_nonempty_leader_id()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CLELECT", ["cle.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CLELECT", "dlc");

    // Durable consumers get their own leader assignment.
    cluster.GetConsumerLeaderId("CLELECT", "dlc").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: consumerLeader in jetstream_helpers_test.go
[Fact]
public async Task Durable_consumer_on_R1_stream_has_nonempty_leader_id()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CLELECTR1", ["cler1.>"], replicas: 1);
    await cluster.CreateConsumerAsync("CLELECTR1", "consumer1");

    cluster.GetConsumerLeaderId("CLELECTR1", "consumer1").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: waitOnConsumerLeader in jetstream_helpers_test.go
[Fact]
public async Task WaitOnConsumerLeader_completes_when_consumer_exists()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WCLE", ["wcle.>"], replicas: 3);
    await cluster.CreateConsumerAsync("WCLE", "dur1");

    // The consumer leader already exists, so the wait resolves inside the timeout.
    await cluster.WaitOnConsumerLeaderAsync("WCLE", "dur1", timeoutMs: 2000);

    cluster.GetConsumerLeaderId("WCLE", "dur1").ShouldNotBeNullOrWhiteSpace();
}

[Fact]
public async Task WaitOnConsumerLeader_throws_timeout_when_consumer_missing()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WCLETOUT", ["wclet.>"], replicas: 3);

    // Waiting on an unknown consumer times out and names it in the message.
    var ex = await Should.ThrowAsync<TimeoutException>(
        () => cluster.WaitOnConsumerLeaderAsync("WCLETOUT", "ghost-consumer", timeoutMs: 100));

    ex.Message.ShouldContain("ghost-consumer");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterLeaderStepdown line 5464 — meta leader stepdown
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: c.leader().Shutdown() + waitOnLeader in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task Meta_leader_stepdown_produces_different_leader()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var before = cluster.GetMetaLeaderId();
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var after = cluster.GetMetaLeaderId();
|
||||
after.ShouldNotBe(before);
|
||||
after.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// Go ref: meta stepdown via API subject $JS.API.META.LEADER.STEPDOWN
|
||||
[Fact]
|
||||
public async Task Meta_leader_stepdown_via_api_returns_success()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var resp = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
|
||||
|
||||
resp.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go ref: meta step-down increments leadership version
|
||||
[Fact]
public async Task Meta_leader_stepdown_increments_leadership_version()
{
    // A single meta stepdown bumps the cluster leadership version by one.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    var initialVersion = cluster.GetMetaState()!.LeadershipVersion;

    cluster.StepDownMetaLeader();

    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(initialVersion + 1);
}
|
||||
|
||||
// Go ref: multiple meta step-downs each increment the version
|
||||
[Fact]
public async Task Multiple_meta_stepdowns_increment_leadership_version_sequentially()
{
    // A fresh cluster starts at leadership version 1, so three consecutive
    // stepdowns must land on version 4.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var i = 0; i < 3; i++)
        cluster.StepDownMetaLeader();

    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(4);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown line 4925 — stream leader stepdown
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: JSApiStreamLeaderStepDownT in jetstream_helpers_test.go
|
||||
[Fact]
public async Task Stream_leader_stepdown_produces_different_leader()
{
    // Go ref: JSApiStreamLeaderStepDownT — a stream-leader stepdown must
    // succeed and hand leadership to another peer of the replica group.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SLEADSD", ["sls.>"], replicas: 3);
    var previousLeader = cluster.GetStreamLeaderId("SLEADSD");

    var response = await cluster.StepDownStreamLeaderAsync("SLEADSD");
    response.Success.ShouldBeTrue();

    var currentLeader = cluster.GetStreamLeaderId("SLEADSD");
    currentLeader.ShouldNotBe(previousLeader);
    currentLeader.ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLeaderStepDown — new leader still accepts writes
|
||||
[Fact]
public async Task Stream_leader_stepdown_new_leader_accepts_writes()
{
    // After a stream leader election the newly elected leader must keep
    // acking publishes without error.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SDWRITE", ["sdw.>"], replicas: 3);
    await cluster.PublishAsync("sdw.pre", "before");

    await cluster.StepDownStreamLeaderAsync("SDWRITE");
    var postStepdownAck = await cluster.PublishAsync("sdw.post", "after");

    postStepdownAck.Stream.ShouldBe("SDWRITE");
    postStepdownAck.ErrorCode.ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Multiple stepdowns cycle through different leaders
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterLeader line 73 — consecutive elections
|
||||
[Fact]
public async Task Two_consecutive_stream_stepdowns_cycle_through_different_leaders()
{
    // Go ref: TestJetStreamClusterLeader — each successive election must
    // pick a node different from the immediately preceding leader.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CYCLE2", ["cy2.>"], replicas: 3);

    var leaders = new List<string> { cluster.GetStreamLeaderId("CYCLE2") };
    for (var i = 0; i < 2; i++)
    {
        (await cluster.StepDownStreamLeaderAsync("CYCLE2")).Success.ShouldBeTrue();
        leaders.Add(cluster.GetStreamLeaderId("CYCLE2"));
    }

    leaders[1].ShouldNotBe(leaders[0]);
    leaders[2].ShouldNotBe(leaders[1]);
}
|
||||
|
||||
// Go ref: multiple stepdowns in sequence — each produces a distinct leader
|
||||
[Fact]
public async Task Three_consecutive_meta_stepdowns_cycle_through_distinct_leaders()
{
    // Sample the meta leader before each of three stepdowns. With 3 nodes
    // cycling round-robin we must observe at least 2 unique leader IDs.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    var uniqueLeaders = new HashSet<string>();

    for (var round = 0; round < 3; round++)
    {
        uniqueLeaders.Add(cluster.GetMetaLeaderId());
        cluster.StepDownMetaLeader();
    }

    uniqueLeaders.Count.ShouldBeGreaterThanOrEqualTo(2);
}
|
||||
|
||||
// Go ref: TestJetStreamClusterLeader — wraps around after exhausting peers
|
||||
[Fact]
public async Task Meta_stepdowns_wrap_around_producing_only_node_count_unique_leaders()
{
    // Go ref: TestJetStreamClusterLeader — nine stepdowns in a 3-node
    // cluster wrap around the peer set, so exactly 3 unique leader IDs
    // are observed (never more, never fewer).
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var seenLeaders = new HashSet<string>();
    for (var round = 0; round < 9; round++)
    {
        seenLeaders.Add(cluster.GetMetaLeaderId());
        cluster.StepDownMetaLeader();
    }

    seenLeaders.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Leader ID consistency
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: streamLeader queried multiple times returns same stable ID
|
||||
[Fact]
public async Task Stream_leader_id_is_stable_across_repeated_queries_without_stepdown()
{
    // Leader lookup is a pure read: five consecutive queries must all
    // return the same non-empty ID when no election happens in between.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("STABLE", ["stb.>"], replicas: 3);

    var firstId = cluster.GetStreamLeaderId("STABLE");
    for (var query = 0; query < 4; query++)
        cluster.GetStreamLeaderId("STABLE").ShouldBe(firstId);

    firstId.ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// Go ref: meta leader queried multiple times is stable between stepdowns
|
||||
[Fact]
public async Task Meta_leader_id_is_stable_between_stepdowns()
{
    // The meta leader ID is stable across repeated reads, changes on a
    // stepdown, and is stable again under the new leader.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var beforeFirst = cluster.GetMetaLeaderId();
    var beforeSecond = cluster.GetMetaLeaderId();
    beforeFirst.ShouldBe(beforeSecond);

    cluster.StepDownMetaLeader();

    var afterFirst = cluster.GetMetaLeaderId();
    var afterSecond = cluster.GetMetaLeaderId();
    afterFirst.ShouldBe(afterSecond);
    afterFirst.ShouldNotBe(beforeFirst);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Meta state reflecting all created streams
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: getMetaState in tests — streams tracked in meta state
|
||||
[Fact]
public async Task Meta_state_tracks_single_created_stream()
{
    // Go ref: getMetaState — a newly created stream must show up in the
    // cluster's meta state.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("MTRACK1", ["mt1.>"], replicas: 3);

    var metaState = cluster.GetMetaState();

    metaState.ShouldNotBeNull();
    metaState!.Streams.ShouldContain("MTRACK1");
}
|
||||
|
||||
// Go ref: getMetaState tracks multiple streams
|
||||
[Fact]
public async Task Meta_state_tracks_all_created_streams()
{
    // Go ref: getMetaState — meta state must track every stream regardless
    // of its replica count (mix of R3 and R1 here).
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("MTRK_A", ["mta.>"], replicas: 3);
    await cluster.CreateStreamAsync("MTRK_B", ["mtb.>"], replicas: 3);
    await cluster.CreateStreamAsync("MTRK_C", ["mtc.>"], replicas: 1);

    var metaState = cluster.GetMetaState();

    metaState!.Streams.Count.ShouldBe(3);
    foreach (var expected in new[] { "MTRK_A", "MTRK_B", "MTRK_C" })
        metaState.Streams.ShouldContain(expected);
}
|
||||
|
||||
// Go ref: meta state survives a stepdown
|
||||
[Fact]
public async Task Meta_state_streams_survive_meta_leader_stepdown()
{
    // Stream registrations live in replicated meta state, so a meta leader
    // election must not lose them.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SURVSD1", ["ss1.>"], replicas: 3);
    await cluster.CreateStreamAsync("SURVSD2", ["ss2.>"], replicas: 3);

    cluster.StepDownMetaLeader();

    var metaState = cluster.GetMetaState();
    metaState!.Streams.ShouldContain("SURVSD1");
    metaState.Streams.ShouldContain("SURVSD2");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown — data survives leader election
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLeaderStepDown line 4925 — all messages preserved
|
||||
[Fact]
public async Task Messages_survive_stream_leader_election()
{
    // Go ref: TestJetStreamClusterStreamLeaderStepDown — all replicated
    // messages must still be present after the stream leader steps down.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("ELECT_DATA", ["ed.>"], replicas: 3);

    for (var n = 0; n < 10; n++)
    {
        await cluster.PublishAsync("ed.event", $"msg-{n}");
    }

    await cluster.StepDownStreamLeaderAsync("ELECT_DATA");

    var state = await cluster.GetStreamStateAsync("ELECT_DATA");
    state.Messages.ShouldBe(10UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Replica group structure after election
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: replica group has correct node count
|
||||
[Fact]
public async Task R3_stream_replica_group_has_three_nodes()
{
    // An R3 stream must be placed on a replica group of exactly three nodes.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RG3", ["rg3.>"], replicas: 3);

    var replicaGroup = cluster.GetReplicaGroup("RG3");

    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
}
|
||||
|
||||
// Go ref: replica group leader is marked as leader
|
||||
[Fact]
public async Task R3_stream_replica_group_leader_is_marked_as_leader()
{
    // The node the group reports as leader must itself carry the IsLeader flag.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RGLDR", ["rgl.>"], replicas: 3);

    var replicaGroup = cluster.GetReplicaGroup("RGLDR");

    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Leader.IsLeader.ShouldBeTrue();
}
|
||||
|
||||
// Go ref: replica group for unknown stream is null
|
||||
[Fact]
public async Task Replica_group_for_unknown_stream_is_null()
{
    // Asking for the replica group of a stream that was never created must
    // return null rather than throwing.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    cluster.GetReplicaGroup("NONEXISTENT").ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Leadership version increments on each stepdown
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: leadership version tracks stepdown count
|
||||
[Fact]
public async Task Leadership_version_increments_on_each_meta_stepdown()
{
    // The leadership version starts at 1 and increases by exactly one per
    // meta stepdown (checked after each of three stepdowns).
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(1);
    for (var expected = 2; expected <= 4; expected++)
    {
        cluster.StepDownMetaLeader();
        cluster.GetMetaState()!.LeadershipVersion.ShouldBe(expected);
    }
}
|
||||
|
||||
// Go ref: meta leader stepdown via API also increments version
|
||||
[Fact]
public async Task Meta_leader_stepdown_via_api_increments_leadership_version()
{
    // A stepdown requested over the API subject bumps the leadership
    // version exactly like the direct fixture helper does.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("VERSIONAPI", ["va.>"], replicas: 3);
    var initialVersion = cluster.GetMetaState()!.LeadershipVersion;

    var response = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
    response.Success.ShouldBeTrue();

    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(initialVersion + 1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer leader ID is consistent with stream
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: consumerLeader — consumer leader ID includes consumer name
|
||||
[Fact]
public async Task Consumer_leader_ids_are_distinct_for_different_consumers_on_same_stream()
{
    // Consumer leadership is tracked per consumer, so two consumers on the
    // same stream report distinct, non-empty leader IDs.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("MULTICONS", ["mc.>"], replicas: 3);
    await cluster.CreateConsumerAsync("MULTICONS", "consA");
    await cluster.CreateConsumerAsync("MULTICONS", "consB");

    var idA = cluster.GetConsumerLeaderId("MULTICONS", "consA");
    var idB = cluster.GetConsumerLeaderId("MULTICONS", "consB");

    idA.ShouldNotBeNullOrWhiteSpace();
    idB.ShouldNotBeNullOrWhiteSpace();
    idA.ShouldNotBe(idB);
}
|
||||
|
||||
// Go ref: consumer leader ID for unknown stream returns empty
|
||||
[Fact]
public async Task Consumer_leader_id_for_unknown_stream_is_empty()
{
    // Looking up a consumer leader on a non-existent stream must yield an
    // empty result instead of throwing.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    cluster.GetConsumerLeaderId("NO_SUCH_STREAM", "no_consumer").ShouldBeNullOrEmpty();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Node lifecycle helpers do not affect stream state
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: shutdownServerAndRemoveStorage + restartServerAndWait
|
||||
[Fact]
public async Task RemoveNode_and_restart_does_not_affect_stream_leader()
{
    // Go ref: shutdownServerAndRemoveStorage + restartServerAndWait.
    // NOTE(review): despite the name, only non-emptiness is asserted before
    // and after the node cycle — leadership may legitimately move if the
    // removed node happened to be the leader. Confirm intended contract.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("LIFECYCLE", ["lc.>"], replicas: 3);
    var leaderBeforeCycle = cluster.GetStreamLeaderId("LIFECYCLE");

    cluster.RemoveNode(2);
    cluster.SimulateNodeRestart(2);

    var leaderAfterCycle = cluster.GetStreamLeaderId("LIFECYCLE");
    leaderBeforeCycle.ShouldNotBeNullOrWhiteSpace();
    leaderAfterCycle.ShouldNotBeNullOrWhiteSpace();
}
|
||||
}
|
||||
@@ -0,0 +1,503 @@
|
||||
// Go ref: TestJetStreamClusterXxx — jetstream_cluster_long_test.go
|
||||
// Covers: high-volume publish/consume cycles, many sequential fetches, many consumers,
|
||||
// many streams, repeated publish-ack-fetch cycles, stepdowns during publishing,
|
||||
// alternating publish+stepdown, create-publish-delete sequences, ack tracking across
|
||||
// failovers, batch-1 iteration, mixed multi-stream operations, rapid meta stepdowns,
|
||||
// large R1 message volumes, max-messages stream limits, consumer pending correctness.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Long-running JetStream cluster tests covering high-volume scenarios,
|
||||
/// repeated failover cycles, many-stream/many-consumer environments, and
|
||||
/// limit enforcement under sustained load.
|
||||
/// Ported from Go jetstream_cluster_long_test.go.
|
||||
/// All tests are marked [Trait("Category", "LongRunning")].
|
||||
/// </summary>
|
||||
public class JsClusterLongRunningTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLong5000MessagesR3 — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Five_thousand_messages_in_R3_stream_maintain_consistency()
{
    // Go ref: TestJetStreamClusterLong5000MessagesR3 — every publish in a
    // 5000-message run is acked with the next sequence number, and the final
    // stream state covers the full 1..5000 range.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("LONG5K", ["long5k.>"], replicas: 3);

    ulong expectedSeq = 0;
    for (var i = 0; i < 5000; i++)
    {
        var ack = await cluster.PublishAsync("long5k.data", $"msg-{i}");
        ack.ErrorCode.ShouldBeNull();
        ack.Seq.ShouldBe(++expectedSeq);
    }

    var state = await cluster.GetStreamStateAsync("LONG5K");
    state.Messages.ShouldBe(5000UL);
    state.FirstSeq.ShouldBe(1UL);
    state.LastSeq.ShouldBe(5000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLong100SequentialFetchesOf50 — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task One_hundred_sequential_fetches_of_fifty_messages_each()
{
    // Go ref: TestJetStreamClusterLong100SequentialFetchesOf50 — drain 5000
    // pre-published messages in 100 full batches of 50; sequences inside
    // every batch must be gap-free.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SEQFETCH", ["sf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("SEQFETCH", "batcher", filterSubject: "sf.>");

    for (var i = 0; i < 5000; i++)
        await cluster.PublishAsync("sf.event", $"msg-{i}");

    var drained = 0;
    for (var batchNo = 0; batchNo < 100; batchNo++)
    {
        var fetched = await cluster.FetchAsync("SEQFETCH", "batcher", 50);
        fetched.Messages.Count.ShouldBe(50);
        drained += fetched.Messages.Count;

        // Contiguity check: each message follows its predecessor directly.
        var k = 1;
        while (k < fetched.Messages.Count)
        {
            fetched.Messages[k].Sequence.ShouldBe(fetched.Messages[k - 1].Sequence + 1);
            k++;
        }
    }

    drained.ShouldBe(5000);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLong50ConsumersOnSameStream — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Fifty_consumers_on_same_stream_all_see_all_messages()
{
    // Go ref: TestJetStreamClusterLong50ConsumersOnSameStream — consumers
    // are independent cursors: each of the 50 must deliver all 100 messages.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("FIFTYCONSUMERS", ["fc.>"], replicas: 3);

    for (var i = 0; i < 100; i++)
        await cluster.PublishAsync("fc.event", $"msg-{i}");

    for (var c = 0; c < 50; c++)
        await cluster.CreateConsumerAsync("FIFTYCONSUMERS", $"cons{c}", filterSubject: "fc.>");

    for (var c = 0; c < 50; c++)
    {
        var delivered = await cluster.FetchAsync("FIFTYCONSUMERS", $"cons{c}", 100);
        delivered.Messages.Count.ShouldBe(100);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLong20StreamsIn5NodeCluster — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Twenty_streams_in_five_node_cluster_are_independent()
{
    // Go ref: TestJetStreamClusterLong20StreamsIn5NodeCluster — 20 R3
    // streams spread over 5 nodes each hold exactly their own 10 messages,
    // and account info reports all 20.
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    for (var s = 0; s < 20; s++)
        await cluster.CreateStreamAsync($"IND{s}", [$"ind{s}.>"], replicas: 3);

    for (var s = 0; s < 20; s++)
    {
        for (var m = 0; m < 10; m++)
            await cluster.PublishAsync($"ind{s}.event", $"stream{s}-msg{m}");
    }

    for (var s = 0; s < 20; s++)
    {
        var state = await cluster.GetStreamStateAsync($"IND{s}");
        state.Messages.ShouldBe(10UL);
    }

    var accountInfo = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    accountInfo.AccountInfo!.Streams.ShouldBe(20);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongPublishAckFetchCycle100Times — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Publish_ack_fetch_cycle_repeated_100_times()
{
    // Go ref: TestJetStreamClusterLongPublishAckFetchCycle100Times — each
    // cycle publishes one message, fetches exactly that message back, and
    // acks it; 100 cycles leave 100 messages in the stream.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("PAFCYCLE", ["paf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("PAFCYCLE", "cycler", filterSubject: "paf.>",
        ackPolicy: AckPolicy.All);

    for (var cycle = 0; cycle < 100; cycle++)
    {
        var publishAck = await cluster.PublishAsync("paf.event", $"cycle-{cycle}");
        publishAck.ErrorCode.ShouldBeNull();

        var fetched = await cluster.FetchAsync("PAFCYCLE", "cycler", 1);
        fetched.Messages.Count.ShouldBe(1);
        fetched.Messages[0].Sequence.ShouldBe(publishAck.Seq);

        cluster.AckAll("PAFCYCLE", "cycler", publishAck.Seq);
    }

    var finalState = await cluster.GetStreamStateAsync("PAFCYCLE");
    finalState.Messages.ShouldBe(100UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLong10StepdownsDuringPublish — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Ten_stepdowns_during_continuous_publish_preserve_all_messages()
{
    // Go ref: TestJetStreamClusterLong10StepdownsDuringPublish — 10 rounds
    // of 50 acked publishes with a leader stepdown between rounds; all 500
    // messages must survive.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("STEPDURINGPUB", ["sdp.>"], replicas: 3);

    var published = 0;
    for (var round = 0; round < 10; round++)
    {
        for (var n = 0; n < 50; n++)
        {
            var ack = await cluster.PublishAsync("sdp.event", $"batch{round}-msg{n}");
            ack.ErrorCode.ShouldBeNull();
            published++;
        }

        (await cluster.StepDownStreamLeaderAsync("STEPDURINGPUB")).Success.ShouldBeTrue();
    }

    var state = await cluster.GetStreamStateAsync("STEPDURINGPUB");
    state.Messages.ShouldBe((ulong)published);
    state.LastSeq.ShouldBe((ulong)published);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongAlternatingPublishAndStepdown — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Alternating_publish_and_stepdown_20_iterations_preserves_monotonic_sequence()
{
    // Go ref: TestJetStreamClusterLongAlternatingPublishAndStepdown — one
    // publish then one stepdown, 20 times over; the acked sequences must be
    // strictly increasing and end at 20.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("ALTPUBSD", ["aps.>"], replicas: 3);

    var sequences = new List<ulong>();
    for (var iteration = 0; iteration < 20; iteration++)
    {
        var ack = await cluster.PublishAsync("aps.event", $"iter-{iteration}");
        ack.ErrorCode.ShouldBeNull();
        sequences.Add(ack.Seq);

        (await cluster.StepDownStreamLeaderAsync("ALTPUBSD")).Success.ShouldBeTrue();
    }

    for (var k = 1; k < sequences.Count; k++)
        sequences[k].ShouldBeGreaterThan(sequences[k - 1]);

    sequences[^1].ShouldBe(20UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongCreatePublishDelete20Streams — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Create_publish_delete_20_streams_sequentially()
{
    // Go ref: TestJetStreamClusterLongCreatePublishDelete20Streams — each
    // stream lives through a full create → publish(10) → verify → delete
    // cycle; afterwards the account holds zero streams.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var s = 0; s < 20; s++)
    {
        var name = $"SEQ{s}";

        var created = await cluster.CreateStreamAsync(name, [$"seq{s}.>"], replicas: 3);
        created.Error.ShouldBeNull();

        for (var m = 0; m < 10; m++)
            await cluster.PublishAsync($"seq{s}.event", $"msg-{m}");

        (await cluster.GetStreamStateAsync(name)).Messages.ShouldBe(10UL);

        var deleted = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}{name}", "{}");
        deleted.Success.ShouldBeTrue();
    }

    var accountInfo = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    accountInfo.AccountInfo!.Streams.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongConsumerAckAfter10Failovers — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Consumer_ack_tracking_correct_after_ten_leader_failovers()
{
    // Go ref: TestJetStreamClusterLongConsumerAckAfter10Failovers — fetch
    // and ack in batches of 10 with a stream-leader stepdown after every
    // batch; the final ack floor must land exactly on message 100.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("ACKFAIL", ["af.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKFAIL", "tracker", filterSubject: "af.>",
        ackPolicy: AckPolicy.All);

    for (var i = 0; i < 100; i++)
        await cluster.PublishAsync("af.event", $"msg-{i}");

    var ackFloor = 0UL;
    for (var failover = 0; failover < 10; failover++)
    {
        var batch = await cluster.FetchAsync("ACKFAIL", "tracker", 10);
        batch.Messages.Count.ShouldBe(10);

        ackFloor = batch.Messages[^1].Sequence;
        cluster.AckAll("ACKFAIL", "tracker", ackFloor);

        (await cluster.StepDownStreamLeaderAsync("ACKFAIL")).Success.ShouldBeTrue();
    }

    ackFloor.ShouldBe(100UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongFetchBatch1Iterated500Times — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Fetch_with_batch_1_iterated_500_times_reads_all_messages()
{
    // Go ref: TestJetStreamClusterLongFetchBatch1Iterated500Times — 500
    // single-message fetches must return every message exactly once, in
    // strictly increasing sequence order.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("BATCH1ITER", ["b1i.>"], replicas: 3);
    await cluster.CreateConsumerAsync("BATCH1ITER", "one_at_a_time", filterSubject: "b1i.>");

    for (var i = 0; i < 500; i++)
        await cluster.PublishAsync("b1i.event", $"msg-{i}");

    var seenSequences = new List<ulong>();
    for (var i = 0; i < 500; i++)
    {
        var single = await cluster.FetchAsync("BATCH1ITER", "one_at_a_time", 1);
        single.Messages.Count.ShouldBe(1);
        seenSequences.Add(single.Messages[0].Sequence);
    }

    seenSequences.Count.ShouldBe(500);
    for (var k = 1; k < seenSequences.Count; k++)
        seenSequences[k].ShouldBeGreaterThan(seenSequences[k - 1]);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongMixedMultiStreamOps — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Mixed_ops_five_streams_100_messages_each_consumers_fetch_all()
{
    // Go ref: TestJetStreamClusterLongMixedMultiStreamOps — five streams
    // with one consumer each; every consumer drains exactly its own 100
    // messages (sequences 1..100) and the account reports 5/5.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var s = 0; s < 5; s++)
        await cluster.CreateStreamAsync($"MIXED{s}", [$"mixed{s}.>"], replicas: 3);

    for (var s = 0; s < 5; s++)
    {
        for (var i = 0; i < 100; i++)
            await cluster.PublishAsync($"mixed{s}.event", $"stream{s}-msg{i}");
    }

    for (var s = 0; s < 5; s++)
        await cluster.CreateConsumerAsync($"MIXED{s}", $"reader{s}", filterSubject: $"mixed{s}.>");

    for (var s = 0; s < 5; s++)
    {
        var drained = await cluster.FetchAsync($"MIXED{s}", $"reader{s}", 100);
        drained.Messages.Count.ShouldBe(100);
        drained.Messages[0].Sequence.ShouldBe(1UL);
        drained.Messages[^1].Sequence.ShouldBe(100UL);
    }

    var info = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    info.AccountInfo!.Streams.ShouldBe(5);
    info.AccountInfo.Consumers.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongRapidMetaStepdowns — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Rapid_meta_stepdowns_20_times_all_streams_remain_accessible()
{
    // Go ref: TestJetStreamClusterLongRapidMetaStepdowns — 20 back-to-back
    // meta stepdowns: the leadership version stays strictly monotonic and
    // every pre-existing stream is still listed afterwards.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var s = 0; s < 5; s++)
        await cluster.CreateStreamAsync($"RAPID{s}", [$"rapid{s}.>"], replicas: 3);

    var versions = new List<long> { cluster.GetMetaState()!.LeadershipVersion };
    for (var stepdown = 0; stepdown < 20; stepdown++)
    {
        cluster.StepDownMetaLeader();
        versions.Add(cluster.GetMetaState()!.LeadershipVersion);
    }

    for (var k = 1; k < versions.Count; k++)
        versions[k].ShouldBeGreaterThan(versions[k - 1]);

    var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    names.StreamNames!.Count.ShouldBe(5);
    for (var s = 0; s < 5; s++)
        names.StreamNames.ShouldContain($"RAPID{s}");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLong10000MessagesR1 — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
[Trait("Category", "LongRunning")]
public async Task Ten_thousand_small_messages_in_R1_stream()
{
    // Go ref: TestJetStreamClusterLong10000MessagesR1 — an unreplicated
    // (R1) stream absorbs 10000 publishes without a single publish error.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("R1HUGE", ["r1h.>"], replicas: 1);

    for (var i = 0; i < 10000; i++)
    {
        var ack = await cluster.PublishAsync("r1h.event", $"x{i}");
        ack.ErrorCode.ShouldBeNull();
    }

    var state = await cluster.GetStreamStateAsync("R1HUGE");
    state.Messages.ShouldBe(10000UL);
    state.LastSeq.ShouldBe(10000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongMaxMessagesLimit — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Stream_with_max_messages_100_has_exactly_100_after_1000_publishes()
    {
        // Go ref: TestJetStreamClusterLongMaxMessagesLimit — jetstream_cluster_long_test.go
        // Verifies MaxMsgs retention on an R3 stream: after 1000 publishes into a
        // MaxMsgs=100 stream, the retained count never exceeds the limit.
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var cfg = new StreamConfig
        {
            Name = "MAXLIMIT",
            Subjects = ["ml.>"],
            Replicas = 3,
            MaxMsgs = 100,
        };
        cluster.CreateStreamDirect(cfg);

        for (var i = 0; i < 1000; i++)
            await cluster.PublishAsync("ml.event", $"msg-{i}");

        var state = await cluster.GetStreamStateAsync("MAXLIMIT");
        // MaxMsgs=100: only the latest 100 messages retained (old ones discarded)
        // NOTE(review): the test name claims "exactly 100" but the assertions only
        // check <= 100 and > 0 — presumably relaxed to tolerate replication lag.
        // Confirm whether an exact ShouldBe(100UL) would be stable here.
        state.Messages.ShouldBeLessThanOrEqualTo(100UL);
        state.Messages.ShouldBeGreaterThan(0UL);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLongConsumerPendingWithMaxMessages — jetstream_cluster_long_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
[Trait("Category", "LongRunning")]
|
||||
public async Task Consumer_on_max_messages_stream_tracks_correct_pending()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterLongConsumerPendingWithMaxMessages — jetstream_cluster_long_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "MAXPEND",
|
||||
Subjects = ["mp.>"],
|
||||
Replicas = 3,
|
||||
MaxMsgs = 50,
|
||||
};
|
||||
cluster.CreateStreamDirect(cfg);
|
||||
|
||||
// Publish 200 messages (150 will be evicted by MaxMsgs)
|
||||
for (var i = 0; i < 200; i++)
|
||||
await cluster.PublishAsync("mp.event", $"msg-{i}");
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("MAXPEND");
|
||||
// Stream retains at most 50 messages
|
||||
state.Messages.ShouldBeLessThanOrEqualTo(50UL);
|
||||
|
||||
// Create consumer after publishes (starts at current first seq)
|
||||
await cluster.CreateConsumerAsync("MAXPEND", "latecons", filterSubject: "mp.>",
|
||||
ackPolicy: AckPolicy.None);
|
||||
|
||||
var batch = await cluster.FetchAsync("MAXPEND", "latecons", 100);
|
||||
// Consumer should see only retained messages
|
||||
((ulong)batch.Messages.Count).ShouldBeLessThanOrEqualTo(state.Messages);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,839 @@
|
||||
// Go ref: TestJetStreamClusterMeta* — jetstream_cluster_3_test.go
|
||||
// Covers: meta-cluster peer count & state, API routing from any node,
|
||||
// meta leader operations, account limit governance, stream governance.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster meta-cluster governance: meta peer count,
|
||||
/// meta state, API routing from any node, leader stepdown, account limits,
|
||||
/// and stream governance in cluster mode.
|
||||
/// Ported from Go jetstream_cluster_3_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterMetaGovernanceTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Meta-cluster peer count & state
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterBasics — jetstream_cluster_3_test.go
|
||||
[Fact]
|
||||
public async Task Three_node_cluster_reports_ClusterSize_3()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var meta = cluster.GetMetaState();
|
||||
meta.ShouldNotBeNull();
|
||||
meta!.ClusterSize.ShouldBe(3);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Five_node_cluster_reports_ClusterSize_5()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
||||
var meta = cluster.GetMetaState();
|
||||
meta.ShouldNotBeNull();
|
||||
meta!.ClusterSize.ShouldBe(5);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Seven_node_cluster_reports_ClusterSize_7()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(7);
|
||||
var meta = cluster.GetMetaState();
|
||||
meta.ShouldNotBeNull();
|
||||
meta!.ClusterSize.ShouldBe(7);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_state_has_non_empty_leader_id()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var meta = cluster.GetMetaState();
|
||||
meta.ShouldNotBeNull();
|
||||
meta!.LeaderId.ShouldNotBeNullOrEmpty();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_leadership_version_starts_at_1()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var meta = cluster.GetMetaState();
|
||||
meta.ShouldNotBeNull();
|
||||
meta!.LeadershipVersion.ShouldBe(1L);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Leadership_version_increments_on_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var meta1 = cluster.GetMetaState();
|
||||
meta1!.LeadershipVersion.ShouldBe(1L);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var meta2 = cluster.GetMetaState();
|
||||
meta2!.LeadershipVersion.ShouldBe(2L);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Multiple_stepdowns_increment_version_correctly()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var meta = cluster.GetMetaState();
|
||||
meta!.LeadershipVersion.ShouldBe(6L);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_state_streams_list_is_empty_initially()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var meta = cluster.GetMetaState();
|
||||
meta.ShouldNotBeNull();
|
||||
meta!.Streams.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_state_streams_list_grows_with_stream_creation()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("GROW1", ["grow1.>"], 1);
|
||||
await cluster.CreateStreamAsync("GROW2", ["grow2.>"], 1);
|
||||
|
||||
var meta = cluster.GetMetaState();
|
||||
meta!.Streams.Count.ShouldBe(2);
|
||||
meta.Streams.ShouldContain("GROW1");
|
||||
meta.Streams.ShouldContain("GROW2");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_state_streams_list_is_ordered_alphabetically()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("ZSTREAM", ["zs.>"], 1);
|
||||
await cluster.CreateStreamAsync("ASTREAM", ["as.>"], 1);
|
||||
await cluster.CreateStreamAsync("MSTREAM", ["ms.>"], 1);
|
||||
|
||||
var meta = cluster.GetMetaState();
|
||||
var streams = meta!.Streams.ToList();
|
||||
streams.Count.ShouldBe(3);
|
||||
streams[0].ShouldBe("ASTREAM");
|
||||
streams[1].ShouldBe("MSTREAM");
|
||||
streams[2].ShouldBe("ZSTREAM");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_state_after_10_stream_creations_tracks_all()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.CreateStreamAsync($"BULK{i:D2}", [$"bulk{i:D2}.>"], 1);
|
||||
|
||||
var meta = cluster.GetMetaState();
|
||||
meta!.Streams.Count.ShouldBe(10);
|
||||
for (var i = 0; i < 10; i++)
|
||||
meta.Streams.ShouldContain($"BULK{i:D2}");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// API routing from any node
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamCRUD — jetstream_cluster_3_test.go
|
||||
[Fact]
|
||||
public async Task Stream_create_via_RequestAsync_routes_correctly()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var resp = await cluster.RequestAsync(
|
||||
$"{JetStreamApiSubjects.StreamCreate}APITEST",
|
||||
"{\"name\":\"APITEST\",\"subjects\":[\"api.>\"],\"retention\":\"limits\",\"storage\":\"memory\",\"num_replicas\":1}");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("APITEST");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_info_via_RequestAsync_returns_valid_info()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("INFOAPI", ["infoapi.>"], 1);
|
||||
|
||||
var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}INFOAPI", "{}");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("INFOAPI");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_names_via_RequestAsync_lists_all_streams()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("NAMES1", ["n1.>"], 1);
|
||||
await cluster.CreateStreamAsync("NAMES2", ["n2.>"], 1);
|
||||
await cluster.CreateStreamAsync("NAMES3", ["n3.>"], 1);
|
||||
|
||||
var resp = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamNames.ShouldNotBeNull();
|
||||
resp.StreamNames!.Count.ShouldBe(3);
|
||||
resp.StreamNames.ShouldContain("NAMES1");
|
||||
resp.StreamNames.ShouldContain("NAMES2");
|
||||
resp.StreamNames.ShouldContain("NAMES3");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_list_via_RequestAsync_returns_all_streams()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("LIST1", ["l1.>"], 1);
|
||||
await cluster.CreateStreamAsync("LIST2", ["l2.>"], 1);
|
||||
|
||||
var resp = await cluster.RequestAsync(JetStreamApiSubjects.StreamList, "{}");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamNames.ShouldNotBeNull();
|
||||
resp.StreamNames!.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_create_via_RequestAsync_routes_correctly()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("CONCREATE", ["cc.>"], 1);
|
||||
|
||||
var resp = await cluster.RequestAsync(
|
||||
$"{JetStreamApiSubjects.ConsumerCreate}CONCREATE.dur1",
|
||||
"{\"durable_name\":\"dur1\",\"ack_policy\":\"none\"}");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
resp.ConsumerInfo!.Config.DurableName.ShouldBe("dur1");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_info_via_RequestAsync_returns_valid_info()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("CONINFO", ["ci.>"], 1);
|
||||
await cluster.CreateConsumerAsync("CONINFO", "infoconsumer");
|
||||
|
||||
var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}CONINFO.infoconsumer", "{}");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
resp.ConsumerInfo!.Config.DurableName.ShouldBe("infoconsumer");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_names_via_RequestAsync_lists_consumers()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("CONNAMES", ["cn.>"], 1);
|
||||
await cluster.CreateConsumerAsync("CONNAMES", "cname1");
|
||||
await cluster.CreateConsumerAsync("CONNAMES", "cname2");
|
||||
|
||||
var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CONNAMES", "{}");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerNames.ShouldNotBeNull();
|
||||
resp.ConsumerNames!.Count.ShouldBe(2);
|
||||
resp.ConsumerNames.ShouldContain("cname1");
|
||||
resp.ConsumerNames.ShouldContain("cname2");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Unknown_API_subject_returns_error_response()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var resp = await cluster.RequestAsync("$JS.API.UNKNOWN.ROUTE", "{}");
|
||||
|
||||
resp.Error.ShouldNotBeNull();
|
||||
resp.Error!.Code.ShouldBe(404);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Empty_payload_to_stream_create_uses_name_from_subject()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
// Empty payload causes ParseConfig to return default config; the handler
|
||||
// falls back to extracting the stream name from the API subject.
|
||||
var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamCreate}EMPTYTEST", "");
|
||||
|
||||
// With name recovered from subject, the create should succeed
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("EMPTYTEST");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Invalid_JSON_to_API_falls_back_to_default_config()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
// Invalid JSON causes ParseConfig to fall back to a default config;
|
||||
// the stream name is extracted from the subject and a default subject is added.
|
||||
var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamCreate}BADJSONTEST", "not-valid-json{{{{");
|
||||
|
||||
// The handler is resilient: it defaults to the name from the subject.
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("BADJSONTEST");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Meta leader operations
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterMetaLeaderStepdown — jetstream_cluster_3_test.go
|
||||
[Fact]
|
||||
public async Task StepDownMetaLeader_changes_leader_id()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var oldLeader = cluster.GetMetaLeaderId();
|
||||
oldLeader.ShouldNotBeNullOrEmpty();
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var newLeader = cluster.GetMetaLeaderId();
|
||||
newLeader.ShouldNotBe(oldLeader);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task New_meta_leader_is_different_from_previous()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var leader1 = cluster.GetMetaLeaderId();
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
var leader2 = cluster.GetMetaLeaderId();
|
||||
|
||||
leader2.ShouldNotBe(leader1);
|
||||
leader2.ShouldNotBeNullOrEmpty();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Multiple_meta_stepdowns_cycle_leaders()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var seenLeaders = new HashSet<string>();
|
||||
|
||||
seenLeaders.Add(cluster.GetMetaLeaderId());
|
||||
cluster.StepDownMetaLeader();
|
||||
seenLeaders.Add(cluster.GetMetaLeaderId());
|
||||
cluster.StepDownMetaLeader();
|
||||
seenLeaders.Add(cluster.GetMetaLeaderId());
|
||||
|
||||
// With 3 nodes, stepping down twice should produce at least 2 distinct leaders
|
||||
seenLeaders.Count.ShouldBeGreaterThanOrEqualTo(2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_creation_works_after_meta_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var resp = await cluster.CreateStreamAsync("AFTERSTEP", ["after.>"], 1);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("AFTERSTEP");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_creation_works_after_meta_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("CONAFTERSTEP", ["cas.>"], 1);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var resp = await cluster.CreateConsumerAsync("CONAFTERSTEP", "postdown");
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
resp.ConsumerInfo!.Config.DurableName.ShouldBe("postdown");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Publish_works_after_meta_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("PUBAFTERSTEP", ["pub.>"], 1);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var ack = await cluster.PublishAsync("pub.event", "post-stepdown-message");
|
||||
ack.Stream.ShouldBe("PUBAFTERSTEP");
|
||||
ack.Seq.ShouldBe(1UL);
|
||||
ack.ErrorCode.ShouldBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Fetch_works_after_meta_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("FETCHAFTERSTEP", ["fetch.>"], 1);
|
||||
await cluster.CreateConsumerAsync("FETCHAFTERSTEP", "fetchcons", filterSubject: "fetch.>");
|
||||
|
||||
for (var i = 0; i < 3; i++)
|
||||
await cluster.PublishAsync("fetch.event", $"msg-{i}");
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var batch = await cluster.FetchAsync("FETCHAFTERSTEP", "fetchcons", 3);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_info_accurate_after_meta_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("INFOAFTERSTEP", ["ias.>"], 1);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await cluster.PublishAsync("ias.event", $"msg-{i}");
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var info = await cluster.GetStreamInfoAsync("INFOAFTERSTEP");
|
||||
info.Error.ShouldBeNull();
|
||||
info.StreamInfo.ShouldNotBeNull();
|
||||
info.StreamInfo!.State.Messages.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_delete_works_after_meta_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("DELAFTERSTEP", ["das.>"], 1);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELAFTERSTEP", "{}");
|
||||
resp.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Three_meta_stepdowns_followed_by_stream_creation_works()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
cluster.StepDownMetaLeader();
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var resp = await cluster.CreateStreamAsync("TRIPLE", ["triple.>"], 1);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("TRIPLE");
|
||||
|
||||
var meta = cluster.GetMetaState();
|
||||
meta!.Streams.ShouldContain("TRIPLE");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Account limit governance (cluster mode)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLimitWithAccountDefaults — jetstream_cluster_1_test.go:124
|
||||
[Fact]
|
||||
public async Task Multiple_streams_up_to_limit_succeed()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
{
|
||||
var resp = await cluster.CreateStreamAsync($"LIMIT{i}", [$"lim{i}.>"], 1);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
var meta = cluster.GetMetaState();
|
||||
meta!.Streams.Count.ShouldBe(5);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_with_max_messages_enforced_in_cluster()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "MAXMSGCLUSTER",
|
||||
Subjects = ["mmcluster.>"],
|
||||
Replicas = 1,
|
||||
MaxMsgs = 3,
|
||||
};
|
||||
var resp = cluster.CreateStreamDirect(cfg);
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.PublishAsync("mmcluster.event", $"msg-{i}");
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("MAXMSGCLUSTER");
|
||||
state.Messages.ShouldBeLessThanOrEqualTo(3UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_with_max_bytes_enforced_in_cluster()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "MAXBYTECLUSTER",
|
||||
Subjects = ["mbcluster.>"],
|
||||
Replicas = 1,
|
||||
MaxBytes = 256,
|
||||
Discard = DiscardPolicy.Old,
|
||||
};
|
||||
var resp = cluster.CreateStreamDirect(cfg);
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
await cluster.PublishAsync("mbcluster.event", new string('X', 64));
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("MAXBYTECLUSTER");
|
||||
// MaxBytes enforcement ensures total bytes stays bounded
|
||||
((long)state.Bytes).ShouldBeLessThanOrEqualTo(cfg.MaxBytes + 128);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Delete_then_recreate_stays_within_limits()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var resp1 = await cluster.CreateStreamAsync("RECREATE", ["rec.>"], 1);
|
||||
resp1.Error.ShouldBeNull();
|
||||
|
||||
var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}RECREATE", "{}");
|
||||
del.Success.ShouldBeTrue();
|
||||
|
||||
var resp2 = await cluster.CreateStreamAsync("RECREATE", ["rec.>"], 1);
|
||||
resp2.Error.ShouldBeNull();
|
||||
resp2.StreamInfo!.Config.Name.ShouldBe("RECREATE");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_creation_respects_limits()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("CONLIMIT", ["conlim.>"], 1);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
{
|
||||
var resp = await cluster.CreateConsumerAsync("CONLIMIT", $"conlim{i}");
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
var names = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CONLIMIT", "{}");
|
||||
names.ConsumerNames.ShouldNotBeNull();
|
||||
names.ConsumerNames!.Count.ShouldBe(5);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream governance
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamCreate — jetstream_cluster_3_test.go
|
||||
[Fact]
|
||||
public void Stream_create_validation_requires_name()
|
||||
{
|
||||
var streamManager = new StreamManager();
|
||||
var resp = streamManager.CreateOrUpdate(new StreamConfig { Name = "" });
|
||||
resp.Error.ShouldNotBeNull();
|
||||
resp.Error!.Description.ShouldContain("name");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_create_validation_requires_subjects_via_router()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
// Providing a name but no subjects — router should handle gracefully
|
||||
var resp = await cluster.RequestAsync(
|
||||
$"{JetStreamApiSubjects.StreamCreate}NOSUBJ",
|
||||
"{\"name\":\"NOSUBJ\"}");
|
||||
|
||||
// Either succeeds (subjects optional) or returns an error; it must not throw
|
||||
(resp.Error is not null || resp.StreamInfo is not null).ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_create_with_empty_name_fails()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var resp = await cluster.RequestAsync(
|
||||
$"{JetStreamApiSubjects.StreamCreate}",
|
||||
"{\"name\":\"\",\"subjects\":[\"x.>\"]}");
|
||||
|
||||
resp.Error.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_create_with_duplicate_name_returns_existing()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var first = await cluster.CreateStreamAsync("DUP_GOV", ["dupgov.>"], 1);
|
||||
first.Error.ShouldBeNull();
|
||||
first.StreamInfo!.Config.Name.ShouldBe("DUP_GOV");
|
||||
|
||||
// Creating the same stream again (idempotent)
|
||||
var second = await cluster.CreateStreamAsync("DUP_GOV", ["dupgov.>"], 1);
|
||||
second.Error.ShouldBeNull();
|
||||
second.StreamInfo!.Config.Name.ShouldBe("DUP_GOV");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_update_preserves_messages()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("UPDPRES", ["updpres.>"], 1);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await cluster.PublishAsync("updpres.event", $"msg-{i}");
|
||||
|
||||
var update = cluster.UpdateStream("UPDPRES", ["updpres.>"], replicas: 1, maxMsgs: 100);
|
||||
update.Error.ShouldBeNull();
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("UPDPRES");
|
||||
state.Messages.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_update_can_change_subjects()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("UPDSUBJ", ["old.>"], 1);
|
||||
|
||||
var update = cluster.UpdateStream("UPDSUBJ", ["new.>"], replicas: 1);
|
||||
update.Error.ShouldBeNull();
|
||||
update.StreamInfo!.Config.Subjects.ShouldContain("new.>");
|
||||
update.StreamInfo.Config.Subjects.ShouldNotContain("old.>");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_delete_removes_from_meta_state()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("DELMETA", ["delmeta.>"], 1);
|
||||
|
||||
var metaBefore = cluster.GetMetaState();
|
||||
metaBefore!.Streams.ShouldContain("DELMETA");
|
||||
|
||||
var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELMETA", "{}");
|
||||
del.Success.ShouldBeTrue();
|
||||
|
||||
// After delete, the stream manager no longer shows it, but meta group
|
||||
// state tracks what was proposed; verify via stream info being not found
|
||||
var info = await cluster.GetStreamInfoAsync("DELMETA");
|
||||
info.Error.ShouldNotBeNull();
|
||||
info.Error!.Code.ShouldBe(404);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Deleted_stream_not_in_stream_names_list()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("KEEPME", ["keep.>"], 1);
|
||||
await cluster.CreateStreamAsync("DELME", ["del.>"], 1);
|
||||
|
||||
await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELME", "{}");
|
||||
|
||||
var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
|
||||
names.StreamNames.ShouldNotBeNull();
|
||||
names.StreamNames!.ShouldContain("KEEPME");
|
||||
names.StreamNames.ShouldNotContain("DELME");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_create_after_delete_with_same_name_succeeds()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("RECYCLE", ["recycle.>"], 1);
|
||||
|
||||
await cluster.PublishAsync("recycle.event", "original");
|
||||
|
||||
await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}RECYCLE", "{}");
|
||||
|
||||
var resp = await cluster.CreateStreamAsync("RECYCLE", ["recycle.>"], 1);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("RECYCLE");
|
||||
|
||||
// New stream starts at sequence 1
|
||||
var ack = await cluster.PublishAsync("recycle.event", "new-message");
|
||||
ack.Stream.ShouldBe("RECYCLE");
|
||||
ack.Seq.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Twenty_streams_in_same_cluster_all_tracked()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
{
|
||||
var resp = await cluster.CreateStreamAsync($"TWENTY{i:D2}", [$"twenty{i:D2}.>"], 1);
|
||||
resp.Error.ShouldBeNull();
|
||||
}
|
||||
|
||||
var meta = cluster.GetMetaState();
|
||||
meta!.Streams.Count.ShouldBe(20);
|
||||
|
||||
var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
|
||||
names.StreamNames.ShouldNotBeNull();
|
||||
names.StreamNames!.Count.ShouldBe(20);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_info_for_non_existent_stream_returns_error()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}DOESNOTEXIST", "{}");
|
||||
|
||||
resp.Error.ShouldNotBeNull();
|
||||
resp.Error!.Code.ShouldBe(404);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional governance: Meta stepdown via API subject
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterMetaLeaderStepdown — jetstream_cluster_3_test.go
|
||||
[Fact]
|
||||
public async Task Meta_leader_stepdown_via_API_subject_changes_leader()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var before = cluster.GetMetaLeaderId();
|
||||
before.ShouldNotBeNullOrEmpty();
|
||||
|
||||
var resp = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
|
||||
resp.Success.ShouldBeTrue();
|
||||
|
||||
var after = cluster.GetMetaLeaderId();
|
||||
after.ShouldNotBe(before);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_leader_stepdown_via_API_increments_leadership_version()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
var versionBefore = cluster.GetMetaState()!.LeadershipVersion;
|
||||
|
||||
await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
|
||||
|
||||
var versionAfter = cluster.GetMetaState()!.LeadershipVersion;
|
||||
versionAfter.ShouldBeGreaterThan(versionBefore);
|
||||
}
|
||||
|
||||
[Fact]
public async Task Stream_publish_and_fetch_round_trip_in_cluster()
{
    // Publish N messages into a clustered stream and fetch them all back
    // through a consumer; stream state must agree with the publish count.
    const int messageCount = 5;

    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("ROUNDTRIP", ["rt.>"], 1);
    await cluster.CreateConsumerAsync("ROUNDTRIP", "rtcon", filterSubject: "rt.>");

    for (var n = 0; n < messageCount; n++)
    {
        await cluster.PublishAsync("rt.event", $"round-trip-{n}");
    }

    var batch = await cluster.FetchAsync("ROUNDTRIP", "rtcon", messageCount);
    batch.Messages.Count.ShouldBe(messageCount);

    var streamState = await cluster.GetStreamStateAsync("ROUNDTRIP");
    streamState.Messages.ShouldBe((ulong)messageCount);
}
|
||||
|
||||
[Fact]
public async Task Account_info_reflects_stream_and_consumer_counts_in_cluster()
{
    // Two streams and two consumers (both on the first stream) must be
    // reflected in the account-info totals.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("ACCTGOV1", ["ag1.>"], 1);
    await cluster.CreateStreamAsync("ACCTGOV2", ["ag2.>"], 1);
    await cluster.CreateConsumerAsync("ACCTGOV1", "acctcon1");
    await cluster.CreateConsumerAsync("ACCTGOV1", "acctcon2");

    var info = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");

    info.AccountInfo.ShouldNotBeNull();
    info.AccountInfo!.Streams.ShouldBe(2);
    info.AccountInfo.Consumers.ShouldBe(2);
}
|
||||
|
||||
[Fact]
public async Task Stream_purge_via_API_clears_messages_and_meta_stream_count_unchanged()
{
    // Purging removes messages but the stream itself stays registered in
    // the meta state (purge != delete).
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("PURGEMETA", ["purgemeta.>"], 1);

    for (var n = 0; n < 10; n++)
    {
        await cluster.PublishAsync("purgemeta.event", $"msg-{n}");
    }

    (await cluster.GetStreamStateAsync("PURGEMETA")).Messages.ShouldBe(10UL);

    var purgeResponse = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGEMETA", "{}");
    purgeResponse.Success.ShouldBeTrue();

    (await cluster.GetStreamStateAsync("PURGEMETA")).Messages.ShouldBe(0UL);

    var metaState = cluster.GetMetaState();
    metaState!.Streams.ShouldContain("PURGEMETA");
}
|
||||
|
||||
[Fact]
public async Task Consumer_list_returns_all_consumers_in_cluster()
{
    // Three consumers on one stream must all appear in CONSUMER.LIST.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CONLISTGOV", ["clgov.>"], 1);

    foreach (var durable in new[] { "gd1", "gd2", "gd3" })
    {
        await cluster.CreateConsumerAsync("CONLISTGOV", durable);
    }

    var listResponse = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerList}CONLISTGOV", "{}");

    listResponse.ConsumerNames.ShouldNotBeNull();
    listResponse.ConsumerNames!.Count.ShouldBe(3);
}
|
||||
|
||||
[Fact]
public async Task Meta_state_streams_list_shrinks_after_stream_delete_via_stream_manager()
{
    // Deleting a stream through the API router (which delegates to the
    // stream manager) must shrink the set of stream names it reports.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("SHRINK1", ["sh1.>"], 1);
    await cluster.CreateStreamAsync("SHRINK2", ["sh2.>"], 1);

    var metaBefore = cluster.GetMetaState();
    metaBefore!.Streams.Count.ShouldBe(2);

    // Delete via API router which calls stream manager delete.
    await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}SHRINK1", "{}");

    // The stream names list from the router should reflect the deletion.
    var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    // Assert non-null before dereferencing so a null regression produces a
    // clear Shouldly failure instead of a NullReferenceException.
    names.StreamNames.ShouldNotBeNull();
    names.StreamNames!.Count.ShouldBe(1);
    names.StreamNames.ShouldContain("SHRINK2");
}
|
||||
}
|
||||
@@ -0,0 +1,825 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: placement caps, cluster size variations, replica defaults, R1/R3/R5/R7
|
||||
// placement, stepdown and info consistency, concurrent creation, long names,
|
||||
// subject overlap, re-create after delete, update without message loss.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster stream placement semantics:
|
||||
/// replica caps at cluster size, various cluster sizes, replica defaults,
|
||||
/// concurrent creation, leader stepdown, info consistency, and edge cases.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterStreamPlacementTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_caps_five_replicas_in_three_node_cluster()
{
    // R5 requested in a 3-node cluster is capped to the cluster size.
    new AssetPlacementPlanner(nodes: 3)
        .PlanReplicas(replicas: 5)
        .Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_allows_exact_cluster_size_replicas()
{
    // Requesting exactly as many replicas as there are nodes is allowed.
    new AssetPlacementPlanner(nodes: 3)
        .PlanReplicas(replicas: 3)
        .Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_zero_replicas_defaults_to_one()
{
    // A zero replica count falls back to a single replica.
    new AssetPlacementPlanner(nodes: 3)
        .PlanReplicas(replicas: 0)
        .Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_negative_replicas_treated_as_one()
{
    // Negative replica counts are normalized to a single replica.
    new AssetPlacementPlanner(nodes: 3)
        .PlanReplicas(replicas: -1)
        .Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R1_in_single_node_cluster()
{
    // R1 in a 1-node cluster places exactly one replica.
    new AssetPlacementPlanner(nodes: 1)
        .PlanReplicas(replicas: 1)
        .Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_caps_to_single_node_in_one_node_cluster()
{
    // R3 requested in a 1-node cluster is capped to one replica.
    new AssetPlacementPlanner(nodes: 1)
        .PlanReplicas(replicas: 3)
        .Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R1_in_three_node_cluster()
{
    // R1 in a 3-node cluster still yields a single-node placement.
    new AssetPlacementPlanner(nodes: 3)
        .PlanReplicas(replicas: 1)
        .Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R3_in_five_node_cluster()
{
    // R3 fits comfortably in a 5-node cluster: three replicas placed.
    new AssetPlacementPlanner(nodes: 5)
        .PlanReplicas(replicas: 3)
        .Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R5_in_seven_node_cluster()
{
    // R5 fits in a 7-node cluster: five replicas placed.
    new AssetPlacementPlanner(nodes: 7)
        .PlanReplicas(replicas: 5)
        .Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R7_in_seven_node_cluster_exact_match()
{
    // R7 in a 7-node cluster uses every node.
    new AssetPlacementPlanner(nodes: 7)
        .PlanReplicas(replicas: 7)
        .Count.ShouldBe(7);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_caps_R7_in_five_node_cluster_to_five()
{
    // R7 requested in a 5-node cluster is capped to five replicas.
    new AssetPlacementPlanner(nodes: 5)
        .PlanReplicas(replicas: 7)
        .Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Multiple_streams_with_different_placements_coexist()
{
    // R1, R3 and R5 streams can coexist in a 5-node cluster and all show
    // up in STREAM.NAMES.
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    await cluster.CreateStreamAsync("P1", ["p1.>"], replicas: 1);
    await cluster.CreateStreamAsync("P3", ["p3.>"], replicas: 3);
    await cluster.CreateStreamAsync("P5", ["p5.>"], replicas: 5);

    var nameList = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");

    nameList.StreamNames.ShouldNotBeNull();
    nameList.StreamNames!.Count.ShouldBe(3);
    foreach (var expected in new[] { "P1", "P3", "P5" })
    {
        nameList.StreamNames.ShouldContain(expected);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_with_replicas_equal_to_cluster_size_succeeds()
{
    // An R3 stream in a 3-node cluster is created without error and gets
    // a replica group spanning all three nodes.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var createResponse = await cluster.CreateStreamAsync("FULL3", ["full3.>"], replicas: 3);
    createResponse.Error.ShouldBeNull();

    var replicaGroup = cluster.GetReplicaGroup("FULL3");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_creation_after_another_stream_exists_succeeds()
{
    // Creating a second stream after one already exists must succeed and
    // report the second stream's config.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("FIRST", ["first.>"], replicas: 3);

    var secondResponse = await cluster.CreateStreamAsync("SECOND", ["second.>"], replicas: 3);

    secondResponse.Error.ShouldBeNull();
    secondResponse.StreamInfo.ShouldNotBeNull();
    secondResponse.StreamInfo!.Config.Name.ShouldBe("SECOND");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Ten_streams_in_same_cluster_all_exist()
{
    // Ten R3 streams created sequentially must all be listed by name.
    const int streamCount = 10;

    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    for (var n = 0; n < streamCount; n++)
    {
        await cluster.CreateStreamAsync($"PLACE{n}", [$"place{n}.>"], replicas: 3);
    }

    var nameList = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    nameList.StreamNames.ShouldNotBeNull();
    nameList.StreamNames!.Count.ShouldBe(streamCount);
    for (var n = 0; n < streamCount; n++)
    {
        nameList.StreamNames.ShouldContain($"PLACE{n}");
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Replicated_stream_survives_meta_leader_stepdown()
{
    // A meta-leader change must not affect an existing R3 stream's data.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("SURV", ["surv.>"], replicas: 3);
    for (var n = 0; n < 5; n++)
    {
        await cluster.PublishAsync("surv.event", $"msg-{n}");
    }

    var leaderBefore = cluster.GetMetaLeaderId();
    cluster.StepDownMetaLeader();
    cluster.GetMetaLeaderId().ShouldNotBe(leaderBefore);

    // Stream is still accessible after the meta stepdown.
    var streamState = await cluster.GetStreamStateAsync("SURV");
    streamState.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_info_consistent_after_meta_stepdown()
{
    // STREAM.INFO must report the same config and message count after a
    // meta-leader stepdown.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("INFOSTEP", ["infostep.>"], replicas: 3);
    for (var n = 0; n < 7; n++)
    {
        await cluster.PublishAsync("infostep.event", $"msg-{n}");
    }

    cluster.StepDownMetaLeader();

    var infoResponse = await cluster.GetStreamInfoAsync("INFOSTEP");
    infoResponse.Error.ShouldBeNull();
    infoResponse.StreamInfo.ShouldNotBeNull();
    infoResponse.StreamInfo!.Config.Name.ShouldBe("INFOSTEP");
    infoResponse.StreamInfo.State.Messages.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_more_replicas_than_nodes_caps_not_errors()
{
    // AssetPlacementPlanner silently caps an oversized replica request
    // rather than throwing.
    var planner = new AssetPlacementPlanner(nodes: 3);

    var plan = () => planner.PlanReplicas(replicas: 999);
    plan.ShouldNotThrow();

    var placement = planner.PlanReplicas(replicas: 999);
    placement.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_cluster_size_one_always_returns_one_replica()
{
    // In a 1-node cluster every replica request (R1..R10) collapses to one.
    var planner = new AssetPlacementPlanner(nodes: 1);

    foreach (var requested in Enumerable.Range(1, 10))
    {
        planner.PlanReplicas(replicas: requested).Count.ShouldBe(1);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_exists_after_remove_and_restart_node_simulation()
{
    // Removing and restarting a replica node must leave stream data intact.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("NODEREMOVE", ["noderemove.>"], replicas: 3);
    for (var n = 0; n < 5; n++)
    {
        await cluster.PublishAsync("noderemove.event", $"msg-{n}");
    }

    cluster.RemoveNode(2);
    cluster.SimulateNodeRestart(2);

    var streamState = await cluster.GetStreamStateAsync("NODEREMOVE");
    streamState.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Concurrent_stream_creation_all_streams_verify_exist()
{
    // Five streams created concurrently must all end up registered.
    const int streamCount = 5;

    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var creations = Enumerable.Range(0, streamCount)
        .Select(n => cluster.CreateStreamAsync($"CONC{n}", [$"conc{n}.>"], replicas: 3));
    await Task.WhenAll(creations);

    var nameList = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    nameList.StreamNames.ShouldNotBeNull();
    nameList.StreamNames!.Count.ShouldBe(streamCount);
    for (var n = 0; n < streamCount; n++)
    {
        nameList.StreamNames.ShouldContain($"CONC{n}");
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_names_can_be_long_strings()
{
    // A 60-character stream name is accepted and echoed back in the
    // created stream's config.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var longName = new string('A', 60);
    var resp = await cluster.CreateStreamAsync(longName, [$"{longName.ToLowerInvariant()}.>"], replicas: 3);

    resp.Error.ShouldBeNull();
    // Assert non-null before dereferencing so a null regression produces a
    // clear Shouldly failure instead of a NullReferenceException.
    resp.StreamInfo.ShouldNotBeNull();
    resp.StreamInfo!.Config.Name.ShouldBe(longName);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects server/jetstream_cluster_1_test.go:1248
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_subjects_can_be_completely_distinct_from_others()
{
    // Three streams on disjoint subject spaces each receive only their
    // own published messages, confirmed via the publish ack's stream name.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("DISTINCT1", ["ns1.>"], replicas: 3);
    await cluster.CreateStreamAsync("DISTINCT2", ["ns2.>"], replicas: 3);
    await cluster.CreateStreamAsync("DISTINCT3", ["ns3.>"], replicas: 3);

    (await cluster.PublishAsync("ns1.event", "msg1")).Stream.ShouldBe("DISTINCT1");
    (await cluster.PublishAsync("ns2.event", "msg2")).Stream.ShouldBe("DISTINCT2");
    (await cluster.PublishAsync("ns3.event", "msg3")).Stream.ShouldBe("DISTINCT3");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Re_creating_deleted_stream_with_same_placement_works()
{
    // Delete an R3 stream, then re-create it with the same name and
    // placement; the second creation must succeed cleanly.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("REDEL", ["redel.>"], replicas: 3);
    await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}REDEL", "{}");

    var recreateResponse = await cluster.CreateStreamAsync("REDEL", ["redel.>"], replicas: 3);

    recreateResponse.Error.ShouldBeNull();
    recreateResponse.StreamInfo.ShouldNotBeNull();
    recreateResponse.StreamInfo!.Config.Name.ShouldBe("REDEL");
    recreateResponse.StreamInfo.Config.Replicas.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_update_does_not_lose_published_messages()
{
    // Updating a stream's config must not discard messages already stored.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("NOLOSS", ["noloss.>"], replicas: 3);
    for (var n = 0; n < 15; n++)
    {
        await cluster.PublishAsync("noloss.event", $"msg-{n}");
    }

    var updateResponse = cluster.UpdateStream("NOLOSS", ["noloss.>"], replicas: 3, maxMsgs: 100);
    updateResponse.Error.ShouldBeNull();

    var streamState = await cluster.GetStreamStateAsync("NOLOSS");
    streamState.Messages.ShouldBe(15UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R3_stream_leader_stepdown_elects_new_leader()
{
    // Stream-leader stepdown on an R3 stream must elect a different node.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("PLSTEP", ["plstep.>"], replicas: 3);

    var leaderBefore = cluster.GetStreamLeaderId("PLSTEP");
    leaderBefore.ShouldNotBeNullOrWhiteSpace();

    var stepdown = await cluster.StepDownStreamLeaderAsync("PLSTEP");
    stepdown.Success.ShouldBeTrue();

    var leaderAfter = cluster.GetStreamLeaderId("PLSTEP");
    leaderAfter.ShouldNotBe(leaderBefore);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_info_consistent_after_R3_stream_leader_stepdown()
{
    // STREAM.INFO must stay consistent after a stream-leader stepdown.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("PLINFOSTEP", ["plinfostep.>"], replicas: 3);
    for (var n = 0; n < 5; n++)
    {
        await cluster.PublishAsync("plinfostep.event", $"msg-{n}");
    }

    await cluster.StepDownStreamLeaderAsync("PLINFOSTEP");

    var infoResponse = await cluster.GetStreamInfoAsync("PLINFOSTEP");
    infoResponse.Error.ShouldBeNull();
    infoResponse.StreamInfo.ShouldNotBeNull();
    infoResponse.StreamInfo!.Config.Replicas.ShouldBe(3);
    infoResponse.StreamInfo.State.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Placement_validation_replicas_capped_at_cluster_node_count()
{
    // Replica groups never exceed the cluster's node count, and an
    // unknown stream has no replica group at all.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // No group exists before the stream is created.
    cluster.GetReplicaGroup("NOTEXIST").ShouldBeNull();

    // Creating with replicas should work (capped to cluster size).
    var createResponse = await cluster.CreateStreamAsync("CAPTEST", ["captest.>"], replicas: 3);
    createResponse.Error.ShouldBeNull();

    var replicaGroup = cluster.GetReplicaGroup("CAPTEST");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBeLessThanOrEqualTo(cluster.NodeCount);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_cluster_size_reflected_correctly_for_different_sizes()
{
    // R3 placement across 1/3/5/7-node clusters: capped at 1 for the
    // single node, exactly 3 everywhere else.
    var expectedBySize = new Dictionary<int, int>
    {
        [1] = 1,
        [3] = 3,
        [5] = 3,
        [7] = 3,
    };

    foreach (var (clusterSize, expected) in expectedBySize)
    {
        new AssetPlacementPlanner(clusterSize).PlanReplicas(3).Count.ShouldBe(expected);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMetaSnapshotsAndCatchup server/jetstream_cluster_1_test.go:833
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Meta_group_tracks_stream_placement_changes_through_stepdown()
{
    // Meta state retains both streams across a leader stepdown, and the
    // leadership version strictly increases.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("META_P1", ["meta_p1.>"], replicas: 1);
    await cluster.CreateStreamAsync("META_P3", ["meta_p3.>"], replicas: 3);

    var stateBefore = cluster.GetMetaState();
    stateBefore.ShouldNotBeNull();
    stateBefore!.Streams.ShouldContain("META_P1");
    stateBefore.Streams.ShouldContain("META_P3");

    cluster.StepDownMetaLeader();

    var stateAfter = cluster.GetMetaState();
    stateAfter.ShouldNotBeNull();
    stateAfter!.Streams.ShouldContain("META_P1");
    stateAfter.Streams.ShouldContain("META_P3");
    stateAfter.LeadershipVersion.ShouldBeGreaterThan(stateBefore.LeadershipVersion);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_list_api_returns_all_streams_in_five_node_cluster()
{
    // STREAM.LIST in a 5-node cluster reports all three streams,
    // regardless of their replica counts.
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    await cluster.CreateStreamAsync("FL1", ["fl1.>"], replicas: 1);
    await cluster.CreateStreamAsync("FL3", ["fl3.>"], replicas: 3);
    await cluster.CreateStreamAsync("FL5", ["fl5.>"], replicas: 5);

    var listResponse = await cluster.RequestAsync(JetStreamApiSubjects.StreamList, "{}");

    listResponse.StreamNames.ShouldNotBeNull();
    listResponse.StreamNames!.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R1_placement_in_five_node_cluster_creates_one_node_group()
{
    // An R1 stream in a 5-node cluster gets a single-node replica group.
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    await cluster.CreateStreamAsync("R1IN5", ["r1in5.>"], replicas: 1);

    var replicaGroup = cluster.GetReplicaGroup("R1IN5");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R3_placement_in_five_node_cluster_creates_three_node_group()
{
    // An R3 stream in a 5-node cluster gets exactly three replica nodes.
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    await cluster.CreateStreamAsync("R3IN5", ["r3in5.>"], replicas: 3);

    var replicaGroup = cluster.GetReplicaGroup("R3IN5");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consecutive_meta_stepdowns_preserve_stream_placements()
{
    // Three back-to-back meta stepdowns must not lose any stream placement.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("CONSEC1", ["consec1.>"], replicas: 3);
    await cluster.CreateStreamAsync("CONSEC2", ["consec2.>"], replicas: 1);

    for (var round = 0; round < 3; round++)
    {
        cluster.StepDownMetaLeader();
    }

    var nameList = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    nameList.StreamNames.ShouldNotBeNull();
    nameList.StreamNames!.ShouldContain("CONSEC1");
    nameList.StreamNames.ShouldContain("CONSEC2");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Publish_after_stream_update_works_correctly()
{
    // Publishing continues to work after a config update; counts from
    // before and after the update both land in the stream.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("POSTUPD", ["postupd.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
    {
        await cluster.PublishAsync("postupd.event", $"before-{n}");
    }

    cluster.UpdateStream("POSTUPD", ["postupd.>"], replicas: 3, maxMsgs: 100);

    for (var n = 0; n < 5; n++)
    {
        await cluster.PublishAsync("postupd.event", $"after-{n}");
    }

    var streamState = await cluster.GetStreamStateAsync("POSTUPD");
    streamState.Messages.ShouldBe(10UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamPurge server/jetstream_cluster_1_test.go:522
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R3_stream_purge_after_stepdown_clears_messages()
{
    // A purge issued after a stream-leader stepdown must still clear the
    // stream on the new leader.
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("PURGESTEP", ["purgestep.>"], replicas: 3);
    for (var n = 0; n < 10; n++)
    {
        await cluster.PublishAsync("purgestep.event", $"msg-{n}");
    }

    await cluster.StepDownStreamLeaderAsync("PURGESTEP");

    var purgeResponse = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGESTEP", "{}");
    purgeResponse.Success.ShouldBeTrue();

    var streamState = await cluster.GetStreamStateAsync("PURGESTEP");
    streamState.Messages.ShouldBe(0UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
// ---------------------------------------------------------------

[Fact]
public async Task R3_stream_has_leader_with_naming_convention()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("LEADNM", ["leadnm.>"], replicas: 3);

    // A freshly placed R=3 stream must expose a replica group with an elected leader.
    var replicaGroup = cluster.GetReplicaGroup("LEADNM");
    replicaGroup.ShouldNotBeNull();

    var leader = replicaGroup!.Leader;
    leader.Id.ShouldNotBeNullOrWhiteSpace();
    leader.IsLeader.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
// ---------------------------------------------------------------

[Fact]
public async Task Account_info_reflects_correct_stream_count_after_placements()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // One R=1 and one R=3 placement both count once toward the account total.
    await cluster.CreateStreamAsync("ACCP1", ["accp1.>"], replicas: 1);
    await cluster.CreateStreamAsync("ACCP3", ["accp3.>"], replicas: 3);

    var infoResponse = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    infoResponse.AccountInfo.ShouldNotBeNull();
    infoResponse.AccountInfo!.Streams.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
// ---------------------------------------------------------------

[Fact]
public async Task Wait_on_stream_leader_completes_for_newly_placed_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("WAITPL", ["waitpl.>"], replicas: 3);

    // Waiting on the leader must resolve within the timeout for a new placement.
    await cluster.WaitOnStreamLeaderAsync("WAITPL", timeoutMs: 2000);

    var electedLeader = cluster.GetStreamLeaderId("WAITPL");
    electedLeader.ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterDelete server/jetstream_cluster_1_test.go:472
// ---------------------------------------------------------------

[Fact]
public async Task Stream_delete_reduces_account_stream_count()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // Place two streams, delete one, and expect the account total to drop to one.
    await cluster.CreateStreamAsync("DEL_A", ["del_a.>"], replicas: 3);
    await cluster.CreateStreamAsync("DEL_B", ["del_b.>"], replicas: 3);

    await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DEL_A", "{}");

    var infoResponse = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
    infoResponse.AccountInfo!.Streams.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
// ---------------------------------------------------------------

[Fact]
public async Task Stream_placement_info_accessible_via_api_router_subject()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("APIPLC", ["apiplc.>"], replicas: 3);

    // Stream info requested over the API subject must carry the placement config.
    var infoResponse = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}APIPLC", "{}");
    infoResponse.Error.ShouldBeNull();
    infoResponse.StreamInfo.ShouldNotBeNull();

    var config = infoResponse.StreamInfo!.Config;
    config.Name.ShouldBe("APIPLC");
    config.Replicas.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterMemoryStore server/jetstream_cluster_1_test.go:423
// ---------------------------------------------------------------

[Fact]
public async Task Memory_store_placement_in_three_node_cluster_accepts_publishes()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("MEMPLACE", ["memplace.>"], replicas: 3, storage: StorageType.Memory);

    // A memory-backed R=3 stream must accept and count all publishes.
    var sent = 0;
    while (sent < 20)
    {
        await cluster.PublishAsync("memplace.event", $"msg-{sent}");
        sent++;
    }

    var state = await cluster.GetStreamStateAsync("MEMPLACE");
    state.Messages.ShouldBe(20UL);

    // The placement must actually have used the memory backend.
    cluster.GetStoreBackendType("MEMPLACE").ShouldBe("memory");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
// ---------------------------------------------------------------

[Fact]
public async Task Meta_leadership_version_increments_on_each_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // The initial election accounts for leadership version 1.
    var snapshot = cluster.GetMetaState();
    snapshot.ShouldNotBeNull();
    snapshot!.LeadershipVersion.ShouldBe(1L);

    // Each stepdown must bump the version by exactly one.
    for (var expected = 2L; expected <= 3L; expected++)
    {
        cluster.StepDownMetaLeader();
        cluster.GetMetaState()!.LeadershipVersion.ShouldBe(expected);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
// ---------------------------------------------------------------

[Fact]
public async Task Placement_group_leader_changes_on_stream_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    await cluster.CreateStreamAsync("STEPPL", ["steppl.>"], replicas: 3);

    // Record the leader identity before forcing an election.
    var before = cluster.GetReplicaGroup("STEPPL");
    before.ShouldNotBeNull();
    var previousLeaderId = before!.Leader.Id;

    await cluster.StepDownStreamLeaderAsync("STEPPL");

    // A different peer must now hold leadership of the replica group.
    var after = cluster.GetReplicaGroup("STEPPL");
    after.ShouldNotBeNull();
    after!.Leader.Id.ShouldNotBe(previousLeaderId);
    after.Leader.IsLeader.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
// ---------------------------------------------------------------

[Fact]
public async Task Placement_node_count_consistent_with_requested_replicas()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    // Replica counts 1, 2, and 5 must each yield a matching replica-group size.
    await cluster.CreateStreamAsync("NODECNT1", ["nc1.>"], replicas: 1);
    await cluster.CreateStreamAsync("NODECNT2", ["nc2.>"], replicas: 2);
    await cluster.CreateStreamAsync("NODECNT5", ["nc5.>"], replicas: 5);

    cluster.GetReplicaGroup("NODECNT1")!.Nodes.Count.ShouldBe(1);
    cluster.GetReplicaGroup("NODECNT2")!.Nodes.Count.ShouldBe(2);
    cluster.GetReplicaGroup("NODECNT5")!.Nodes.Count.ShouldBe(5);
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,228 @@
|
||||
// Parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// TestJetStreamClusterStreamLeaderStepDown (line 4925)
|
||||
// TestJetStreamClusterLeaderStepdown (line 5464)
|
||||
// TestJetStreamClusterLeader (line 73)
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Leader election and failover scenarios for JetStream clusters,
/// ported from the Go server's jetstream_cluster_1_test.go.
/// </summary>
public class LeaderFailoverParityTests
{
    /// <summary>
    /// Go parity: TestJetStreamClusterStreamLeaderStepDown (line 4925).
    /// A stream leader stepdown on an R=3 stream must elect a different leader,
    /// keep every stored message intact, and leave the stream writable with the
    /// next expected sequence number.
    /// </summary>
    [Fact]
    public async Task Stream_leader_stepdown_preserves_data_and_elects_new_leader()
    {
        await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);
        const string streamName = "STEPDOWN_DATA";
        await fx.CreateStreamAsync(streamName, subjects: ["sd.>"], replicas: 3);

        // Seed 10 messages before the stepdown (Go: msg, toSend := []byte("Hello JS Clustering"), 10)
        var seq = 1;
        while (seq <= 10)
        {
            var ack = await fx.PublishAsync($"sd.{seq}", $"msg-{seq}");
            ack.Seq.ShouldBe((ulong)seq);
            ack.Stream.ShouldBe(streamName);
            seq++;
        }

        // Remember who is currently leading.
        var previousLeader = fx.GetStreamLeaderId(streamName);
        previousLeader.ShouldNotBeNullOrWhiteSpace();

        // Force the election (Go: nc.Request(JSApiStreamLeaderStepDownT, "TEST"))
        var stepdown = await fx.StepDownStreamLeaderAsync(streamName);
        stepdown.Success.ShouldBeTrue();

        // A different peer must now lead (Go: si.Cluster.Leader != oldLeader)
        fx.GetStreamLeaderId(streamName).ShouldNotBe(previousLeader);

        // No message may be lost across the failover.
        var state = await fx.GetStreamStateAsync(streamName);
        state.Messages.ShouldBe(10UL);
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(10UL);

        // The new leader must continue the sequence where the old one stopped.
        var nextAck = await fx.PublishAsync("sd.post", "after-stepdown");
        nextAck.Seq.ShouldBe(11UL);
        nextAck.Stream.ShouldBe(streamName);
    }

    /// <summary>
    /// Go parity: TestJetStreamClusterLeaderStepdown (line 5464).
    /// A meta-leader stepdown requested via $JS.API.META.LEADER.STEPDOWN must
    /// succeed, elect a new meta-leader, bump the leadership version by one,
    /// and keep stream metadata intact.
    /// </summary>
    [Fact]
    public async Task Meta_leader_stepdown_elects_new_leader_with_incremented_version()
    {
        await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);

        // Give the meta group some state to carry across the transition.
        await fx.CreateStreamAsync("META_SD", subjects: ["meta.>"], replicas: 3);

        var before = fx.GetMetaState();
        before.ShouldNotBeNull();
        before.ClusterSize.ShouldBe(3);
        var priorLeader = before.LeaderId;
        var priorVersion = before.LeadershipVersion;

        // Request the stepdown through the API (Go: nc.Request(JSApiLeaderStepDown, nil))
        var response = await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
        response.Success.ShouldBeTrue();

        // New leader, new version (Go: cl != c.leader())
        var after = fx.GetMetaState();
        after.ShouldNotBeNull();
        after.LeaderId.ShouldNotBe(priorLeader);
        after.LeadershipVersion.ShouldBe(priorVersion + 1);

        // Stream metadata must survive the meta-leader transition.
        after.Streams.ShouldContain("META_SD");
    }

    /// <summary>
    /// Go parity: TestJetStreamClusterLeader (line 73).
    /// Two consecutive stream-leader stepdowns must each hand leadership to a
    /// different peer, and the stream must remain writable afterwards.
    /// </summary>
    [Fact]
    public async Task Consecutive_leader_elections_cycle_through_distinct_peers()
    {
        await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("CYCLE", subjects: ["cycle.>"], replicas: 3);

        var firstLeader = fx.GetStreamLeaderId("CYCLE");

        // First election.
        var firstStepdown = await fx.StepDownStreamLeaderAsync("CYCLE");
        firstStepdown.Success.ShouldBeTrue();
        var secondLeader = fx.GetStreamLeaderId("CYCLE");

        // Second election.
        var secondStepdown = await fx.StepDownStreamLeaderAsync("CYCLE");
        secondStepdown.Success.ShouldBeTrue();
        var thirdLeader = fx.GetStreamLeaderId("CYCLE");

        // Each consecutive leader must differ from its predecessor.
        secondLeader.ShouldNotBe(firstLeader);
        thirdLeader.ShouldNotBe(secondLeader);

        // After cycling, the stream must still accept writes.
        var ack = await fx.PublishAsync("cycle.verify", "still-alive");
        ack.Stream.ShouldBe("CYCLE");
        ack.Seq.ShouldBeGreaterThan(0UL);
    }
}
|
||||
|
||||
/// <summary>
/// Test fixture that wires up a JetStream cluster with meta group, stream manager,
/// consumer manager, and API router for leader failover testing.
/// All components are in-process; no network servers are started.
/// </summary>
internal sealed class LeaderFailoverFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private LeaderFailoverFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        // Publisher captures publishes directly against the stream manager.
        _publisher = new JetStreamPublisher(_streamManager);
    }

    /// <summary>
    /// Builds the full component graph for an n-node cluster. Construction is
    /// synchronous; the Task return only mirrors the async fixture convention.
    /// </summary>
    public static Task<LeaderFailoverFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var streamManager = new StreamManager(meta);
        var consumerManager = new ConsumerManager(meta);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        return Task.FromResult(new LeaderFailoverFixture(meta, streamManager, consumerManager, router));
    }

    /// <summary>
    /// Creates (or updates) a stream; throws with the API error description on failure.
    /// </summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });

        if (response.Error is not null)
            throw new InvalidOperationException(response.Error.Description);

        return Task.CompletedTask;
    }

    /// <summary>
    /// Publishes a UTF-8 payload; throws if no stream's subject filter matches.
    /// </summary>
    public Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
            return Task.FromResult(ack);

        throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");
    }

    /// <summary>
    /// Routes a stream-leader stepdown request through the API router.
    /// </summary>
    public Task<JetStreamApiResponse> StepDownStreamLeaderAsync(string stream)
    {
        var response = _router.Route(
            $"{JetStreamApiSubjects.StreamLeaderStepdown}{stream}",
            "{}"u8);
        return Task.FromResult(response);
    }

    /// <summary>
    /// Returns the current leader id for a stream's replica group, or
    /// <see cref="string.Empty"/> if the stream has no group.
    /// </summary>
    public string GetStreamLeaderId(string stream)
    {
        // The StreamManager exposes replica groups via step-down routing;
        // we also reflect the leader through the replica group directly.
        // NOTE(review): reflection into the private "_replicaGroups" field couples
        // this fixture to StreamManager internals — a public accessor would be safer.
        var field = typeof(StreamManager)
            .GetField("_replicaGroups", System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance)!;
        var groups = (System.Collections.Concurrent.ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(_streamManager)!;
        if (groups.TryGetValue(stream, out var group))
            return group.Leader.Id;
        return string.Empty;
    }

    /// <summary>Fetches the stream's current state snapshot.</summary>
    public ValueTask<ApiStreamState> GetStreamStateAsync(string stream)
        => _streamManager.GetStateAsync(stream, default);

    /// <summary>Snapshot of the meta group (leader id, version, streams), if available.</summary>
    public MetaGroupState? GetMetaState() => _streamManager.GetMetaState();

    /// <summary>
    /// Routes an arbitrary API request. For a successful meta-leader stepdown,
    /// additionally promotes this node via BecomeLeader so subsequent proposals
    /// succeed — presumably emulating the post-election state; verify against
    /// the router's stepdown semantics.
    /// </summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
    {
        var response = _router.Route(subject, Encoding.UTF8.GetBytes(payload));

        if (subject.Equals(JetStreamApiSubjects.MetaLeaderStepdown, StringComparison.Ordinal) && response.Success)
            _metaGroup.BecomeLeader();

        return Task.FromResult(response);
    }

    // Nothing to dispose: all components are in-memory.
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,463 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: JetStreamMetaGroup RAFT proposal workflow — stream create/delete,
|
||||
// consumer create/delete, leader validation, duplicate rejection,
|
||||
// ApplyEntry dispatch, inflight tracking, leader change clearing inflight,
|
||||
// GetState snapshot with consumer counts.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for JetStreamMetaGroup RAFT proposal workflow.
|
||||
/// Go reference: jetstream_cluster.go:500-2000 (processStreamAssignment,
|
||||
/// processConsumerAssignment, meta group leader logic).
|
||||
/// </summary>
|
||||
public class MetaGroupProposalTests
|
||||
{
|
||||
// ---------------------------------------------------------------
// Stream create proposal
// Go reference: jetstream_cluster.go processStreamAssignment
// ---------------------------------------------------------------

[Fact]
public async Task Stream_create_proposal_adds_stream_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "test-group", Peers = ["p1", "p2", "p3"] };

    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "ORDERS" }, raftGroup, default);

    // The proposal must materialize as an assignment bound to the same group.
    var assignment = metaGroup.GetStreamAssignment("ORDERS");
    assignment.ShouldNotBeNull();
    assignment.StreamName.ShouldBe("ORDERS");
    assignment.Group.ShouldBeSameAs(raftGroup);
}
|
||||
|
||||
[Fact]
public async Task Stream_create_proposal_increments_stream_count()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Two proposals, two streams.
    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S1" }, null, default);
    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S2" }, null, default);

    metaGroup.StreamCount.ShouldBe(2);
}
|
||||
|
||||
[Fact]
public async Task Stream_create_proposal_appears_in_state()
{
    var metaGroup = new JetStreamMetaGroup(3);

    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "EVENTS" }, null, default);

    // The state snapshot must list the stream and count one assignment.
    var snapshot = metaGroup.GetState();
    snapshot.Streams.ShouldContain("EVENTS");
    snapshot.AssignmentCount.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Stream delete proposal
// Go reference: jetstream_cluster.go processStreamDelete
// ---------------------------------------------------------------

[Fact]
public async Task Stream_delete_proposal_removes_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);
    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "DOOMED" }, null, default);

    await metaGroup.ProposeDeleteStreamValidatedAsync("DOOMED", default);

    // Assignment, count, and state listing must all reflect the removal.
    metaGroup.GetStreamAssignment("DOOMED").ShouldBeNull();
    metaGroup.StreamCount.ShouldBe(0);
    metaGroup.GetState().Streams.ShouldNotContain("DOOMED");
}
|
||||
|
||||
[Fact]
public async Task Stream_delete_with_consumers_decrements_consumer_count()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamGroup = new RaftGroup { Name = "sg", Peers = ["p1"] };
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };

    // Two consumers attached to the stream.
    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S" }, streamGroup, default);
    await metaGroup.ProposeCreateConsumerValidatedAsync("S", "C1", consumerGroup, default);
    await metaGroup.ProposeCreateConsumerValidatedAsync("S", "C2", consumerGroup, default);
    metaGroup.ConsumerCount.ShouldBe(2);

    // Deleting the stream must also drop its consumers from the count.
    await metaGroup.ProposeDeleteStreamValidatedAsync("S", default);
    metaGroup.ConsumerCount.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Consumer create/delete proposal
// Go reference: jetstream_cluster.go processConsumerAssignment/Delete
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_create_proposal_adds_consumer_to_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamGroup = new RaftGroup { Name = "sg", Peers = ["p1", "p2", "p3"] };
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };

    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "ORDERS" }, streamGroup, default);
    await metaGroup.ProposeCreateConsumerValidatedAsync("ORDERS", "PROCESSOR", consumerGroup, default);

    // The assignment must record both the consumer and its owning stream.
    var assignment = metaGroup.GetConsumerAssignment("ORDERS", "PROCESSOR");
    assignment.ShouldNotBeNull();
    assignment.ConsumerName.ShouldBe("PROCESSOR");
    assignment.StreamName.ShouldBe("ORDERS");
    metaGroup.ConsumerCount.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public async Task Consumer_delete_proposal_removes_consumer()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamGroup = new RaftGroup { Name = "sg", Peers = ["p1"] };
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };

    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S" }, streamGroup, default);
    await metaGroup.ProposeCreateConsumerValidatedAsync("S", "C1", consumerGroup, default);
    metaGroup.ConsumerCount.ShouldBe(1);

    // Deletion must clear both the assignment and the count.
    await metaGroup.ProposeDeleteConsumerValidatedAsync("S", "C1", default);
    metaGroup.GetConsumerAssignment("S", "C1").ShouldBeNull();
    metaGroup.ConsumerCount.ShouldBe(0);
}
|
||||
|
||||
[Fact]
public async Task Multiple_consumers_tracked_independently()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamGroup = new RaftGroup { Name = "sg", Peers = ["p1"] };
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };

    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "MULTI" }, streamGroup, default);
    foreach (var consumerName in new[] { "C1", "C2", "C3" })
        await metaGroup.ProposeCreateConsumerValidatedAsync("MULTI", consumerName, consumerGroup, default);

    metaGroup.ConsumerCount.ShouldBe(3);
    metaGroup.GetStreamAssignment("MULTI")!.Consumers.Count.ShouldBe(3);

    // Removing the middle consumer must not disturb its siblings.
    await metaGroup.ProposeDeleteConsumerValidatedAsync("MULTI", "C2", default);
    metaGroup.ConsumerCount.ShouldBe(2);
    metaGroup.GetConsumerAssignment("MULTI", "C2").ShouldBeNull();
    metaGroup.GetConsumerAssignment("MULTI", "C1").ShouldNotBeNull();
    metaGroup.GetConsumerAssignment("MULTI", "C3").ShouldNotBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Not-leader rejects proposals
// Go reference: jetstream_api.go:200-300 — leader check
// ---------------------------------------------------------------

[Fact]
public void Not_leader_rejects_stream_create()
{
    // selfIndex=2 but leaderIndex starts at 1, so IsLeader() is false
    var follower = new JetStreamMetaGroup(3, selfIndex: 2);

    var ex = Should.Throw<InvalidOperationException>(
        () => follower.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "FAIL" }, null, default));

    ex.Message.ShouldContain("Not the meta-group leader");
}
|
||||
|
||||
[Fact]
public void Not_leader_rejects_stream_delete()
{
    // Follower node: stream deletes must be refused.
    var follower = new JetStreamMetaGroup(3, selfIndex: 2);

    var ex = Should.Throw<InvalidOperationException>(
        () => follower.ProposeDeleteStreamValidatedAsync("S", default));

    ex.Message.ShouldContain("Not the meta-group leader");
}
|
||||
|
||||
[Fact]
public void Not_leader_rejects_consumer_create()
{
    // Follower node: consumer creates must be refused.
    var follower = new JetStreamMetaGroup(3, selfIndex: 2);
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };

    var ex = Should.Throw<InvalidOperationException>(
        () => follower.ProposeCreateConsumerValidatedAsync("S", "C1", consumerGroup, default));

    ex.Message.ShouldContain("Not the meta-group leader");
}
|
||||
|
||||
[Fact]
public void Not_leader_rejects_consumer_delete()
{
    // Follower node: consumer deletes must be refused.
    var follower = new JetStreamMetaGroup(3, selfIndex: 2);

    var ex = Should.Throw<InvalidOperationException>(
        () => follower.ProposeDeleteConsumerValidatedAsync("S", "C1", default));

    ex.Message.ShouldContain("Not the meta-group leader");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Duplicate stream name rejected (validated path)
// Go reference: jetstream_cluster.go duplicate stream check
// ---------------------------------------------------------------

[Fact]
public async Task Duplicate_stream_name_rejected_by_validated_proposal()
{
    var metaGroup = new JetStreamMetaGroup(3);

    await metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "DUP" }, null, default);

    // Re-proposing the same name must be rejected outright.
    var ex = Should.Throw<InvalidOperationException>(
        () => metaGroup.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "DUP" }, null, default));

    ex.Message.ShouldContain("already exists");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Consumer on non-existent stream rejected (validated path)
// Go reference: jetstream_cluster.go stream existence check
// ---------------------------------------------------------------

[Fact]
public void Consumer_on_nonexistent_stream_rejected_by_validated_proposal()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };

    // A consumer cannot be assigned to a stream that was never created.
    var ex = Should.Throw<InvalidOperationException>(
        () => metaGroup.ProposeCreateConsumerValidatedAsync("MISSING", "C1", consumerGroup, default));

    ex.Message.ShouldContain("not found");
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyEntry dispatch
// Go reference: jetstream_cluster.go RAFT apply for meta group
// ---------------------------------------------------------------

[Fact]
public void ApplyEntry_stream_create_adds_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "APPLIED", Peers = ["p1"] };

    // Applying a replicated StreamCreate entry must register the assignment.
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "APPLIED", group: raftGroup);

    metaGroup.GetStreamAssignment("APPLIED").ShouldNotBeNull();
    metaGroup.StreamCount.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public void ApplyEntry_stream_delete_removes_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "TEMP");

    // A StreamDelete entry must undo the earlier create.
    metaGroup.ApplyEntry(MetaEntryType.StreamDelete, "TEMP");

    metaGroup.GetStreamAssignment("TEMP").ShouldBeNull();
}
|
||||
|
||||
[Fact]
public void ApplyEntry_consumer_create_adds_consumer()
{
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "S");

    // A ConsumerCreate entry scoped to stream "S" must register the consumer.
    metaGroup.ApplyEntry(MetaEntryType.ConsumerCreate, "C1", streamName: "S");

    metaGroup.GetConsumerAssignment("S", "C1").ShouldNotBeNull();
    metaGroup.ConsumerCount.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public void ApplyEntry_consumer_delete_removes_consumer()
{
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "S");
    metaGroup.ApplyEntry(MetaEntryType.ConsumerCreate, "C1", streamName: "S");

    // A ConsumerDelete entry must undo the earlier consumer create.
    metaGroup.ApplyEntry(MetaEntryType.ConsumerDelete, "C1", streamName: "S");

    metaGroup.GetConsumerAssignment("S", "C1").ShouldBeNull();
    metaGroup.ConsumerCount.ShouldBe(0);
}
|
||||
|
||||
[Fact]
public void ApplyEntry_consumer_without_stream_name_throws()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // A consumer entry is meaningless without its owning stream name.
    Should.Throw<ArgumentNullException>(
        () => metaGroup.ApplyEntry(MetaEntryType.ConsumerCreate, "C1"));
}
|
||||
|
||||
// ---------------------------------------------------------------
// Inflight tracking
// Go reference: jetstream_cluster.go inflight tracking
// ---------------------------------------------------------------

[Fact]
public async Task Inflight_cleared_after_stream_create()
{
    var metaGroup = new JetStreamMetaGroup(3);

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "INF" }, default);

    // No stream proposal may remain inflight once the await completes.
    metaGroup.InflightStreamCount.ShouldBe(0);
}
|
||||
|
||||
[Fact]
public async Task Inflight_cleared_after_consumer_create()
{
    var metaGroup = new JetStreamMetaGroup(3);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, default);

    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };
    await metaGroup.ProposeCreateConsumerAsync("S", "C1", consumerGroup, default);

    // No consumer proposal may remain inflight once the await completes.
    metaGroup.InflightConsumerCount.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Leader change clears inflight
// Go reference: jetstream_cluster.go leader stepdown
// ---------------------------------------------------------------

[Fact]
public void Leader_change_clears_inflight()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Inflight is always 0 after a synchronous proposal, but the StepDown path
    // clearing it is the semantic this test pins down.
    metaGroup.StepDown();

    metaGroup.InflightStreamCount.ShouldBe(0);
    metaGroup.InflightConsumerCount.ShouldBe(0);
}
|
||||
|
||||
[Fact]
public void StepDown_increments_leadership_version()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var initialVersion = metaGroup.GetState().LeadershipVersion;

    metaGroup.StepDown();

    // Every stepdown must advance the leadership version.
    metaGroup.GetState().LeadershipVersion.ShouldBeGreaterThan(initialVersion);
}
|
||||
|
||||
// ---------------------------------------------------------------
// GetState returns correct snapshot
// Go reference: jetstream_cluster.go meta group state
// ---------------------------------------------------------------

[Fact]
public async Task GetState_returns_correct_snapshot()
{
    var metaGroup = new JetStreamMetaGroup(5);

    // Two streams, three consumers spread across them.
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "ALPHA" }, default);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "BETA" }, default);
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };
    await metaGroup.ProposeCreateConsumerAsync("ALPHA", "C1", consumerGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("ALPHA", "C2", consumerGroup, default);
    await metaGroup.ProposeCreateConsumerAsync("BETA", "C1", consumerGroup, default);

    var snapshot = metaGroup.GetState();

    snapshot.ClusterSize.ShouldBe(5);
    snapshot.Streams.Count.ShouldBe(2);
    snapshot.AssignmentCount.ShouldBe(2);
    snapshot.ConsumerCount.ShouldBe(3);
    snapshot.LeaderId.ShouldBe("meta-1");
}
|
||||
|
||||
[Fact]
public async Task GetState_streams_are_sorted()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Created out of order; the snapshot must report them alphabetically.
    foreach (var name in new[] { "ZULU", "ALPHA", "MIKE" })
    {
        await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = name }, default);
    }

    var streams = metaGroup.GetState().Streams;
    streams[0].ShouldBe("ALPHA");
    streams[1].ShouldBe("MIKE");
    streams[2].ShouldBe("ZULU");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetAllAssignments
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task GetAllAssignments_returns_all_streams()
{
    var metaGroup = new JetStreamMetaGroup(3);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "A" }, default);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "B" }, default);

    // Every proposed stream shows up in the full assignment listing.
    metaGroup.GetAllAssignments().Count.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetConsumerAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void GetConsumerAssignment_returns_null_for_nonexistent_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Lookup against a stream that was never created yields null.
    metaGroup.GetConsumerAssignment("MISSING", "C1").ShouldBeNull();
}
|
||||
|
||||
[Fact]
public async Task GetConsumerAssignment_returns_null_for_nonexistent_consumer()
{
    var metaGroup = new JetStreamMetaGroup(3);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, default);

    // The stream exists but the consumer was never assigned.
    metaGroup.GetConsumerAssignment("S", "MISSING").ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Idempotent backward-compatible paths
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Duplicate_stream_create_is_idempotent_via_unvalidated_path()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Propose the same stream twice; the second proposal is a no-op.
    for (var attempt = 0; attempt < 2; attempt++)
    {
        await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, default);
    }

    metaGroup.StreamCount.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public async Task Consumer_on_nonexistent_stream_is_silent_via_unvalidated_path()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var consumerGroup = new RaftGroup { Name = "cg", Peers = ["p1"] };

    // Proposing a consumer against a missing stream must not throw...
    await metaGroup.ProposeCreateConsumerAsync("MISSING", "C1", consumerGroup, default);

    // ...and must not create the stream as a side effect.
    metaGroup.GetStreamAssignment("MISSING").ShouldBeNull();
}
|
||||
}
|
||||
@@ -0,0 +1,205 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for MetaSnapshotCodec: encode/decode round-trip, S2 compression,
/// version-header validation and truncation guards.
/// Go reference: jetstream_cluster.go:2075-2145.
/// </summary>
public class MetaSnapshotCodecTests
{
    [Fact]
    public void Encode_decode_round_trips()
    {
        // Go reference: jetstream_cluster.go encodeMetaSnapshot/decodeMetaSnapshot round-trip
        var streamA = new StreamAssignment
        {
            StreamName = "stream-A",
            Group = new RaftGroup { Name = "rg-a", Peers = ["n1", "n2", "n3"] },
            ConfigJson = """{"subjects":["foo.>"]}""",
        };
        var streamB = new StreamAssignment
        {
            StreamName = "stream-B",
            Group = new RaftGroup { Name = "rg-b", Peers = ["n1", "n2"] },
            ConfigJson = """{"subjects":["bar.>"]}""",
        };

        // Attach a consumer to stream-B so consumer round-tripping is covered too.
        streamB.Consumers["con-1"] = new ConsumerAssignment
        {
            ConsumerName = "con-1",
            StreamName = "stream-B",
            Group = new RaftGroup { Name = "rg-c1", Peers = ["n1", "n2"] },
        };

        var input = new Dictionary<string, StreamAssignment>
        {
            ["stream-A"] = streamA,
            ["stream-B"] = streamB,
        };

        var payload = MetaSnapshotCodec.Encode(input);
        payload.ShouldNotBeEmpty();

        var restored = MetaSnapshotCodec.Decode(payload);
        restored.Count.ShouldBe(2);
        restored["stream-A"].StreamName.ShouldBe("stream-A");
        restored["stream-A"].Group.Peers.Count.ShouldBe(3);
        restored["stream-B"].Consumers.Count.ShouldBe(1);
        restored["stream-B"].Consumers["con-1"].ConsumerName.ShouldBe("con-1");
    }

    [Fact]
    public void Encoded_snapshot_is_compressed()
    {
        // Go reference: jetstream_cluster.go S2 compression of meta snapshots
        var input = new Dictionary<string, StreamAssignment>();
        foreach (var i in Enumerable.Range(0, 100))
        {
            var streamName = $"stream-{i}";
            input[streamName] = new StreamAssignment
            {
                StreamName = streamName,
                Group = new RaftGroup { Name = $"rg-{i}", Peers = ["n1", "n2", "n3"] },
                ConfigJson = """{"subjects":["test.>"]}""",
            };
        }

        var compressed = MetaSnapshotCodec.Encode(input);
        var rawJson = System.Text.Json.JsonSerializer.SerializeToUtf8Bytes(input);

        // S2 compressed + 2-byte version header should be smaller than raw JSON.
        compressed.Length.ShouldBeLessThan(rawJson.Length);
    }

    [Fact]
    public void Empty_snapshot_round_trips()
    {
        // Go reference: jetstream_cluster.go decodeMetaSnapshot handles empty map
        var payload = MetaSnapshotCodec.Encode(new Dictionary<string, StreamAssignment>());

        MetaSnapshotCodec.Decode(payload).ShouldBeEmpty();
    }

    [Fact]
    public void Versioned_format_rejects_unknown_version()
    {
        // Go reference: jetstream_cluster.go version check in decodeMetaSnapshot
        byte[] unknownVersion = [0xFF, 0xFF, 0, 0]; // version 65535

        Should.Throw<InvalidOperationException>(() => MetaSnapshotCodec.Decode(unknownVersion));
    }

    [Fact]
    public void Decode_rejects_too_short_input()
    {
        // Go reference: jetstream_cluster.go guard against truncated snapshot
        Should.Throw<InvalidOperationException>(() => MetaSnapshotCodec.Decode([0x01]));
    }

    [Fact]
    public void Encoded_snapshot_begins_with_version_one_header()
    {
        // Go reference: jetstream_cluster.go:2075 — versioned header allows future format evolution
        var single = new Dictionary<string, StreamAssignment>
        {
            ["s1"] = new StreamAssignment
            {
                StreamName = "s1",
                Group = new RaftGroup { Name = "g1", Peers = ["n1"] },
            },
        };

        var payload = MetaSnapshotCodec.Encode(single);

        // Little-endian version 1: bytes [0x01, 0x00]
        payload[0].ShouldBe((byte)0x01);
        payload[1].ShouldBe((byte)0x00);
    }

    [Fact]
    public void Round_trip_preserves_all_stream_assignment_fields()
    {
        // Go reference: jetstream_cluster.go streamAssignment struct fields preserved across snapshot
        var createdAt = new DateTime(2025, 6, 15, 12, 0, 0, DateTimeKind.Utc);
        var original = new StreamAssignment
        {
            StreamName = "my-stream",
            Group = new RaftGroup
            {
                Name = "rg-main",
                Peers = ["peer-a", "peer-b", "peer-c"],
                StorageType = "memory",
                Cluster = "cluster-east",
                Preferred = "peer-a",
            },
            Created = createdAt,
            ConfigJson = """{"subjects":["events.>"],"storage":"memory"}""",
            SyncSubject = "$JS.SYNC.my-stream",
            Responded = true,
            Recovering = false,
            Reassigning = true,
        };
        var input = new Dictionary<string, StreamAssignment> { ["my-stream"] = original };

        var restored = MetaSnapshotCodec.Decode(MetaSnapshotCodec.Encode(input))["my-stream"];

        restored.StreamName.ShouldBe("my-stream");
        restored.Group.Name.ShouldBe("rg-main");
        restored.Group.Peers.ShouldBe(["peer-a", "peer-b", "peer-c"]);
        restored.Group.StorageType.ShouldBe("memory");
        restored.Group.Cluster.ShouldBe("cluster-east");
        restored.Group.Preferred.ShouldBe("peer-a");
        restored.Created.ShouldBe(createdAt);
        restored.ConfigJson.ShouldBe("""{"subjects":["events.>"],"storage":"memory"}""");
        restored.SyncSubject.ShouldBe("$JS.SYNC.my-stream");
        restored.Responded.ShouldBeTrue();
        restored.Recovering.ShouldBeFalse();
        restored.Reassigning.ShouldBeTrue();
    }

    [Fact]
    public void Round_trip_preserves_multiple_consumers_per_stream()
    {
        // Go reference: jetstream_cluster.go consumerAssignment map restored in snapshot
        var stream = new StreamAssignment
        {
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-mc", Peers = ["n1", "n2", "n3"] },
        };
        stream.Consumers["consumer-alpha"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-alpha",
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-alpha", Peers = ["n1"] },
            ConfigJson = """{"deliver_subject":"out.alpha"}""",
            Responded = true,
        };
        stream.Consumers["consumer-beta"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-beta",
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-beta", Peers = ["n2", "n3"] },
            Recovering = true,
        };
        var input = new Dictionary<string, StreamAssignment> { ["multi-consumer-stream"] = stream };

        var restoredStream = MetaSnapshotCodec.Decode(MetaSnapshotCodec.Encode(input))["multi-consumer-stream"];

        restoredStream.Consumers.Count.ShouldBe(2);

        var alpha = restoredStream.Consumers["consumer-alpha"];
        alpha.ConsumerName.ShouldBe("consumer-alpha");
        alpha.StreamName.ShouldBe("multi-consumer-stream");
        alpha.Group.Name.ShouldBe("rg-alpha");
        alpha.ConfigJson.ShouldBe("""{"deliver_subject":"out.alpha"}""");
        alpha.Responded.ShouldBeTrue();

        var beta = restoredStream.Consumers["consumer-beta"];
        beta.ConsumerName.ShouldBe("consumer-beta");
        beta.Group.Peers.Count.ShouldBe(2);
        beta.Recovering.ShouldBeTrue();
    }
}
|
||||
@@ -0,0 +1,433 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:2290-2439
|
||||
// Covers: ProcessAddPeer, ProcessRemovePeer, RemovePeerFromStream, RemapStreamAssignment —
|
||||
// peer-driven stream reassignment in the JetStreamMetaGroup.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for JetStreamMetaGroup peer management and stream reassignment.
/// Go reference: jetstream_cluster.go:2290-2439 (processAddPeer, processRemovePeer,
/// removePeerFromStreamLocked, remapStreamAssignment).
/// </summary>
public class PeerManagementTests
{
    /// <summary>Builds a stream assignment whose raft group holds the given peers.</summary>
    private static StreamAssignment NewAssignment(string streamName, string groupName, params string[] peers)
        => new()
        {
            StreamName = streamName,
            Group = new RaftGroup { Name = groupName, Peers = [.. peers] },
        };

    // ---------------------------------------------------------------
    // ProcessAddPeer — peer registration
    // Go reference: jetstream_cluster.go:2290 processAddPeer
    // ---------------------------------------------------------------

    [Fact]
    public void ProcessAddPeer_registers_new_peer()
    {
        // Go reference: jetstream_cluster.go:2290 processAddPeer — peer is tracked
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessAddPeer("peer-1");

        metaGroup.GetKnownPeers().ShouldContain("peer-1");
    }

    [Fact]
    public void ProcessAddPeer_registers_multiple_peers_independently()
    {
        // Go reference: jetstream_cluster.go:2290 — each peer is independently tracked
        var metaGroup = new JetStreamMetaGroup(3);

        foreach (var peer in new[] { "peer-1", "peer-2", "peer-3" })
        {
            metaGroup.ProcessAddPeer(peer);
        }

        var registered = metaGroup.GetKnownPeers();
        registered.Count.ShouldBe(3);
        registered.ShouldContain("peer-1");
        registered.ShouldContain("peer-2");
        registered.ShouldContain("peer-3");
    }

    [Fact]
    public void ProcessAddPeer_duplicate_add_is_idempotent()
    {
        // AddKnownPeer uses a HashSet so duplicates do not inflate the count.
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessAddPeer("peer-1");
        metaGroup.ProcessAddPeer("peer-1");

        metaGroup.GetKnownPeers().Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // ProcessAddPeer — under-replication detection
    // Go reference: jetstream_cluster.go:2311-2339 missingPeers + peer append
    // ---------------------------------------------------------------

    [Fact]
    public void ProcessAddPeer_triggers_rereplication_of_underreplicated_stream()
    {
        // Go reference: jetstream_cluster.go:2315 sa.missingPeers() — adds new peer to group
        var metaGroup = new JetStreamMetaGroup(3); // leader by default (selfIndex == leaderIndex == 1)

        // Stream assigned with 2 peers but DesiredReplicas == 3 → under-replicated.
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "ORDERS",
            Group = new RaftGroup
            {
                Name = "orders-rg",
                Peers = ["peer-1", "peer-2"],
                DesiredReplicas = 3,
            },
        });

        metaGroup.ProcessAddPeer("peer-3");

        var after = metaGroup.GetStreamAssignment("ORDERS")!;
        after.Group.Peers.ShouldContain("peer-3");
        after.Group.Peers.Count.ShouldBe(3);
    }

    [Fact]
    public void ProcessAddPeer_does_not_add_peer_to_fully_replicated_stream()
    {
        // Go reference: jetstream_cluster.go:2315 missingPeers() returns false when at desired count
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "EVENTS",
            Group = new RaftGroup
            {
                Name = "events-rg",
                Peers = ["peer-1", "peer-2", "peer-3"],
                DesiredReplicas = 3,
            },
        });

        metaGroup.ProcessAddPeer("peer-4");

        var after = metaGroup.GetStreamAssignment("EVENTS")!;
        after.Group.Peers.Count.ShouldBe(3);
        after.Group.Peers.ShouldNotContain("peer-4");
    }

    [Fact]
    public void ProcessAddPeer_does_not_add_peer_already_in_group()
    {
        // Peer already a member — should not be added twice.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "LOGS",
            Group = new RaftGroup
            {
                Name = "logs-rg",
                Peers = ["peer-1"],
                DesiredReplicas = 2,
            },
        });

        metaGroup.ProcessAddPeer("peer-1");

        metaGroup.GetStreamAssignment("LOGS")!.Group.Peers.Count.ShouldBe(1);
    }

    [Fact]
    public void ProcessAddPeer_non_leader_does_not_modify_assignments()
    {
        // Go reference: jetstream_cluster.go:2301 — only leader triggers re-assignment
        var metaGroup = new JetStreamMetaGroup(3, selfIndex: 2); // not leader
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "S",
            Group = new RaftGroup
            {
                Name = "rg",
                Peers = ["peer-1"],
                DesiredReplicas = 3,
            },
        });

        metaGroup.ProcessAddPeer("peer-2");

        // Peer is registered but the stream is untouched since we are not leader.
        metaGroup.GetKnownPeers().ShouldContain("peer-2");
        metaGroup.GetStreamAssignment("S")!.Group.Peers.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // ProcessRemovePeer — stream reassignment
    // Go reference: jetstream_cluster.go:2342 processRemovePeer
    // ---------------------------------------------------------------

    [Fact]
    public void ProcessRemovePeer_reassigns_streams_away_from_peer()
    {
        // Go reference: jetstream_cluster.go:2385-2392 — streams with removed peer get remapped
        var metaGroup = new JetStreamMetaGroup(3);
        foreach (var peer in new[] { "peer-1", "peer-2", "peer-3" })
        {
            metaGroup.AddKnownPeer(peer);
        }
        metaGroup.AddStreamAssignment(NewAssignment("ORDERS", "rg", "peer-1", "peer-2"));

        metaGroup.ProcessRemovePeer("peer-1");

        metaGroup.GetStreamAssignment("ORDERS")!.Group.Peers.ShouldNotContain("peer-1");
    }

    [Fact]
    public void ProcessRemovePeer_removes_peer_from_known_peers()
    {
        // Go reference: jetstream_cluster.go:2342 — peer is de-registered
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddKnownPeer("peer-1");

        metaGroup.ProcessRemovePeer("peer-1");

        metaGroup.GetKnownPeers().ShouldNotContain("peer-1");
    }

    [Fact]
    public void ProcessRemovePeer_unknown_peer_is_noop()
    {
        // Go reference: jetstream_cluster.go:2342 — no crash when peer not known
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(NewAssignment("S", "rg", "peer-2", "peer-3"));

        // Should not throw.
        metaGroup.ProcessRemovePeer("peer-99");

        // Stream unaffected.
        var peers = metaGroup.GetStreamAssignment("S")!.Group.Peers;
        peers.ShouldContain("peer-2");
        peers.ShouldContain("peer-3");
    }

    [Fact]
    public void ProcessRemovePeer_non_leader_only_deregisters_peer()
    {
        // Go reference: jetstream_cluster.go:2378 — non-leader skips re-assignment
        var metaGroup = new JetStreamMetaGroup(3, selfIndex: 2);
        metaGroup.AddKnownPeer("peer-1");
        metaGroup.AddKnownPeer("peer-2");
        metaGroup.AddStreamAssignment(NewAssignment("S", "rg", "peer-1", "peer-2"));

        metaGroup.ProcessRemovePeer("peer-1");

        // Peer removed from the known set...
        metaGroup.GetKnownPeers().ShouldNotContain("peer-1");

        // ...but stream assignments are NOT modified by a non-leader.
        metaGroup.GetStreamAssignment("S")!.Group.Peers.ShouldContain("peer-1");
    }

    // ---------------------------------------------------------------
    // RemovePeerFromStream
    // Go reference: jetstream_cluster.go:2403 removePeerFromStreamLocked
    // ---------------------------------------------------------------

    [Fact]
    public void RemovePeerFromStream_removes_peer_from_group()
    {
        // Go reference: jetstream_cluster.go:2404 — peer is removed from stream group
        var metaGroup = new JetStreamMetaGroup(3);
        foreach (var peer in new[] { "peer-1", "peer-2", "peer-3" })
        {
            metaGroup.AddKnownPeer(peer);
        }
        metaGroup.AddStreamAssignment(NewAssignment("EVENTS", "rg", "peer-1", "peer-2", "peer-3"));

        metaGroup.RemovePeerFromStream("EVENTS", "peer-2");

        metaGroup.GetStreamAssignment("EVENTS")!.Group.Peers.ShouldNotContain("peer-2");
    }

    [Fact]
    public void RemovePeerFromStream_returns_false_for_nonexistent_stream()
    {
        // RemovePeerFromStream silently returns false when the stream is unknown.
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.RemovePeerFromStream("GHOST", "peer-1").ShouldBeFalse();
    }

    [Fact]
    public void RemovePeerFromStream_returns_false_when_peer_not_in_group()
    {
        // Peer is not a member of the stream's group.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(NewAssignment("S", "rg", "peer-1", "peer-2"));

        metaGroup.RemovePeerFromStream("S", "peer-99").ShouldBeFalse();
    }

    [Fact]
    public void RemovePeerFromStream_replaces_peer_when_replacement_available()
    {
        // Go reference: jetstream_cluster.go:7088-7094 — replacement peer picked from available pool
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddKnownPeer("peer-1");
        metaGroup.AddKnownPeer("peer-2");
        metaGroup.AddKnownPeer("peer-3"); // replacement candidate
        metaGroup.AddStreamAssignment(NewAssignment("ORDERS", "rg", "peer-1", "peer-2"));

        var replaced = metaGroup.RemovePeerFromStream("ORDERS", "peer-1");

        replaced.ShouldBeTrue();
        var after = metaGroup.GetStreamAssignment("ORDERS")!;
        after.Group.Peers.ShouldNotContain("peer-1");
        after.Group.Peers.Count.ShouldBe(2);
        after.Group.Peers.ShouldContain("peer-3");
    }

    [Fact]
    public void RemovePeerFromStream_shrinks_group_when_no_replacement_available()
    {
        // Go reference: jetstream_cluster.go:7102-7110 — R>1 bare removal fallback
        var metaGroup = new JetStreamMetaGroup(3);
        // Only peer-1 and peer-2 are known; peer-1 is in the group; no replacement.
        metaGroup.AddKnownPeer("peer-1");
        metaGroup.AddKnownPeer("peer-2");
        metaGroup.AddStreamAssignment(NewAssignment("LOGS", "rg", "peer-1", "peer-2"));

        var replaced = metaGroup.RemovePeerFromStream("LOGS", "peer-1");

        // No replacement found → group shrinks.
        replaced.ShouldBeFalse();
        var after = metaGroup.GetStreamAssignment("LOGS")!;
        after.Group.Peers.ShouldNotContain("peer-1");
        after.Group.Peers.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // RemapStreamAssignment
    // Go reference: jetstream_cluster.go:7077 remapStreamAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void RemapStreamAssignment_selects_new_peers()
    {
        // Go reference: jetstream_cluster.go:7077 — retain existing minus removed, add candidate
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = NewAssignment("EVENTS", "rg", "peer-1", "peer-2", "peer-3");
        metaGroup.AddStreamAssignment(assignment);

        var candidates = new List<string> { "peer-1", "peer-2", "peer-3", "peer-4" };
        var remapped = metaGroup.RemapStreamAssignment(assignment, candidates, removePeer: "peer-3");

        remapped.ShouldBeTrue();
        var after = metaGroup.GetStreamAssignment("EVENTS")!;
        after.Group.Peers.ShouldNotContain("peer-3");
        after.Group.Peers.Count.ShouldBe(3);
        after.Group.Peers.ShouldContain("peer-4");
    }

    [Fact]
    public void RemapStreamAssignment_retains_existing_peers()
    {
        // Retained peers (not removed) remain in the new assignment.
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = NewAssignment("S", "rg", "peer-1", "peer-2", "peer-3");
        metaGroup.AddStreamAssignment(assignment);

        var candidates = new List<string> { "peer-1", "peer-2", "peer-3", "peer-4" };
        metaGroup.RemapStreamAssignment(assignment, candidates, removePeer: "peer-1");

        var after = metaGroup.GetStreamAssignment("S")!;
        after.Group.Peers.ShouldContain("peer-2");
        after.Group.Peers.ShouldContain("peer-3");
    }

    [Fact]
    public void RemapStreamAssignment_returns_false_when_no_replacement()
    {
        // Go reference: jetstream_cluster.go:7098-7110 — no placement, R1 returns false
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = NewAssignment("R1", "rg", "peer-1");
        metaGroup.AddStreamAssignment(assignment);

        // Only peer-1 is available and it is the removed one → nothing to add.
        var candidates = new List<string> { "peer-1" };
        var remapped = metaGroup.RemapStreamAssignment(assignment, candidates, removePeer: "peer-1");

        remapped.ShouldBeFalse();
    }

    [Fact]
    public void RemapStreamAssignment_empty_available_shrinks_group()
    {
        // When the available-peer list is empty, the group simply loses the removed peer.
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = NewAssignment("S", "rg", "peer-1", "peer-2");
        metaGroup.AddStreamAssignment(assignment);

        var remapped = metaGroup.RemapStreamAssignment(assignment, [], removePeer: "peer-1");

        remapped.ShouldBeFalse();
        metaGroup.GetStreamAssignment("S")!.Group.Peers.ShouldNotContain("peer-1");
    }

    // ---------------------------------------------------------------
    // AddKnownPeer / RemoveKnownPeer
    // ---------------------------------------------------------------

    [Fact]
    public void AddKnownPeer_and_RemoveKnownPeer_are_consistent()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.AddKnownPeer("p1");
        metaGroup.AddKnownPeer("p2");
        metaGroup.RemoveKnownPeer("p1");

        var known = metaGroup.GetKnownPeers();
        known.ShouldNotContain("p1");
        known.ShouldContain("p2");
    }

    [Fact]
    public void RemoveKnownPeer_unknown_peer_is_noop()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddKnownPeer("p1");

        // Should not throw.
        metaGroup.RemoveKnownPeer("p99");

        metaGroup.GetKnownPeers().Count.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,309 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:7212 selectPeerGroup
|
||||
// Covers: PlacementEngine peer selection with cluster affinity, tag filtering,
|
||||
// exclude-tag filtering, unavailable peer exclusion, storage-based ordering,
|
||||
// single replica selection, and combined policy filtering.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for PlacementEngine topology-aware peer selection.
|
||||
/// Go reference: jetstream_cluster.go:7212 selectPeerGroup.
|
||||
/// </summary>
|
||||
public class PlacementEngineTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Basic selection with enough peers
|
||||
// Go reference: jetstream_cluster.go selectPeerGroup base case
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Basic_selection_with_enough_peers()
|
||||
{
|
||||
var peers = CreatePeers(5);
|
||||
|
||||
var group = PlacementEngine.SelectPeerGroup("test-group", 3, peers);
|
||||
|
||||
group.Name.ShouldBe("test-group");
|
||||
group.Peers.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Selection_returns_exact_replica_count()
|
||||
{
|
||||
var peers = CreatePeers(10);
|
||||
|
||||
var group = PlacementEngine.SelectPeerGroup("exact", 5, peers);
|
||||
|
||||
group.Peers.Count.ShouldBe(5);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Insufficient peers throws
|
||||
// Go reference: jetstream_cluster.go not enough peers error
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Insufficient_peers_throws()
|
||||
{
|
||||
var peers = CreatePeers(2);
|
||||
|
||||
Should.Throw<InvalidOperationException>(
|
||||
() => PlacementEngine.SelectPeerGroup("fail", 5, peers));
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Zero_peers_with_replicas_throws()
|
||||
{
|
||||
var group = Should.Throw<InvalidOperationException>(
|
||||
() => PlacementEngine.SelectPeerGroup("empty", 1, []));
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Cluster affinity filtering
|
||||
// Go reference: jetstream_cluster.go cluster affinity in placement
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Cluster_affinity_selects_only_matching_cluster()
|
||||
{
|
||||
var peers = new List<PeerInfo>
|
||||
{
|
||||
new() { PeerId = "p1", Cluster = "us-east" },
|
||||
new() { PeerId = "p2", Cluster = "us-west" },
|
||||
new() { PeerId = "p3", Cluster = "us-east" },
|
||||
new() { PeerId = "p4", Cluster = "us-east" },
|
||||
new() { PeerId = "p5", Cluster = "eu-west" },
|
||||
};
|
||||
var policy = new PlacementPolicy { Cluster = "us-east" };
|
||||
|
||||
var group = PlacementEngine.SelectPeerGroup("cluster", 3, peers, policy);
|
||||
|
||||
group.Peers.Count.ShouldBe(3);
|
||||
group.Peers.ShouldAllBe(id => id.StartsWith("p1") || id.StartsWith("p3") || id.StartsWith("p4"));
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Cluster_affinity_is_case_insensitive()
|
||||
{
|
||||
var peers = new List<PeerInfo>
|
||||
{
|
||||
new() { PeerId = "p1", Cluster = "US-East" },
|
||||
new() { PeerId = "p2", Cluster = "us-east" },
|
||||
};
|
||||
var policy = new PlacementPolicy { Cluster = "us-east" };
|
||||
|
||||
var group = PlacementEngine.SelectPeerGroup("ci", 2, peers, policy);
|
||||
|
||||
group.Peers.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Cluster_affinity_with_insufficient_matching_throws()
|
||||
{
|
||||
var peers = new List<PeerInfo>
|
||||
{
|
||||
new() { PeerId = "p1", Cluster = "us-east" },
|
||||
new() { PeerId = "p2", Cluster = "us-west" },
|
||||
};
|
||||
var policy = new PlacementPolicy { Cluster = "us-east" };
|
||||
|
||||
Should.Throw<InvalidOperationException>(
|
||||
() => PlacementEngine.SelectPeerGroup("fail", 2, peers, policy));
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Tag filtering (include and exclude)
|
||||
// Go reference: jetstream_cluster.go tag-based filtering
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Tag_filtering_selects_peers_with_all_required_tags()
{
    // Go reference: jetstream_cluster.go tag-based filtering — a peer must
    // carry every required tag; p2 (missing "fast") and p4 (hdd) are excluded.
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "p1", Tags = ["ssd", "fast"] },
        new() { PeerId = "p2", Tags = ["ssd"] },
        new() { PeerId = "p3", Tags = ["ssd", "fast", "gpu"] },
        new() { PeerId = "p4", Tags = ["hdd"] },
    };
    var requireTags = new PlacementPolicy { Tags = ["ssd", "fast"] };

    var selected = PlacementEngine.SelectPeerGroup("tags", 2, candidates, requireTags);

    selected.Peers.Count.ShouldBe(2);
    selected.Peers.ShouldContain("p1");
    selected.Peers.ShouldContain("p3");
}
|
||||
|
||||
[Fact]
public void Exclude_tag_filtering_removes_peers_with_excluded_tags()
{
    // A peer carrying any excluded tag is dropped from consideration;
    // here p2 is filtered out by its "deprecated" tag.
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "p1", Tags = ["ssd"] },
        new() { PeerId = "p2", Tags = ["ssd", "deprecated"] },
        new() { PeerId = "p3", Tags = ["ssd"] },
    };
    var exclusions = new PlacementPolicy { ExcludeTags = ["deprecated"] };

    var selected = PlacementEngine.SelectPeerGroup("excl", 2, candidates, exclusions);

    selected.Peers.Count.ShouldBe(2);
    selected.Peers.ShouldNotContain("p2");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Unavailable peers excluded
|
||||
// Go reference: jetstream_cluster.go offline peer filter
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Unavailable_peers_are_excluded()
{
    // Go reference: jetstream_cluster.go offline peer filter — offline peers
    // (p2, p4) are never placed, leaving exactly the two online ones.
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "p1", Available = true },
        new() { PeerId = "p2", Available = false },
        new() { PeerId = "p3", Available = true },
        new() { PeerId = "p4", Available = false },
    };

    var selected = PlacementEngine.SelectPeerGroup("avail", 2, candidates);

    selected.Peers.Count.ShouldBe(2);
    selected.Peers.ShouldContain("p1");
    selected.Peers.ShouldContain("p3");
}
|
||||
|
||||
[Fact]
public void All_unavailable_throws()
{
    // With every peer offline, even a single-replica request cannot be
    // satisfied and the engine must throw.
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "p1", Available = false },
        new() { PeerId = "p2", Available = false },
    };

    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("fail", 1, candidates));
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Peers ordered by available storage
|
||||
// Go reference: jetstream_cluster.go storage-based ordering
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Peers_ordered_by_available_storage_descending()
{
    // Go reference: jetstream_cluster.go storage-based ordering — candidates
    // are ranked by free storage, most first.
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "low", AvailableStorage = 100 },
        new() { PeerId = "high", AvailableStorage = 10000 },
        new() { PeerId = "mid", AvailableStorage = 5000 },
    };

    var selected = PlacementEngine.SelectPeerGroup("storage", 2, candidates);

    // Top two by free storage, in descending order.
    selected.Peers[0].ShouldBe("high");
    selected.Peers[1].ShouldBe("mid");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Single replica selection
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Single_replica_selection()
{
    // R=1 placement picks exactly one peer from the candidate pool.
    var candidates = CreatePeers(5);

    var selected = PlacementEngine.SelectPeerGroup("single", 1, candidates);

    selected.Peers.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Policy with all filters combined
|
||||
// Go reference: jetstream_cluster.go combined placement policy
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Combined_policy_filters_applied_together()
{
    // Go reference: jetstream_cluster.go combined placement policy —
    // cluster affinity, required tags, excluded tags, and availability are
    // all applied before storage-ordered selection.
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "p1", Cluster = "us-east", Tags = ["ssd"], Available = true, AvailableStorage = 5000 },
        new() { PeerId = "p2", Cluster = "us-east", Tags = ["ssd", "old"], Available = true, AvailableStorage = 8000 },
        new() { PeerId = "p3", Cluster = "us-west", Tags = ["ssd"], Available = true, AvailableStorage = 9000 },
        new() { PeerId = "p4", Cluster = "us-east", Tags = ["ssd"], Available = false, AvailableStorage = 10000 },
        new() { PeerId = "p5", Cluster = "us-east", Tags = ["ssd"], Available = true, AvailableStorage = 7000 },
        new() { PeerId = "p6", Cluster = "us-east", Tags = ["hdd"], Available = true, AvailableStorage = 12000 },
    };
    var policy = new PlacementPolicy
    {
        Cluster = "us-east",
        Tags = ["ssd"],
        ExcludeTags = ["old"],
    };

    // Survivors: p1 (5000) and p5 (7000). p2 carries the excluded "old" tag,
    // p3 is in the wrong cluster, p4 is offline, and p6 lacks the "ssd" tag.
    var selected = PlacementEngine.SelectPeerGroup("combined", 2, candidates, policy);

    selected.Peers.Count.ShouldBe(2);
    // Storage-descending order: p5 (7000) before p1 (5000).
    selected.Peers[0].ShouldBe("p5");
    selected.Peers[1].ShouldBe("p1");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Null policy is allowed (no filtering)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Null_policy_selects_without_filtering()
{
    // A null policy means "no constraints": every healthy peer is eligible.
    var candidates = CreatePeers(3);

    var selected = PlacementEngine.SelectPeerGroup("nofilter", 3, candidates, policy: null);

    selected.Peers.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Empty policy fields are ignored
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Empty_policy_cluster_is_ignored()
{
    // An empty Cluster string on the policy is treated as "no cluster
    // constraint", so peers from any cluster remain eligible.
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "p1", Cluster = "us-east" },
        new() { PeerId = "p2", Cluster = "us-west" },
    };
    var blankCluster = new PlacementPolicy { Cluster = "" };

    var selected = PlacementEngine.SelectPeerGroup("empty-cluster", 2, candidates, blankCluster);

    selected.Peers.Count.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
/// <summary>
/// Builds <paramref name="count"/> healthy peers ("peer-1" .. "peer-N") with
/// near-maximal, strictly decreasing free storage so storage-based ordering
/// in the tests stays deterministic.
/// </summary>
private static List<PeerInfo> CreatePeers(int count)
{
    var result = new List<PeerInfo>(count);
    for (var i = 1; i <= count; i++)
    {
        result.Add(new PeerInfo
        {
            PeerId = $"peer-{i}",
            Available = true,
            AvailableStorage = long.MaxValue - i,
        });
    }

    return result;
}
|
||||
}
|
||||
@@ -0,0 +1,260 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: RaftGroup member management, peer add/remove/preferred,
|
||||
// factory method via PlacementEngine, replication health properties,
|
||||
// and quorum size calculation.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for RaftGroup lifecycle: membership helpers, factory method,
/// replication status properties, and quorum size.
/// Go reference: jetstream_cluster.go:154-163 raftGroup struct and peer management.
/// </summary>
public class RaftGroupLifecycleTests
{
    // ---------------------------------------------------------------
    // IsMember — membership check
    // Go reference: jetstream_cluster.go isMember helper
    // ---------------------------------------------------------------

    [Fact]
    public void IsMember_returns_true_for_existing_peer()
    {
        // Membership is simply containment in rg.Peers (Go: isMember).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"] };

        rg.IsMember("peer-2").ShouldBeTrue();
    }

    [Fact]
    public void IsMember_returns_false_for_non_member()
    {
        // An id absent from rg.Peers is not a member (Go: isMember).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        rg.IsMember("peer-9").ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // SetPreferred — assign preferred peer
    // Go reference: jetstream_cluster.go setPreferred / rg.Preferred
    // ---------------------------------------------------------------

    [Fact]
    public void SetPreferred_sets_preferred_peer()
    {
        // A current member may be marked as the preferred peer (Go: setPreferred).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"] };

        rg.SetPreferred("peer-3");

        rg.Preferred.ShouldBe("peer-3");
    }

    [Fact]
    public void SetPreferred_throws_for_non_member()
    {
        // Membership is validated before assignment (Go: setPreferred).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        Should.Throw<InvalidOperationException>(() => rg.SetPreferred("peer-99"));
    }

    // ---------------------------------------------------------------
    // RemovePeer — remove a peer from the group
    // Go reference: jetstream_cluster.go removePeer
    // ---------------------------------------------------------------

    [Fact]
    public void RemovePeer_removes_existing_peer()
    {
        // Removing a member shrinks rg.Peers and reports success (Go: removePeer).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"] };

        var wasRemoved = rg.RemovePeer("peer-2");

        wasRemoved.ShouldBeTrue();
        rg.Peers.ShouldNotContain("peer-2");
        rg.Peers.Count.ShouldBe(2);
    }

    [Fact]
    public void RemovePeer_clears_preferred_when_removing_preferred()
    {
        // When the removed peer is also rg.Preferred, the preference resets
        // (Go: removePeer clears rg.Preferred on match).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"], Preferred = "peer-2" };

        rg.RemovePeer("peer-2");

        rg.Preferred.ShouldBe(string.Empty);
    }

    [Fact]
    public void RemovePeer_returns_false_for_non_member()
    {
        // Removing an unknown id is a no-op that reports failure (Go: removePeer).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var wasRemoved = rg.RemovePeer("peer-99");

        wasRemoved.ShouldBeFalse();
        rg.Peers.Count.ShouldBe(2);
    }

    // ---------------------------------------------------------------
    // AddPeer — add a new peer to the group
    // Go reference: jetstream_cluster.go addPeer / expandGroup
    // ---------------------------------------------------------------

    [Fact]
    public void AddPeer_adds_new_peer()
    {
        // A new id is appended to rg.Peers and the call reports success (Go: addPeer).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var wasAdded = rg.AddPeer("peer-3");

        wasAdded.ShouldBeTrue();
        rg.Peers.ShouldContain("peer-3");
        rg.Peers.Count.ShouldBe(3);
    }

    [Fact]
    public void AddPeer_returns_false_for_existing_peer()
    {
        // Duplicates are skipped; the membership list is unchanged (Go: addPeer).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var wasAdded = rg.AddPeer("peer-1");

        wasAdded.ShouldBeFalse();
        rg.Peers.Count.ShouldBe(2);
    }

    // ---------------------------------------------------------------
    // CreateRaftGroup factory — uses PlacementEngine
    // Go reference: jetstream_cluster.go:7212 selectPeerGroup called from createGroupForStream
    // ---------------------------------------------------------------

    [Fact]
    public void CreateRaftGroup_uses_placement_engine()
    {
        // The factory delegates peer selection to the placement engine
        // (Go: createGroupForStream → selectPeerGroup).
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "peer-A", Available = true, AvailableStorage = 9000 },
            new() { PeerId = "peer-B", Available = true, AvailableStorage = 8000 },
            new() { PeerId = "peer-C", Available = true, AvailableStorage = 7000 },
        };

        var rg = RaftGroup.CreateRaftGroup("my-stream", 3, candidates);

        rg.Name.ShouldBe("my-stream");
        rg.Peers.Count.ShouldBe(3);
        rg.Peers.ShouldContain("peer-A");
        rg.Peers.ShouldContain("peer-B");
        rg.Peers.ShouldContain("peer-C");
    }

    [Fact]
    public void CreateRaftGroup_sets_desired_replicas()
    {
        // The factory records the requested replica count on the group
        // (Go: rg.DesiredReplicas = replicas after group creation).
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "peer-X", Available = true },
            new() { PeerId = "peer-Y", Available = true },
            new() { PeerId = "peer-Z", Available = true },
        };

        var rg = RaftGroup.CreateRaftGroup("replicated-stream", 3, candidates);

        rg.DesiredReplicas.ShouldBe(3);
        rg.HasDesiredReplicas.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // IsUnderReplicated — replication health
    // Go reference: jetstream_cluster.go missingPeers — len(Peers) < DesiredReplicas
    // ---------------------------------------------------------------

    [Fact]
    public void IsUnderReplicated_true_when_peers_less_than_desired()
    {
        // One peer against a target of three is a replica deficit
        // (Go: jetstream_cluster.go:2284 sa.missingPeers()).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1"], DesiredReplicas = 3 };

        rg.IsUnderReplicated.ShouldBeTrue();
    }

    [Fact]
    public void IsUnderReplicated_false_when_peers_equal_desired()
    {
        // Peer count matching the target means no deficit (Go: missingPeers).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"], DesiredReplicas = 3 };

        rg.IsUnderReplicated.ShouldBeFalse();
    }

    [Fact]
    public void IsUnderReplicated_false_when_no_desired_replicas_set()
    {
        // Without an explicit DesiredReplicas there is no deficit to report.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1"] };

        rg.IsUnderReplicated.ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // IsOverReplicated — excess replication detection
    // Go reference: jetstream_cluster.go extraPeers — len(Peers) > DesiredReplicas
    // ---------------------------------------------------------------

    [Fact]
    public void IsOverReplicated_true_when_peers_more_than_desired()
    {
        // Four peers against a target of three flags the group for scale-down
        // (Go: extraPeers detection).
        var rg = new RaftGroup { Name = "test", Peers = ["p1", "p2", "p3", "p4"], DesiredReplicas = 3 };

        rg.IsOverReplicated.ShouldBeTrue();
    }

    [Fact]
    public void IsOverReplicated_false_when_peers_equal_desired()
    {
        // Peer count matching the target means no excess (Go: extraPeers).
        var rg = new RaftGroup { Name = "test", Peers = ["p1", "p2", "p3"], DesiredReplicas = 3 };

        rg.IsOverReplicated.ShouldBeFalse();
    }

    [Fact]
    public void IsOverReplicated_false_when_no_desired_replicas_set()
    {
        // Without an explicit DesiredReplicas there is no excess to report.
        var rg = new RaftGroup { Name = "test", Peers = ["p1", "p2", "p3", "p4"] };

        rg.IsOverReplicated.ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // QuorumSize — majority quorum calculation
    // Go reference: jetstream_cluster.go quorumNeeded — (n/2)+1
    // ---------------------------------------------------------------

    [Theory]
    [InlineData(1, 1)] // R=1 → quorum=1
    [InlineData(3, 2)] // R=3 → quorum=2
    [InlineData(5, 3)] // R=5 → quorum=3
    [InlineData(2, 2)] // R=2 → quorum=2 (degenerate, but formula consistent)
    [InlineData(4, 3)] // R=4 → quorum=3
    public void QuorumSize_correct_for_various_counts(int peerCount, int expectedQuorum)
    {
        // Majority quorum is (n/2)+1, matching Go's quorumNeeded.
        var members = new List<string>(peerCount);
        for (var i = 1; i <= peerCount; i++)
        {
            members.Add($"peer-{i}");
        }
        var rg = new RaftGroup { Name = "quorum-test", Peers = members };

        rg.QuorumSize.ShouldBe(expectedQuorum);
    }
}
|
||||
@@ -0,0 +1,196 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: Per-stream RAFT group message proposals, message count tracking,
|
||||
// sequence tracking, leader change events, replica status reporting,
|
||||
// and non-leader rejection.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for StreamReplicaGroup stream-specific RAFT apply logic:
/// message proposals, message count, last sequence, leader change
/// event, and replica status reporting.
/// Go reference: jetstream_cluster.go processStreamMsg, processStreamEntries.
/// </summary>
public class StreamRaftGroupTests
{
    // ---------------------------------------------------------------
    // ProposeMessageAsync succeeds as leader
    // Go reference: jetstream_cluster.go processStreamMsg
    // ---------------------------------------------------------------

    [Fact]
    public async Task Propose_message_succeeds_as_leader()
    {
        // A freshly created group has an elected leader, so proposing a
        // message succeeds and yields a positive log index.
        var sut = new StreamReplicaGroup("MSGS", replicas: 3);

        var logIndex = await sut.ProposeMessageAsync(
            "orders.new", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray(), default);

        logIndex.ShouldBeGreaterThan(0);
    }

    // ---------------------------------------------------------------
    // ProposeMessageAsync fails when not leader
    // Go reference: jetstream_cluster.go leader check
    // ---------------------------------------------------------------

    [Fact]
    public async Task Propose_message_fails_when_not_leader()
    {
        // After stepping down, the former leader must reject proposals
        // (Go: leader check in processStreamMsg).
        var sut = new StreamReplicaGroup("NOLEAD", replicas: 3);
        sut.Leader.RequestStepDown();

        await Should.ThrowAsync<InvalidOperationException>(async () =>
            await sut.ProposeMessageAsync(
                "test.sub", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray(), default));
    }

    // ---------------------------------------------------------------
    // Message count increments after proposal
    // Go reference: stream.go state.Msgs tracking
    // ---------------------------------------------------------------

    [Fact]
    public async Task Message_count_increments_after_proposal()
    {
        // Each accepted message bumps MessageCount by exactly one
        // (Go: stream.go state.Msgs).
        var sut = new StreamReplicaGroup("COUNT", replicas: 3);
        sut.MessageCount.ShouldBe(0);

        await sut.ProposeMessageAsync("a.1", ReadOnlyMemory<byte>.Empty, "m1"u8.ToArray(), default);
        sut.MessageCount.ShouldBe(1);

        await sut.ProposeMessageAsync("a.2", ReadOnlyMemory<byte>.Empty, "m2"u8.ToArray(), default);
        sut.MessageCount.ShouldBe(2);

        await sut.ProposeMessageAsync("a.3", ReadOnlyMemory<byte>.Empty, "m3"u8.ToArray(), default);
        sut.MessageCount.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Last sequence tracks correctly
    // Go reference: stream.go state.LastSeq
    // ---------------------------------------------------------------

    [Fact]
    public async Task Last_sequence_tracks_correctly()
    {
        // LastSequence always equals the index of the most recent proposal,
        // and indices are strictly increasing (Go: stream.go state.LastSeq).
        var sut = new StreamReplicaGroup("SEQ", replicas: 3);
        sut.LastSequence.ShouldBe(0);

        var firstIndex = await sut.ProposeMessageAsync("s.1", ReadOnlyMemory<byte>.Empty, "d1"u8.ToArray(), default);
        sut.LastSequence.ShouldBe(firstIndex);

        var secondIndex = await sut.ProposeMessageAsync("s.2", ReadOnlyMemory<byte>.Empty, "d2"u8.ToArray(), default);
        sut.LastSequence.ShouldBe(secondIndex);

        secondIndex.ShouldBeGreaterThan(firstIndex);
    }

    // ---------------------------------------------------------------
    // Step down triggers leader change event
    // Go reference: jetstream_cluster.go leader change notification
    // ---------------------------------------------------------------

    [Fact]
    public async Task Step_down_triggers_leader_change_event()
    {
        // Stepping down fires LeaderChanged with the old id, a different new
        // id, and a positive term (Go: leader change notification).
        var sut = new StreamReplicaGroup("EVENT", replicas: 3);
        var oldLeaderId = sut.Leader.Id;

        LeaderChangedEventArgs? observed = null;
        sut.LeaderChanged += (_, args) => observed = args;

        await sut.StepDownAsync(default);

        observed.ShouldNotBeNull();
        observed.PreviousLeaderId.ShouldBe(oldLeaderId);
        observed.NewLeaderId.ShouldNotBe(oldLeaderId);
        observed.NewTerm.ShouldBeGreaterThan(0);
    }

    [Fact]
    public async Task Multiple_stepdowns_fire_leader_changed_each_time()
    {
        // Every step-down produces its own LeaderChanged notification.
        var sut = new StreamReplicaGroup("MULTI_EVENT", replicas: 3);
        var observedEvents = 0;
        sut.LeaderChanged += (_, _) => observedEvents++;

        await sut.StepDownAsync(default);
        await sut.StepDownAsync(default);
        await sut.StepDownAsync(default);

        observedEvents.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Replica status reports correct state
    // Go reference: jetstream_cluster.go stream replica status
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_status_reports_correct_state()
    {
        // GetStatus reflects the stream name, current leader/term, message
        // count, last sequence, and replica count after two proposals.
        var sut = new StreamReplicaGroup("STATUS", replicas: 3);

        await sut.ProposeMessageAsync("x.1", ReadOnlyMemory<byte>.Empty, "m1"u8.ToArray(), default);
        await sut.ProposeMessageAsync("x.2", ReadOnlyMemory<byte>.Empty, "m2"u8.ToArray(), default);

        var snapshot = sut.GetStatus();

        snapshot.StreamName.ShouldBe("STATUS");
        snapshot.LeaderId.ShouldBe(sut.Leader.Id);
        snapshot.LeaderTerm.ShouldBeGreaterThan(0);
        snapshot.MessageCount.ShouldBe(2);
        snapshot.LastSequence.ShouldBeGreaterThan(0);
        snapshot.ReplicaCount.ShouldBe(3);
    }

    [Fact]
    public void Initial_status_has_zero_messages()
    {
        // A brand-new single-replica group reports an empty stream.
        var sut = new StreamReplicaGroup("EMPTY", replicas: 1);

        var snapshot = sut.GetStatus();

        snapshot.MessageCount.ShouldBe(0);
        snapshot.LastSequence.ShouldBe(0);
        snapshot.ReplicaCount.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Status updates after step down
    // ---------------------------------------------------------------

    [Fact]
    public async Task Status_reflects_new_leader_after_stepdown()
    {
        // After a step-down the status must name the newly elected leader.
        var sut = new StreamReplicaGroup("NEWLEAD", replicas: 3);
        var before = sut.GetStatus();

        await sut.StepDownAsync(default);

        var after = sut.GetStatus();
        after.LeaderId.ShouldNotBe(before.LeaderId);
    }

    // ---------------------------------------------------------------
    // ProposeAsync still works after ProposeMessageAsync
    // ---------------------------------------------------------------

    [Fact]
    public async Task ProposeAsync_and_ProposeMessageAsync_coexist()
    {
        // Raw proposals and message proposals share the same log (indices
        // keep increasing) but only ProposeMessageAsync bumps MessageCount.
        var sut = new StreamReplicaGroup("COEXIST", replicas: 3);

        var rawIndex = await sut.ProposeAsync("PUB test.1", default);
        var msgIndex = await sut.ProposeMessageAsync("test.2", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray(), default);

        msgIndex.ShouldBeGreaterThan(rawIndex);
        sut.MessageCount.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,309 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: StreamReplicaGroup construction from StreamAssignment, per-stream RAFT apply
|
||||
// logic (processStreamEntries), checkpoint/restore snapshot lifecycle, and commit/processed
|
||||
// index tracking through the group facade.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for B10: per-stream RAFT apply logic added to StreamReplicaGroup.
|
||||
/// Covers construction from StreamAssignment, apply loop, snapshot checkpoint/restore,
|
||||
/// and the CommitIndex/ProcessedIndex/PendingCommits facade properties.
|
||||
/// Go reference: jetstream_cluster.go processStreamAssignment, processStreamEntries.
|
||||
/// </summary>
|
||||
public class StreamReplicaGroupApplyTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: jetstream_cluster.go processStreamAssignment — builds per-stream raft group
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Construction_from_assignment_creates_correct_number_of_nodes()
|
||||
{
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "ORDERS",
|
||||
Group = new RaftGroup
|
||||
{
|
||||
Name = "orders-raft",
|
||||
Peers = ["n1", "n2", "n3"],
|
||||
},
|
||||
};
|
||||
|
||||
var group = new StreamReplicaGroup(assignment);
|
||||
|
||||
group.Nodes.Count.ShouldBe(3);
|
||||
group.StreamName.ShouldBe("ORDERS");
|
||||
group.Assignment.ShouldNotBeNull();
|
||||
group.Assignment!.StreamName.ShouldBe("ORDERS");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Construction_from_assignment_uses_peer_ids_as_node_ids()
|
||||
{
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "EVENTS",
|
||||
Group = new RaftGroup
|
||||
{
|
||||
Name = "events-raft",
|
||||
Peers = ["peer-a", "peer-b", "peer-c"],
|
||||
},
|
||||
};
|
||||
|
||||
var group = new StreamReplicaGroup(assignment);
|
||||
|
||||
var nodeIds = group.Nodes.Select(n => n.Id).ToHashSet();
|
||||
nodeIds.ShouldContain("peer-a");
|
||||
nodeIds.ShouldContain("peer-b");
|
||||
nodeIds.ShouldContain("peer-c");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Construction_from_assignment_elects_leader()
|
||||
{
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "STREAM",
|
||||
Group = new RaftGroup
|
||||
{
|
||||
Name = "stream-raft",
|
||||
Peers = ["n1", "n2", "n3"],
|
||||
},
|
||||
};
|
||||
|
||||
var group = new StreamReplicaGroup(assignment);
|
||||
|
||||
group.Leader.ShouldNotBeNull();
|
||||
group.Leader.IsLeader.ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Construction_from_assignment_with_no_peers_creates_single_node()
|
||||
{
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "SOLO",
|
||||
Group = new RaftGroup { Name = "solo-raft" },
|
||||
};
|
||||
|
||||
var group = new StreamReplicaGroup(assignment);
|
||||
|
||||
group.Nodes.Count.ShouldBe(1);
|
||||
group.Leader.IsLeader.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: raft.go:150-160 (applied/processed fields) — commit index on proposal
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task ProposeAsync_through_group_increments_commit_index()
|
||||
{
|
||||
var group = new StreamReplicaGroup("TRACK", replicas: 3);
|
||||
group.CommitIndex.ShouldBe(0);
|
||||
|
||||
await group.ProposeAsync("msg.1", default);
|
||||
|
||||
group.CommitIndex.ShouldBe(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Multiple_proposals_increment_commit_index_monotonically()
|
||||
{
|
||||
var group = new StreamReplicaGroup("MULTI", replicas: 3);
|
||||
|
||||
await group.ProposeAsync("msg.1", default);
|
||||
await group.ProposeAsync("msg.2", default);
|
||||
await group.ProposeAsync("msg.3", default);
|
||||
|
||||
group.CommitIndex.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: jetstream_cluster.go processStreamEntries — apply loop
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task ApplyCommittedEntriesAsync_processes_pending_entries()
|
||||
{
|
||||
var group = new StreamReplicaGroup("APPLY", replicas: 3);
|
||||
|
||||
await group.ProposeAsync("store.msg.1", default);
|
||||
await group.ProposeAsync("store.msg.2", default);
|
||||
|
||||
group.PendingCommits.ShouldBe(2);
|
||||
|
||||
await group.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
group.PendingCommits.ShouldBe(0);
|
||||
group.ProcessedIndex.ShouldBe(2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ApplyCommittedEntriesAsync_marks_regular_entries_as_processed()
|
||||
{
|
||||
var group = new StreamReplicaGroup("MARK", replicas: 1);
|
||||
|
||||
var idx = await group.ProposeAsync("data.record", default);
|
||||
|
||||
group.ProcessedIndex.ShouldBe(0);
|
||||
|
||||
await group.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
group.ProcessedIndex.ShouldBe(idx);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ApplyCommittedEntriesAsync_on_empty_queue_is_noop()
|
||||
{
|
||||
var group = new StreamReplicaGroup("EMPTY", replicas: 3);
|
||||
|
||||
// No proposals — queue is empty, should not throw
|
||||
await group.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
group.ProcessedIndex.ShouldBe(0);
|
||||
group.PendingCommits.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: raft.go CreateSnapshotCheckpoint — snapshot lifecycle
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task CheckpointAsync_creates_snapshot_at_current_state()
|
||||
{
|
||||
var group = new StreamReplicaGroup("SNAP", replicas: 3);
|
||||
|
||||
await group.ProposeAsync("entry.1", default);
|
||||
await group.ProposeAsync("entry.2", default);
|
||||
|
||||
var snapshot = await group.CheckpointAsync(default);
|
||||
|
||||
snapshot.ShouldNotBeNull();
|
||||
snapshot.LastIncludedIndex.ShouldBeGreaterThan(0);
|
||||
}
|
||||
|
||||
    // The snapshot's LastIncludedIndex must equal the leader's AppliedIndex
    // at checkpoint time — a snapshot covers exactly what has been applied.
    [Fact]
    public async Task CheckpointAsync_snapshot_index_matches_applied_index()
    {
        var group = new StreamReplicaGroup("SNAPIDX", replicas: 1);

        await group.ProposeAsync("record.1", default);
        await group.ProposeAsync("record.2", default);

        var snapshot = await group.CheckpointAsync(default);

        snapshot.LastIncludedIndex.ShouldBe(group.Leader.AppliedIndex);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: raft.go DrainAndReplaySnapshot — restore lifecycle
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Restoring from a snapshot taken earlier must roll CommitIndex back to
    // the snapshot's LastIncludedIndex and discard proposals made afterwards.
    [Fact]
    public async Task RestoreFromSnapshotAsync_restores_state()
    {
        var group = new StreamReplicaGroup("RESTORE", replicas: 3);

        await group.ProposeAsync("pre.1", default);
        await group.ProposeAsync("pre.2", default);

        var snapshot = await group.CheckpointAsync(default);

        // Advance state further after snapshot
        await group.ProposeAsync("post.1", default);

        // Restore: should drain queue and roll back to snapshot state
        await group.RestoreFromSnapshotAsync(snapshot, default);

        // After restore the commit index reflects the snapshot
        group.CommitIndex.ShouldBe(snapshot.LastIncludedIndex);
        // Pending commits should be drained
        group.PendingCommits.ShouldBe(0);
    }
|
||||
|
||||
[Fact]
|
||||
public async Task RestoreFromSnapshotAsync_drains_pending_commits()
|
||||
{
|
||||
var group = new StreamReplicaGroup("DRAIN", replicas: 3);
|
||||
|
||||
// Propose several entries so queue has items
|
||||
await group.ProposeAsync("queued.1", default);
|
||||
await group.ProposeAsync("queued.2", default);
|
||||
await group.ProposeAsync("queued.3", default);
|
||||
|
||||
group.PendingCommits.ShouldBeGreaterThan(0);
|
||||
|
||||
var snapshot = new RaftSnapshot
|
||||
{
|
||||
LastIncludedIndex = 3,
|
||||
LastIncludedTerm = group.Leader.Term,
|
||||
};
|
||||
|
||||
await group.RestoreFromSnapshotAsync(snapshot, default);
|
||||
|
||||
group.PendingCommits.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: raft.go:150-160 — PendingCommits reflects commit queue depth
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // PendingCommits must grow by one per proposal and drop to zero once
    // the committed entries are applied.
    [Fact]
    public async Task PendingCommits_reflects_commit_queue_depth()
    {
        var group = new StreamReplicaGroup("QUEUE", replicas: 3);

        group.PendingCommits.ShouldBe(0);

        await group.ProposeAsync("q.1", default);
        group.PendingCommits.ShouldBe(1);

        await group.ProposeAsync("q.2", default);
        group.PendingCommits.ShouldBe(2);

        await group.ApplyCommittedEntriesAsync(default);
        group.PendingCommits.ShouldBe(0);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: raft.go applied/processed tracking — CommitIndex and ProcessedIndex
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // CommitIndex advances on every proposal; ProcessedIndex lags behind and
    // only catches up when ApplyCommittedEntriesAsync runs.
    [Fact]
    public async Task CommitIndex_and_ProcessedIndex_track_through_the_group()
    {
        var group = new StreamReplicaGroup("INDICES", replicas: 3);

        group.CommitIndex.ShouldBe(0);
        group.ProcessedIndex.ShouldBe(0);

        await group.ProposeAsync("step.1", default);
        group.CommitIndex.ShouldBe(1);
        // Not yet applied
        group.ProcessedIndex.ShouldBe(0);

        await group.ApplyCommittedEntriesAsync(default);
        group.ProcessedIndex.ShouldBe(1);

        await group.ProposeAsync("step.2", default);
        group.CommitIndex.ShouldBe(2);
        group.ProcessedIndex.ShouldBe(1); // still only first entry applied

        await group.ApplyCommittedEntriesAsync(default);
        group.ProcessedIndex.ShouldBe(2);
    }
|
||||
|
||||
[Fact]
|
||||
public void CommitIndex_initially_zero_for_fresh_group()
|
||||
{
|
||||
var group = new StreamReplicaGroup("FRESH", replicas: 5);
|
||||
|
||||
group.CommitIndex.ShouldBe(0);
|
||||
group.ProcessedIndex.ShouldBe(0);
|
||||
group.PendingCommits.ShouldBe(0);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,381 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: per-stream RAFT groups, stream assignment proposal, replica count
|
||||
// enforcement, leader election for stream group, data replication across
|
||||
// stream replicas, placement scaling, stepdown behavior.
|
||||
using System.Collections.Concurrent;
|
||||
using System.Reflection;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering per-stream RAFT groups: stream assignment proposal,
|
||||
/// replica count enforcement, leader election, data replication across
|
||||
/// replicas, placement scaling, and stepdown behavior.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class StreamReplicaGroupTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Replica_group_r3_creates_three_raft_nodes()
|
||||
{
|
||||
var group = new StreamReplicaGroup("TEST", replicas: 3);
|
||||
|
||||
group.Nodes.Count.ShouldBe(3);
|
||||
group.StreamName.ShouldBe("TEST");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // An R=1 group degenerates to a single node that is immediately leader.
    [Fact]
    public void Replica_group_r1_creates_single_raft_node()
    {
        var group = new StreamReplicaGroup("R1S", replicas: 1);

        group.Nodes.Count.ShouldBe(1);
        group.Leader.IsLeader.ShouldBeTrue();
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // replicas: 0 is clamped to a single node rather than rejected.
    [Fact]
    public void Replica_group_zero_replicas_creates_one_node()
    {
        var group = new StreamReplicaGroup("ZERO", replicas: 0);

        group.Nodes.Count.ShouldBe(1);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // A negative replica count is also clamped to one node (same floor as zero).
    [Fact]
    public void Replica_group_negative_replicas_creates_one_node()
    {
        var group = new StreamReplicaGroup("NEG", replicas: -1);

        group.Nodes.Count.ShouldBe(1);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Construction must leave the group with a leader already elected —
    // Leader is non-null and reports the Leader role.
    [Fact]
    public void Replica_group_elects_initial_leader_on_creation()
    {
        var group = new StreamReplicaGroup("ELECT", replicas: 3);

        group.Leader.ShouldNotBeNull();
        group.Leader.IsLeader.ShouldBeTrue();
        group.Leader.Role.ShouldBe(RaftRole.Leader);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Node ids are derived from the lower-cased stream name with an "-r"
    // replica suffix (e.g. "my_stream-r1").
    [Fact]
    public void Replica_group_leader_id_follows_naming_convention()
    {
        var group = new StreamReplicaGroup("MY_STREAM", replicas: 3);

        group.Leader.Id.ShouldStartWith("my_stream-r");
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Replica_group_stepdown_changes_leader()
|
||||
{
|
||||
var group = new StreamReplicaGroup("STEP", replicas: 3);
|
||||
var before = group.Leader.Id;
|
||||
|
||||
await group.StepDownAsync(default);
|
||||
|
||||
group.Leader.Id.ShouldNotBe(before);
|
||||
group.Leader.IsLeader.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Replica_group_consecutive_stepdowns_cycle_leaders()
|
||||
{
|
||||
var group = new StreamReplicaGroup("CYCLE", replicas: 3);
|
||||
var leaders = new List<string> { group.Leader.Id };
|
||||
|
||||
await group.StepDownAsync(default);
|
||||
leaders.Add(group.Leader.Id);
|
||||
|
||||
await group.StepDownAsync(default);
|
||||
leaders.Add(group.Leader.Id);
|
||||
|
||||
leaders[1].ShouldNotBe(leaders[0]);
|
||||
leaders[2].ShouldNotBe(leaders[1]);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Six stepdowns on a 3-node group must visit every node — leadership
    // wraps around the membership instead of ping-ponging between two nodes.
    [Fact]
    public async Task Replica_group_stepdown_wraps_around()
    {
        var group = new StreamReplicaGroup("WRAP", replicas: 3);
        var ids = new HashSet<string>();

        for (var i = 0; i < 6; i++)
        {
            ids.Add(group.Leader.Id);
            await group.StepDownAsync(default);
        }

        // Should have cycled through all 3 nodes
        ids.Count.ShouldBe(3);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // The current leader must accept a proposal and assign it a positive log index.
    [Fact]
    public async Task Replica_group_leader_accepts_proposals()
    {
        var group = new StreamReplicaGroup("PROPOSE", replicas: 3);

        var index = await group.ProposeAsync("PUB test.1", default);

        index.ShouldBeGreaterThan(0);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Replica_group_sequential_proposals_have_increasing_indices()
|
||||
{
|
||||
var group = new StreamReplicaGroup("SEQPROP", replicas: 3);
|
||||
|
||||
var idx1 = await group.ProposeAsync("PUB test.1", default);
|
||||
var idx2 = await group.ProposeAsync("PUB test.2", default);
|
||||
var idx3 = await group.ProposeAsync("PUB test.3", default);
|
||||
|
||||
idx2.ShouldBeGreaterThan(idx1);
|
||||
idx3.ShouldBeGreaterThan(idx2);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // After a leadership change the group must keep accepting proposals —
    // a stepdown is a leader transfer, not a write outage.
    [Fact]
    public async Task Replica_group_proposals_survive_stepdown()
    {
        var group = new StreamReplicaGroup("SURVIVE", replicas: 3);

        await group.ProposeAsync("PUB a.1", default);
        await group.ProposeAsync("PUB a.2", default);

        await group.StepDownAsync(default);

        // New leader should accept proposals
        var idx = await group.ProposeAsync("PUB a.3", default);
        idx.ShouldBeGreaterThan(0);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Applying a larger placement (1 → 3 peers) must grow the node set and
    // leave the group with a functioning leader.
    [Fact]
    public async Task Replica_group_apply_placement_scales_up()
    {
        var group = new StreamReplicaGroup("SCALEUP", replicas: 1);
        group.Nodes.Count.ShouldBe(1);

        await group.ApplyPlacementAsync([1, 2, 3], default);

        group.Nodes.Count.ShouldBe(3);
        group.Leader.IsLeader.ShouldBeTrue();
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Applying a smaller placement (5 → 2 peers) must shrink the node set
    // and still leave a valid leader in place.
    [Fact]
    public async Task Replica_group_apply_placement_scales_down()
    {
        var group = new StreamReplicaGroup("SCALEDN", replicas: 5);
        group.Nodes.Count.ShouldBe(5);

        await group.ApplyPlacementAsync([1, 2], default);

        group.Nodes.Count.ShouldBe(2);
        group.Leader.IsLeader.ShouldBeTrue();
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // A placement of the same size must not disturb the group: node count
    // and the current leader are both unchanged.
    [Fact]
    public async Task Replica_group_apply_same_size_is_noop()
    {
        var group = new StreamReplicaGroup("NOOP", replicas: 3);
        var leaderBefore = group.Leader.Id;

        await group.ApplyPlacementAsync([1, 2, 3], default);

        group.Nodes.Count.ShouldBe(3);
        // Leader should remain the same since placement is a no-op
        group.Leader.Id.ShouldBe(leaderBefore);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Replica_group_all_nodes_share_cluster()
|
||||
{
|
||||
var group = new StreamReplicaGroup("SHARED", replicas: 3);
|
||||
|
||||
foreach (var node in group.Nodes)
|
||||
node.Members.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamSynchedTimeStamps server/jetstream_cluster_1_test.go:977
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_manager_creates_replica_group_on_stream_create()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var streamManager = new StreamManager(meta);
|
||||
|
||||
streamManager.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = "REPL",
|
||||
Subjects = ["repl.>"],
|
||||
Replicas = 3,
|
||||
});
|
||||
|
||||
// Use reflection to verify internal replica group was created
|
||||
var field = typeof(StreamManager)
|
||||
.GetField("_replicaGroups", BindingFlags.NonPublic | BindingFlags.Instance)!;
|
||||
var groups = (ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(streamManager)!;
|
||||
|
||||
groups.ContainsKey("REPL").ShouldBeTrue();
|
||||
groups["REPL"].Nodes.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // StepDownStreamLeaderAsync on the manager must delegate to the stream's
    // replica group and actually change its leader.
    [Fact]
    public async Task Stream_leader_stepdown_via_stream_manager_changes_leader()
    {
        var meta = new JetStreamMetaGroup(3);
        var streamManager = new StreamManager(meta);

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "SD",
            Subjects = ["sd.>"],
            Replicas = 3,
        });

        // Reach into the manager's internal replica-group map to observe the
        // leader before and after the stepdown.
        var field = typeof(StreamManager)
            .GetField("_replicaGroups", BindingFlags.NonPublic | BindingFlags.Instance)!;
        var groups = (ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(streamManager)!;
        var leaderBefore = groups["SD"].Leader.Id;

        await streamManager.StepDownStreamLeaderAsync("SD", default);

        groups["SD"].Leader.Id.ShouldNotBe(leaderBefore);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamDelete server/jetstream_cluster_1_test.go:472
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Deleting a stream must also tear down its replica group — no orphaned
    // RAFT state left behind in the manager's internal map.
    [Fact]
    public void Stream_delete_removes_replica_group()
    {
        var meta = new JetStreamMetaGroup(3);
        var streamManager = new StreamManager(meta);

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "DELRG",
            Subjects = ["delrg.>"],
            Replicas = 3,
        });

        streamManager.Delete("DELRG").ShouldBeTrue();

        // Inspect the internal map via reflection to confirm cleanup.
        var field = typeof(StreamManager)
            .GetField("_replicaGroups", BindingFlags.NonPublic | BindingFlags.Instance)!;
        var groups = (ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(streamManager)!;

        groups.ContainsKey("DELRG").ShouldBeFalse();
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    // Updating a stream without changing its replica count must reuse the
    // existing replica group (same object reference), not rebuild it.
    [Fact]
    public void Stream_update_preserves_replica_group_when_replicas_unchanged()
    {
        var meta = new JetStreamMetaGroup(3);
        var streamManager = new StreamManager(meta);

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "UPD",
            Subjects = ["upd.>"],
            Replicas = 3,
        });

        // Capture the group instance before the update via reflection.
        var field = typeof(StreamManager)
            .GetField("_replicaGroups", BindingFlags.NonPublic | BindingFlags.Instance)!;
        var groups = (ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(streamManager)!;
        var groupBefore = groups["UPD"];

        // Update config (subjects, limits) but keep Replicas = 3.
        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "UPD",
            Subjects = ["upd.>", "upd2.>"],
            Replicas = 3,
            MaxMsgs = 100,
        });

        // Same replica count means the group reference should be the same
        groups["UPD"].ShouldBeSameAs(groupBefore);
    }
|
||||
}
|
||||
@@ -0,0 +1,246 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:7212 selectPeerGroup
|
||||
// Covers: UniqueTag enforcement, HA asset limits, weighted scoring by available resources.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for topology-aware placement: JetStreamUniqueTag enforcement,
|
||||
/// MaxAssetsPerPeer HA limits, and weighted scoring.
|
||||
/// Go reference: jetstream_cluster.go:7212 selectPeerGroup (uniqueTagPrefix, maxHaAssets, weighted sort).
|
||||
/// </summary>
|
||||
public class TopologyPlacementTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// UniqueTag enforcement
|
||||
// Go reference: jetstream_cluster.go:7251 uniqueTagPrefix / checkUniqueTag
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    [Fact]
    public void UniqueTag_prevents_same_tag_value_replicas()
    {
        // 3 peers: p1 and p2 in az:us-east-1a, p3 in az:us-east-1b.
        // R=2 with UniqueTag="az" must pick one from each AZ.
        var peers = new List<PeerInfo>
        {
            new() { PeerId = "p1", Tags = ["az:us-east-1a"], AvailableStorage = 1000 },
            new() { PeerId = "p2", Tags = ["az:us-east-1a"], AvailableStorage = 2000 },
            new() { PeerId = "p3", Tags = ["az:us-east-1b"], AvailableStorage = 900 },
        };
        var policy = new PlacementPolicy { UniqueTag = "az" };

        var group = PlacementEngine.SelectPeerGroup("az-group", 2, peers, policy);

        group.Peers.Count.ShouldBe(2);
        // One peer must be from az:us-east-1a and one from az:us-east-1b.
        // Project selected ids back onto PeerInfo to inspect their az tags.
        var selectedPeers = peers.Where(p => group.Peers.Contains(p.PeerId)).ToList();
        var azValues = selectedPeers
            .SelectMany(p => p.Tags)
            .Where(t => t.StartsWith("az:", StringComparison.OrdinalIgnoreCase))
            .ToList();
        azValues.Distinct(StringComparer.OrdinalIgnoreCase).Count().ShouldBe(2);
    }
|
||||
|
||||
[Fact]
|
||||
public void UniqueTag_throws_when_not_enough_unique_values()
|
||||
{
|
||||
// All 3 peers share the same AZ tag; R=2 requires 2 unique AZ values → impossible.
|
||||
var peers = new List<PeerInfo>
|
||||
{
|
||||
new() { PeerId = "p1", Tags = ["az:us-east-1a"] },
|
||||
new() { PeerId = "p2", Tags = ["az:us-east-1a"] },
|
||||
new() { PeerId = "p3", Tags = ["az:us-east-1a"] },
|
||||
};
|
||||
var policy = new PlacementPolicy { UniqueTag = "az" };
|
||||
|
||||
Should.Throw<InvalidOperationException>(
|
||||
() => PlacementEngine.SelectPeerGroup("fail", 2, peers, policy));
|
||||
}
|
||||
|
||||
    [Fact]
    public void Tag_prefix_matching_for_unique_constraint()
    {
        // UniqueTag="az" should match tags like "az:us-east-1a", "az:us-west-2b", etc.
        // Go reference: jetstream_cluster.go:7265 strings.HasPrefix(t, uniqueTagPrefix)
        // All three peers carry distinct az values, so R=3 must succeed.
        var peers = new List<PeerInfo>
        {
            new() { PeerId = "p1", Tags = ["az:us-east-1a", "ssd"] },
            new() { PeerId = "p2", Tags = ["az:us-west-2b", "ssd"] },
            new() { PeerId = "p3", Tags = ["az:eu-central-1a", "ssd"] },
        };
        var policy = new PlacementPolicy { UniqueTag = "az" };

        var group = PlacementEngine.SelectPeerGroup("prefix", 3, peers, policy);

        group.Peers.Count.ShouldBe(3);
        group.Peers.ShouldContain("p1");
        group.Peers.ShouldContain("p2");
        group.Peers.ShouldContain("p3");
    }
|
||||
|
||||
    [Fact]
    public void Empty_unique_tag_ignored()
    {
        // UniqueTag="" or null → no unique constraint applied, normal selection.
        // Go reference: jetstream_cluster.go:7252 if uniqueTagPrefix != _EMPTY_
        var peers = new List<PeerInfo>
        {
            new() { PeerId = "p1", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p2", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p3", Tags = ["az:us-east-1a"] },
        };

        // No UniqueTag policy — all 3 peers are valid, R=3 should succeed.
        var groupNull = PlacementEngine.SelectPeerGroup("no-unique-null", 3, peers, policy: null);
        groupNull.Peers.Count.ShouldBe(3);

        // Empty string UniqueTag → treated as disabled.
        var policy = new PlacementPolicy { UniqueTag = "" };
        var groupEmpty = PlacementEngine.SelectPeerGroup("no-unique-empty", 3, peers, policy);
        groupEmpty.Peers.Count.ShouldBe(3);
    }
|
||||
|
||||
    [Fact]
    public void UniqueTag_combined_with_cluster_filter()
    {
        // Both cluster filter and UniqueTag must be applied together.
        // Go reference: jetstream_cluster.go:7346 cluster check before uniqueTag check
        var peers = new List<PeerInfo>
        {
            new() { PeerId = "p1", Cluster = "us-east", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p2", Cluster = "us-east", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p3", Cluster = "us-east", Tags = ["az:us-east-1b"] },
            new() { PeerId = "p4", Cluster = "us-west", Tags = ["az:us-west-2a"] },
        };
        var policy = new PlacementPolicy { Cluster = "us-east", UniqueTag = "az" };

        // Only p1/p2/p3 are in us-east; UniqueTag="az" → picks one from 1a and one from 1b.
        var group = PlacementEngine.SelectPeerGroup("combo", 2, peers, policy);

        group.Peers.Count.ShouldBe(2);
        group.Peers.ShouldNotContain("p4");
        // The two selected peers must come from two distinct AZ values.
        var selectedPeers = peers.Where(p => group.Peers.Contains(p.PeerId)).ToList();
        var azValues = selectedPeers
            .SelectMany(p => p.Tags)
            .Where(t => t.StartsWith("az:", StringComparison.OrdinalIgnoreCase))
            .Distinct(StringComparer.OrdinalIgnoreCase)
            .ToList();
        azValues.Count.ShouldBe(2);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// MaxAssetsPerPeer HA limit deprioritization
|
||||
// Go reference: jetstream_cluster.go:7428 maxHaAssets check (deprioritize vs hard exclude)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    [Fact]
    public void MaxAssetsPerPeer_deprioritizes_overloaded_peers()
    {
        // p1 is at its asset limit but p2 and p3 are not.
        // With enough non-overloaded candidates, overloaded peer should not be selected.
        // Note p1 has the most storage — the asset limit must outrank raw capacity here.
        var peers = new List<PeerInfo>
        {
            new() { PeerId = "p1", AvailableStorage = 10_000, CurrentAssets = 5, MaxAssetsPerPeer = 5 },
            new() { PeerId = "p2", AvailableStorage = 8_000, CurrentAssets = 1, MaxAssetsPerPeer = 5 },
            new() { PeerId = "p3", AvailableStorage = 6_000, CurrentAssets = 0, MaxAssetsPerPeer = 5 },
        };

        var group = PlacementEngine.SelectPeerGroup("ha-limit", 2, peers);

        // p1 is deprioritized (at max), so p2 and p3 should be selected over p1.
        group.Peers.Count.ShouldBe(2);
        group.Peers.ShouldContain("p2");
        group.Peers.ShouldContain("p3");
        group.Peers.ShouldNotContain("p1");
    }
|
||||
|
||||
[Fact]
|
||||
public void MaxAssetsPerPeer_still_used_when_no_alternatives()
|
||||
{
|
||||
// All peers are at their HA asset limit, but we must still select from them.
|
||||
// Go reference: jetstream_cluster.go — deprioritize (move to end), not hard exclude.
|
||||
var peers = new List<PeerInfo>
|
||||
{
|
||||
new() { PeerId = "p1", AvailableStorage = 1000, CurrentAssets = 3, MaxAssetsPerPeer = 3 },
|
||||
new() { PeerId = "p2", AvailableStorage = 900, CurrentAssets = 3, MaxAssetsPerPeer = 3 },
|
||||
};
|
||||
|
||||
// Should succeed even though both peers are at max.
|
||||
var group = PlacementEngine.SelectPeerGroup("ha-fallback", 2, peers);
|
||||
|
||||
group.Peers.Count.ShouldBe(2);
|
||||
group.Peers.ShouldContain("p1");
|
||||
group.Peers.ShouldContain("p2");
|
||||
}
|
||||
|
||||
    [Fact]
    public void Zero_MaxAssets_means_unlimited()
    {
        // MaxAssetsPerPeer=0 → no asset limit, peer treated as not overloaded regardless of CurrentAssets.
        var peers = new List<PeerInfo>
        {
            new() { PeerId = "p1", AvailableStorage = 5000, CurrentAssets = 100, MaxAssetsPerPeer = 0 },
            new() { PeerId = "p2", AvailableStorage = 4000, CurrentAssets = 200, MaxAssetsPerPeer = 0 },
        };

        var group = PlacementEngine.SelectPeerGroup("unlimited", 2, peers);

        // Both peers are selectable despite very high CurrentAssets.
        group.Peers.Count.ShouldBe(2);
        group.Peers.ShouldContain("p1");
        group.Peers.ShouldContain("p2");
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Weighted score = AvailableStorage - (CurrentAssets * AssetCostWeight)
|
||||
// Go reference: jetstream_cluster.go:7469 sort by avail then ns (stream count)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    [Fact]
    public void Weighted_score_prefers_less_loaded_peers()
    {
        // p1: more storage but many assets → lower score
        // p2: less storage but few assets → higher score
        // With DefaultAssetCostWeight = 1GB, even a small difference in assets
        // can overcome a moderate storage advantage.
        const long gb = PlacementEngine.DefaultAssetCostWeight; // 1_073_741_824L
        var peers = new List<PeerInfo>
        {
            // p1: score = 10*GB - 5*GB = 5*GB
            new() { PeerId = "p1", AvailableStorage = 10 * gb, CurrentAssets = 5 },
            // p2: score = 9*GB - 1*GB = 8*GB (wins despite less raw storage)
            new() { PeerId = "p2", AvailableStorage = 9 * gb, CurrentAssets = 1 },
            // p3: score = 3*GB - 0 = 3*GB
            new() { PeerId = "p3", AvailableStorage = 3 * gb, CurrentAssets = 0 },
        };

        var group = PlacementEngine.SelectPeerGroup("weighted", 2, peers);

        // p2 has the highest score (8*GB), p1 has second (5*GB).
        // Order in group.Peers is assumed score-descending — asserted positionally.
        group.Peers.Count.ShouldBe(2);
        group.Peers[0].ShouldBe("p2");
        group.Peers[1].ShouldBe("p1");
    }
|
||||
|
||||
    [Fact]
    public void Weighted_score_with_custom_cost_weight()
    {
        // Verify score formula: score = AvailableStorage - (CurrentAssets * AssetCostWeight)
        // Use a fixed, small cost weight to make the math obvious.
        const long costWeight = 1000L;
        var peers = new List<PeerInfo>
        {
            // score = 5000 - (3 * 1000) = 2000
            new() { PeerId = "p1", AvailableStorage = 5000, CurrentAssets = 3 },
            // score = 4000 - (0 * 1000) = 4000 (wins)
            new() { PeerId = "p2", AvailableStorage = 4000, CurrentAssets = 0 },
            // score = 6000 - (5 * 1000) = 1000 (loses)
            new() { PeerId = "p3", AvailableStorage = 6000, CurrentAssets = 5 },
        };

        var group = PlacementEngine.SelectPeerGroup("custom-weight", 2, peers, assetCostWeight: costWeight);

        group.Peers.Count.ShouldBe(2);
        group.Peers[0].ShouldBe("p2"); // score 4000
        group.Peers[1].ShouldBe("p1"); // score 2000
    }
|
||||
}
|
||||
@@ -0,0 +1,222 @@
|
||||
// Go parity: jetstream_cluster.go — version-incompatible stream/consumer assignment handling.
|
||||
// Covers: future-version SA/CA rejection, unknown MetaEntryType graceful handling,
|
||||
// SkippedUnsupportedEntries counter, mixed-version batch partial apply.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for graceful handling of version-incompatible stream/consumer assignments
|
||||
/// in JetStreamMetaGroup (Gap 2.11).
|
||||
/// Go reference: jetstream_cluster.go — versioned assignment processing, unknown entry fallback.
|
||||
/// </summary>
|
||||
public class UnsupportedAssetTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// ProcessStreamAssignment — version checks
|
||||
// Go reference: jetstream_cluster.go:4541 processStreamAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
    [Fact]
    public void ProcessStreamAssignment_skips_future_version()
    {
        // Go ref: jetstream_cluster.go — future-version entries are skipped to avoid cluster crash.
        var meta = new JetStreamMetaGroup(3);
        var sa = new StreamAssignment
        {
            StreamName = "ORDERS",
            Group = new RaftGroup { Name = "orders-group" },
            Version = 2, // future version — beyond CurrentVersion
        };

        var result = meta.ProcessStreamAssignment(sa);

        // Skipped: not applied, and the skip is counted for observability.
        result.ShouldBeFalse();
        meta.StreamCount.ShouldBe(0);
        meta.SkippedUnsupportedEntries.ShouldBe(1);
    }
|
||||
|
||||
[Fact]
|
||||
public void ProcessStreamAssignment_accepts_current_version()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — current-version entries are processed normally.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var sa = new StreamAssignment
|
||||
{
|
||||
StreamName = "ORDERS",
|
||||
Group = new RaftGroup { Name = "orders-group" },
|
||||
Version = JetStreamMetaGroup.CurrentVersion,
|
||||
};
|
||||
|
||||
var result = meta.ProcessStreamAssignment(sa);
|
||||
|
||||
result.ShouldBeTrue();
|
||||
meta.StreamCount.ShouldBe(1);
|
||||
meta.SkippedUnsupportedEntries.ShouldBe(0);
|
||||
}
|
||||
|
||||
    [Fact]
    public void ProcessStreamAssignment_accepts_default_version()
    {
        // Go ref: jetstream_cluster.go — version 0 (default/unset) is treated as current version,
        // maintaining backward compatibility with pre-versioned assignments.
        var meta = new JetStreamMetaGroup(3);
        var sa = new StreamAssignment
        {
            StreamName = "ORDERS",
            Group = new RaftGroup { Name = "orders-group" },
            // Version = 0 (default int value — pre-versioned assignment)
        };

        var result = meta.ProcessStreamAssignment(sa);

        result.ShouldBeTrue();
        meta.StreamCount.ShouldBe(1);
        meta.SkippedUnsupportedEntries.ShouldBe(0);
    }
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ProcessConsumerAssignment — version checks
|
||||
// Go reference: jetstream_cluster.go:5300 processConsumerAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ProcessConsumerAssignment_skips_future_version()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — future-version consumer entries are skipped.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
// First add the parent stream (current version)
|
||||
var sa = new StreamAssignment
|
||||
{
|
||||
StreamName = "ORDERS",
|
||||
Group = new RaftGroup { Name = "orders-group" },
|
||||
};
|
||||
meta.ProcessStreamAssignment(sa);
|
||||
|
||||
var ca = new ConsumerAssignment
|
||||
{
|
||||
ConsumerName = "my-consumer",
|
||||
StreamName = "ORDERS",
|
||||
Group = new RaftGroup { Name = "consumer-group" },
|
||||
Version = 2, // future version
|
||||
};
|
||||
|
||||
var result = meta.ProcessConsumerAssignment(ca);
|
||||
|
||||
result.ShouldBeFalse();
|
||||
meta.ConsumerCount.ShouldBe(0);
|
||||
meta.SkippedUnsupportedEntries.ShouldBe(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ProcessConsumerAssignment_accepts_current_version()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — current-version consumer entries are processed normally.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
var sa = new StreamAssignment
|
||||
{
|
||||
StreamName = "ORDERS",
|
||||
Group = new RaftGroup { Name = "orders-group" },
|
||||
};
|
||||
meta.ProcessStreamAssignment(sa);
|
||||
|
||||
var ca = new ConsumerAssignment
|
||||
{
|
||||
ConsumerName = "my-consumer",
|
||||
StreamName = "ORDERS",
|
||||
Group = new RaftGroup { Name = "consumer-group" },
|
||||
Version = JetStreamMetaGroup.CurrentVersion,
|
||||
};
|
||||
|
||||
var result = meta.ProcessConsumerAssignment(ca);
|
||||
|
||||
result.ShouldBeTrue();
|
||||
meta.ConsumerCount.ShouldBe(1);
|
||||
meta.SkippedUnsupportedEntries.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyEntry — unknown entry type
|
||||
// Go reference: jetstream_cluster.go — unknown entry type fallback (no crash)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_unknown_type_does_not_crash()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — unknown entry types must not crash the cluster;
|
||||
// they are counted and skipped to allow forward compatibility.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
// Should not throw
|
||||
meta.ApplyEntry(MetaEntryType.Unknown, "something");
|
||||
|
||||
meta.SkippedUnsupportedEntries.ShouldBe(1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// SkippedUnsupportedEntries counter accumulation
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void SkippedUnsupportedEntries_count_increments_for_each_skip()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — cluster must track how many unsupported entries
|
||||
// were encountered so operators can detect version skew.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
var futureStream = new StreamAssignment
|
||||
{
|
||||
StreamName = "STREAM-A",
|
||||
Group = new RaftGroup { Name = "g1" },
|
||||
Version = 99,
|
||||
};
|
||||
var futureStream2 = new StreamAssignment
|
||||
{
|
||||
StreamName = "STREAM-B",
|
||||
Group = new RaftGroup { Name = "g2" },
|
||||
Version = 99,
|
||||
};
|
||||
|
||||
meta.ProcessStreamAssignment(futureStream);
|
||||
meta.ProcessStreamAssignment(futureStream2);
|
||||
meta.ApplyEntry(MetaEntryType.Unknown, "x");
|
||||
|
||||
meta.SkippedUnsupportedEntries.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Mixed-version batch: only v1 assignments applied
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Mixed_versions_partial_apply()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — when replaying a RAFT log with mixed-version entries,
|
||||
// supported entries are applied and future-version entries are skipped without affecting
|
||||
// correctly versioned entries.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
var streams = new[]
|
||||
{
|
||||
new StreamAssignment { StreamName = "S1", Group = new RaftGroup { Name = "g1" }, Version = 1 },
|
||||
new StreamAssignment { StreamName = "S2", Group = new RaftGroup { Name = "g2" }, Version = 2 }, // future
|
||||
new StreamAssignment { StreamName = "S3", Group = new RaftGroup { Name = "g3" }, Version = 1 },
|
||||
new StreamAssignment { StreamName = "S4", Group = new RaftGroup { Name = "g4" }, Version = 3 }, // future
|
||||
new StreamAssignment { StreamName = "S5", Group = new RaftGroup { Name = "g5" }, Version = 0 }, // default = current
|
||||
};
|
||||
|
||||
foreach (var sa in streams)
|
||||
meta.ProcessStreamAssignment(sa);
|
||||
|
||||
// S1, S3, S5 should be applied; S2, S4 skipped
|
||||
meta.StreamCount.ShouldBe(3);
|
||||
meta.GetStreamAssignment("S1").ShouldNotBeNull();
|
||||
meta.GetStreamAssignment("S2").ShouldBeNull();
|
||||
meta.GetStreamAssignment("S3").ShouldNotBeNull();
|
||||
meta.GetStreamAssignment("S4").ShouldBeNull();
|
||||
meta.GetStreamAssignment("S5").ShouldNotBeNull();
|
||||
meta.SkippedUnsupportedEntries.ShouldBe(2);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,229 @@
|
||||
// Ported from golang/nats-server/server/jetstream_consumer_test.go
|
||||
// Covers: consumer creation, deliver policies (All, Last, New, ByStartSequence, ByStartTime),
|
||||
// and ack policies (None, Explicit, All) as modelled in the .NET port.
|
||||
//
|
||||
// Go reference tests:
|
||||
// TestJetStreamConsumerCreate (~line 2967)
|
||||
// TestJetStreamConsumerWithStartTime (~line 3160)
|
||||
// TestJetStreamConsumerMaxDeliveries (~line 3265)
|
||||
// TestJetStreamConsumerAckFloorFill (~line 3404)
|
||||
// TestJetStreamConsumerReplayRateNoAck (~line 4505)
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
/// <summary>
/// Consumer delivery parity tests ported from the Go reference implementation.
/// Exercises pull delivery, deliver policies, and ack policies against the in-process
/// ConsumerManager + StreamManager, mirroring the semantics validated in
/// golang/nats-server/server/jetstream_consumer_test.go.
/// </summary>
public class ConsumerDeliveryParityTests
{
    // Creates the "ORDERS" stream (subjects "orders.*") used by every test here.
    private static StreamManager CreateOrdersStream()
    {
        var streamManager = new StreamManager();
        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "ORDERS",
            Subjects = ["orders.*"],
        }).Error.ShouldBeNull();
        return streamManager;
    }

    // -------------------------------------------------------------------------
    // Test 1 – Pull consumer with DeliverPolicy.All returns all published msgs
    // Go reference: TestJetStreamConsumerCreate — a durable pull consumer created
    // with default settings fetches all stored messages in sequence order.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Pull_consumer_deliver_all_returns_messages_in_sequence_order()
    {
        var streamManager = CreateOrdersStream();

        var consumerManager = new ConsumerManager();
        consumerManager.CreateOrUpdate("ORDERS", new ConsumerConfig
        {
            DurableName = "PULL",
            DeliverPolicy = DeliverPolicy.All,
        }).Error.ShouldBeNull();

        streamManager.Capture("orders.created", "msg-1"u8.ToArray());
        streamManager.Capture("orders.updated", "msg-2"u8.ToArray());
        streamManager.Capture("orders.created", "msg-3"u8.ToArray());

        var batch = await consumerManager.FetchAsync("ORDERS", "PULL", 3, streamManager, default);

        batch.Messages.Count.ShouldBe(3);
        batch.Messages[0].Sequence.ShouldBe((ulong)1);
        batch.Messages[1].Sequence.ShouldBe((ulong)2);
        batch.Messages[2].Sequence.ShouldBe((ulong)3);
    }

    // -------------------------------------------------------------------------
    // Test 2 – Deliver policy Last starts at the final stored sequence
    // Go reference: TestJetStreamConsumerWithMultipleStartOptions — DeliverLast
    // positions the cursor at the last stored message rather than seq 1.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Pull_consumer_deliver_last_starts_at_final_sequence()
    {
        var streamManager = CreateOrdersStream();

        streamManager.Capture("orders.a", "first"u8.ToArray());
        streamManager.Capture("orders.b", "second"u8.ToArray());
        streamManager.Capture("orders.c", "third"u8.ToArray());

        var consumerManager = new ConsumerManager();
        consumerManager.CreateOrUpdate("ORDERS", new ConsumerConfig
        {
            DurableName = "LAST",
            DeliverPolicy = DeliverPolicy.Last,
        }).Error.ShouldBeNull();

        var batch = await consumerManager.FetchAsync("ORDERS", "LAST", 5, streamManager, default);

        // DeliverLast resolves the cursor to sequence 3 (last stored).
        batch.Messages.Count.ShouldBe(1);
        batch.Messages[0].Sequence.ShouldBe((ulong)3);
    }

    // -------------------------------------------------------------------------
    // Test 3 – Deliver policy New skips all messages present at first-fetch time
    // Go reference: TestJetStreamConsumerDeliverNewNotConsumingBeforeRestart —
    // DeliverNew positions the cursor past the last stored sequence, so messages
    // already in the stream at first-fetch time are never returned.
    //
    // In the .NET port the initial sequence is resolved on the first FetchAsync
    // call (when NextSequence == 1): DeliverPolicy.New sets the cursor to
    // lastSeq + 1, so only subsequent publishes become visible.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Pull_consumer_deliver_new_skips_messages_present_at_first_fetch()
    {
        var streamManager = CreateOrdersStream();

        streamManager.Capture("orders.a", "pre-1"u8.ToArray());
        streamManager.Capture("orders.b", "pre-2"u8.ToArray());

        var consumerManager = new ConsumerManager();
        consumerManager.CreateOrUpdate("ORDERS", new ConsumerConfig
        {
            DurableName = "NEW",
            DeliverPolicy = DeliverPolicy.New,
        }).Error.ShouldBeNull();

        // First fetch resolves the cursor to lastSeq + 1 = 3 — nothing stored there yet.
        var empty = await consumerManager.FetchAsync("ORDERS", "NEW", 5, streamManager, default);
        empty.Messages.Count.ShouldBe(0);

        // Publish the first "new" message after the cursor.
        streamManager.Capture("orders.c", "post-1"u8.ToArray());

        // Second fetch: the cursor (3) now matches the newly published message.
        var batch = await consumerManager.FetchAsync("ORDERS", "NEW", 5, streamManager, default);
        batch.Messages.Count.ShouldBe(1);
        batch.Messages[0].Sequence.ShouldBe((ulong)3);
    }

    // -------------------------------------------------------------------------
    // Test 4 – Deliver policy ByStartTime resolves the cursor at the correct seq
    // Go reference: TestJetStreamConsumerWithStartTime — a consumer created with
    // DeliverByStartTime starts at the first message stored after that timestamp.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Pull_consumer_deliver_by_start_time_resolves_correct_starting_sequence()
    {
        var streamManager = CreateOrdersStream();

        streamManager.Capture("orders.a", "before-1"u8.ToArray());
        streamManager.Capture("orders.b", "before-2"u8.ToArray());

        // Brief pause so the pre-existing messages' stored timestamps are strictly
        // before the cut point recorded next.
        // NOTE(review): timing-based — assumes stored timestamps have
        // millisecond-or-better resolution; revisit if this test flakes.
        await Task.Delay(10);
        var startTime = DateTime.UtcNow;

        streamManager.Capture("orders.c", "after-1"u8.ToArray());
        streamManager.Capture("orders.d", "after-2"u8.ToArray());

        var consumerManager = new ConsumerManager();
        consumerManager.CreateOrUpdate("ORDERS", new ConsumerConfig
        {
            DurableName = "BYTIME",
            DeliverPolicy = DeliverPolicy.ByStartTime,
            OptStartTimeUtc = startTime,
        }).Error.ShouldBeNull();

        var batch = await consumerManager.FetchAsync("ORDERS", "BYTIME", 5, streamManager, default);

        // Only messages stamped at or after startTime are returned.
        batch.Messages.Count.ShouldBe(2);
        batch.Messages.All(m => m.Sequence >= 3).ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 5 – AckAll advances the ack floor and blocks re-delivery of acked msgs
    // Go reference: TestJetStreamConsumerAckFloorFill — publishes four messages,
    // acks all via AckAll on seq 4, and verifies a subsequent fetch returns zero
    // messages because every sequence sits at or below the ack floor.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Explicit_ack_all_advances_floor_and_suppresses_redelivery()
    {
        var streamManager = CreateOrdersStream();

        var consumerManager = new ConsumerManager();
        consumerManager.CreateOrUpdate("ORDERS", new ConsumerConfig
        {
            DurableName = "ACK",
            AckPolicy = AckPolicy.Explicit,
            AckWaitMs = 100,
        }).Error.ShouldBeNull();

        for (var seq = 1; seq <= 4; seq++)
        {
            streamManager.Capture("orders.created", Encoding.UTF8.GetBytes($"msg-{seq}"));
        }

        var first = await consumerManager.FetchAsync("ORDERS", "ACK", 4, streamManager, default);
        first.Messages.Count.ShouldBe(4);

        // AckAll up to sequence 4 advances the floor and clears all pending.
        consumerManager.AckAll("ORDERS", "ACK", 4);

        // With the floor covering every published sequence and no new messages,
        // a subsequent fetch must return nothing.
        var second = await consumerManager.FetchAsync("ORDERS", "ACK", 4, streamManager, default);
        second.Messages.Count.ShouldBe(0);
    }
}
|
||||
@@ -0,0 +1,118 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for the enhanced AckProcessor with RedeliveryTracker integration.
/// Go reference: consumer.go:4854 (processInboundAcks).
/// </summary>
public class AckProcessorEnhancedTests
{
    // Default tracker used by most tests: 5 max deliveries, generous 30s ack wait.
    private static RedeliveryTracker DefaultTracker() =>
        new(maxDeliveries: 5, ackWaitMs: 30000);

    [Fact]
    public void ProcessAck_removes_from_pending()
    {
        var ackProcessor = new AckProcessor(DefaultTracker());

        ackProcessor.Register(1, "deliver.subj");
        ackProcessor.PendingCount.ShouldBe(1);

        ackProcessor.ProcessAck(1);

        ackProcessor.PendingCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessNak_schedules_redelivery()
    {
        var ackProcessor = new AckProcessor(DefaultTracker());
        ackProcessor.Register(1, "deliver.subj");

        ackProcessor.ProcessNak(1, delayMs: 500);

        // The sequence stays pending until the scheduled redelivery occurs.
        ackProcessor.PendingCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessTerm_removes_permanently()
    {
        var ackProcessor = new AckProcessor(DefaultTracker());
        ackProcessor.Register(1, "deliver.subj");

        ackProcessor.ProcessTerm(1);

        ackProcessor.PendingCount.ShouldBe(0);
        ackProcessor.TerminatedCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessProgress_resets_deadline_to_full_ack_wait()
    {
        // Go: consumer.go — processAckProgress (+WPI) resets the deadline to
        // UtcNow + ackWait. Bracket the call with two timestamps so the invariant
        // can be checked without relying on wall-clock delays.
        const int ackWaitMs = 1000;
        var ackProcessor = new AckProcessor(new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: ackWaitMs));
        ackProcessor.Register(1, "deliver.subj");

        var before = DateTimeOffset.UtcNow;
        ackProcessor.ProcessProgress(1);
        var after = DateTimeOffset.UtcNow;

        var deadline = ackProcessor.GetDeadline(1);

        // The deadline must land in [before + ackWait, after + ackWait + epsilon].
        deadline.ShouldBeGreaterThanOrEqualTo(before.AddMilliseconds(ackWaitMs));
        deadline.ShouldBeLessThanOrEqualTo(after.AddMilliseconds(ackWaitMs + 50));
    }

    [Fact]
    public void MaxAckPending_blocks_new_registrations()
    {
        var ackProcessor = new AckProcessor(DefaultTracker(), maxAckPending: 2);

        ackProcessor.Register(1, "d.1");
        ackProcessor.Register(2, "d.2");

        ackProcessor.CanRegister().ShouldBeFalse();
    }

    [Fact]
    public void CanRegister_true_when_unlimited()
    {
        // maxAckPending = 0 (the constructor default) means unlimited pending acks.
        var ackProcessor = new AckProcessor(DefaultTracker());

        ackProcessor.Register(1, "d.1");

        ackProcessor.CanRegister().ShouldBeTrue();
    }

    [Fact]
    public void ParseAckType_identifies_all_types()
    {
        AckProcessor.ParseAckType("+ACK"u8).ShouldBe(AckType.Ack);
        AckProcessor.ParseAckType("-NAK"u8).ShouldBe(AckType.Nak);
        AckProcessor.ParseAckType("+TERM"u8).ShouldBe(AckType.Term);
        AckProcessor.ParseAckType("+WPI"u8).ShouldBe(AckType.Progress);
    }

    [Fact]
    public void ParseAckType_returns_unknown_for_invalid()
    {
        AckProcessor.ParseAckType("GARBAGE"u8).ShouldBe(AckType.Unknown);
        AckProcessor.ParseAckType(""u8).ShouldBe(AckType.Unknown);
    }

    [Fact]
    public void GetDeadline_returns_min_for_unknown_sequence()
    {
        var ackProcessor = new AckProcessor(new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000));

        // An unregistered sequence yields DateTimeOffset.MinValue as the sentinel.
        ackProcessor.GetDeadline(999).ShouldBe(DateTimeOffset.MinValue);
    }
}
|
||||
@@ -0,0 +1,185 @@
|
||||
// Go: consumer.go:2550 (processAckMsg, processNak, processTerm, processAckProgress)
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for AckProcessor NAK/TERM/WPI ack-payload handling and backoff scheduling.
/// Go reference: consumer.go:2550 (processAckMsg, processNak, processTerm, processAckProgress).
/// </summary>
public class AckProcessorNakTests
{
    // Test 1: ProcessAck with empty payload acks the sequence
    [Fact]
    public void ProcessAck_empty_payload_acks_sequence()
    {
        // Go: consumer.go — empty ack payload treated as "+ACK"
        var ack = new AckProcessor();
        ack.Register(1, ackWaitMs: 5000);

        ack.ProcessAck(1, ReadOnlySpan<byte>.Empty);

        ack.PendingCount.ShouldBe(0);
        ack.AckFloor.ShouldBe((ulong)1);
    }

    // Test 2: ProcessAck with -NAK schedules redelivery
    // FIX: was declared `async Task` with a trailing `await Task.CompletedTask` —
    // the body is fully synchronous, so the pointless async machinery was removed.
    [Fact]
    public void ProcessAck_nak_payload_schedules_redelivery()
    {
        // Go: consumer.go — "-NAK" triggers rescheduled redelivery
        var ack = new AckProcessor();
        ack.Register(1, ackWaitMs: 5000);

        ack.ProcessAck(1, "-NAK"u8);

        // Should still be pending (redelivery scheduled)
        ack.PendingCount.ShouldBe(1);

        // A NAK without an explicit delay falls back to the 5000ms ackWait, so the
        // sequence cannot have expired this soon after the NAK.
        ack.TryGetExpired(out _, out _).ShouldBeFalse();
    }

    // Test 3: ProcessAck with "-NAK {delay}" honors the explicit delay
    [Fact]
    public async Task ProcessAck_nak_with_delay_uses_custom_delay()
    {
        // Go: consumer.go — "-NAK {delay}" parses optional explicit delay in milliseconds
        var ack = new AckProcessor();
        ack.Register(1, ackWaitMs: 5000);

        ack.ProcessAck(1, "-NAK 1"u8);

        // Sequence still pending
        ack.PendingCount.ShouldBe(1);

        // With a 1ms delay the sequence must be expired after a short wait.
        await Task.Delay(10);
        ack.TryGetExpired(out var seq, out _).ShouldBeTrue();
        seq.ShouldBe((ulong)1);
    }

    // Test 4: ProcessAck with +TERM removes from pending
    [Fact]
    public void ProcessAck_term_removes_from_pending()
    {
        // Go: consumer.go — "+TERM" permanently terminates delivery; sequence never redelivered
        var ack = new AckProcessor();
        ack.Register(1, ackWaitMs: 5000);

        ack.ProcessAck(1, "+TERM"u8);

        ack.PendingCount.ShouldBe(0);
        ack.HasPending.ShouldBeFalse();
    }

    // Test 5: ProcessAck with +WPI resets the deadline without incrementing deliveries
    [Fact]
    public async Task ProcessAck_wpi_resets_deadline_without_incrementing_deliveries()
    {
        // Go: consumer.go — "+WPI" resets ack deadline; delivery count must not change
        var ack = new AckProcessor();
        ack.Register(1, ackWaitMs: 10);

        // Let the original deadline approach, then reset it via progress.
        // NOTE(review): Task.Delay(5) may overshoot the 10ms ackWait on a loaded
        // machine — confirm +WPI still resets an already-elapsed deadline.
        await Task.Delay(5);
        ack.ProcessAck(1, "+WPI"u8);

        // Deadline was just reset — nothing may be expired yet.
        // FIX(comment): the original comment claimed deliveries "must remain at 1",
        // but since TryGetExpired returns false here its out-parameter carries the
        // default value (0) — it does not observe the in-flight delivery count.
        ack.TryGetExpired(out _, out var deliveries).ShouldBeFalse();
        deliveries.ShouldBe(0);

        // Sequence still pending
        ack.PendingCount.ShouldBe(1);
    }

    // Test 6: Backoff array applies correct delay per redelivery attempt
    [Fact]
    public async Task ProcessNak_backoff_array_applies_delay_by_delivery_count()
    {
        // Go: consumer.go — backoff array indexes by (deliveries - 1)
        var ack = new AckProcessor(backoffMs: [1, 50, 5000]);
        ack.Register(1, ackWaitMs: 5000);

        // First NAK — delivery count is 1 → backoff[0] = 1ms
        ack.ProcessNak(1);

        await Task.Delay(10);
        ack.TryGetExpired(out _, out _).ShouldBeTrue();

        // Now delivery count is 2 → backoff[1] = 50ms.
        // NOTE(review): the immediate check assumes fewer than 50ms elapse between the
        // NAK and the assertion; could flake under heavy scheduler pressure.
        ack.ProcessNak(1);
        ack.TryGetExpired(out _, out _).ShouldBeFalse();
    }

    // Test 7: Backoff array clamps at last entry for high delivery counts
    [Fact]
    public async Task ProcessNak_backoff_clamps_at_last_entry_for_high_delivery_count()
    {
        // Go: consumer.go — backoff index clamped to backoff.Length-1 when deliveries exceed array size
        var ack = new AckProcessor(backoffMs: [1, 2]);
        ack.Register(1, ackWaitMs: 5000);

        // Drive deliveries up: NAK twice to advance delivery count past array length
        ack.ProcessNak(1); // deliveries becomes 2 (index 1 = 2ms)
        await Task.Delay(10);
        ack.TryGetExpired(out _, out _).ShouldBeTrue();

        ack.ProcessNak(1); // deliveries becomes 3 (index clamps to 1 = 2ms)
        await Task.Delay(10);
        ack.TryGetExpired(out var seq, out _).ShouldBeTrue();
        seq.ShouldBe((ulong)1);
    }

    // Test 8: AckSequence advances AckFloor when contiguous
    [Fact]
    public void AckSequence_advances_ackfloor_for_contiguous_sequences()
    {
        // Go: consumer.go — acking contiguous sequences from floor advances AckFloor monotonically
        var ack = new AckProcessor();
        ack.Register(1, ackWaitMs: 5000);
        ack.Register(2, ackWaitMs: 5000);
        ack.Register(3, ackWaitMs: 5000);

        ack.AckSequence(1);
        ack.AckFloor.ShouldBe((ulong)1);

        ack.AckSequence(2);
        ack.AckFloor.ShouldBe((ulong)2);
    }

    // Test 9: ProcessTerm increments TerminatedCount
    [Fact]
    public void ProcessTerm_increments_terminated_count()
    {
        // Go: consumer.go — terminated sequences tracked separately from acked sequences
        var ack = new AckProcessor();
        ack.Register(1, ackWaitMs: 5000);
        ack.Register(2, ackWaitMs: 5000);

        ack.TerminatedCount.ShouldBe(0);

        ack.ProcessTerm(1);
        ack.TerminatedCount.ShouldBe(1);

        ack.ProcessTerm(2);
        ack.TerminatedCount.ShouldBe(2);
    }

    // Test 10: NAK after TERM is ignored (sequence already terminated)
    [Fact]
    public void ProcessNak_after_term_is_ignored()
    {
        // Go: consumer.go — once terminated, a sequence cannot be rescheduled via NAK
        var ack = new AckProcessor(backoffMs: [1]);
        ack.Register(1, ackWaitMs: 5000);

        ack.ProcessTerm(1);
        ack.PendingCount.ShouldBe(0);

        // Attempting to NAK a terminated sequence has no effect
        ack.ProcessNak(1);
        ack.PendingCount.ShouldBe(0);
        ack.TerminatedCount.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,198 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using Shouldly;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for cluster-aware pending pull request tracking in PullConsumerEngine.
|
||||
/// Go reference: consumer.go proposeWaitingRequest / waitingRequestsPending — cluster-wide
|
||||
/// pending pull request coordination via the consumer RAFT group.
|
||||
/// golang/nats-server/server/consumer.go proposeWaitingRequest
|
||||
/// </summary>
|
||||
public class ClusterPendingRequestTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// ProposeWaitingRequest
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ProposeWaitingRequest_with_quorum_returns_true()
{
    // Go: consumer.go proposeWaitingRequest — a proposal succeeds only when quorum exists.
    var engine = new PullConsumerEngine();
    var raftGroup = new RaftGroup
    {
        Name = "test-group",
        Peers = ["peer-1", "peer-2", "peer-3"],
    };
    var waitingRequest = new PullWaitingRequest { Batch = 10, Reply = "reply.test.1" };

    engine.ProposeWaitingRequest(waitingRequest, raftGroup).ShouldBeTrue();
}
|
||||
|
||||
[Fact]
public void ProposeWaitingRequest_without_quorum_returns_false()
{
    // Go: consumer.go proposeWaitingRequest — an empty peer set has no quorum
    // (0 peers means quorum = 1, but 0 < 1), so the proposal is rejected.
    var engine = new PullConsumerEngine();
    var emptyGroup = new RaftGroup
    {
        Name = "empty-group",
        Peers = [],
    };
    var waitingRequest = new PullWaitingRequest { Batch = 5, Reply = "reply.noquorum" };

    engine.ProposeWaitingRequest(waitingRequest, emptyGroup).ShouldBeFalse();
}
|
||||
|
||||
[Fact]
public void ProposeWaitingRequest_registers_in_cluster_pending()
{
    // Go: consumer.go — a successful proposal must land in the cluster pending map
    // so it can later be fulfilled or expired.
    var engine = new PullConsumerEngine();
    var raftGroup = new RaftGroup
    {
        Name = "test-group",
        Peers = ["peer-1", "peer-2", "peer-3"],
    };

    engine.ProposeWaitingRequest(new PullWaitingRequest { Batch = 4, Reply = "reply.reg" }, raftGroup);

    engine.GetClusterPendingRequests().ShouldContain(r => r.Reply == "reply.reg");
}
|
||||
|
||||
[Fact]
public void Multiple_proposals_tracked_independently()
{
    // Go: consumer.go — each reply subject is an independent pending slot; proposals
    // with different reply subjects must not overwrite each other.
    var engine = new PullConsumerEngine();
    var raftGroup = new RaftGroup
    {
        Name = "test-group",
        Peers = ["peer-1", "peer-2", "peer-3"],
    };

    engine.ProposeWaitingRequest(new PullWaitingRequest { Batch = 1, Reply = "reply.A" }, raftGroup);
    engine.ProposeWaitingRequest(new PullWaitingRequest { Batch = 2, Reply = "reply.B" }, raftGroup);
    engine.ProposeWaitingRequest(new PullWaitingRequest { Batch = 3, Reply = "reply.C" }, raftGroup);

    engine.ClusterPendingCount.ShouldBe(3);
    var pendingRequests = engine.GetClusterPendingRequests();
    pendingRequests.ShouldContain(r => r.Reply == "reply.A" && r.Batch == 1);
    pendingRequests.ShouldContain(r => r.Reply == "reply.B" && r.Batch == 2);
    pendingRequests.ShouldContain(r => r.Reply == "reply.C" && r.Batch == 3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ClusterPendingCount
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ClusterPendingCount_tracks_pending_requests()
{
    // Go: consumer.go — ClusterPendingCount mirrors the current size of the pending map.
    var engine = new PullConsumerEngine();
    engine.ClusterPendingCount.ShouldBe(0);

    for (var batch = 1; batch <= 3; batch++)
    {
        engine.RegisterClusterPending(new PullWaitingRequest { Batch = batch, Reply = $"r{batch}" });
    }

    engine.ClusterPendingCount.ShouldBe(3);
}
|
||||
|
||||
[Fact]
public void ClusterPendingCount_decrements_on_remove()
{
    // Go: consumer.go — removing a request by its reply subject decrements the count.
    var engine = new PullConsumerEngine();
    engine.RegisterClusterPending(new PullWaitingRequest { Batch = 5, Reply = "decrement.reply" });
    engine.ClusterPendingCount.ShouldBe(1);

    engine.RemoveClusterPending("decrement.reply");

    engine.ClusterPendingCount.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// RegisterClusterPending
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void RegisterClusterPending_adds_request_by_reply()
{
    // Go: consumer.go — pending requests are keyed by reply subject for O(1) lookup.
    const string replySubject = "register.reply.subject";
    var sut = new PullConsumerEngine();

    sut.RegisterClusterPending(new PullWaitingRequest { Batch = 7, Reply = replySubject });

    // Removing by the same reply subject must hand back the stored request.
    var stored = sut.RemoveClusterPending(replySubject);
    stored.ShouldNotBeNull();
    stored.Batch.ShouldBe(7);
    stored.Reply.ShouldBe(replySubject);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// RemoveClusterPending
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void RemoveClusterPending_returns_and_removes_request()
{
    // Go: consumer.go — RemoveClusterPending both returns the request and deletes it
    // from the map so the same request can never be fulfilled twice.
    var sut = new PullConsumerEngine();
    sut.RegisterClusterPending(new PullWaitingRequest { Batch = 3, Reply = "remove.me" });

    var first = sut.RemoveClusterPending("remove.me");

    first.ShouldNotBeNull();
    first.Reply.ShouldBe("remove.me");
    sut.ClusterPendingCount.ShouldBe(0);

    // A second removal must come back null — the entry is already gone.
    sut.RemoveClusterPending("remove.me").ShouldBeNull();
}
|
||||
|
||||
[Fact]
public void RemoveClusterPending_returns_null_for_unknown()
{
    // Go: consumer.go — removing an unknown reply subject is a harmless no-op.
    var sut = new PullConsumerEngine();

    sut.RemoveClusterPending("does.not.exist").ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetClusterPendingRequests
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void GetClusterPendingRequests_returns_all_pending()
{
    // Go: consumer.go — GetClusterPendingRequests backs expiry sweeps and diagnostics;
    // it must surface every currently pending request.
    string[] replies = ["bulk.a", "bulk.b", "bulk.c"];
    var sut = new PullConsumerEngine();
    for (int i = 0; i < replies.Length; i++)
        sut.RegisterClusterPending(new PullWaitingRequest { Batch = i + 1, Reply = replies[i] });

    var pending = sut.GetClusterPendingRequests();

    pending.Count.ShouldBe(3);
    foreach (var reply in replies)
        pending.Select(r => r.Reply).ShouldContain(reply);
}
|
||||
}
|
||||
@@ -0,0 +1,702 @@
|
||||
// Go reference: golang/nats-server/server/jetstream_consumer_test.go
|
||||
// Ports Go consumer tests that map to existing .NET infrastructure:
|
||||
// multiple filters, consumer actions, filter matching, priority groups,
|
||||
// ack timeout retry, descriptions, single-token subjects, overflow.
|
||||
|
||||
using System.Text.RegularExpressions;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.Subscriptions;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
|
||||
/// Go parity tests ported from jetstream_consumer_test.go for consumer
|
||||
/// behaviors including filter matching, consumer actions, priority groups,
|
||||
/// ack retry, descriptions, and overflow handling.
|
||||
/// </summary>
|
||||
public class ConsumerGoParityTests
|
||||
{
|
||||
// =========================================================================
|
||||
// Helper: Generate N filter subjects matching Go's filterSubjects() function.
|
||||
// Go: jetstream_consumer_test.go:829
|
||||
// =========================================================================
|
||||
|
||||
/// <summary>
/// Generates <paramref name="n"/> five-token filter subjects mirroring Go's
/// filterSubjects() helper (jetstream_consumer_test.go:829): each round emits one
/// fully-literal subject (with a random middle token) followed by up to five
/// single-"*" wildcard variants of it, until exactly n subjects exist.
/// </summary>
private static List<string> GenerateFilterSubjects(int n)
{
    var subjects = new List<string>(n);
    while (subjects.Count < n)
    {
        string[] tokens = ["foo", "bar", Guid.NewGuid().ToString("N")[..8], "xyz", "abcdef"];
        subjects.Add(string.Join('.', tokens));

        // Derive wildcard variants by replacing one token at a time with "*".
        for (int wild = 0; wild < tokens.Length && subjects.Count < n; wild++)
        {
            var variant = (string[])tokens.Clone();
            variant[wild] = "*";
            subjects.Add(string.Join('.', variant));
        }
    }

    // The loop guards guarantee Count == n here; Take is a defensive cap only.
    return subjects.Take(n).ToList();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerIsFilteredMatch — jetstream_consumer_test.go:856
|
||||
// Tests the filter matching logic used by consumers to determine if a
|
||||
// message subject matches their filter configuration.
|
||||
// =========================================================================
|
||||
|
||||
[Theory]
[InlineData(new string[0], "foo.bar", true)] // no filter = match all
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.bar", true)] // literal match
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.ban", false)] // literal mismatch
[InlineData(new[] { "bar.>", "foo.>" }, "foo.bar", true)] // wildcard > match
[InlineData(new[] { "bar.>", "foo.>" }, "bar.foo", true)] // wildcard > match
[InlineData(new[] { "bar.>", "foo.>" }, "baz.foo", false)] // wildcard > mismatch
[InlineData(new[] { "bar.*", "foo.*" }, "foo.bar", true)] // wildcard * match
[InlineData(new[] { "bar.*", "foo.*" }, "bar.foo", true)] // wildcard * match
[InlineData(new[] { "bar.*", "foo.*" }, "baz.foo", false)] // wildcard * mismatch
[InlineData(new[] { "foo.*.x", "foo.*.y" }, "foo.bar.x", true)] // multi-token wildcard match
[InlineData(new[] { "foo.*.x", "foo.*.y", "foo.*.z" }, "foo.bar.z", true)] // multi wildcard match
public void IsFilteredMatch_basic_cases(string[] filters, string subject, bool expected)
{
    // Go: TestJetStreamConsumerIsFilteredMatch jetstream_consumer_test.go:856
    // A consumer's compiled filter decides whether a stream subject is delivered.
    var filter = new CompiledFilter(filters);

    filter.Matches(subject).ShouldBe(expected);
}
|
||||
|
||||
[Fact]
public void IsFilteredMatch_many_filters_mismatch()
{
    // Go: TestJetStreamConsumerIsFilteredMatch jetstream_consumer_test.go:874
    // None of 100 generated filter subjects may match a deliberately alien subject.
    var filter = new CompiledFilter(GenerateFilterSubjects(100));

    filter.Matches("foo.bar.do.not.match.any.filter.subject").ShouldBeFalse();
}
|
||||
|
||||
[Fact]
public void IsFilteredMatch_many_filters_match()
{
    // Go: TestJetStreamConsumerIsFilteredMatch jetstream_consumer_test.go:875
    // The generator always emits the wildcard filter "foo.bar.*.xyz.abcdef",
    // which must match "foo.bar.12345.xyz.abcdef".
    var filter = new CompiledFilter(GenerateFilterSubjects(100));

    filter.Matches("foo.bar.12345.xyz.abcdef").ShouldBeTrue();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerIsEqualOrSubsetMatch — jetstream_consumer_test.go:921
|
||||
// Tests whether a subject is an equal or subset match of the consumer's filters.
|
||||
// This is used for work queue overlap detection.
|
||||
// =========================================================================
|
||||
|
||||
[Theory]
[InlineData(new string[0], "foo.bar", false)] // no filter = no subset
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.bar", true)] // literal match
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.ban", false)] // literal mismatch
[InlineData(new[] { "bar.>", "foo.>" }, "foo.>", true)] // equal wildcard match
[InlineData(new[] { "bar.foo.>", "foo.bar.>" }, "bar.>", true)] // subset match: bar.foo.> is subset of bar.>
[InlineData(new[] { "bar.>", "foo.>" }, "baz.foo.>", false)] // no match
public void IsEqualOrSubsetMatch_basic_cases(string[] filters, string subject, bool expected)
{
    // Go: TestJetStreamConsumerIsEqualOrSubsetMatch jetstream_consumer_test.go:921
    // A filter is an "equal or subset" match of the subject when it is byte-identical
    // to it, or strictly narrower (everything the filter matches, the subject matches
    // too — e.g. filter "bar.foo.>" is a subset of subject "bar.>"). Used for work
    // queue overlap detection.
    bool result = filters.Any(filter =>
        string.Equals(filter, subject, StringComparison.Ordinal)
        || SubjectMatch.MatchLiteral(filter, subject));

    result.ShouldBe(expected);
}
|
||||
|
||||
[Fact]
public void IsEqualOrSubsetMatch_many_filters_literal()
{
    // Go: TestJetStreamConsumerIsEqualOrSubsetMatch jetstream_consumer_test.go:934
    // Generated literals look like "foo.bar.<uuid8>.xyz.abcdef"; each such literal
    // must be matched by the pattern "foo.bar.*.xyz.abcdef".
    var filters = GenerateFilterSubjects(100);

    filters.Any(f => SubjectMatch.MatchLiteral(f, "foo.bar.*.xyz.abcdef")).ShouldBeTrue();
}
|
||||
|
||||
[Fact]
public void IsEqualOrSubsetMatch_many_filters_subset()
{
    // Go: TestJetStreamConsumerIsEqualOrSubsetMatch jetstream_consumer_test.go:935
    // "foo.bar.>" is a superset of every generated filter, so at least one must match.
    var filters = GenerateFilterSubjects(100);

    filters.Any(f => SubjectMatch.MatchLiteral(f, "foo.bar.>")).ShouldBeTrue();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerActions — jetstream_consumer_test.go:472
|
||||
// Tests consumer create/update action semantics.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_create_action_succeeds_for_new_consumer()
{
    // Go: TestJetStreamConsumerActions jetstream_consumer_test.go:472
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    var created = await fixture.CreateConsumerAsync("TEST", "DUR", null,
        filterSubjects: ["one", "two"],
        ackPolicy: AckPolicy.Explicit);

    created.Error.ShouldBeNull();
    created.ConsumerInfo.ShouldNotBeNull();
}
|
||||
|
||||
[Fact]
public async Task Consumer_create_action_idempotent_with_same_config()
{
    // Go: TestJetStreamConsumerActions jetstream_consumer_test.go:497
    // Creating the same durable twice with an identical config must succeed both times.
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    for (int attempt = 0; attempt < 2; attempt++)
    {
        var response = await fixture.CreateConsumerAsync("TEST", "DUR", null,
            filterSubjects: ["one", "two"],
            ackPolicy: AckPolicy.Explicit);
        response.Error.ShouldBeNull();
    }
}
|
||||
|
||||
[Fact]
public async Task Consumer_update_existing_succeeds()
{
    // Go: TestJetStreamConsumerActions jetstream_consumer_test.go:516
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    await fixture.CreateConsumerAsync("TEST", "DUR", null,
        filterSubjects: ["one", "two"],
        ackPolicy: AckPolicy.Explicit);

    // Re-issuing create for the same durable with a narrower filter set acts as an update.
    var updated = await fixture.CreateConsumerAsync("TEST", "DUR", null,
        filterSubjects: ["one"],
        ackPolicy: AckPolicy.Explicit);

    updated.Error.ShouldBeNull();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerActionsOnWorkQueuePolicyStream — jetstream_consumer_test.go:557
|
||||
// Tests consumer actions on a work queue policy stream.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_on_work_queue_stream()
{
    // Go: TestJetStreamConsumerActionsOnWorkQueuePolicyStream jetstream_consumer_test.go:557
    var config = new StreamConfig
    {
        Name = "TEST",
        Subjects = ["one", "two", "three", "four", "five.>"],
        Retention = RetentionPolicy.WorkQueue,
    };
    await using var fixture = await JetStreamApiFixture.StartWithStreamConfigAsync(config);

    var created = await fixture.CreateConsumerAsync("TEST", "DUR", null,
        filterSubjects: ["one", "two"],
        ackPolicy: AckPolicy.Explicit);

    created.Error.ShouldBeNull();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerPedanticMode — jetstream_consumer_test.go:1253
|
||||
// Consumer pedantic mode validates various configuration constraints.
|
||||
// We test the validation that exists in the .NET implementation.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_ephemeral_can_be_created()
{
    // Go: TestJetStreamConsumerPedanticMode jetstream_consumer_test.go:1253
    // Ephemeral (non-durable) consumers must be accepted.
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    var created = await fixture.CreateConsumerAsync("TEST", "EPH", null,
        filterSubjects: ["one"],
        ackPolicy: AckPolicy.Explicit,
        ephemeral: true);

    created.Error.ShouldBeNull();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerMultipleFiltersRemoveFilters — jetstream_consumer_test.go:45
|
||||
// Consumer with multiple filter subjects, then updating to fewer.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_multiple_filters_can_be_updated()
{
    // Go: TestJetStreamConsumerMultipleFiltersRemoveFilters jetstream_consumer_test.go:45
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    // Start with three filter subjects…
    var created = await fixture.CreateConsumerAsync("TEST", "CF", null,
        filterSubjects: ["one", "two", "three"]);
    created.Error.ShouldBeNull();

    // …then shrink down to a single one via an update.
    var updated = await fixture.CreateConsumerAsync("TEST", "CF", null,
        filterSubjects: ["one"]);
    updated.Error.ShouldBeNull();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerMultipleConsumersSingleFilter — jetstream_consumer_test.go:188
|
||||
// Multiple consumers each with a single filter on the same stream.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Multiple_consumers_each_with_single_filter()
{
    // Go: TestJetStreamConsumerMultipleConsumersSingleFilter jetstream_consumer_test.go:188
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    (await fixture.CreateConsumerAsync("TEST", "C1", "one")).Error.ShouldBeNull();
    (await fixture.CreateConsumerAsync("TEST", "C2", "two")).Error.ShouldBeNull();

    (await fixture.PublishAndGetAckAsync("one", "msg1")).ErrorCode.ShouldBeNull();
    (await fixture.PublishAndGetAckAsync("two", "msg2")).ErrorCode.ShouldBeNull();

    // Each consumer sees only messages matching its own filter subject.
    var fromOne = await fixture.FetchAsync("TEST", "C1", 10);
    fromOne.Messages.ShouldNotBeEmpty();
    fromOne.Messages.All(m => m.Subject == "one").ShouldBeTrue();

    var fromTwo = await fixture.FetchAsync("TEST", "C2", 10);
    fromTwo.Messages.ShouldNotBeEmpty();
    fromTwo.Messages.All(m => m.Subject == "two").ShouldBeTrue();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerMultipleConsumersMultipleFilters — jetstream_consumer_test.go:300
|
||||
// Multiple consumers with overlapping multiple filter subjects.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Multiple_consumers_with_multiple_filters()
{
    // Go: TestJetStreamConsumerMultipleConsumersMultipleFilters jetstream_consumer_test.go:300
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    var c1 = await fixture.CreateConsumerAsync("TEST", "C1", null, filterSubjects: ["one", "two"]);
    c1.Error.ShouldBeNull();
    var c2 = await fixture.CreateConsumerAsync("TEST", "C2", null, filterSubjects: ["two", "three"]);
    c2.Error.ShouldBeNull();

    await fixture.PublishAndGetAckAsync("one", "msg1");
    await fixture.PublishAndGetAckAsync("two", "msg2");
    await fixture.PublishAndGetAckAsync("three", "msg3");

    // Overlapping filters: C1 receives {one, two}; C2 receives {two, three}.
    (await fixture.FetchAsync("TEST", "C1", 10)).Messages.Count.ShouldBe(2);
    (await fixture.FetchAsync("TEST", "C2", 10)).Messages.Count.ShouldBe(2);
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerMultipleFiltersSequence — jetstream_consumer_test.go:426
|
||||
// Verifies sequence ordering with multiple filter subjects.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Multiple_filters_preserve_sequence_order()
{
    // Go: TestJetStreamConsumerMultipleFiltersSequence jetstream_consumer_test.go:426
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    await fixture.CreateConsumerAsync("TEST", "CF", null, filterSubjects: ["one", "two"]);

    await fixture.PublishAndGetAckAsync("one", "msg1");
    await fixture.PublishAndGetAckAsync("two", "msg2");
    await fixture.PublishAndGetAckAsync("one", "msg3");

    var batch = await fixture.FetchAsync("TEST", "CF", 10);
    batch.Messages.Count.ShouldBe(3);

    // Sequences must be strictly increasing across the interleaved filter subjects.
    var sequences = batch.Messages.Select(m => m.Sequence).ToList();
    for (int i = 1; i < sequences.Count; i++)
        sequences[i].ShouldBeGreaterThan(sequences[i - 1]);
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerPinned — jetstream_consumer_test.go:1545
|
||||
// Priority group registration and active consumer selection.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public void PriorityGroup_pinned_consumer_gets_messages()
{
    // Go: TestJetStreamConsumerPinned jetstream_consumer_test.go:1545
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 1);
    groups.Register("group1", "C2", priority: 2);

    // The consumer with the lowest priority number wins the pin.
    groups.IsActive("group1", "C1").ShouldBeTrue();
    groups.IsActive("group1", "C2").ShouldBeFalse();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerPinnedUnsetsAfterAtMostPinnedTTL — jetstream_consumer_test.go:1711
|
||||
// When the pinned consumer disconnects, the next one takes over.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public void PriorityGroup_pinned_unsets_on_disconnect()
{
    // Go: TestJetStreamConsumerPinnedUnsetsAfterAtMostPinnedTTL jetstream_consumer_test.go:1711
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 1);
    groups.Register("group1", "C2", priority: 2);

    groups.IsActive("group1", "C1").ShouldBeTrue();

    // Dropping the pinned consumer (simulated disconnect) promotes the next one.
    groups.Unregister("group1", "C1");

    groups.IsActive("group1", "C2").ShouldBeTrue();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerPinnedUnsubscribeOnPinned — jetstream_consumer_test.go:1802
|
||||
// Unsubscribing the pinned consumer causes failover.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public void PriorityGroup_unsubscribe_pinned_causes_failover()
{
    // Go: TestJetStreamConsumerPinnedUnsubscribeOnPinned jetstream_consumer_test.go:1802
    var groups = new PriorityGroupManager();
    for (int p = 1; p <= 3; p++)
        groups.Register("group1", $"C{p}", priority: p);

    // Removing the active consumer promotes the next-lowest priority each time.
    groups.GetActiveConsumer("group1").ShouldBe("C1");

    groups.Unregister("group1", "C1");
    groups.GetActiveConsumer("group1").ShouldBe("C2");

    groups.Unregister("group1", "C2");
    groups.GetActiveConsumer("group1").ShouldBe("C3");
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerUnpinPickDifferentRequest — jetstream_consumer_test.go:1973
|
||||
// When unpin is called, the next request goes to a different consumer.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public void PriorityGroup_unpin_picks_different_consumer()
{
    // Go: TestJetStreamConsumerUnpinPickDifferentRequest jetstream_consumer_test.go:1973
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 1);
    groups.Register("group1", "C2", priority: 2);

    groups.GetActiveConsumer("group1").ShouldBe("C1");

    // Re-adding C1 behind C2 in priority order means C2 now wins the pin.
    groups.Unregister("group1", "C1");
    groups.Register("group1", "C1", priority: 3);

    groups.GetActiveConsumer("group1").ShouldBe("C2");
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerPinnedTTL — jetstream_consumer_test.go:2067
|
||||
// Priority group TTL behavior.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public void PriorityGroup_registration_updates_priority()
{
    // Go: TestJetStreamConsumerPinnedTTL jetstream_consumer_test.go:2067
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 5);
    groups.Register("group1", "C2", priority: 1);

    groups.GetActiveConsumer("group1").ShouldBe("C2");

    // Re-registering C1 with the new lowest priority steals the pin back.
    groups.Register("group1", "C1", priority: 0);

    groups.GetActiveConsumer("group1").ShouldBe("C1");
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerWithPriorityGroups — jetstream_consumer_test.go:2246
|
||||
// End-to-end test of priority groups with consumers.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public void PriorityGroup_multiple_groups_independent()
{
    // Go: TestJetStreamConsumerWithPriorityGroups jetstream_consumer_test.go:2246
    var groups = new PriorityGroupManager();
    groups.Register("groupA", "C1", priority: 1);
    groups.Register("groupA", "C2", priority: 2);
    groups.Register("groupB", "C3", priority: 1);
    groups.Register("groupB", "C4", priority: 2);

    groups.GetActiveConsumer("groupA").ShouldBe("C1");
    groups.GetActiveConsumer("groupB").ShouldBe("C3");

    // Failover in one group must not disturb the other.
    groups.Unregister("groupA", "C1");
    groups.GetActiveConsumer("groupA").ShouldBe("C2");
    groups.GetActiveConsumer("groupB").ShouldBe("C3");
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerOverflow — jetstream_consumer_test.go:2434
|
||||
// Consumer overflow handling when max_ack_pending is reached.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_overflow_with_max_ack_pending()
{
    // Go: TestJetStreamConsumerOverflow jetstream_consumer_test.go:2434
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    var created = await fixture.CreateConsumerAsync("TEST", "OVER", "test.>",
        ackPolicy: AckPolicy.Explicit,
        maxAckPending: 2);
    created.Error.ShouldBeNull();

    for (int i = 0; i < 5; i++)
        await fixture.PublishAndGetAckAsync($"test.{i}", $"msg{i}");

    // Delivery is capped by max_ack_pending. PullConsumerEngine checks the cap
    // *after* adding each message, so up to MaxAckPending + 1 messages come back.
    var batch = await fixture.FetchAsync("TEST", "OVER", 10);
    batch.Messages.Count.ShouldBeGreaterThan(0);
    batch.Messages.Count.ShouldBeLessThanOrEqualTo(3); // MaxAckPending(2) + 1
}
|
||||
|
||||
// =========================================================================
|
||||
// TestPriorityGroupNameRegex — jetstream_consumer_test.go:2584
|
||||
// Validates the regex for priority group names.
|
||||
// Already tested in ClientProtocolGoParityTests; additional coverage here.
|
||||
// =========================================================================
|
||||
|
||||
// Go regex: ^[a-zA-Z0-9/_=-]{1,16}$ — re-anchored with \A/\z here because Go's `$`
// (no multiline flag) anchors at end-of-text, while .NET's `$` also matches just
// before a trailing '\n' (so e.g. "A\n" would wrongly pass). Cached as a compiled
// static so the Theory does not rebuild the regex on every data row.
private static readonly Regex PriorityGroupNamePattern =
    new(@"\A[a-zA-Z0-9/_=\-]{1,16}\z", RegexOptions.Compiled);

[Theory]
[InlineData("A", true)]
[InlineData("group/consumer=A", true)]
[InlineData("abc-def_123", true)]
[InlineData("", false)]
[InlineData("A B", false)]
[InlineData("A\tB", false)]
[InlineData("group-name-that-is-too-long", false)]
[InlineData("\r\n", false)]
public void PriorityGroupNameRegex_consumer_test_parity(string group, bool expected)
{
    // Go: TestPriorityGroupNameRegex jetstream_consumer_test.go:2584
    PriorityGroupNamePattern.IsMatch(group).ShouldBe(expected);
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerRetryAckAfterTimeout — jetstream_consumer_test.go:2734
|
||||
// Retrying an ack after timeout should not error. Tests the ack processor.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_retry_ack_after_timeout_succeeds()
{
    // Go: TestJetStreamConsumerRetryAckAfterTimeout jetstream_consumer_test.go:2734
    await using var fixture = await JetStreamApiFixture.StartWithAckExplicitConsumerAsync(ackWaitMs: 500);

    await fixture.PublishAndGetAckAsync("orders.created", "order-1");

    var batch = await fixture.FetchAsync("ORDERS", "PULL", 1);
    batch.Messages.Count.ShouldBe(1);

    // Consumer info must remain retrievable after the fetch; the ack/retry path
    // itself is exercised by the fixture's explicit-ack consumer setup.
    var info = await fixture.GetConsumerInfoAsync("ORDERS", "PULL");
    info.ShouldNotBeNull();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerAndStreamDescriptions — jetstream_consumer_test.go:3073
|
||||
// Streams and consumers can have description metadata.
|
||||
// StreamConfig.Description not yet implemented in .NET; test stream creation instead.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_and_stream_info_available()
{
    // Go: TestJetStreamConsumerAndStreamDescriptions jetstream_consumer_test.go:3073
    // StreamConfig has no Description property in .NET yet; validate basic
    // stream/consumer info retrieval instead.
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("foo", "foo.>");

    var streamInfo = await fixture.RequestLocalAsync("$JS.API.STREAM.INFO.foo", "{}");
    streamInfo.Error.ShouldBeNull();
    streamInfo.StreamInfo!.Config.Name.ShouldBe("foo");

    var consumer = await fixture.CreateConsumerAsync("foo", "analytics", "foo.>");
    consumer.Error.ShouldBeNull();
    consumer.ConsumerInfo.ShouldNotBeNull();
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerSingleTokenSubject — jetstream_consumer_test.go:3172
|
||||
// Consumer with a single-token filter subject works correctly.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_single_token_subject()
{
    // Go: TestJetStreamConsumerSingleTokenSubject jetstream_consumer_test.go:3172
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    (await fixture.CreateConsumerAsync("TEST", "STS", "orders")).Error.ShouldBeNull();

    await fixture.PublishAndGetAckAsync("orders", "single-token-msg");

    // A single-token filter subject must deliver exactly the matching message.
    var batch = await fixture.FetchAsync("TEST", "STS", 10);
    batch.Messages.Count.ShouldBe(1);
    batch.Messages[0].Subject.ShouldBe("orders");
}
|
||||
|
||||
// =========================================================================
|
||||
// TestJetStreamConsumerMultipleFiltersLastPerSubject — jetstream_consumer_test.go:768
|
||||
// Consumer with DeliverPolicy.LastPerSubject and multiple filters.
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public async Task Consumer_multiple_filters_deliver_last_per_subject()
{
    // Go: TestJetStreamConsumerMultipleFiltersLastPerSubject jetstream_consumer_test.go:768
    await using var fixture = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    // Two messages per subject so "deliver last" has something to skip.
    await fixture.PublishAndGetAckAsync("one", "first-1");
    await fixture.PublishAndGetAckAsync("two", "first-2");
    await fixture.PublishAndGetAckAsync("one", "second-1");
    await fixture.PublishAndGetAckAsync("two", "second-2");

    var created = await fixture.CreateConsumerAsync("TEST", "LP", null,
        filterSubjects: ["one", "two"],
        deliverPolicy: DeliverPolicy.Last);
    created.Error.ShouldBeNull();

    // With deliver-last, at least the latest message is delivered.
    var batch = await fixture.FetchAsync("TEST", "LP", 10);
    batch.Messages.ShouldNotBeEmpty();
}
|
||||
|
||||
// =========================================================================
|
||||
// Subject wildcard matching — additional parity tests
|
||||
// =========================================================================
|
||||
|
||||
[Theory]
[InlineData("foo.bar", "foo.bar", true)]
[InlineData("foo.bar", "foo.*", true)]
[InlineData("foo.bar", "foo.>", true)]
[InlineData("foo.bar.baz", "foo.>", true)]
[InlineData("foo.bar.baz", "foo.*", false)]
[InlineData("foo.bar.baz", "foo.*.baz", true)]
[InlineData("foo.bar.baz", "foo.*.>", true)]
[InlineData("bar.foo", "foo.*", false)]
public void SubjectMatch_wildcard_matching(string literal, string pattern, bool expected)
{
    // Exercises the wildcard semantics SubjectMatch.MatchLiteral provides for
    // consumer filtering ('*' is a single-token wildcard, '>' a tail wildcard).
    var matched = SubjectMatch.MatchLiteral(literal, pattern);

    matched.ShouldBe(expected);
}
|
||||
|
||||
// =========================================================================
|
||||
// CompiledFilter from ConsumerConfig
|
||||
// =========================================================================
|
||||
|
||||
[Fact]
public void CompiledFilter_from_consumer_config_works()
{
    // A filter compiled from a multi-filter consumer config must accept any
    // subject matching either pattern and reject everything else.
    var cfg = new ConsumerConfig
    {
        DurableName = "test",
        FilterSubjects = ["orders.*", "payments.>"],
    };

    var compiled = CompiledFilter.FromConfig(cfg);

    // "orders.*" — exactly one trailing token.
    compiled.Matches("orders.created").ShouldBeTrue();
    compiled.Matches("orders.updated").ShouldBeTrue();
    // "payments.>" — one or more trailing tokens.
    compiled.Matches("payments.settled").ShouldBeTrue();
    compiled.Matches("payments.a.b.c").ShouldBeTrue();
    // Unrelated subject tree.
    compiled.Matches("shipments.sent").ShouldBeFalse();
}
|
||||
|
||||
[Fact]
public void CompiledFilter_empty_matches_all()
{
    // With neither FilterSubject nor FilterSubjects configured, the compiled
    // filter is a match-everything filter.
    var compiled = CompiledFilter.FromConfig(new ConsumerConfig { DurableName = "test" });

    compiled.Matches("any.subject.here").ShouldBeTrue();
}
|
||||
|
||||
[Fact]
public void CompiledFilter_single_filter()
{
    // A single FilterSubject with a ">" tail admits any subject under that
    // prefix and nothing outside it.
    var cfg = new ConsumerConfig
    {
        DurableName = "test",
        FilterSubject = "orders.>",
    };

    var compiled = CompiledFilter.FromConfig(cfg);

    compiled.Matches("orders.created").ShouldBeTrue();
    compiled.Matches("payments.settled").ShouldBeFalse();
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,103 @@
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for consumer pause/resume with auto-resume timer.
|
||||
/// Go reference: consumer.go (pause/resume).
|
||||
/// </summary>
|
||||
public class ConsumerPauseResumeTests
{
    // Builds a manager that already owns a durable consumer on the given stream.
    private static ConsumerManager ManagerWith(string stream, string durable)
    {
        var manager = new ConsumerManager();
        manager.CreateOrUpdate(stream, new ConsumerConfig { DurableName = durable });
        return manager;
    }

    [Fact]
    public void Pause_with_deadline_sets_paused()
    {
        var manager = ManagerWith("test-stream", "test-consumer");
        var deadline = DateTime.UtcNow.AddSeconds(5);

        manager.Pause("test-stream", "test-consumer", deadline);

        manager.IsPaused("test-stream", "test-consumer").ShouldBeTrue();
        manager.GetPauseUntil("test-stream", "test-consumer").ShouldBe(deadline);
    }

    [Fact]
    public void Resume_clears_pause()
    {
        var manager = ManagerWith("test-stream", "test-consumer");
        manager.Pause("test-stream", "test-consumer", DateTime.UtcNow.AddSeconds(5));

        manager.Resume("test-stream", "test-consumer");

        manager.IsPaused("test-stream", "test-consumer").ShouldBeFalse();
        manager.GetPauseUntil("test-stream", "test-consumer").ShouldBeNull();
    }

    [Fact]
    public async Task Pause_auto_resumes_after_deadline()
    {
        var manager = ManagerWith("test-stream", "test-consumer");

        // Synchronise on the timer callback itself rather than sleeping blindly.
        using var autoResumed = new SemaphoreSlim(0, 1);
        manager.OnAutoResumed += (_, _) => autoResumed.Release();

        manager.Pause("test-stream", "test-consumer", DateTime.UtcNow.AddMilliseconds(100));

        var fired = await autoResumed.WaitAsync(TimeSpan.FromSeconds(5));
        fired.ShouldBeTrue("auto-resume timer did not fire within 5 seconds");

        manager.IsPaused("test-stream", "test-consumer").ShouldBeFalse();
    }

    [Fact]
    public void IsPaused_returns_false_for_unknown_consumer()
        => new ConsumerManager().IsPaused("unknown", "unknown").ShouldBeFalse();

    [Fact]
    public void GetPauseUntil_returns_null_for_unknown_consumer()
        => new ConsumerManager().GetPauseUntil("unknown", "unknown").ShouldBeNull();

    [Fact]
    public void Resume_returns_false_for_unknown_consumer()
        => new ConsumerManager().Resume("unknown", "unknown").ShouldBeFalse();

    [Fact]
    public void Pause_returns_false_for_unknown_consumer()
        => new ConsumerManager()
            .Pause("unknown", "unknown", DateTime.UtcNow.AddSeconds(5))
            .ShouldBeFalse();

    [Fact]
    public void IsPaused_auto_resumes_expired_deadline()
    {
        var manager = ManagerWith("test-stream", "c1");

        // Deadline already in the past: the next IsPaused query must observe
        // the expiry and auto-resume the consumer.
        manager.Pause("test-stream", "c1", DateTime.UtcNow.AddMilliseconds(-100));

        manager.IsPaused("test-stream", "c1").ShouldBeFalse();
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,244 @@
|
||||
// Go reference: consumer.go:4241 (processResetReq)
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using System.Text;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for consumer reset-to-sequence (Gap 3.12) and AckProcessor.ClearAll / SetAckFloor.
|
||||
/// Go reference: consumer.go:4241 processResetReq.
|
||||
/// </summary>
|
||||
public class ConsumerResetTests
{
    // Builds a manager that already owns the given durable consumer, applying
    // any extra config mutation before creation.
    private static ConsumerManager ManagerWith(
        string stream, string durable, Action<ConsumerConfig>? mutate = null)
    {
        var manager = new ConsumerManager();
        var config = new ConsumerConfig { DurableName = durable };
        mutate?.Invoke(config);
        manager.CreateOrUpdate(stream, config);
        return manager;
    }

    // ---- ResetToSequence ----------------------------------------------------

    // Go: consumer.go:4241 — processResetReq repositions the consumer so the
    // next fetch starts at the requested sequence.
    [Fact]
    public void ResetToSequence_updates_next_sequence()
    {
        var manager = ManagerWith("ORDERS", "oc1");
        manager.TryGet("ORDERS", "oc1", out var handle);
        handle.NextSequence = 10; // advance so the reset is observable

        manager.ResetToSequence("ORDERS", "oc1", 5);

        manager.TryGet("ORDERS", "oc1", out var reread);
        reread.NextSequence.ShouldBe(5UL);
    }

    // Go: consumer.go:4241 — stale ack tokens from before the reset must not
    // be accepted, so the pending ack map is cleared.
    [Fact]
    public void ResetToSequence_clears_pending_acks()
    {
        var manager = ManagerWith("ORDERS", "oc2");
        manager.TryGet("ORDERS", "oc2", out var handle);
        handle.AckProcessor.Register(3, ackWaitMs: 5000);
        handle.AckProcessor.Register(7, ackWaitMs: 5000);
        handle.AckProcessor.PendingCount.ShouldBe(2);

        manager.ResetToSequence("ORDERS", "oc2", 1);

        handle.AckProcessor.PendingCount.ShouldBe(0);
    }

    // Go: consumer.go:4241 — pendingBytes is zeroed so the idle heartbeat
    // header is correct after the reset.
    [Fact]
    public void ResetToSequence_clears_pending_bytes()
    {
        var manager = ManagerWith("ORDERS", "oc3");
        manager.TryGet("ORDERS", "oc3", out var handle);
        handle.PendingBytes = 12345;

        manager.ResetToSequence("ORDERS", "oc3", 1);

        handle.PendingBytes.ShouldBe(0L);
    }

    // Go: consumer.go:4241 — unknown stream/durable combinations report false.
    [Fact]
    public void ResetToSequence_returns_false_for_missing_consumer()
        => new ConsumerManager().ResetToSequence("NO-STREAM", "NO-CONSUMER", 1).ShouldBeFalse();

    // Go: consumer.go:4241 — an applied reset reports true.
    [Fact]
    public void ResetToSequence_returns_true_for_existing_consumer()
        => ManagerWith("ORDERS", "oc4").ResetToSequence("ORDERS", "oc4", 42).ShouldBeTrue();

    // Go: consumer.go:4241 — only positional/tracking state is cleared; the
    // consumer config (filters, ack policy, ...) is untouched by a reset.
    [Fact]
    public void ResetToSequence_preserves_config()
    {
        var manager = ManagerWith("ORDERS", "oc5", c =>
        {
            c.FilterSubject = "orders.>";
            c.AckPolicy = AckPolicy.Explicit;
        });

        manager.ResetToSequence("ORDERS", "oc5", 1);

        manager.TryGet("ORDERS", "oc5", out var handle);
        handle.Config.FilterSubject.ShouldBe("orders.>");
        handle.Config.AckPolicy.ShouldBe(AckPolicy.Explicit);
    }

    // Go: consumer.go:4241 — after a reset the push engine can re-enqueue
    // messages starting at the reset sequence.
    [Fact]
    public void ResetToSequence_allows_re_delivery_from_sequence()
    {
        var manager = ManagerWith("ORDERS", "oc6", c =>
        {
            c.Push = true;
            c.DeliverSubject = "deliver.test";
        });
        manager.TryGet("ORDERS", "oc6", out var handle);
        handle.NextSequence = 50;

        manager.ResetToSequence("ORDERS", "oc6", 10);

        // The consumer now reads from sequence 10.
        handle.NextSequence.ShouldBe(10UL);

        // Simulate a message re-arriving at the reset sequence via OnPublished.
        manager.OnPublished("ORDERS", new StoredMessage
        {
            Sequence = 10,
            Subject = "orders.new",
            Payload = Encoding.UTF8.GetBytes("data"),
            TimestampUtc = DateTime.UtcNow,
        });

        // The message must land in the push frame queue.
        handle.PushFrames.Count.ShouldBeGreaterThan(0);
    }

    // ---- AckProcessor.ClearAll ----------------------------------------------

    // Go: consumer.go processResetReq — pending ack map cleared.
    [Fact]
    public void ClearAll_clears_pending()
    {
        var processor = new AckProcessor();
        foreach (var seq in new ulong[] { 1, 2, 3 })
            processor.Register(seq, ackWaitMs: 5000);
        processor.PendingCount.ShouldBe(3);

        processor.ClearAll();

        processor.PendingCount.ShouldBe(0);
    }

    // Go: consumer.go processResetReq — terminated set cleared.
    [Fact]
    public void ClearAll_clears_terminated()
    {
        var processor = new AckProcessor();
        processor.Register(1, ackWaitMs: 5000);
        processor.Register(2, ackWaitMs: 5000);
        processor.ProcessTerm(1);
        processor.ProcessTerm(2);
        processor.TerminatedCount.ShouldBe(2);

        processor.ClearAll();

        processor.TerminatedCount.ShouldBe(0);
    }

    // Go: consumer.go processResetReq — ack floor reset to 0.
    [Fact]
    public void ClearAll_resets_ack_floor()
    {
        var processor = new AckProcessor();
        processor.Register(1, ackWaitMs: 5000);
        processor.Register(2, ackWaitMs: 5000);
        processor.AckSequence(1);
        processor.AckSequence(2);
        processor.AckFloor.ShouldBeGreaterThan(0UL);

        processor.ClearAll();

        processor.AckFloor.ShouldBe(0UL);
    }

    // ---- AckProcessor.SetAckFloor -------------------------------------------

    // Go: consumer.go processResetReq — the floor can be pinned to a specific
    // sequence to reflect post-reset stream state.
    [Fact]
    public void SetAckFloor_updates_floor()
    {
        var processor = new AckProcessor();

        processor.SetAckFloor(99);

        processor.AckFloor.ShouldBe(99UL);
    }

    // Go: consumer.go processResetReq — pending sequences at or below the new
    // floor were delivered before it and must be pruned to avoid ghost acks.
    [Fact]
    public void SetAckFloor_removes_entries_below_floor()
    {
        var processor = new AckProcessor();
        foreach (var seq in new ulong[] { 1, 2, 5, 10 })
            processor.Register(seq, ackWaitMs: 5000);
        processor.PendingCount.ShouldBe(4);

        processor.SetAckFloor(5);

        // 1, 2 and 5 are <= the floor and removed; only 10 survives.
        processor.PendingCount.ShouldBe(1);
        processor.HasPending.ShouldBeTrue();
    }
}
|
||||
@@ -0,0 +1,207 @@
|
||||
// Go: consumer.go hasDeliveryInterest, deleteNotActive
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
public class DeliveryInterestTests
{
    // Go: consumer.go hasDeliveryInterest — at least one subscriber on the
    // deliver subject means interest exists.
    [Fact]
    public void HasInterest_true_after_subscribe()
    {
        var interest = new DeliveryInterestTracker();

        interest.OnSubscribe();

        interest.HasInterest.ShouldBeTrue();
    }

    // Go: consumer.go — a freshly created tracker has no delivery subscribers.
    [Fact]
    public void HasInterest_false_initially()
        => new DeliveryInterestTracker().HasInterest.ShouldBeFalse();

    // Go: consumer.go hasDeliveryInterest — interest disappears once the
    // subscription count returns to zero.
    [Fact]
    public void HasInterest_false_after_all_unsubscribe()
    {
        var interest = new DeliveryInterestTracker();

        interest.OnSubscribe();
        interest.OnSubscribe();
        interest.OnUnsubscribe();
        interest.OnUnsubscribe();

        interest.HasInterest.ShouldBeFalse();
    }

    // Go: consumer.go — the count must mirror the exact number of active
    // push-consumer delivery subscriptions.
    [Fact]
    public void SubscriberCount_tracks_multiple_subscribers()
    {
        var interest = new DeliveryInterestTracker();
        interest.SubscriberCount.ShouldBe(0);

        interest.OnSubscribe();
        interest.SubscriberCount.ShouldBe(1);

        interest.OnSubscribe();
        interest.SubscriberCount.ShouldBe(2);

        interest.OnSubscribe();
        interest.SubscriberCount.ShouldBe(3);

        interest.OnUnsubscribe();
        interest.SubscriberCount.ShouldBe(2);
    }

    // Go: consumer.go deleteNotActive — stray unsubscribe events must not push
    // the count negative and corrupt later interest checks.
    [Fact]
    public void OnUnsubscribe_floors_at_zero()
    {
        var interest = new DeliveryInterestTracker();

        interest.OnUnsubscribe();
        interest.OnUnsubscribe();

        interest.SubscriberCount.ShouldBe(0);
        interest.HasInterest.ShouldBeFalse();
    }

    // Go: consumer.go deleteNotActive — ephemeral cleanup is only eligible
    // while no active subscribers remain.
    [Fact]
    public void ShouldDelete_false_when_has_interest()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(1));

        interest.OnSubscribe();

        interest.ShouldDelete.ShouldBeFalse();
    }

    // Go: consumer.go deleteNotActive — the inactive timeout must fully elapse
    // before the consumer becomes eligible for deletion.
    [Fact]
    public void ShouldDelete_false_immediately_after_unsubscribe()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromSeconds(30));

        interest.OnSubscribe();
        interest.OnUnsubscribe();

        // Checked immediately — the 30s window has not elapsed.
        interest.ShouldDelete.ShouldBeFalse();
    }

    // Go: consumer.go deleteNotActive — once the inactive threshold passes
    // with zero subscribers, the ephemeral consumer is scheduled for removal.
    [SlopwatchSuppress("SW004", "Intentional timeout test: ShouldDelete requires real wall-clock elapsed time to observe the inactive threshold firing; no synchronisation primitive can replace this")]
    [Fact]
    public async Task ShouldDelete_true_after_timeout()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(50));
        interest.OnSubscribe();
        interest.OnUnsubscribe();

        await Task.Delay(100); // let the 50ms inactive window elapse

        interest.ShouldDelete.ShouldBeTrue();
    }

    // Go: consumer.go — Reset reinitialises tracking when a consumer is
    // re-attached or recreated, clearing both count and inactivity timer.
    [SlopwatchSuppress("SW004", "Intentional timeout test: must let the inactive threshold elapse to confirm Reset clears the inactivity timer; no synchronisation primitive can replace this")]
    [Fact]
    public async Task Reset_clears_all_state()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(50));
        interest.OnSubscribe();
        interest.OnSubscribe();
        interest.OnUnsubscribe();
        interest.OnUnsubscribe();
        await Task.Delay(100); // inactive window elapses before the reset

        interest.Reset();

        interest.SubscriberCount.ShouldBe(0);
        interest.HasInterest.ShouldBeFalse();
        interest.ShouldDelete.ShouldBeFalse(); // inactivity timer cleared too
    }

    // Go: consumer.go hasDeliveryInterest — a re-subscription resets the
    // inactive-since timestamp, preventing spurious cleanup.
    [SlopwatchSuppress("SW004", "Intentional timeout test: must let the original inactive window pass after re-subscribe to confirm the inactivity timer was cleared; no synchronisation primitive can replace this")]
    [Fact]
    public async Task Subscribe_clears_inactivity_timer()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(50));
        interest.OnSubscribe();
        interest.OnUnsubscribe();

        // Re-subscribe before the timeout elapses.
        interest.OnSubscribe();

        await Task.Delay(100); // original timeout window passes

        // Interest exists and the timer was reset: no deletion.
        interest.HasInterest.ShouldBeTrue();
        interest.ShouldDelete.ShouldBeFalse();
    }
}
|
||||
@@ -0,0 +1,427 @@
|
||||
// Go: consumer.go:1400 (loopAndGatherMsgs) — gather loop polls the store for new messages,
|
||||
// dispatches them through the send delegate, respects filter subjects,
|
||||
// advances NextSequence, handles deleted/null entries, and exits on cancellation.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
public class DeliveryLoopTests
|
||||
{
|
||||
// -----------------------------------------------------------------------
|
||||
// Helpers
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
// Builds a handle for a consumer attached to the fixed test stream.
private static ConsumerHandle MakeConsumer(ConsumerConfig config)
{
    return new ConsumerHandle("TEST-STREAM", config);
}
|
||||
|
||||
/// <summary>
/// Returns a send delegate that records each delivered message's original
/// subject into <paramref name="deliveredOriginalSubjects"/> (guarded by the
/// list itself as the lock) and releases <paramref name="sem"/> once per delivery.
/// </summary>
private static Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask>
    MakeSemaphoreSend(List<string> deliveredOriginalSubjects, SemaphoreSlim sem)
{
    return (_, originalSubject, _, _, _) =>
    {
        lock (deliveredOriginalSubjects)
        {
            deliveredOriginalSubjects.Add(originalSubject);
        }

        sem.Release();
        return ValueTask.CompletedTask;
    };
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 1 — GatherLoop polls store for new messages
|
||||
//
|
||||
// Go reference: consumer.go:1560 — poll store for messages from nextSeq to LastSeq.
|
||||
// Three messages appended before the loop starts; loop must dispatch all three.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
public async Task GatherLoop_polls_store_for_new_messages()
{
    // Go: consumer.go:1560 — the loop polls the store from nextSeq to LastSeq.
    // Three messages appended before the loop starts must all be dispatched.
    var store = new MemStore();
    await store.AppendAsync("foo", "msg1"u8.ToArray(), default);
    await store.AppendAsync("foo", "msg2"u8.ToArray(), default);
    await store.AppendAsync("foo", "msg3"u8.ToArray(), default);

    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "POLL" });
    var subjects = new List<string>();
    var deliveries = new SemaphoreSlim(0);

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, MakeSemaphoreSend(subjects, deliveries), cts.Token);

    // One semaphore release per dispatched message — wait for all three.
    for (var i = 0; i < 3; i++)
        await deliveries.WaitAsync(cts.Token);

    engine.StopGatherLoop();

    lock (subjects)
    {
        subjects.Count.ShouldBe(3);
    }
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 2 — GatherLoop respects FilterSubject
|
||||
//
|
||||
// Go reference: consumer.go:1569 — ShouldDeliver skips messages whose subject
|
||||
// does not match cfg.FilterSubject.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
public async Task GatherLoop_respects_filter_subject()
{
    // Go: consumer.go:1569 — ShouldDeliver skips messages whose subject does
    // not match cfg.FilterSubject; only the two "orders" messages qualify.
    var store = new MemStore();
    await store.AppendAsync("orders.us", "o1"u8.ToArray(), default);
    await store.AppendAsync("events.x", "e1"u8.ToArray(), default);
    await store.AppendAsync("orders.eu", "o2"u8.ToArray(), default);

    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FILTERED",
        FilterSubject = "orders.>",
    });

    var subjects = new List<string>();
    var deliveries = new SemaphoreSlim(0);

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, MakeSemaphoreSend(subjects, deliveries), cts.Token);

    // Wait for exactly the two matching deliveries.
    for (var i = 0; i < 2; i++)
        await deliveries.WaitAsync(cts.Token);

    engine.StopGatherLoop();

    lock (subjects)
    {
        subjects.Count.ShouldBe(2);
        subjects.ShouldContain("orders.us");
        subjects.ShouldContain("orders.eu");
        subjects.ShouldNotContain("events.x");
    }
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 3 — ShouldDeliver with no filter delivers all subjects
|
||||
//
|
||||
// Go reference: consumer.go — empty FilterSubject + empty FilterSubjects → deliver all.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
public void ShouldDeliver_with_no_filter_delivers_all()
{
    // Go: consumer.go — empty FilterSubject and empty FilterSubjects means
    // every subject is deliverable.
    var cfg = new ConsumerConfig { DurableName = "ANY" };

    foreach (var subject in new[] { "orders.new", "events.x", "telemetry.cpu.host1" })
        PushConsumerEngine.ShouldDeliverPublic(cfg, subject).ShouldBeTrue();
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 4 — ShouldDeliver with single FilterSubject
|
||||
//
|
||||
// Go reference: consumer.go — FilterSubject is matched via SubjectMatch.MatchLiteral.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
public void ShouldDeliver_with_single_filter()
{
    // Go: consumer.go — a literal FilterSubject admits only that exact
    // subject (matched via SubjectMatch.MatchLiteral).
    var cfg = new ConsumerConfig
    {
        DurableName = "SINGLE",
        FilterSubject = "orders.us",
    };

    PushConsumerEngine.ShouldDeliverPublic(cfg, "orders.us").ShouldBeTrue();
    foreach (var other in new[] { "orders.eu", "events.x" })
        PushConsumerEngine.ShouldDeliverPublic(cfg, other).ShouldBeFalse();
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 5 — ShouldDeliver with multiple filters (FilterSubjects list)
|
||||
//
|
||||
// Go reference: consumer.go — FilterSubjects: any match → deliver.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
public void ShouldDeliver_with_multiple_filters()
{
    // Go: consumer.go — with a FilterSubjects list, any single match is
    // enough to deliver.
    var cfg = new ConsumerConfig
    {
        DurableName = "MULTI",
        FilterSubjects = ["orders.us", "events.created"],
    };

    foreach (var matching in new[] { "orders.us", "events.created" })
        PushConsumerEngine.ShouldDeliverPublic(cfg, matching).ShouldBeTrue();
    foreach (var nonMatching in new[] { "orders.eu", "events.deleted" })
        PushConsumerEngine.ShouldDeliverPublic(cfg, nonMatching).ShouldBeFalse();
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 6 — ShouldDeliver with wildcard filter
|
||||
//
|
||||
// Go reference: consumer.go — wildcard matching via SubjectMatch.MatchLiteral.
|
||||
// "orders.*" matches one-token suffix; "orders.us.new" has two suffix tokens
|
||||
// so it does not match.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
|
||||
public void ShouldDeliver_with_wildcard_filter()
|
||||
{
|
||||
var config = new ConsumerConfig
|
||||
{
|
||||
DurableName = "WILDCARD",
|
||||
FilterSubject = "orders.*",
|
||||
};
|
||||
|
||||
PushConsumerEngine.ShouldDeliverPublic(config, "orders.new").ShouldBeTrue();
|
||||
PushConsumerEngine.ShouldDeliverPublic(config, "orders.old").ShouldBeTrue();
|
||||
PushConsumerEngine.ShouldDeliverPublic(config, "orders.us.new").ShouldBeFalse();
|
||||
PushConsumerEngine.ShouldDeliverPublic(config, "events.x").ShouldBeFalse();
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 7 — Signal(NewMessage) wakes the gather loop
|
||||
//
|
||||
// Go reference: consumer.go:1620 — channel send wakes the loop so it does
|
||||
// not have to wait the full 250ms poll timeout.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
|
||||
public async Task GatherLoop_signal_wakes_loop()
|
||||
{
|
||||
var store = new MemStore();
|
||||
var consumer = MakeConsumer(new ConsumerConfig { DurableName = "SIGNAL" });
|
||||
|
||||
// loopStarted is released once the loop has begun its first wait cycle —
|
||||
// we detect this by waiting until NextSequence has been set to 1 (the loop
|
||||
// initialises it on entry) and the store is still empty.
|
||||
var deliveredSem = new SemaphoreSlim(0, 1);
|
||||
var delivered = new List<string>();
|
||||
|
||||
Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
|
||||
(_, origSubj, _, _, _) =>
|
||||
{
|
||||
lock (delivered) delivered.Add(origSubj);
|
||||
deliveredSem.Release();
|
||||
return ValueTask.CompletedTask;
|
||||
};
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
var engine = new PushConsumerEngine();
|
||||
engine.StartGatherLoop(consumer, store, send, cts.Token);
|
||||
|
||||
// Spin (yield only) until the loop has entered its first 250ms wait, which we
|
||||
// infer from GatheredCount staying at 0 while the loop is running.
|
||||
// We yield the thread without sleeping to avoid SW004.
|
||||
var spins = 0;
|
||||
while (engine.GatheredCount == 0 && spins < 5_000)
|
||||
{
|
||||
await Task.Yield();
|
||||
spins++;
|
||||
}
|
||||
|
||||
// Append a message and signal — delivery should arrive well before 500ms
|
||||
await store.AppendAsync("foo", "hello"u8.ToArray(), default);
|
||||
engine.Signal(ConsumerSignal.NewMessage);
|
||||
|
||||
var received = await deliveredSem.WaitAsync(TimeSpan.FromMilliseconds(500), cts.Token);
|
||||
engine.StopGatherLoop();
|
||||
|
||||
received.ShouldBeTrue("expected delivery within 500ms after Signal(NewMessage)");
|
||||
lock (delivered) delivered.Count.ShouldBeGreaterThanOrEqualTo(1);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 8 — GatherLoop advances NextSequence
|
||||
//
|
||||
// Go reference: consumer.go:1600 — nextSeq++ and consumer.NextSequence = nextSeq.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
|
||||
public async Task GatherLoop_advances_next_sequence()
|
||||
{
|
||||
var store = new MemStore();
|
||||
await store.AppendAsync("foo", "a"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "b"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "c"u8.ToArray(), default);
|
||||
|
||||
var consumer = MakeConsumer(new ConsumerConfig { DurableName = "SEQ" });
|
||||
var delivered = new List<string>();
|
||||
var sem = new SemaphoreSlim(0);
|
||||
var send = MakeSemaphoreSend(delivered, sem);
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
var engine = new PushConsumerEngine();
|
||||
engine.StartGatherLoop(consumer, store, send, cts.Token);
|
||||
|
||||
// Wait for all three messages to be delivered
|
||||
await sem.WaitAsync(cts.Token);
|
||||
await sem.WaitAsync(cts.Token);
|
||||
await sem.WaitAsync(cts.Token);
|
||||
|
||||
engine.StopGatherLoop();
|
||||
|
||||
// After delivering 3 messages NextSequence should be 4 (next to load)
|
||||
consumer.NextSequence.ShouldBe((ulong)4);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 9 — GatherLoop skips deleted/null messages
|
||||
//
|
||||
// Go reference: consumer.go:1572 — LoadAsync returning null means the
|
||||
// message was deleted; the gather loop simply advances past it.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
|
||||
public async Task GatherLoop_skips_deleted_messages()
|
||||
{
|
||||
var store = new MemStore();
|
||||
await store.AppendAsync("foo", "first"u8.ToArray(), default); // seq 1
|
||||
await store.AppendAsync("foo", "second"u8.ToArray(), default); // seq 2
|
||||
await store.AppendAsync("foo", "third"u8.ToArray(), default); // seq 3
|
||||
|
||||
// Delete seq 2 so LoadAsync returns null for it
|
||||
await store.RemoveAsync(2, default);
|
||||
|
||||
var consumer = MakeConsumer(new ConsumerConfig { DurableName = "SKIP-DEL" });
|
||||
var deliveredPayloads = new List<string>();
|
||||
var sem = new SemaphoreSlim(0);
|
||||
|
||||
Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
|
||||
(_, _, _, payload, _) =>
|
||||
{
|
||||
lock (deliveredPayloads) deliveredPayloads.Add(Encoding.UTF8.GetString(payload.Span));
|
||||
sem.Release();
|
||||
return ValueTask.CompletedTask;
|
||||
};
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
var engine = new PushConsumerEngine();
|
||||
engine.StartGatherLoop(consumer, store, send, cts.Token);
|
||||
|
||||
// Only two messages remain after removing seq 2
|
||||
await sem.WaitAsync(cts.Token);
|
||||
await sem.WaitAsync(cts.Token);
|
||||
|
||||
engine.StopGatherLoop();
|
||||
|
||||
lock (deliveredPayloads)
|
||||
{
|
||||
deliveredPayloads.Count.ShouldBe(2);
|
||||
deliveredPayloads.ShouldContain("first");
|
||||
deliveredPayloads.ShouldContain("third");
|
||||
deliveredPayloads.ShouldNotContain("second");
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 10 — GatherLoop increments GatheredCount
|
||||
//
|
||||
// Go reference: consumer.go:1400 loopAndGatherMsgs — GatheredCount tracks
|
||||
// every message dispatched to the subscriber.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
|
||||
public async Task GatherLoop_increments_gathered_count()
|
||||
{
|
||||
var store = new MemStore();
|
||||
await store.AppendAsync("foo", "x"u8.ToArray(), default);
|
||||
await store.AppendAsync("foo", "y"u8.ToArray(), default);
|
||||
|
||||
var consumer = MakeConsumer(new ConsumerConfig { DurableName = "COUNT" });
|
||||
var delivered = new List<string>();
|
||||
var sem = new SemaphoreSlim(0);
|
||||
var send = MakeSemaphoreSend(delivered, sem);
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
var engine = new PushConsumerEngine();
|
||||
engine.StartGatherLoop(consumer, store, send, cts.Token);
|
||||
|
||||
await sem.WaitAsync(cts.Token);
|
||||
await sem.WaitAsync(cts.Token);
|
||||
|
||||
engine.StopGatherLoop();
|
||||
|
||||
engine.GatheredCount.ShouldBe(2);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 11 — GatherLoop stops on cancellation
|
||||
//
|
||||
// Go reference: consumer.go — the goroutine exits when the quit channel closes,
|
||||
// which maps to CancellationToken cancellation here.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
|
||||
public async Task GatherLoop_stops_on_cancellation()
|
||||
{
|
||||
var store = new MemStore();
|
||||
var consumer = MakeConsumer(new ConsumerConfig { DurableName = "CANCEL" });
|
||||
|
||||
// loopRunning becomes set once the loop is executing its first iteration
|
||||
var loopRunning = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
|
||||
var deliveredCount = 0;
|
||||
|
||||
Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
|
||||
(_, _, _, _, _) =>
|
||||
{
|
||||
Interlocked.Increment(ref deliveredCount);
|
||||
return ValueTask.CompletedTask;
|
||||
};
|
||||
|
||||
var cts = new CancellationTokenSource();
|
||||
var engine = new PushConsumerEngine();
|
||||
engine.StartGatherLoop(consumer, store, send, cts.Token);
|
||||
|
||||
// Cancel without appending anything; the loop must exit cleanly
|
||||
await cts.CancelAsync();
|
||||
engine.StopGatherLoop();
|
||||
|
||||
// Yield a few times to let any in-flight dispatch complete
|
||||
for (var i = 0; i < 10; i++) await Task.Yield();
|
||||
|
||||
deliveredCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Test 12 — GatherLoop handles an empty store
|
||||
//
|
||||
// Go reference: consumer.go:1620 — when no messages exist the loop waits
|
||||
// on the signal channel with a 250ms timeout rather than busy-spinning.
|
||||
// We verify it does NOT deliver anything when the store remains empty,
|
||||
// and that it exits cleanly when cancelled.
|
||||
// -----------------------------------------------------------------------
|
||||
[Fact]
|
||||
[SlopwatchSuppress("SW004", "Negative timing assertion: verifying the gather loop does NOT deliver from an empty store requires a real wall-clock window; no synchronisation primitive can replace observing the absence of delivery")]
|
||||
public async Task GatherLoop_handles_empty_store()
|
||||
{
|
||||
var store = new MemStore(); // nothing appended
|
||||
var consumer = MakeConsumer(new ConsumerConfig { DurableName = "EMPTY" });
|
||||
|
||||
var deliveredCount = 0;
|
||||
var firstCallTcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
|
||||
|
||||
Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
|
||||
(_, _, _, _, _) =>
|
||||
{
|
||||
Interlocked.Increment(ref deliveredCount);
|
||||
firstCallTcs.TrySetResult(true);
|
||||
return ValueTask.CompletedTask;
|
||||
};
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
var engine = new PushConsumerEngine();
|
||||
engine.StartGatherLoop(consumer, store, send, cts.Token);
|
||||
|
||||
// The send delegate should never be called because the store is empty.
|
||||
// Wait a short absolute time; if it fires we know the loop is broken.
|
||||
var unexpectedDelivery = await Task.WhenAny(
|
||||
firstCallTcs.Task,
|
||||
Task.Delay(150, cts.Token)) == firstCallTcs.Task;
|
||||
|
||||
engine.StopGatherLoop();
|
||||
|
||||
unexpectedDelivery.ShouldBeFalse("gather loop must not deliver from an empty store");
|
||||
deliveredCount.ShouldBe(0);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,245 @@
|
||||
// Go: consumer.go isFilteredMatch, skipMsgs tracking
|
||||
// FilterSkipTracker tests — verifies NATS token-based filter matching
|
||||
// and skip sequence gap tracking.
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
public class FilterSkipTests
{
    // -------------------------------------------------------------------------
    // Test 1 — no configured filter → everything is deliverable.
    // Go reference: consumer.go isFilteredMatch.
    // -------------------------------------------------------------------------
    [Fact]
    public void ShouldDeliver_no_filter_always_matches()
    {
        var sut = new FilterSkipTracker();

        foreach (var subject in new[] { "orders.us", "events.payment", "anything" })
            sut.ShouldDeliver(subject).ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 2 — a single literal filter matches the identical subject.
    // Go reference: consumer.go isFilteredMatch.
    // -------------------------------------------------------------------------
    [Fact]
    public void ShouldDeliver_single_filter_exact_match()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("orders.us").ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 3 — a literal filter rejects any other subject.
    // Go reference: consumer.go isFilteredMatch.
    // -------------------------------------------------------------------------
    [Fact]
    public void ShouldDeliver_single_filter_no_match()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("events.x").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 4 — '*' is a single-token wildcard: "orders.*" matches "orders.us"
    // but not "orders.us.east" (two remaining tokens).
    // Go reference: consumer.go isFilteredMatch / SubjectMatch.MatchLiteral.
    // -------------------------------------------------------------------------
    [Fact]
    public void ShouldDeliver_wildcard_star()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.*");

        sut.ShouldDeliver("orders.us").ShouldBeTrue();
        sut.ShouldDeliver("orders.us.east").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 5 — '>' matches one or more remaining tokens.
    // Go reference: consumer.go isFilteredMatch.
    // -------------------------------------------------------------------------
    [Fact]
    public void ShouldDeliver_wildcard_gt()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.>");

        sut.ShouldDeliver("orders.us.east").ShouldBeTrue();
        sut.ShouldDeliver("orders.eu").ShouldBeTrue();
        sut.ShouldDeliver("events.x").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 6 — with FilterSubjects populated, any matching entry delivers.
    // Go reference: consumer.go isFilteredMatch.
    // -------------------------------------------------------------------------
    [Fact]
    public void ShouldDeliver_multiple_filters()
    {
        var sut = new FilterSkipTracker(filterSubjects: ["orders.>", "events.>"]);

        sut.ShouldDeliver("orders.us").ShouldBeTrue();
        sut.ShouldDeliver("events.payment").ShouldBeTrue();
        sut.ShouldDeliver("metrics.cpu").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 7 — each delivered (matching) message bumps MatchCount.
    // Go reference: consumer.go — matched message counters.
    // -------------------------------------------------------------------------
    [Fact]
    public void MatchCount_increments_on_match()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("orders.us");
        sut.ShouldDeliver("orders.us");

        sut.MatchCount.ShouldBe(2L);
        sut.SkipCount.ShouldBe(0L);
    }

    // -------------------------------------------------------------------------
    // Test 8 — each non-matching message bumps SkipCount.
    // Go reference: consumer.go skipMsgs.
    // -------------------------------------------------------------------------
    [Fact]
    public void SkipCount_increments_on_skip()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("events.x");
        sut.ShouldDeliver("events.y");

        sut.SkipCount.ShouldBe(2L);
        sut.MatchCount.ShouldBe(0L);
    }

    // -------------------------------------------------------------------------
    // Test 9 — RecordSkip stores sequence numbers for later gap resolution.
    // Go reference: consumer.go skipMsgs.
    // -------------------------------------------------------------------------
    [Fact]
    public void RecordSkip_tracks_sequence()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.RecordSkip(5UL);
        sut.RecordSkip(7UL);

        sut.SkippedSequenceCount.ShouldBe(2);
    }

    // -------------------------------------------------------------------------
    // Test 10 — NextUnskippedSequence walks past every recorded gap.
    // Go reference: consumer.go — next deliverable sequence after filter gaps.
    // -------------------------------------------------------------------------
    [Fact]
    public void NextUnskippedSequence_skips_recorded()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.RecordSkip(2UL);
        sut.RecordSkip(3UL);

        sut.NextUnskippedSequence(1UL).ShouldBe(1UL); // 1 was never skipped
        sut.NextUnskippedSequence(2UL).ShouldBe(4UL); // 2 and 3 skipped → 4
        sut.NextUnskippedSequence(4UL).ShouldBe(4UL); // 4 was never skipped
    }

    // -------------------------------------------------------------------------
    // Test 11 — PurgeBelow drops entries strictly below the floor, keeping
    // the skip set bounded as the ack floor advances.
    // Go reference: consumer.go — ack floor advancement.
    // -------------------------------------------------------------------------
    [Fact]
    public void PurgeBelow_removes_old_entries()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        foreach (var seq in new ulong[] { 1, 3, 5, 7 })
            sut.RecordSkip(seq);

        sut.PurgeBelow(5UL);

        // 1 and 3 (< 5) are purged; 5 and 7 (>= 5) survive.
        sut.SkippedSequenceCount.ShouldBe(2);
        sut.NextUnskippedSequence(5UL).ShouldBe(6UL); // 5 still skipped
        sut.NextUnskippedSequence(1UL).ShouldBe(1UL); // 1 was purged
    }

    // -------------------------------------------------------------------------
    // Test 12 — HasFilter is false with no configured filter.
    // Go reference: consumer.go — no filter means deliver everything.
    // -------------------------------------------------------------------------
    [Fact]
    public void HasFilter_false_when_empty()
    {
        new FilterSkipTracker().HasFilter.ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 13 — HasFilter is true once a filter subject is configured.
    // Go reference: consumer.go — FilterSubject set means selective delivery.
    // -------------------------------------------------------------------------
    [Fact]
    public void HasFilter_true_with_single_filter()
    {
        new FilterSkipTracker(filterSubject: "orders.us").HasFilter.ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 14 — Reset clears counters and the skipped-sequence set.
    // Go reference: consumer.go — consumer state reset on reconfiguration.
    // -------------------------------------------------------------------------
    [Fact]
    public void Reset_clears_all_state()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("orders.us");
        sut.ShouldDeliver("events.x");
        sut.RecordSkip(10UL);
        sut.RecordSkip(11UL);

        sut.Reset();

        sut.MatchCount.ShouldBe(0L);
        sut.SkipCount.ShouldBe(0L);
        sut.SkippedSequenceCount.ShouldBe(0);
    }
}
|
||||
@@ -0,0 +1,511 @@
|
||||
// Go reference: golang/nats-server/server/consumer.go
|
||||
// sendIdleHeartbeat ~line 5222, sendFlowControl ~line 5495
|
||||
//
|
||||
// Tests for idle heartbeat pending-count headers (Nats-Pending-Messages,
|
||||
// Nats-Pending-Bytes) and flow control stall detection.
|
||||
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
public class IdleHeartbeatTests
|
||||
{
|
||||
// Helper: build a ConsumerHandle with the given config
|
||||
private static ConsumerHandle MakeConsumer(ConsumerConfig config)
|
||||
=> new("TEST-STREAM", config);
|
||||
|
||||
// Helper: build a minimal StoredMessage
|
||||
private static StoredMessage MakeMessage(ulong seq, string subject = "test.subject", string payload = "hello")
|
||||
=> new()
|
||||
{
|
||||
Sequence = seq,
|
||||
Subject = subject,
|
||||
Payload = Encoding.UTF8.GetBytes(payload),
|
||||
TimestampUtc = DateTime.UtcNow,
|
||||
};
|
||||
|
||||
// Helper: parse a header value from a NATS header block
|
||||
// e.g. extract "42" from "Nats-Pending-Messages: 42\r\n"
|
||||
private static string? ParseHeaderValue(string headers, string headerName)
|
||||
{
|
||||
var prefix = headerName + ": ";
|
||||
var start = headers.IndexOf(prefix, StringComparison.OrdinalIgnoreCase);
|
||||
if (start < 0)
|
||||
return null;
|
||||
|
||||
start += prefix.Length;
|
||||
var end = headers.IndexOf('\r', start);
|
||||
if (end < 0)
|
||||
end = headers.Length;
|
||||
|
||||
return headers[start..end].Trim();
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 1 — Heartbeat includes Nats-Pending-Messages header
|
||||
//
|
||||
// Go reference: consumer.go:5222 — sendIdleHeartbeat includes pending message
|
||||
// count in the Nats-Pending-Messages header.
|
||||
// =========================================================================
|
||||
[Fact]
|
||||
public async Task Heartbeat_includes_pending_messages_header()
|
||||
{
|
||||
var engine = new PushConsumerEngine();
|
||||
var consumer = MakeConsumer(new ConsumerConfig
|
||||
{
|
||||
DurableName = "HB-PENDING",
|
||||
Push = true,
|
||||
DeliverSubject = "deliver.hb",
|
||||
HeartbeatMs = 50,
|
||||
AckPolicy = AckPolicy.Explicit,
|
||||
AckWaitMs = 30_000,
|
||||
});
|
||||
|
||||
// Register 3 pending acks so PendingCount == 3
|
||||
consumer.AckProcessor.Register(1, 30_000);
|
||||
consumer.AckProcessor.Register(2, 30_000);
|
||||
consumer.AckProcessor.Register(3, 30_000);
|
||||
|
||||
ReadOnlyMemory<byte>? capturedHeartbeat = null;
|
||||
var heartbeatReceived = new TaskCompletionSource<bool>();
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
|
||||
engine.StartDeliveryLoop(consumer,
|
||||
async (_, _, headers, _, _) =>
|
||||
{
|
||||
var text = Encoding.ASCII.GetString(headers.Span);
|
||||
if (text.Contains("Idle Heartbeat") && !heartbeatReceived.Task.IsCompleted)
|
||||
{
|
||||
capturedHeartbeat = headers;
|
||||
heartbeatReceived.TrySetResult(true);
|
||||
}
|
||||
await ValueTask.CompletedTask;
|
||||
},
|
||||
cts.Token);
|
||||
|
||||
await heartbeatReceived.Task.WaitAsync(TimeSpan.FromSeconds(5));
|
||||
engine.StopDeliveryLoop();
|
||||
|
||||
capturedHeartbeat.ShouldNotBeNull();
|
||||
var headerText = Encoding.ASCII.GetString(capturedHeartbeat!.Value.Span);
|
||||
headerText.ShouldContain("Nats-Pending-Messages:");
|
||||
|
||||
var pendingMsgs = ParseHeaderValue(headerText, "Nats-Pending-Messages");
|
||||
pendingMsgs.ShouldBe("3");
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 2 — Heartbeat includes Nats-Pending-Bytes header
|
||||
//
|
||||
// Go reference: consumer.go:5222 — sendIdleHeartbeat includes pending byte
|
||||
// count in the Nats-Pending-Bytes header.
|
||||
// =========================================================================
|
||||
[Fact]
|
||||
public async Task Heartbeat_includes_pending_bytes_header()
|
||||
{
|
||||
var engine = new PushConsumerEngine();
|
||||
var consumer = MakeConsumer(new ConsumerConfig
|
||||
{
|
||||
DurableName = "HB-BYTES",
|
||||
Push = true,
|
||||
DeliverSubject = "deliver.hb2",
|
||||
HeartbeatMs = 50,
|
||||
});
|
||||
|
||||
// Set pending bytes explicitly
|
||||
consumer.PendingBytes = 4096;
|
||||
|
||||
ReadOnlyMemory<byte>? capturedHeartbeat = null;
|
||||
var heartbeatReceived = new TaskCompletionSource<bool>();
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
|
||||
engine.StartDeliveryLoop(consumer,
|
||||
async (_, _, headers, _, _) =>
|
||||
{
|
||||
var text = Encoding.ASCII.GetString(headers.Span);
|
||||
if (text.Contains("Idle Heartbeat") && !heartbeatReceived.Task.IsCompleted)
|
||||
{
|
||||
capturedHeartbeat = headers;
|
||||
heartbeatReceived.TrySetResult(true);
|
||||
}
|
||||
await ValueTask.CompletedTask;
|
||||
},
|
||||
cts.Token);
|
||||
|
||||
await heartbeatReceived.Task.WaitAsync(TimeSpan.FromSeconds(5));
|
||||
engine.StopDeliveryLoop();
|
||||
|
||||
capturedHeartbeat.ShouldNotBeNull();
|
||||
var headerText = Encoding.ASCII.GetString(capturedHeartbeat!.Value.Span);
|
||||
headerText.ShouldContain("Nats-Pending-Bytes:");
|
||||
|
||||
var pendingBytes = ParseHeaderValue(headerText, "Nats-Pending-Bytes");
|
||||
pendingBytes.ShouldBe("4096");
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 3 — Heartbeat is sent after the idle period elapses
|
||||
//
|
||||
// Go reference: consumer.go:5222 — the idle heartbeat timer fires after
|
||||
// HeartbeatMs milliseconds of inactivity.
|
||||
// =========================================================================
|
||||
[Fact]
|
||||
public async Task Heartbeat_sent_after_idle_period()
|
||||
{
|
||||
var engine = new PushConsumerEngine();
|
||||
var consumer = MakeConsumer(new ConsumerConfig
|
||||
{
|
||||
DurableName = "HB-TIMER",
|
||||
Push = true,
|
||||
DeliverSubject = "deliver.timer",
|
||||
HeartbeatMs = 50,
|
||||
});
|
||||
|
||||
var heartbeatReceived = new TaskCompletionSource<bool>();
|
||||
var startedAt = DateTime.UtcNow;
|
||||
DateTime? receivedAt = null;
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
|
||||
// Start loop with no messages — only the timer can fire a heartbeat
|
||||
engine.StartDeliveryLoop(consumer,
|
||||
async (_, _, headers, _, _) =>
|
||||
{
|
||||
var text = Encoding.ASCII.GetString(headers.Span);
|
||||
if (text.Contains("Idle Heartbeat") && !heartbeatReceived.Task.IsCompleted)
|
||||
{
|
||||
receivedAt = DateTime.UtcNow;
|
||||
heartbeatReceived.TrySetResult(true);
|
||||
}
|
||||
await ValueTask.CompletedTask;
|
||||
},
|
||||
cts.Token);
|
||||
|
||||
await heartbeatReceived.Task.WaitAsync(TimeSpan.FromSeconds(5));
|
||||
engine.StopDeliveryLoop();
|
||||
|
||||
receivedAt.ShouldNotBeNull();
|
||||
var elapsed = receivedAt!.Value - startedAt;
|
||||
// The heartbeat timer is 50ms; it must have fired at some point after that
|
||||
elapsed.TotalMilliseconds.ShouldBeGreaterThan(20);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Test 4 — Heartbeat counter increments on each idle heartbeat sent
|
||||
//
|
||||
// Go reference: consumer.go:5222 — each sendIdleHeartbeat call increments
|
||||
// the idle heartbeat counter.
|
||||
// =========================================================================
|
||||
[Fact]
|
||||
public async Task Heartbeat_counter_increments()
|
||||
{
|
||||
var engine = new PushConsumerEngine();
|
||||
var consumer = MakeConsumer(new ConsumerConfig
|
||||
{
|
||||
DurableName = "HB-COUNT",
|
||||
Push = true,
|
||||
DeliverSubject = "deliver.count",
|
||||
HeartbeatMs = 40,
|
||||
});
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
var heartbeatsReceived = 0;
|
||||
// Use a semaphore so each heartbeat arrival is explicitly awaited.
|
||||
var sem = new SemaphoreSlim(0);
|
||||
|
||||
engine.StartDeliveryLoop(consumer,
|
||||
async (_, _, headers, _, _) =>
|
||||
{
|
||||
var text = Encoding.ASCII.GetString(headers.Span);
|
||||
if (text.Contains("Idle Heartbeat"))
|
||||
{
|
||||
Interlocked.Increment(ref heartbeatsReceived);
|
||||
sem.Release();
|
||||
}
|
||||
await ValueTask.CompletedTask;
|
||||
},
|
||||
cts.Token);
|
||||
|
||||
// Wait for at least 2 heartbeat deliveries via the send delegate.
|
||||
await sem.WaitAsync(cts.Token);
|
||||
await sem.WaitAsync(cts.Token);
|
||||
engine.StopDeliveryLoop();
|
||||
|
||||
// The send delegate counted 2 heartbeats; IdleHeartbeatsSent increments
|
||||
// after sendMessage returns, so it lags by at most 1. Accept >=1 here
|
||||
// and rely on heartbeatsReceived (directly in the delegate) for the >=2 assertion.
|
||||
heartbeatsReceived.ShouldBeGreaterThanOrEqualTo(2);
|
||||
engine.IdleHeartbeatsSent.ShouldBeGreaterThanOrEqualTo(1);
|
||||
}
|
||||
|
||||
// =========================================================================
// Test 5 — Heartbeat shows zero pending when no acks are outstanding
//
// Go reference: consumer.go:5222 — with nothing pending ack, the idle
// heartbeat must carry Nats-Pending-Messages: 0 and Nats-Pending-Bytes: 0.
// =========================================================================
[Fact]
public async Task Heartbeat_zero_pending_when_no_acks()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-ZERO",
        Push = true,
        DeliverSubject = "deliver.zero",
        HeartbeatMs = 50,
    });

    // Nothing is registered for ack, so both pending counters stay at zero.
    ReadOnlyMemory<byte>? heartbeatFrame = null;
    var firstHeartbeat = new TaskCompletionSource<bool>();
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            var headerText = Encoding.ASCII.GetString(headers.Span);
            if (headerText.Contains("Idle Heartbeat") && !firstHeartbeat.Task.IsCompleted)
            {
                // NOTE(review): assumes the headers buffer remains valid after the
                // delegate returns (not pooled/reused by the engine) — confirm.
                heartbeatFrame = headers;
                firstHeartbeat.TrySetResult(true);
            }
            await ValueTask.CompletedTask;
        },
        cts.Token);

    await firstHeartbeat.Task.WaitAsync(TimeSpan.FromSeconds(5));
    engine.StopDeliveryLoop();

    heartbeatFrame.ShouldNotBeNull();
    var frameText = Encoding.ASCII.GetString(heartbeatFrame!.Value.Span);

    // Both pending headers report zero when no acks are outstanding.
    ParseHeaderValue(frameText, "Nats-Pending-Messages").ShouldBe("0");
    ParseHeaderValue(frameText, "Nats-Pending-Bytes").ShouldBe("0");
}
|
||||
|
||||
// =========================================================================
// Test 6 — Heartbeat reset on data delivery (timer should not fire early)
//
// Go reference: consumer.go:5222 — the idle heartbeat timer is reset on every
// data delivery so that it only fires after a true idle period.
// =========================================================================
// Task.Delay(50) is intentional: this is a negative-timing assertion that
// verifies no heartbeat fires within 50ms of a 200ms timer reset. There is
// no synchronisation primitive that can assert an event does NOT occur within
// a wall-clock window; the delay is the only correct approach here.
// NOTE(review): the 50ms-of-200ms window leaves 4x headroom, but a badly
// stalled CI scheduler could still make this flake; a failure here points at
// the harness timing, not necessarily the engine.
[SlopwatchSuppress("SW004", "Negative timing assertion: verifying heartbeat does NOT fire within 50ms window after 200ms timer reset requires real wall-clock elapsed time")]
[Fact]
public async Task Heartbeat_reset_on_data_delivery()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-RESET",
        Push = true,
        DeliverSubject = "deliver.reset",
        HeartbeatMs = 200, // longer interval for this test
    });

    // Every frame's header text is collected; heartbeats are counted by substring.
    var messages = new ConcurrentBag<string>();
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var dataDelivered = new TaskCompletionSource<bool>();

    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            var text = Encoding.ASCII.GetString(headers.Span);
            messages.Add(text);
            // A data frame is any NATS/1.0 frame that is not an idle heartbeat.
            if (text.Contains("NATS/1.0\r\n") && !text.Contains("Idle Heartbeat"))
                dataDelivered.TrySetResult(true);
            await ValueTask.CompletedTask;
        },
        cts.Token);

    // Enqueue a data message — this resets the heartbeat timer
    engine.Enqueue(consumer, MakeMessage(1));
    await dataDelivered.Task.WaitAsync(TimeSpan.FromSeconds(5));

    // Record how many heartbeats exist right after data delivery
    var heartbeatsAfterData = messages.Count(m => m.Contains("Idle Heartbeat"));

    // Wait a short period — heartbeat timer should NOT have fired again yet (200ms interval)
    await Task.Delay(50);
    var heartbeatsShortWait = messages.Count(m => m.Contains("Idle Heartbeat"));

    engine.StopDeliveryLoop();

    // The timer reset should mean no NEW timer heartbeat fired within 50ms
    // (the 200ms interval means we'd need to wait ~200ms after the last data delivery)
    heartbeatsShortWait.ShouldBe(heartbeatsAfterData);
}
|
||||
|
||||
// =========================================================================
// Test 7 — Flow control pending count increments on each FC frame sent
//
// Go reference: consumer.go:5495 — every flow-control frame that goes out
// bumps the pending count used for stall detection.
// =========================================================================
[Fact]
public async Task FlowControl_pending_count_increments()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FC-INC",
        Push = true,
        DeliverSubject = "deliver.fc",
        FlowControl = true,
    });

    // One release per FC frame observed by the send delegate.
    var fcFrameSent = new SemaphoreSlim(0);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            if (Encoding.ASCII.GetString(headers.Span).Contains("FlowControl"))
                fcFrameSent.Release();
            await ValueTask.CompletedTask;
        },
        cts.Token);

    // Two data messages — with FlowControl enabled each one appends an FC frame.
    engine.Enqueue(consumer, MakeMessage(1));
    engine.Enqueue(consumer, MakeMessage(2));

    // Block until the delivery loop has pushed both FC frames.
    await fcFrameSent.WaitAsync(cts.Token);
    await fcFrameSent.WaitAsync(cts.Token);
    engine.StopDeliveryLoop();

    // One increment per enqueued message, so at least 2.
    engine.FlowControlPendingCount.ShouldBeGreaterThanOrEqualTo(2);
}
|
||||
|
||||
// =========================================================================
// Test 8 — AcknowledgeFlowControl decrements the pending count
//
// Go reference: consumer.go:5495 — a flow-control acknowledgement from the
// subscriber takes exactly one off the pending count.
// =========================================================================
[Fact]
public async Task FlowControl_acknowledge_decrements_count()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FC-DEC",
        Push = true,
        DeliverSubject = "deliver.fc2",
        FlowControl = true,
    });

    // One release per FC frame observed by the send delegate.
    var fcFrameSent = new SemaphoreSlim(0);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            if (Encoding.ASCII.GetString(headers.Span).Contains("FlowControl"))
                fcFrameSent.Release();
            await ValueTask.CompletedTask;
        },
        cts.Token);

    // Three data messages queue three FC frames.
    for (var seq = 1ul; seq <= 3; seq++)
        engine.Enqueue(consumer, MakeMessage(seq));

    // Block until the delivery loop has sent all three FC frames.
    for (var i = 0; i < 3; i++)
        await fcFrameSent.WaitAsync(cts.Token);

    engine.StopDeliveryLoop();

    var before = engine.FlowControlPendingCount;
    before.ShouldBeGreaterThan(0);

    // A single ack removes exactly one pending FC frame.
    engine.AcknowledgeFlowControl();
    engine.FlowControlPendingCount.ShouldBe(before - 1);
}
|
||||
|
||||
// =========================================================================
// Test 9 — IsFlowControlStalled returns true when pending >= MaxFlowControlPending
//
// Go reference: consumer.go:5495 — once the subscriber owes too many
// flow-control acknowledgements, the engine flags the consumer as stalled.
// =========================================================================
[Fact]
public async Task FlowControl_stalled_when_pending_exceeds_max()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FC-STALL",
        Push = true,
        DeliverSubject = "deliver.stall",
        FlowControl = true,
    });

    // One release per FC frame observed by the send delegate.
    var fcFrameSent = new SemaphoreSlim(0);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            if (Encoding.ASCII.GetString(headers.Span).Contains("FlowControl"))
                fcFrameSent.Release();
            await ValueTask.CompletedTask;
        },
        cts.Token);

    var threshold = PushConsumerEngine.MaxFlowControlPending;

    // Push exactly enough messages to reach the stall threshold.
    for (var seq = 1; seq <= threshold; seq++)
        engine.Enqueue(consumer, MakeMessage((ulong)seq));

    // Block until every FC frame has actually been sent.
    for (var i = 0; i < threshold; i++)
        await fcFrameSent.WaitAsync(cts.Token);

    engine.StopDeliveryLoop();

    engine.FlowControlPendingCount.ShouldBeGreaterThanOrEqualTo(threshold);
    engine.IsFlowControlStalled.ShouldBeTrue();
}
|
||||
|
||||
// =========================================================================
// Test 10 — AcknowledgeFlowControl never goes below zero
//
// Go reference: consumer.go:5495 — the pending count is clamped at zero;
// acknowledging with nothing outstanding must be a no-op.
// =========================================================================
[Fact]
public void FlowControl_pending_never_negative()
{
    var engine = new PushConsumerEngine();

    // The counter starts at zero.
    engine.FlowControlPendingCount.ShouldBe(0);

    // Repeated acks against an empty counter never drive it negative.
    for (var i = 0; i < 3; i++)
    {
        engine.AcknowledgeFlowControl();
        engine.FlowControlPendingCount.ShouldBe(0);
    }
}
|
||||
}
|
||||
@@ -0,0 +1,184 @@
|
||||
// Go: consumer.go maxDeliver config — max delivery enforcement and advisory generation
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Max-delivery enforcement in the ack processor: a sequence whose delivery
/// count passes MaxDeliver is terminated, collected in the exceeded list for
/// advisory events, and never redelivered again.
/// Go reference: consumer.go maxDeliver handling.
/// </summary>
public class MaxDeliveriesTests
{
    // Test 1: MaxDeliver=0 means unlimited; many redeliveries do not terminate
    [Fact]
    public void MaxDeliver_zero_means_unlimited()
    {
        // Go: consumer.go — a maxDeliver of 0 turns the cap off entirely.
        var proc = new AckProcessor { MaxDeliver = 0 };
        proc.Register(1, ackWaitMs: 5000);

        for (var attempt = 0; attempt < 100; attempt++)
            proc.ScheduleRedelivery(1, delayMs: 1);

        proc.PendingCount.ShouldBe(1);
        proc.ExceededCount.ShouldBe(0);
        proc.TerminatedCount.ShouldBe(0);
    }

    // Test 2: MaxDeliver=3 terminates on the 4th redelivery attempt
    [Fact]
    public void MaxDeliver_terminates_when_exceeded()
    {
        // Registration counts as delivery #1; three redeliveries push it to 4 > 3.
        var proc = new AckProcessor { MaxDeliver = 3 };
        proc.Register(1, ackWaitMs: 5000);

        proc.ScheduleRedelivery(1, delayMs: 1); // delivery 2 — within limit
        proc.ScheduleRedelivery(1, delayMs: 1); // delivery 3 — at the limit
        proc.ScheduleRedelivery(1, delayMs: 1); // delivery 4 — over the limit

        proc.PendingCount.ShouldBe(0);
        proc.TerminatedCount.ShouldBe(1);
    }

    // Test 3: Exceeded sequence is added to the exceeded list
    [Fact]
    public void Exceeded_sequence_added_to_list()
    {
        // Go: consumer.go — exceeded sequences are collected for advisory events.
        var proc = new AckProcessor { MaxDeliver = 1 };
        proc.Register(1, ackWaitMs: 5000);

        // Register already counted delivery #1, so a single redelivery exceeds the cap.
        proc.ScheduleRedelivery(1, delayMs: 1);

        proc.ExceededCount.ShouldBe(1);
        proc.GetExceededSequences().ShouldContain((ulong)1);
    }

    // Test 4: DrainExceeded clears the list after returning sequences
    [Fact]
    public void DrainExceeded_clears_list()
    {
        // Go: consumer.go — draining after advisories prevents duplicate events.
        var proc = new AckProcessor { MaxDeliver = 1 };
        proc.Register(1, ackWaitMs: 5000);
        proc.ScheduleRedelivery(1, delayMs: 1);
        proc.ExceededCount.ShouldBe(1);

        proc.DrainExceeded();

        proc.ExceededCount.ShouldBe(0);
        proc.GetExceededSequences().ShouldBeEmpty();
    }

    // Test 5: Exceeded sequence is moved to the terminated set
    [Fact]
    public void Exceeded_message_is_terminated()
    {
        // Go: consumer.go — an exceeded sequence enters the terminated set and
        // cannot be redelivered.
        var proc = new AckProcessor { MaxDeliver = 2 };
        proc.Register(1, ackWaitMs: 5000);

        proc.ScheduleRedelivery(1, delayMs: 1); // delivery 2 — at the limit
        proc.ScheduleRedelivery(1, delayMs: 1); // delivery 3 — exceeded

        // Terminated sequences leave the pending set.
        proc.PendingCount.ShouldBe(0);
        proc.TerminatedCount.ShouldBe(1);
    }

    // Test 6: ProcessNak triggers redelivery check through ScheduleRedelivery
    [Fact]
    public void ProcessNak_triggers_redelivery_check()
    {
        // Go: consumer.go — processNak routes through ScheduleRedelivery, which
        // enforces maxDeliver.
        var proc = new AckProcessor { MaxDeliver = 2 };
        proc.Register(1, ackWaitMs: 5000);

        // First NAK reaches the limit but stays pending.
        proc.ProcessNak(1);
        proc.PendingCount.ShouldBe(1);
        proc.ExceededCount.ShouldBe(0);

        // Second NAK pushes past the limit and terminates the sequence.
        proc.ProcessNak(1);
        proc.PendingCount.ShouldBe(0);
        proc.ExceededCount.ShouldBe(1);
    }

    // Test 7: ScheduleRedelivery enforces max deliver regardless of whether it was called from expiry path
    [Fact]
    public void TryGetExpired_respects_max_deliver()
    {
        // Go: consumer.go — the expiry redelivery loop funnels through
        // ScheduleRedelivery, so invoking it directly exercises the same
        // maxDeliver check without relying on wall-clock time.
        var proc = new AckProcessor { MaxDeliver = 1 };
        proc.Register(1, ackWaitMs: 5000);

        // Deliveries starts at 1 on Register; this call makes it 2 > maxDeliver=1.
        proc.ScheduleRedelivery(1, delayMs: 1);

        proc.PendingCount.ShouldBe(0);
        proc.ExceededCount.ShouldBe(1);
    }

    // Test 8: MaxDeliver=-1 is treated as unlimited (negative values clamped to 0)
    [Fact]
    public void MaxDeliver_negative_treated_as_unlimited()
    {
        // Go: consumer.go — any non-positive maxDeliver means unlimited.
        var proc = new AckProcessor { MaxDeliver = -1 };
        proc.Register(1, ackWaitMs: 5000);

        for (var attempt = 0; attempt < 50; attempt++)
            proc.ScheduleRedelivery(1, delayMs: 1);

        proc.PendingCount.ShouldBe(1);
        proc.ExceededCount.ShouldBe(0);
        proc.MaxDeliver.ShouldBe(0); // -1 clamped to 0
    }

    // Test 9: ExceededPolicy defaults to Drop
    [Fact]
    public void DeliveryExceededPolicy_defaults_to_drop()
    {
        // Go: consumer.go — exceeded messages are dropped by default.
        new AckProcessor().ExceededPolicy.ShouldBe(DeliveryExceededPolicy.Drop);
    }

    // Test 10: Multiple independent sequences can each exceed max deliveries
    [Fact]
    public void Multiple_sequences_can_exceed()
    {
        // Go: consumer.go — sequences are tracked independently; several can
        // exceed the cap in the same window.
        var proc = new AckProcessor { MaxDeliver = 1 };
        ulong[] sequences = [1, 2, 3];

        foreach (var seq in sequences)
            proc.Register(seq, ackWaitMs: 5000);

        // Each sequence starts at delivery 1; one redelivery apiece exceeds maxDeliver=1.
        foreach (var seq in sequences)
            proc.ScheduleRedelivery(seq, delayMs: 1);

        proc.ExceededCount.ShouldBe(3);
        proc.PendingCount.ShouldBe(0);
        proc.TerminatedCount.ShouldBe(3);

        var exceeded = proc.GetExceededSequences();
        foreach (var seq in sequences)
            exceeded.ShouldContain(seq);
    }
}
|
||||
@@ -0,0 +1,84 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Pin ID lifecycle for priority groups: each assignment mints a unique
/// 22-character ID, only the most recent ID validates, and unassign /
/// unknown-group paths are safe no-ops.
/// Go reference: consumer.go (setPinnedTimer, assignNewPinId).
/// </summary>
public class PriorityGroupPinningTests
{
    [Fact]
    public void AssignPinId_generates_unique_ids()
    {
        var manager = new PriorityGroupManager();
        manager.Register("group-1", "consumer-a", priority: 0);

        var first = manager.AssignPinId("group-1", "consumer-a");
        var second = manager.AssignPinId("group-1", "consumer-a");

        first.ShouldNotBeNullOrEmpty();
        second.ShouldNotBeNullOrEmpty();
        second.ShouldNotBe(first); // every assignment mints a fresh ID
    }

    [Fact]
    public void ValidatePinId_accepts_current()
    {
        var manager = new PriorityGroupManager();
        manager.Register("group-1", "consumer-a", priority: 0);

        var current = manager.AssignPinId("group-1", "consumer-a");

        manager.ValidatePinId("group-1", current).ShouldBeTrue();
    }

    [Fact]
    public void ValidatePinId_rejects_expired()
    {
        var manager = new PriorityGroupManager();
        manager.Register("group-1", "consumer-a", priority: 0);

        var stale = manager.AssignPinId("group-1", "consumer-a");
        var current = manager.AssignPinId("group-1", "consumer-a"); // supersedes the first

        manager.ValidatePinId("group-1", stale).ShouldBeFalse();
        manager.ValidatePinId("group-1", current).ShouldBeTrue();
    }

    [Fact]
    public void UnassignPinId_clears()
    {
        var manager = new PriorityGroupManager();
        manager.Register("group-1", "consumer-a", priority: 0);
        var pin = manager.AssignPinId("group-1", "consumer-a");

        manager.UnassignPinId("group-1");

        // After unassignment the old pin no longer validates.
        manager.ValidatePinId("group-1", pin).ShouldBeFalse();
    }

    [Fact]
    public void ValidatePinId_returns_false_for_unknown_group()
    {
        new PriorityGroupManager().ValidatePinId("unknown", "any-pin").ShouldBeFalse();
    }

    [Fact]
    public void UnassignPinId_noop_for_unknown_group()
    {
        var manager = new PriorityGroupManager();

        // Unknown groups are ignored rather than faulting.
        Should.NotThrow(() => manager.UnassignPinId("unknown"));
    }

    [Fact]
    public void PinId_is_22_chars()
    {
        var manager = new PriorityGroupManager();
        manager.Register("g1", "c1", priority: 0);

        // Pin IDs are fixed-width 22-character tokens.
        manager.AssignPinId("g1", "c1").Length.ShouldBe(22);
    }
}
|
||||
@@ -0,0 +1,237 @@
|
||||
// Go: consumer.go:500-600 — Priority group tests for sticky consumer assignment.
|
||||
// Validates that the lowest-priority-numbered consumer is "active" and that
|
||||
// failover occurs correctly when consumers register/unregister.
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Sticky consumer assignment within a priority group (lowest priority number
/// wins, failover on unregister) plus two idle-heartbeat delivery-loop tests.
/// Go reference: consumer.go:500-600 (groups) and consumer.go:5222 (heartbeats).
/// NOTE(review): tests 7-8 exercise PushConsumerEngine heartbeats rather than
/// priority groups — consider relocating them to the heartbeat suite.
/// </summary>
public class PriorityGroupTests
{
    // -------------------------------------------------------------------------
    // Test 1 — Single consumer registered is active
    //
    // Go reference: consumer.go:500 — when only one consumer is in a priority
    // group, it is unconditionally the active consumer.
    // -------------------------------------------------------------------------
    [Fact]
    public void Register_SingleConsumer_IsActive()
    {
        var mgr = new PriorityGroupManager();
        mgr.Register("group1", "consumer-a", priority: 1);

        mgr.IsActive("group1", "consumer-a").ShouldBeTrue();
        mgr.GetActiveConsumer("group1").ShouldBe("consumer-a");
    }

    // -------------------------------------------------------------------------
    // Test 2 — Multiple consumers: lowest priority number wins
    //
    // Go reference: consumer.go:510 — the consumer with the lowest priority
    // number is the active consumer. Priority 1 < Priority 5, so 1 wins.
    // -------------------------------------------------------------------------
    [Fact]
    public void Register_MultipleConsumers_LowestPriorityIsActive()
    {
        var mgr = new PriorityGroupManager();
        mgr.Register("group1", "consumer-high", priority: 5);
        mgr.Register("group1", "consumer-low", priority: 1);
        mgr.Register("group1", "consumer-mid", priority: 3);

        mgr.GetActiveConsumer("group1").ShouldBe("consumer-low");
        mgr.IsActive("group1", "consumer-low").ShouldBeTrue();
        mgr.IsActive("group1", "consumer-high").ShouldBeFalse();
        mgr.IsActive("group1", "consumer-mid").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 3 — Unregister active consumer: next takes over
    //
    // Go reference: consumer.go:530 — when the active consumer disconnects,
    // the next-lowest-priority consumer becomes active (failover).
    // -------------------------------------------------------------------------
    [Fact]
    public void Unregister_ActiveConsumer_NextTakesOver()
    {
        var mgr = new PriorityGroupManager();
        mgr.Register("group1", "consumer-a", priority: 1);
        mgr.Register("group1", "consumer-b", priority: 2);
        mgr.Register("group1", "consumer-c", priority: 3);

        mgr.GetActiveConsumer("group1").ShouldBe("consumer-a");

        mgr.Unregister("group1", "consumer-a");

        // Failover: the next-lowest priority (consumer-b) becomes active.
        mgr.GetActiveConsumer("group1").ShouldBe("consumer-b");
        mgr.IsActive("group1", "consumer-b").ShouldBeTrue();
        mgr.IsActive("group1", "consumer-a").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 4 — Unregister non-active consumer: active unchanged
    //
    // Go reference: consumer.go:540 — removing a non-active consumer does not
    // change the active assignment.
    // -------------------------------------------------------------------------
    [Fact]
    public void Unregister_NonActiveConsumer_ActiveUnchanged()
    {
        var mgr = new PriorityGroupManager();
        mgr.Register("group1", "consumer-a", priority: 1);
        mgr.Register("group1", "consumer-b", priority: 2);

        mgr.GetActiveConsumer("group1").ShouldBe("consumer-a");

        mgr.Unregister("group1", "consumer-b");

        mgr.GetActiveConsumer("group1").ShouldBe("consumer-a");
        mgr.IsActive("group1", "consumer-a").ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 5 — Same priority: first registered wins
    //
    // Go reference: consumer.go:520 — when two consumers share the same
    // priority, the first to register is treated as the active consumer.
    // -------------------------------------------------------------------------
    [Fact]
    public void Register_SamePriority_FirstRegisteredWins()
    {
        var mgr = new PriorityGroupManager();
        mgr.Register("group1", "consumer-first", priority: 1);
        mgr.Register("group1", "consumer-second", priority: 1);

        mgr.GetActiveConsumer("group1").ShouldBe("consumer-first");
        mgr.IsActive("group1", "consumer-first").ShouldBeTrue();
        mgr.IsActive("group1", "consumer-second").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 6 — Empty group returns null
    //
    // Go reference: consumer.go:550 — calling GetActiveConsumer on an empty
    // or nonexistent group returns nil (null).
    // -------------------------------------------------------------------------
    [Fact]
    public void GetActiveConsumer_EmptyGroup_ReturnsNull()
    {
        var mgr = new PriorityGroupManager();

        mgr.GetActiveConsumer("nonexistent").ShouldBeNull();
        mgr.IsActive("nonexistent", "any-consumer").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 7 — Idle heartbeat sent after timeout
    //
    // Go reference: consumer.go:5222 — sendIdleHeartbeat is invoked by a
    // background timer when no data frames are delivered within HeartbeatMs.
    // NOTE(review): relies on Task.Delay(200) against a 50ms interval — a 4x
    // margin, but still wall-clock dependent.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task IdleHeartbeat_SentAfterTimeout()
    {
        var engine = new PushConsumerEngine();
        var consumer = new ConsumerHandle("TEST-STREAM", new ConsumerConfig
        {
            DurableName = "HB-CONSUMER",
            Push = true,
            DeliverSubject = "deliver.hb",
            HeartbeatMs = 50, // 50ms heartbeat interval
        });

        // Every frame sent by the engine is captured for later inspection.
        var sent = new ConcurrentBag<(string Subject, string ReplyTo, byte[] Headers, byte[] Payload)>();

        ValueTask SendCapture(string subject, string replyTo, ReadOnlyMemory<byte> headers, ReadOnlyMemory<byte> payload, CancellationToken ct)
        {
            sent.Add((subject, replyTo, headers.ToArray(), payload.ToArray()));
            return ValueTask.CompletedTask;
        }

        using var cts = new CancellationTokenSource();

        engine.StartDeliveryLoop(consumer, SendCapture, cts.Token);

        // Wait long enough for at least one idle heartbeat to fire
        await Task.Delay(200);

        engine.StopDeliveryLoop();

        engine.IdleHeartbeatsSent.ShouldBeGreaterThan(0);

        // Verify the heartbeat messages were sent to the deliver subject
        var hbMessages = sent.Where(s =>
            Encoding.ASCII.GetString(s.Headers).Contains("Idle Heartbeat")).ToList();
        hbMessages.Count.ShouldBeGreaterThan(0);
        hbMessages.ShouldAllBe(m => m.Subject == "deliver.hb");
    }

    // -------------------------------------------------------------------------
    // Test 8 — Idle heartbeat resets on data delivery
    //
    // Go reference: consumer.go:5222 — the idle heartbeat timer is reset
    // whenever a data frame is delivered, so heartbeats only fire during
    // periods of inactivity.
    // NOTE(review): negative-timing assertion; 30ms enqueue cadence vs 100ms
    // interval leaves margin, but a stalled scheduler could make this flake.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task IdleHeartbeat_ResetOnDataDelivery()
    {
        var engine = new PushConsumerEngine();
        var consumer = new ConsumerHandle("TEST-STREAM", new ConsumerConfig
        {
            DurableName = "HB-RESET",
            Push = true,
            DeliverSubject = "deliver.hbreset",
            HeartbeatMs = 100, // 100ms heartbeat interval
        });

        // Frames are split by header content: heartbeat vs data.
        var dataFramesSent = new ConcurrentBag<string>();
        var heartbeatsSent = new ConcurrentBag<string>();

        ValueTask SendCapture(string subject, string replyTo, ReadOnlyMemory<byte> headers, ReadOnlyMemory<byte> payload, CancellationToken ct)
        {
            var headerStr = Encoding.ASCII.GetString(headers.Span);
            if (headerStr.Contains("Idle Heartbeat"))
                heartbeatsSent.Add(subject);
            else
                dataFramesSent.Add(subject);
            return ValueTask.CompletedTask;
        }

        using var cts = new CancellationTokenSource();

        engine.StartDeliveryLoop(consumer, SendCapture, cts.Token);

        // Continuously enqueue data messages faster than the heartbeat interval
        // to keep the timer resetting. Each data delivery resets the idle heartbeat.
        for (var i = 0; i < 5; i++)
        {
            engine.Enqueue(consumer, new StoredMessage
            {
                Sequence = (ulong)(i + 1),
                Subject = "test.data",
                Payload = Encoding.UTF8.GetBytes($"msg-{i}"),
                TimestampUtc = DateTime.UtcNow,
            });
            await Task.Delay(30); // 30ms between messages — well within 100ms heartbeat
        }

        // Wait a bit after last message for potential heartbeat
        await Task.Delay(50);

        engine.StopDeliveryLoop();

        // Data frames should have been sent
        dataFramesSent.Count.ShouldBeGreaterThan(0);

        // During continuous data delivery, idle heartbeats from the timer should
        // NOT have fired because the timer is reset on each data frame.
        // (The queue-based heartbeat frames still fire as part of Enqueue, but
        // the idle heartbeat timer counter should be 0 or very low since data
        // kept flowing within the heartbeat interval.)
        engine.IdleHeartbeatsSent.ShouldBe(0);
    }
}
|
||||
@@ -0,0 +1,196 @@
|
||||
// Go: consumer.go — Pull consumer timeout enforcement and compiled filter tests.
|
||||
// ExpiresMs support per consumer.go pull request handling.
|
||||
// CompiledFilter optimizes multi-subject filter matching for consumers.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
public class PullConsumerTimeoutTests
|
||||
{
|
||||
// Builds the single test stream, capturing everything under "test.>".
private static StreamHandle MakeStream(MemStore store)
{
    var config = new StreamConfig { Name = "TEST", Subjects = ["test.>"] };
    return new StreamHandle(config, store);
}
|
||||
|
||||
// Builds a consumer on the test stream; defaults to durable "C1" when no
// explicit configuration is supplied.
private static ConsumerHandle MakeConsumer(ConsumerConfig? config = null)
{
    return new ConsumerHandle("TEST", config ?? new ConsumerConfig { DurableName = "C1" });
}
|
||||
|
||||
// -------------------------------------------------------------------------
// Test 1 — ExpiresMs returns partial batch when timeout fires
//
// Go reference: consumer.go — a pull fetch with an expiry hands back whatever
// messages are available when the deadline hits, even short of the batch.
// -------------------------------------------------------------------------
[Fact]
public async Task FetchAsync_ExpiresMs_ReturnsPartialBatch()
{
    var backing = new MemStore();
    var stream = MakeStream(backing);

    // Only two messages on hand, while the request asks for ten.
    await backing.AppendAsync("test.a", Encoding.UTF8.GetBytes("msg1"), CancellationToken.None);
    await backing.AppendAsync("test.b", Encoding.UTF8.GetBytes("msg2"), CancellationToken.None);

    var engine = new PullConsumerEngine();
    var request = new PullFetchRequest
    {
        Batch = 10,
        ExpiresMs = 100,
    };

    var result = await engine.FetchAsync(stream, MakeConsumer(), request, CancellationToken.None);

    // The expiry fires with a partial batch: exactly the two stored messages, in order.
    result.Messages.Count.ShouldBe(2);
    result.Messages[0].Subject.ShouldBe("test.a");
    result.Messages[1].Subject.ShouldBe("test.b");
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Test 2 — ExpiresMs sets TimedOut = true on partial result
|
||||
//
|
||||
// Go reference: consumer.go — when a pull request expires and the batch
|
||||
// is not fully filled, the response indicates a timeout occurred.
|
||||
// -------------------------------------------------------------------------
|
||||
[Fact]
|
||||
public async Task FetchAsync_ExpiresMs_ReturnsTimedOutTrue()
|
||||
{
|
||||
var store = new MemStore();
|
||||
var stream = MakeStream(store);
|
||||
|
||||
// Store no messages — the fetch should time out with empty results
|
||||
var consumer = MakeConsumer();
|
||||
var engine = new PullConsumerEngine();
|
||||
|
||||
var result = await engine.FetchAsync(stream, consumer, new PullFetchRequest
|
||||
{
|
||||
Batch = 5,
|
||||
ExpiresMs = 50,
|
||||
}, CancellationToken.None);
|
||||
|
||||
result.TimedOut.ShouldBeTrue();
|
||||
result.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Test 3 — No ExpiresMs waits for full batch (returns what's available)
|
||||
//
|
||||
// Go reference: consumer.go — without expires, the fetch returns available
|
||||
// messages up to batch size without a timeout constraint.
|
||||
// -------------------------------------------------------------------------
|
||||
[Fact]
|
||||
public async Task FetchAsync_NoExpires_WaitsForFullBatch()
|
||||
{
|
||||
var store = new MemStore();
|
||||
var stream = MakeStream(store);
|
||||
|
||||
await store.AppendAsync("test.a", Encoding.UTF8.GetBytes("msg1"), CancellationToken.None);
|
||||
await store.AppendAsync("test.b", Encoding.UTF8.GetBytes("msg2"), CancellationToken.None);
|
||||
await store.AppendAsync("test.c", Encoding.UTF8.GetBytes("msg3"), CancellationToken.None);
|
||||
|
||||
var consumer = MakeConsumer();
|
||||
var engine = new PullConsumerEngine();
|
||||
|
||||
var result = await engine.FetchAsync(stream, consumer, new PullFetchRequest
|
||||
{
|
||||
Batch = 3,
|
||||
ExpiresMs = 0, // No timeout
|
||||
}, CancellationToken.None);
|
||||
|
||||
result.Messages.Count.ShouldBe(3);
|
||||
result.TimedOut.ShouldBeFalse();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Test 4 — CompiledFilter with no filters matches everything
|
||||
//
|
||||
// Go reference: consumer.go — a consumer with no filter subjects receives
|
||||
// all messages from the stream.
|
||||
// -------------------------------------------------------------------------
|
||||
[Fact]
|
||||
public void CompiledFilter_NoFilters_MatchesEverything()
|
||||
{
|
||||
var filter = new CompiledFilter([]);
|
||||
|
||||
filter.Matches("test.a").ShouldBeTrue();
|
||||
filter.Matches("foo.bar.baz").ShouldBeTrue();
|
||||
filter.Matches("anything").ShouldBeTrue();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Test 5 — CompiledFilter with single exact filter matches only that subject
|
||||
//
|
||||
// Go reference: consumer.go — single filter_subject matches via MatchLiteral.
|
||||
// -------------------------------------------------------------------------
|
||||
[Fact]
|
||||
public void CompiledFilter_SingleFilter_MatchesExact()
|
||||
{
|
||||
var filter = new CompiledFilter(["test.specific"]);
|
||||
|
||||
filter.Matches("test.specific").ShouldBeTrue();
|
||||
filter.Matches("test.other").ShouldBeFalse();
|
||||
filter.Matches("test").ShouldBeFalse();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Test 6 — CompiledFilter with single wildcard filter
|
||||
//
|
||||
// Go reference: consumer.go — wildcard filter_subject uses MatchLiteral
|
||||
// which supports * (single token) and > (multi-token) wildcards.
|
||||
// -------------------------------------------------------------------------
|
||||
[Fact]
|
||||
public void CompiledFilter_SingleWildcard_MatchesPattern()
|
||||
{
|
||||
var starFilter = new CompiledFilter(["test.*"]);
|
||||
starFilter.Matches("test.a").ShouldBeTrue();
|
||||
starFilter.Matches("test.b").ShouldBeTrue();
|
||||
starFilter.Matches("test.a.b").ShouldBeFalse();
|
||||
starFilter.Matches("other.a").ShouldBeFalse();
|
||||
|
||||
var fwcFilter = new CompiledFilter(["test.>"]);
|
||||
fwcFilter.Matches("test.a").ShouldBeTrue();
|
||||
fwcFilter.Matches("test.a.b").ShouldBeTrue();
|
||||
fwcFilter.Matches("test.a.b.c").ShouldBeTrue();
|
||||
fwcFilter.Matches("other.a").ShouldBeFalse();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Test 7 — CompiledFilter with multiple filters matches any
|
||||
//
|
||||
// Go reference: consumer.go — filter_subjects (plural) matches if ANY of
|
||||
// the patterns match. Uses HashSet for exact subjects + MatchLiteral for
|
||||
// wildcard patterns.
|
||||
// -------------------------------------------------------------------------
|
||||
[Fact]
|
||||
public void CompiledFilter_MultipleFilters_MatchesAny()
|
||||
{
|
||||
var filter = new CompiledFilter(["orders.us", "orders.eu", "events.>"]);
|
||||
|
||||
// Exact matches
|
||||
filter.Matches("orders.us").ShouldBeTrue();
|
||||
filter.Matches("orders.eu").ShouldBeTrue();
|
||||
|
||||
// Wildcard match
|
||||
filter.Matches("events.created").ShouldBeTrue();
|
||||
filter.Matches("events.updated.v2").ShouldBeTrue();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Test 8 — CompiledFilter with multiple filters rejects non-matching
|
||||
//
|
||||
// Go reference: consumer.go — subjects that match none of the filter
|
||||
// patterns are excluded from delivery.
|
||||
// -------------------------------------------------------------------------
|
||||
[Fact]
|
||||
public void CompiledFilter_MultipleFilters_RejectsNonMatching()
|
||||
{
|
||||
var filter = new CompiledFilter(["orders.us", "orders.eu", "events.>"]);
|
||||
|
||||
filter.Matches("orders.jp").ShouldBeFalse();
|
||||
filter.Matches("billing.us").ShouldBeFalse();
|
||||
filter.Matches("events").ShouldBeFalse(); // ">" requires at least one token after
|
||||
filter.Matches("random.subject").ShouldBeFalse();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,317 @@
|
||||
// Go: consumer.go (dispatchToDeliver ~line 5040, sendFlowControl ~line 5495,
|
||||
// sendIdleHeartbeat ~line 5222, rate-limit logic ~line 5120)
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Push-consumer delivery loop tests: FIFO ordering, rate limiting, idle
/// heartbeats, flow control, cancellation, and JetStream metadata headers.
/// Go reference: consumer.go (dispatchToDeliver ~line 5040, sendFlowControl
/// ~line 5495, sendIdleHeartbeat ~line 5222, rate-limit logic ~line 5120).
/// </summary>
public class PushConsumerDeliveryTests
{
    // Helper: build a ConsumerHandle wired with the given config.
    private static ConsumerHandle MakeConsumer(ConsumerConfig config)
        => new("TEST-STREAM", config);

    // Helper: build a minimal StoredMessage.
    private static StoredMessage MakeMessage(ulong seq, string subject = "test.subject", string payload = "hello")
        => new()
        {
            Sequence = seq,
            Subject = subject,
            Payload = Encoding.UTF8.GetBytes(payload),
            TimestampUtc = DateTime.UtcNow,
        };

    // -------------------------------------------------------------------------
    // Test 1 — Delivery loop sends messages in FIFO order
    //
    // Go reference: consumer.go:5040 — dispatchToDeliver processes the outbound
    // queue sequentially; messages must arrive in the order they were enqueued.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task DeliveryLoop_sends_messages_in_FIFO_order()
    {
        var engine = new PushConsumerEngine();
        var consumer = MakeConsumer(new ConsumerConfig
        {
            DurableName = "PUSH",
            Push = true,
            DeliverSubject = "deliver.test",
        });

        engine.Enqueue(consumer, MakeMessage(1, payload: "first"));
        engine.Enqueue(consumer, MakeMessage(2, payload: "second"));
        engine.Enqueue(consumer, MakeMessage(3, payload: "third"));

        var received = new ConcurrentQueue<(string subject, ReadOnlyMemory<byte> payload)>();
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

        engine.StartDeliveryLoop(consumer,
            async (subj, _, _, payload, ct) =>
            {
                received.Enqueue((subj, payload));
                await ValueTask.CompletedTask;
            },
            cts.Token);

        // Poll without passing the token to Task.Delay: on timeout we fall
        // through to the count assertion below, which fails with a clear
        // Shouldly message instead of an opaque TaskCanceledException.
        while (received.Count < 3 && !cts.IsCancellationRequested)
            await Task.Delay(5);

        engine.StopDeliveryLoop();

        received.Count.ShouldBe(3);
        var items = received.ToArray();
        Encoding.UTF8.GetString(items[0].payload.Span).ShouldBe("first");
        Encoding.UTF8.GetString(items[1].payload.Span).ShouldBe("second");
        Encoding.UTF8.GetString(items[2].payload.Span).ShouldBe("third");
    }

    // -------------------------------------------------------------------------
    // Test 2 — Rate limiting delays delivery
    //
    // Go reference: consumer.go:5120 — the rate limiter delays sending when
    // AvailableAtUtc is in the future. A frame whose AvailableAtUtc is 150ms
    // ahead must not be delivered until that deadline has passed.
    // The delivery loop honours frame.AvailableAtUtc directly; this test
    // injects a frame with a known future timestamp to verify that behaviour.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task DeliveryLoop_rate_limiting_delays_delivery()
    {
        var engine = new PushConsumerEngine();
        var consumer = MakeConsumer(new ConsumerConfig
        {
            DurableName = "RATE",
            Push = true,
            DeliverSubject = "deliver.rate",
        });

        // Inject a frame with AvailableAtUtc 150ms in the future to simulate
        // what Enqueue() computes when RateLimitBps produces a delay.
        var msg = MakeMessage(1);
        consumer.PushFrames.Enqueue(new PushFrame
        {
            IsData = true,
            Message = msg,
            AvailableAtUtc = DateTime.UtcNow.AddMilliseconds(150),
        });

        // RunContinuationsAsynchronously keeps the awaiting test code off the
        // delivery-loop thread when TrySetResult fires.
        var delivered = new TaskCompletionSource<DateTime>(TaskCreationOptions.RunContinuationsAsynchronously);
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

        var startedAt = DateTime.UtcNow;
        engine.StartDeliveryLoop(consumer,
            async (_, _, _, _, _) =>
            {
                delivered.TrySetResult(DateTime.UtcNow);
                await ValueTask.CompletedTask;
            },
            cts.Token);

        var deliveredAt = await delivered.Task.WaitAsync(TimeSpan.FromSeconds(5));
        engine.StopDeliveryLoop();

        // The loop must have waited at least ~100ms for AvailableAtUtc to pass
        // (100 rather than 150 leaves slack for timer granularity).
        var elapsed = deliveredAt - startedAt;
        elapsed.TotalMilliseconds.ShouldBeGreaterThan(100);
    }

    // -------------------------------------------------------------------------
    // Test 3 — Heartbeat frames are sent
    //
    // Go reference: consumer.go:5222 — sendIdleHeartbeat emits a
    // "NATS/1.0 100 Idle Heartbeat" status frame on the deliver subject.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task DeliveryLoop_sends_heartbeat_frames()
    {
        var engine = new PushConsumerEngine();
        var consumer = MakeConsumer(new ConsumerConfig
        {
            DurableName = "HB",
            Push = true,
            DeliverSubject = "deliver.hb",
            HeartbeatMs = 100,
        });

        // Enqueue one data message; HeartbeatMs > 0 causes Enqueue to also
        // append a heartbeat frame immediately after.
        engine.Enqueue(consumer, MakeMessage(1));

        var headerSnapshots = new ConcurrentBag<ReadOnlyMemory<byte>>();
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

        engine.StartDeliveryLoop(consumer,
            async (_, _, headers, _, _) =>
            {
                headerSnapshots.Add(headers);
                await ValueTask.CompletedTask;
            },
            cts.Token);

        // Wait for both the data frame and the heartbeat frame; on timeout,
        // fall through to the assertion rather than throwing from Task.Delay.
        while (headerSnapshots.Count < 2 && !cts.IsCancellationRequested)
            await Task.Delay(5);

        engine.StopDeliveryLoop();

        headerSnapshots.Count.ShouldBeGreaterThanOrEqualTo(2);

        // At least one frame must contain "Idle Heartbeat"
        var anyHeartbeat = headerSnapshots.Any(h =>
            Encoding.ASCII.GetString(h.Span).Contains("Idle Heartbeat"));
        anyHeartbeat.ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 4 — Flow control frames are sent
    //
    // Go reference: consumer.go:5495 — sendFlowControl sends a status frame
    // "NATS/1.0 100 FlowControl Request" to the deliver subject.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task DeliveryLoop_sends_flow_control_frames()
    {
        var engine = new PushConsumerEngine();
        var consumer = MakeConsumer(new ConsumerConfig
        {
            DurableName = "FC",
            Push = true,
            DeliverSubject = "deliver.fc",
            FlowControl = true,
            HeartbeatMs = 100, // Go requires heartbeat when flow control is on
        });

        engine.Enqueue(consumer, MakeMessage(1));

        var headerSnapshots = new ConcurrentBag<ReadOnlyMemory<byte>>();
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

        engine.StartDeliveryLoop(consumer,
            async (_, _, headers, _, _) =>
            {
                headerSnapshots.Add(headers);
                await ValueTask.CompletedTask;
            },
            cts.Token);

        // data + flow-control + heartbeat = 3 frames; poll without the token
        // so a timeout reaches the assertion below.
        while (headerSnapshots.Count < 3 && !cts.IsCancellationRequested)
            await Task.Delay(5);

        engine.StopDeliveryLoop();

        var anyFlowControl = headerSnapshots.Any(h =>
            Encoding.ASCII.GetString(h.Span).Contains("FlowControl"));
        anyFlowControl.ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 5 — Delivery stops on cancellation
    //
    // Go reference: consumer.go — the delivery goroutine exits when the qch
    // (quit channel) is signalled, which maps to CancellationToken here.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task DeliveryLoop_stops_on_cancellation()
    {
        var engine = new PushConsumerEngine();
        var consumer = MakeConsumer(new ConsumerConfig
        {
            DurableName = "CANCEL",
            Push = true,
            DeliverSubject = "deliver.cancel",
        });

        var deliveryCount = 0;
        using var cts = new CancellationTokenSource();

        engine.StartDeliveryLoop(consumer,
            async (_, _, _, _, _) =>
            {
                Interlocked.Increment(ref deliveryCount);
                await ValueTask.CompletedTask;
            },
            cts.Token);

        // Cancel immediately — nothing enqueued so delivery count must stay 0
        await cts.CancelAsync();
        engine.StopDeliveryLoop();

        // Brief settle — no messages were queued so nothing should have been delivered.
        // Volatile.Read pairs with the Interlocked.Increment above so the check
        // observes the latest value written by the delivery thread.
        await Task.Delay(20);
        Volatile.Read(ref deliveryCount).ShouldBe(0);
    }

    // -------------------------------------------------------------------------
    // Test 6 — Data frame headers contain JetStream metadata
    //
    // Go reference: stream.go:586 — JSSequence = "Nats-Sequence",
    // JSTimeStamp = "Nats-Time-Stamp", JSSubject = "Nats-Subject"
    // -------------------------------------------------------------------------
    [Fact]
    public async Task DeliveryLoop_data_frame_headers_contain_jetstream_metadata()
    {
        var engine = new PushConsumerEngine();
        var consumer = MakeConsumer(new ConsumerConfig
        {
            DurableName = "META",
            Push = true,
            DeliverSubject = "deliver.meta",
        });

        var msg = MakeMessage(42, subject: "events.created");
        engine.Enqueue(consumer, msg);

        ReadOnlyMemory<byte>? capturedHeaders = null;
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        // RunContinuationsAsynchronously: do not resume the test inline on the
        // delivery-loop thread when TrySetResult fires.
        var tcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

        engine.StartDeliveryLoop(consumer,
            async (_, _, headers, _, _) =>
            {
                capturedHeaders = headers;
                tcs.TrySetResult(true);
                await ValueTask.CompletedTask;
            },
            cts.Token);

        await tcs.Task.WaitAsync(TimeSpan.FromSeconds(5));
        engine.StopDeliveryLoop();

        capturedHeaders.ShouldNotBeNull();
        var headerText = Encoding.ASCII.GetString(capturedHeaders!.Value.Span);
        headerText.ShouldContain("Nats-Sequence: 42");
        headerText.ShouldContain("Nats-Subject: events.created");
        headerText.ShouldContain("Nats-Time-Stamp:");
    }

    // -------------------------------------------------------------------------
    // Test 7 — DeliverSubject property is set when StartDeliveryLoop is called
    //
    // Go reference: consumer.go:1131 — dsubj is set from cfg.DeliverSubject.
    // -------------------------------------------------------------------------
    [Fact]
    public void DeliverSubject_property_is_set_from_consumer_config()
    {
        var engine = new PushConsumerEngine();
        var consumer = MakeConsumer(new ConsumerConfig
        {
            DurableName = "DS",
            Push = true,
            DeliverSubject = "my.deliver.subject",
        });

        using var cts = new CancellationTokenSource();
        engine.StartDeliveryLoop(consumer,
            (_, _, _, _, _) => ValueTask.CompletedTask,
            cts.Token);

        engine.DeliverSubject.ShouldBe("my.deliver.subject");
        engine.StopDeliveryLoop();
    }
}
|
||||
@@ -0,0 +1,113 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for the new PriorityQueue-based RedeliveryTracker features.
|
||||
/// Go reference: consumer.go (rdq redelivery queue).
|
||||
/// </summary>
|
||||
/// <summary>
/// Tests for the PriorityQueue-based RedeliveryTracker features: deadline
/// scheduling, acknowledgement, max-delivery thresholds, and backoff lookup.
/// Go reference: consumer.go (rdq redelivery queue).
/// </summary>
public class RedeliveryTrackerPriorityQueueTests
{
    [Fact]
    public void Schedule_and_get_due_returns_expired()
    {
        var rdq = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);
        var reference = DateTimeOffset.UtcNow;

        rdq.Schedule(1, reference.AddMilliseconds(-100)); // already expired
        rdq.Schedule(2, reference.AddSeconds(60));        // far in the future

        var expired = rdq.GetDue(DateTimeOffset.UtcNow).ToList();

        // Only the past-deadline entry surfaces.
        expired.Count.ShouldBe(1);
        expired[0].ShouldBe(1UL);
    }

    [Fact]
    public void Acknowledge_removes_from_queue()
    {
        var rdq = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);
        rdq.Schedule(1, DateTimeOffset.UtcNow.AddMilliseconds(-100));

        rdq.Acknowledge(1);

        // An acked sequence must never come back, expired deadline or not.
        rdq.GetDue(DateTimeOffset.UtcNow).ToList().ShouldBeEmpty();
    }

    [Fact]
    public void IsMaxDeliveries_returns_true_at_threshold()
    {
        var rdq = new RedeliveryTracker(maxDeliveries: 3, ackWaitMs: 1000);

        // Two deliveries: still below the threshold of three.
        rdq.IncrementDeliveryCount(1);
        rdq.IncrementDeliveryCount(1);
        rdq.IsMaxDeliveries(1).ShouldBeFalse();

        // The third delivery reaches the configured limit.
        rdq.IncrementDeliveryCount(1);
        rdq.IsMaxDeliveries(1).ShouldBeTrue();
    }

    [Fact]
    public void Backoff_schedule_uses_delivery_count()
    {
        var rdq = new RedeliveryTracker(
            maxDeliveries: 10,
            ackWaitMs: 1000,
            backoffMs: new long[] { 100, 500, 2000 });

        // Delivery counts 1 and 2 index straight into the schedule; any count
        // past the end of the array clamps to the last entry.
        rdq.GetBackoffDelay(deliveryCount: 1).ShouldBe(100L);
        rdq.GetBackoffDelay(deliveryCount: 2).ShouldBe(500L);
        rdq.GetBackoffDelay(deliveryCount: 4).ShouldBe(2000L);
    }

    [Fact]
    public void GetDue_returns_in_deadline_order()
    {
        var rdq = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);
        var reference = DateTimeOffset.UtcNow;

        // Deliberately scheduled out of order.
        rdq.Schedule(3, reference.AddMilliseconds(-300));
        rdq.Schedule(1, reference.AddMilliseconds(-100));
        rdq.Schedule(2, reference.AddMilliseconds(-200));

        var expired = rdq.GetDue(reference).ToList();

        // Earliest deadline drains first.
        expired.Count.ShouldBe(3);
        expired[0].ShouldBe(3UL);
        expired[1].ShouldBe(2UL);
        expired[2].ShouldBe(1UL);
    }

    [Fact]
    public void GetBackoffDelay_with_no_backoff_returns_ackWait()
    {
        // Without an explicit backoff schedule, ackWaitMs is the delay for
        // every delivery count.
        var rdq = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 2000);

        rdq.GetBackoffDelay(1).ShouldBe(2000L);
        rdq.GetBackoffDelay(5).ShouldBe(2000L);
    }

    [Fact]
    public void IncrementDeliveryCount_for_untracked_seq_starts_at_one()
    {
        var rdq = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);

        // First increment of a fresh sequence yields count = 1, which with
        // maxDeliveries = 5 is well below the threshold.
        rdq.IncrementDeliveryCount(42);
        rdq.IsMaxDeliveries(42).ShouldBeFalse();
    }

    [Fact]
    public void Acknowledge_also_clears_delivery_count()
    {
        var rdq = new RedeliveryTracker(maxDeliveries: 3, ackWaitMs: 1000);
        rdq.IncrementDeliveryCount(1);
        rdq.IncrementDeliveryCount(1);

        rdq.Acknowledge(1);

        // Ack wipes the delivery counter, so the threshold check resets.
        rdq.IsMaxDeliveries(1).ShouldBeFalse();
    }
}
|
||||
@@ -0,0 +1,198 @@
|
||||
// Go: consumer.go (trackPending ~line 5540, processNak, rdq/rdc map,
|
||||
// addToRedeliverQueue, maxdeliver check)
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// RedeliveryTracker tests for the backoff-array constructor: backoff
/// clamping, due-entry surfacing, acknowledgement, and max-delivery checks.
/// Go reference: consumer.go (trackPending ~line 5540, processNak, rdq/rdc
/// map, addToRedeliverQueue, maxdeliver check).
/// </summary>
public class RedeliveryTrackerTests
{
    // -------------------------------------------------------------------------
    // Test 1 — Backoff array clamping at last entry for high delivery counts
    //
    // Go reference: consumer.go — backoff index = min(deliveries-1, len(backoff)-1)
    // so that sequences with delivery counts past the array length use the last
    // backoff value rather than going out of bounds.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Schedule_clamps_backoff_at_last_entry_for_high_delivery_count()
    {
        var tracker = new RedeliveryTracker([1, 5000]);

        // delivery 1 → backoff[0] = 1ms; the 10ms wait guarantees it elapses.
        tracker.Schedule(seq: 1, deliveryCount: 1);
        await Task.Delay(10);
        tracker.GetDue().ShouldContain(1UL);

        tracker.Acknowledge(1);

        // delivery 3 → index clamps to 1 → backoff[1] = 5000ms, so the
        // sequence cannot be due within this test.
        tracker.Schedule(seq: 1, deliveryCount: 3);
        tracker.GetDue().ShouldNotContain(1UL);
    }

    // -------------------------------------------------------------------------
    // Test 2 — GetDue returns only entries whose deadline has passed
    //
    // Go reference: consumer.go — rdq items are eligible for redelivery only
    // once their scheduled deadline has elapsed.
    //
    // Fix: the original used a 1ms backoff for seq 10 and asserted it was NOT
    // yet due immediately after scheduling — racy, since 1ms can easily elapse
    // before GetDue runs. A 30ms backoff keeps the immediate check reliable
    // while still expiring well inside the 100ms wait window.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task GetDue_returns_only_expired_entries()
    {
        var tracker = new RedeliveryTracker([30, 5000]);

        // 30ms backoff → expires within the test's wait window
        tracker.Schedule(seq: 10, deliveryCount: 1);
        // 5000ms backoff → will not expire in test window
        tracker.Schedule(seq: 20, deliveryCount: 2);

        // Neither should be due yet immediately after scheduling
        tracker.GetDue().ShouldNotContain(10UL);

        await Task.Delay(100);

        var due = tracker.GetDue();
        due.ShouldContain(10UL);
        due.ShouldNotContain(20UL);
    }

    // -------------------------------------------------------------------------
    // Test 3 — Acknowledge removes the sequence from tracking
    //
    // Go reference: consumer.go — acking a sequence removes it from pending map
    // so it is never surfaced by GetDue again.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Acknowledge_removes_sequence_from_tracking()
    {
        var tracker = new RedeliveryTracker([1]);

        tracker.Schedule(seq: 5, deliveryCount: 1);
        await Task.Delay(10);

        tracker.GetDue().ShouldContain(5UL);

        tracker.Acknowledge(5);

        tracker.IsTracking(5).ShouldBeFalse();
        tracker.GetDue().ShouldNotContain(5UL);
        tracker.TrackedCount.ShouldBe(0);
    }

    // -------------------------------------------------------------------------
    // Test 4 — IsMaxDeliveries returns true when threshold is reached
    //
    // Go reference: consumer.go — when rdc[sseq] >= MaxDeliver the sequence is
    // dropped from redelivery and never surfaced again.
    // -------------------------------------------------------------------------
    [Fact]
    public void IsMaxDeliveries_returns_true_when_delivery_count_meets_threshold()
    {
        var tracker = new RedeliveryTracker([100]);

        tracker.Schedule(seq: 7, deliveryCount: 3);

        // count >= maxDeliver is the trigger, so 3 trips both 3 and 2.
        tracker.IsMaxDeliveries(7, maxDeliver: 3).ShouldBeTrue();
        tracker.IsMaxDeliveries(7, maxDeliver: 4).ShouldBeFalse();
        tracker.IsMaxDeliveries(7, maxDeliver: 2).ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 5 — IsMaxDeliveries returns false when maxDeliver is 0 (unlimited)
    //
    // Go reference: consumer.go — MaxDeliver <= 0 means unlimited redeliveries.
    // -------------------------------------------------------------------------
    [Fact]
    public void IsMaxDeliveries_returns_false_when_maxDeliver_is_zero()
    {
        var tracker = new RedeliveryTracker([100]);

        tracker.Schedule(seq: 99, deliveryCount: 1000);

        tracker.IsMaxDeliveries(99, maxDeliver: 0).ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 6 — Empty backoff falls back to ackWait
    //
    // Go reference: consumer.go — when BackOff is empty the ack-wait duration is
    // used as the redelivery delay.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Schedule_with_empty_backoff_falls_back_to_ackWait()
    {
        // Empty backoff array → fall back to ackWaitMs
        var tracker = new RedeliveryTracker([]);

        tracker.Schedule(seq: 1, deliveryCount: 1, ackWaitMs: 1);
        await Task.Delay(10);

        tracker.GetDue().ShouldContain(1UL);
    }

    // -------------------------------------------------------------------------
    // Test 7 — Empty backoff with large ackWait does not expire prematurely
    // -------------------------------------------------------------------------
    [Fact]
    public void Schedule_with_empty_backoff_and_large_ackWait_does_not_expire()
    {
        var tracker = new RedeliveryTracker([]);

        // 5000ms ack wait → cannot be due within this test.
        tracker.Schedule(seq: 2, deliveryCount: 1, ackWaitMs: 5000);

        tracker.GetDue().ShouldNotContain(2UL);
    }

    // -------------------------------------------------------------------------
    // Test 8 — Schedule returns the deadline UTC time
    //
    // Go reference: consumer.go:5540 — trackPending stores the computed deadline.
    // -------------------------------------------------------------------------
    [Fact]
    public void Schedule_returns_deadline_in_the_future()
    {
        var tracker = new RedeliveryTracker([100]);

        var before = DateTime.UtcNow;
        var deadline = tracker.Schedule(seq: 3, deliveryCount: 1);
        var after = DateTime.UtcNow;

        deadline.ShouldBeGreaterThanOrEqualTo(before);
        // Deadline should be ahead of scheduling time by at least the backoff value
        (deadline - after).TotalMilliseconds.ShouldBeGreaterThan(0);
    }

    // -------------------------------------------------------------------------
    // Test 9 — Multiple sequences tracked independently
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Multiple_sequences_are_tracked_independently()
    {
        var tracker = new RedeliveryTracker([1, 5000]);

        tracker.Schedule(seq: 1, deliveryCount: 1); // 1ms → expires soon
        tracker.Schedule(seq: 2, deliveryCount: 2); // 5000ms → won't expire

        tracker.TrackedCount.ShouldBe(2);

        await Task.Delay(15);

        var due = tracker.GetDue();
        due.ShouldContain(1UL);
        due.ShouldNotContain(2UL);

        // Acking one sequence must not disturb the other.
        tracker.Acknowledge(1);
        tracker.TrackedCount.ShouldBe(1);
    }

    // -------------------------------------------------------------------------
    // Test 10 — IsMaxDeliveries returns false for untracked sequence
    // -------------------------------------------------------------------------
    [Fact]
    public void IsMaxDeliveries_returns_false_for_untracked_sequence()
    {
        var tracker = new RedeliveryTracker([100]);

        // An unknown sequence has an implicit delivery count of zero.
        tracker.IsMaxDeliveries(999, maxDeliver: 1).ShouldBeFalse();
    }
}
|
||||
@@ -0,0 +1,174 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for SampleTracker: sample frequency parsing and stochastic latency sampling.
/// Go reference: consumer.go sampleFrequency, shouldSample, parseSampleFrequency.
/// </summary>
public class SampleModeTests
{
    // --- ParseSampleFrequency ---
    // Each case pins one parsing rule: percent sign optional, invalid/empty/null
    // collapse to 0.0, and anything above 100% clamps to 1.0.

    [Fact]
    public void ParseSampleFrequency_one_percent()
        => SampleTracker.ParseSampleFrequency("1%").ShouldBe(0.01, 1e-9);

    [Fact]
    public void ParseSampleFrequency_fifty_percent()
        => SampleTracker.ParseSampleFrequency("50%").ShouldBe(0.5, 1e-9);

    [Fact]
    public void ParseSampleFrequency_hundred_percent()
        => SampleTracker.ParseSampleFrequency("100%").ShouldBe(1.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_zero()
        => SampleTracker.ParseSampleFrequency("0%").ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_no_percent_sign()
        => SampleTracker.ParseSampleFrequency("25").ShouldBe(0.25, 1e-9);

    [Fact]
    public void ParseSampleFrequency_empty_string()
        => SampleTracker.ParseSampleFrequency("").ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_null()
        => SampleTracker.ParseSampleFrequency(null).ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_invalid()
        => SampleTracker.ParseSampleFrequency("abc").ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_over_100_clamped()
        => SampleTracker.ParseSampleFrequency("200%").ShouldBe(1.0, 1e-9);

    // --- ShouldSample ---

    [Fact]
    public void ShouldSample_rate_100_always_samples()
    {
        // Rate 1.0 must sample every delivery, not merely most of them.
        var sut = new SampleTracker(1.0);

        var remaining = 20;
        while (remaining-- > 0)
        {
            sut.ShouldSample().ShouldBeTrue();
        }
    }

    [Fact]
    public void ShouldSample_rate_0_never_samples()
    {
        // Rate 0.0 must never sample, regardless of how often it is asked.
        var sut = new SampleTracker(0.0);

        var remaining = 20;
        while (remaining-- > 0)
        {
            sut.ShouldSample().ShouldBeFalse();
        }
    }

    [Fact]
    public void ShouldSample_increments_total_deliveries()
    {
        // Every call counts as a delivery, whether or not it is sampled.
        var sut = new SampleTracker(0.5);
        sut.TotalDeliveries.ShouldBe(0L);

        for (var call = 1L; call <= 3L; call++)
        {
            sut.ShouldSample();
            sut.TotalDeliveries.ShouldBe(call);
        }
    }

    [Fact]
    public void ShouldSample_stochastic_with_seeded_random()
    {
        // Two Random instances with the same seed produce the same stream, so
        // a mirror generator predicts each sampling decision exactly.
        var seededRng = new Random(42);
        var sut = new SampleTracker(0.5, seededRng);

        var mirror = new Random(42);
        for (var i = 0; i < 10; i++)
        {
            sut.ShouldSample().ShouldBe(mirror.NextDouble() < 0.5);
        }

        sut.TotalDeliveries.ShouldBe(10L);
    }

    [Fact]
    public void RecordLatency_captures_all_fields()
    {
        // The recorded sample must carry sequence, subject, latency, and a
        // UTC timestamp bracketed by the call.
        var sut = new SampleTracker(1.0);
        var deliveryLatency = TimeSpan.FromMilliseconds(42);
        const ulong sequence = 7UL;
        const string subject = "orders.new";

        var lowerBound = DateTime.UtcNow;
        var sample = sut.RecordLatency(deliveryLatency, sequence, subject);
        var upperBound = DateTime.UtcNow;

        sample.Sequence.ShouldBe(sequence);
        sample.Subject.ShouldBe(subject);
        sample.DeliveryLatency.ShouldBe(deliveryLatency);
        sample.SampledAtUtc.ShouldBeGreaterThanOrEqualTo(lowerBound);
        sample.SampledAtUtc.ShouldBeLessThanOrEqualTo(upperBound);
    }

    [Fact]
    public void SampleCount_tracks_sampled_only()
    {
        // Rate 1.0: every delivery is sampled.
        var allSampled = new SampleTracker(1.0);
        for (var i = 0; i < 5; i++)
        {
            allSampled.ShouldSample();
        }
        allSampled.SampleCount.ShouldBe(5L);
        allSampled.TotalDeliveries.ShouldBe(5L);

        // Rate 0.0: deliveries are counted but never sampled.
        var noneSampled = new SampleTracker(0.0);
        for (var i = 0; i < 5; i++)
        {
            noneSampled.ShouldSample();
        }
        noneSampled.SampleCount.ShouldBe(0L);
        noneSampled.TotalDeliveries.ShouldBe(5L);
    }
}
|
||||
@@ -0,0 +1,209 @@
|
||||
// Go: consumer.go (rateLimitBps config, rate limiting in consumer delivery)
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
public class TokenBucketTests
{
    // -------------------------------------------------------------------------
    // Test 1 — TryConsume succeeds when enough tokens are available
    //
    // Go reference: consumer.go — rate limiter allows delivery when token
    // bucket has sufficient capacity for the message payload size.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_succeeds_when_tokens_available()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);

        // Full bucket — consume 100 bytes should succeed
        var result = limiter.TryConsume(100);

        result.ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 2 — TryConsume fails when insufficient tokens remain
    //
    // Go reference: consumer.go — delivery is gated when bucket is drained.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_fails_when_insufficient_tokens()
    {
        // Burst = 2x rate = 200 bytes
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 100);

        // Drain all tokens (200 byte burst)
        limiter.TryConsume(200).ShouldBeTrue();

        // Next consume should fail — no tokens left
        var result = limiter.TryConsume(1);

        result.ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 3 — TryConsume always returns true when rate is zero (unlimited)
    //
    // Go reference: consumer.go — rateLimitBps=0 means no rate limiting.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_unlimited_when_rate_zero()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 0);

        // Should always succeed regardless of size
        limiter.TryConsume(1_000_000).ShouldBeTrue();
        limiter.TryConsume(1_000_000).ShouldBeTrue();
        limiter.TryConsume(long.MaxValue / 2).ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 4 — AvailableTokens starts at burst size
    //
    // Go reference: consumer.go — bucket starts full so initial burst is allowed.
    // The bucket cannot refill above its cap, so a tolerance of 1.0 is safe here.
    // -------------------------------------------------------------------------
    [Fact]
    public void AvailableTokens_starts_at_burst_size()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000, burstSize: 500);

        limiter.AvailableTokens.ShouldBe(500.0, tolerance: 1.0);
    }

    // -------------------------------------------------------------------------
    // Test 5 — AvailableTokens refills over time
    //
    // Go reference: consumer.go — token bucket refills at configured bytes/sec
    // so that a drained bucket recovers proportionally with elapsed time.
    //
    // FIX: the post-drain assertion previously required < 1.0 token, but at
    // 10 tokens/ms any scheduler pause longer than 0.1 ms between TryConsume
    // and the property read made the test flaky. The bound now allows up to
    // ~100 ms of drift (1,000 tokens) while still proving the bucket was
    // substantially drained relative to its 20,000-token burst.
    // -------------------------------------------------------------------------
    [SlopwatchSuppress("SW004", "Token bucket refill is driven by real elapsed wall-clock time; no synchronisation primitive can replace observing time-based token accumulation")]
    [Fact]
    public async Task AvailableTokens_refills_over_time()
    {
        // 10,000 bytes/sec = 10 bytes/ms; burst = 20,000 bytes
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 10_000);

        // Drain entire bucket
        limiter.TryConsume(20_000).ShouldBeTrue();

        // Drift-tolerant "drained" check: < 1,000 of 20,000 tokens remain.
        limiter.AvailableTokens.ShouldBeLessThan(1_000.0);

        // Wait 50ms — should refill ~500 bytes (10 bytes/ms * 50ms)
        await Task.Delay(50);

        limiter.AvailableTokens.ShouldBeGreaterThan(100.0);
    }

    // -------------------------------------------------------------------------
    // Test 6 — EstimateWait returns zero when tokens are available
    //
    // Go reference: consumer.go — no delay when bucket has capacity.
    // -------------------------------------------------------------------------
    [Fact]
    public void EstimateWait_returns_zero_when_tokens_available()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);

        var wait = limiter.EstimateWait(100);

        wait.ShouldBe(TimeSpan.Zero);
    }

    // -------------------------------------------------------------------------
    // Test 7 — EstimateWait returns positive duration when tokens are insufficient
    //
    // Go reference: consumer.go — delivery delay calculated from deficit / refill rate.
    // -------------------------------------------------------------------------
    [Fact]
    public void EstimateWait_returns_positive_when_insufficient()
    {
        // 100 bytes/sec = 0.1 bytes/ms; burst = 200 bytes
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 100);

        // Drain all tokens
        limiter.TryConsume(200).ShouldBeTrue();

        // Requesting 50 more bytes — must wait
        var wait = limiter.EstimateWait(50);

        wait.ShouldBeGreaterThan(TimeSpan.Zero);
    }

    // -------------------------------------------------------------------------
    // Test 8 — UpdateRate changes the effective rate dynamically
    //
    // Go reference: consumer.go — rate can be updated via config reload.
    // -------------------------------------------------------------------------
    [Fact]
    public void UpdateRate_changes_rate_dynamically()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);

        limiter.UpdateRate(500);

        limiter.BytesPerSecond.ShouldBe(500L);
    }

    // -------------------------------------------------------------------------
    // Test 9 — UpdateRate caps existing tokens at new max
    //
    // Go reference: consumer.go — when burst is reduced, current tokens are
    // clamped to not exceed the new maximum.
    // -------------------------------------------------------------------------
    [Fact]
    public void UpdateRate_caps_tokens_at_new_max()
    {
        // Start with rate=1000, burst=2000
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);

        // Reduce to rate=100, burst=200 — existing tokens (2000) must be capped
        limiter.UpdateRate(100);

        limiter.AvailableTokens.ShouldBeLessThanOrEqualTo(200.0 + 1.0); // +1 for refill epsilon
    }

    // -------------------------------------------------------------------------
    // Test 10 — TryConsume partial consumption leaves correct remainder
    //
    // Go reference: consumer.go — each delivery subtracts exactly payload bytes
    // from the bucket.
    //
    // FIX: the previous range [99.0, 101.0] allowed only 1 token (~1 ms at
    // 1000 B/s) of refill drift between TryConsume and the property read,
    // which is flaky under load. Tokens can never drop below 100 after a
    // 100-byte consume from a full 200-token bucket, and refill can never
    // push them above the 200-token burst cap, so [99.0, 200.0] is exact-safe.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_partial_consumption()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000, burstSize: 200);

        limiter.TryConsume(100).ShouldBeTrue();

        // Lower bound: exact remainder minus float epsilon; upper bound: burst cap.
        limiter.AvailableTokens.ShouldBeInRange(99.0, 200.0);
    }

    // -------------------------------------------------------------------------
    // Test 11 — Default burst size is 2x the bytes-per-second rate
    //
    // Go reference: consumer.go — default burst allows two seconds worth of data.
    // -------------------------------------------------------------------------
    [Fact]
    public void Default_burst_is_2x_rate()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 500);

        // Bucket starts full at burst = 2 * 500 = 1000
        limiter.AvailableTokens.ShouldBe(1000.0, tolerance: 1.0);
    }

    // -------------------------------------------------------------------------
    // Test 12 — Custom burst size overrides the default 2x calculation
    //
    // Go reference: consumer.go — explicit burst size gives precise control
    // over maximum allowed burst traffic.
    // -------------------------------------------------------------------------
    [Fact]
    public void Custom_burst_size()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 500, burstSize: 750);

        limiter.AvailableTokens.ShouldBe(750.0, tolerance: 1.0);
    }
}
|
||||
@@ -0,0 +1,116 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for WaitingRequestQueue FIFO queue with expiry and batch/byte tracking.
/// Go reference: consumer.go processNextMsgRequest.
/// </summary>
public class WaitingRequestQueueTests
{
    [Fact]
    public void Enqueue_and_dequeue_fifo()
    {
        // Two requests in, the first one enqueued must come out first.
        var sut = new WaitingRequestQueue();
        sut.Enqueue(new PullRequest("reply.1", Batch: 10, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false));
        sut.Enqueue(new PullRequest("reply.2", Batch: 5, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false));

        sut.Count.ShouldBe(2);

        var head = sut.TryDequeue();
        head.ShouldNotBeNull();
        head.ReplyTo.ShouldBe("reply.1");
    }

    [Fact]
    public void TryDequeue_returns_null_when_empty()
    {
        var sut = new WaitingRequestQueue();

        sut.TryDequeue().ShouldBeNull();
        sut.IsEmpty.ShouldBeTrue();
    }

    [Fact]
    public void Expired_requests_are_removed()
    {
        // One request already past its deadline, one still valid.
        var sut = new WaitingRequestQueue();
        sut.Enqueue(new PullRequest("expired", Batch: 10, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMilliseconds(-100), NoWait: false));
        sut.Enqueue(new PullRequest("valid", Batch: 10, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false));

        sut.RemoveExpired(DateTimeOffset.UtcNow);
        sut.Count.ShouldBe(1);

        var survivor = sut.TryDequeue();
        survivor!.ReplyTo.ShouldBe("valid");
    }

    [Fact]
    public void NoWait_request_returns_immediately_when_empty()
    {
        var sut = new WaitingRequestQueue();
        sut.Enqueue(new PullRequest("nowait", Batch: 10, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: true));

        var request = sut.TryDequeue();

        request.ShouldNotBeNull();
        request.NoWait.ShouldBeTrue();
    }

    [Fact]
    public void MaxBytes_tracks_accumulation()
    {
        // Byte budget decrements as payloads are delivered against the request.
        var sut = new WaitingRequestQueue();
        sut.Enqueue(new PullRequest("mb", Batch: 100, MaxBytes: 1024, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false));

        var pending = sut.TryDequeue()!;
        pending.MaxBytes.ShouldBe(1024L);
        pending.RemainingBytes.ShouldBe(1024L);

        pending.ConsumeBytes(256);
        pending.RemainingBytes.ShouldBe(768L);
        pending.IsExhausted.ShouldBeFalse();

        pending.ConsumeBytes(800);
        pending.IsExhausted.ShouldBeTrue();
    }

    [Fact]
    public void Batch_decrements_on_delivery()
    {
        // Batch budget decrements once per delivered message until exhausted.
        var sut = new WaitingRequestQueue();
        sut.Enqueue(new PullRequest("batch", Batch: 3, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false));

        var pending = sut.TryDequeue()!;
        pending.RemainingBatch.ShouldBe(3);

        pending.ConsumeBatch();
        pending.RemainingBatch.ShouldBe(2);

        pending.ConsumeBatch();
        pending.ConsumeBatch();
        pending.IsExhausted.ShouldBeTrue();
    }

    [Fact]
    public void RemoveExpired_handles_all_expired()
    {
        // When every waiting request has expired the queue ends up empty.
        var sut = new WaitingRequestQueue();
        sut.Enqueue(new PullRequest("a", Batch: 1, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMilliseconds(-100), NoWait: false));
        sut.Enqueue(new PullRequest("b", Batch: 1, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMilliseconds(-50), NoWait: false));

        sut.RemoveExpired(DateTimeOffset.UtcNow);

        sut.Count.ShouldBe(0);
        sut.IsEmpty.ShouldBeTrue();
    }

    [Fact]
    public void PinId_is_stored()
    {
        var sut = new WaitingRequestQueue();
        sut.Enqueue(new PullRequest("pin", Batch: 1, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false, PinId: "pin-123"));

        var pending = sut.TryDequeue()!;

        pending.PinId.ShouldBe("pin-123");
    }
}
|
||||
@@ -0,0 +1,91 @@
|
||||
using NATS.Server.JetStream;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
/// <summary>
/// Tests for InterestRetentionPolicy per-consumer ack tracking.
/// Go reference: stream.go checkInterestState/noInterest.
/// </summary>
public class InterestRetentionTests
{
    [Fact]
    public void ShouldRetain_true_when_consumers_have_not_acked()
    {
        // Both consumers have interest and neither has acked — message stays.
        var sut = new InterestRetentionPolicy();
        sut.RegisterInterest("consumer-A", "orders.>");
        sut.RegisterInterest("consumer-B", "orders.>");

        sut.ShouldRetain(1, "orders.new").ShouldBeTrue();
    }

    [Fact]
    public void ShouldRetain_false_when_all_consumers_acked()
    {
        var sut = new InterestRetentionPolicy();
        sut.RegisterInterest("consumer-A", "orders.>");
        sut.RegisterInterest("consumer-B", "orders.>");

        sut.AcknowledgeDelivery("consumer-A", 1);
        sut.ShouldRetain(1, "orders.new").ShouldBeTrue(); // B hasn't acked

        sut.AcknowledgeDelivery("consumer-B", 1);
        sut.ShouldRetain(1, "orders.new").ShouldBeFalse(); // both acked
    }

    [Fact]
    public void ShouldRetain_ignores_consumers_without_interest()
    {
        var sut = new InterestRetentionPolicy();
        sut.RegisterInterest("consumer-A", "orders.>");
        sut.RegisterInterest("consumer-B", "billing.>"); // no interest in orders

        sut.AcknowledgeDelivery("consumer-A", 1);

        sut.ShouldRetain(1, "orders.new").ShouldBeFalse(); // B has no interest
    }

    [Fact]
    public void UnregisterInterest_removes_consumer()
    {
        var sut = new InterestRetentionPolicy();
        sut.RegisterInterest("consumer-A", "x.>");
        sut.RegisterInterest("consumer-B", "x.>");

        sut.UnregisterInterest("consumer-B");

        // Only A needs to ack
        sut.AcknowledgeDelivery("consumer-A", 1);
        sut.ShouldRetain(1, "x.y").ShouldBeFalse();
    }

    [Fact]
    public void ShouldRetain_false_when_no_consumers_registered()
    {
        // No registered interest at all means nothing forces retention.
        new InterestRetentionPolicy().ShouldRetain(1, "any.subject").ShouldBeFalse();
    }

    [Fact]
    public void Multiple_sequences_tracked_independently()
    {
        var sut = new InterestRetentionPolicy();
        sut.RegisterInterest("c1", "x.>");

        sut.AcknowledgeDelivery("c1", 1);

        sut.ShouldRetain(1, "x.y").ShouldBeFalse();
        sut.ShouldRetain(2, "x.y").ShouldBeTrue(); // seq 2 not acked
    }

    [Fact]
    public void ConsumerCount_tracks_registrations()
    {
        var sut = new InterestRetentionPolicy();
        sut.ConsumerCount.ShouldBe(0);

        sut.RegisterInterest("c1", "x.>");
        sut.RegisterInterest("c2", "y.>");
        sut.ConsumerCount.ShouldBe(2);

        sut.UnregisterInterest("c1");
        sut.ConsumerCount.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,309 @@
|
||||
// Ported from golang/nats-server/server/jetstream_test.go
|
||||
// Account limits: max streams per account, max consumers per stream,
|
||||
// JWT-based account limits, account info reporting, stream/consumer count limits.
|
||||
|
||||
using NATS.Server.Auth;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamAccountLimitTests
{
    // Go: TestJetStreamSystemLimits server/jetstream_test.go:4837
    // Account with max streams = 1 cannot create a second stream.
    [Fact]
    public async Task Account_max_streams_one_prevents_second_stream_creation()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 1);

        var createFirst = await fx.RequestLocalAsync(
            "$JS.API.STREAM.CREATE.S1",
            """{"name":"S1","subjects":["s1.>"]}""");
        createFirst.Error.ShouldBeNull();
        createFirst.StreamInfo.ShouldNotBeNull();

        var createSecond = await fx.RequestLocalAsync(
            "$JS.API.STREAM.CREATE.S2",
            """{"name":"S2","subjects":["s2.>"]}""");
        createSecond.Error.ShouldNotBeNull();
        createSecond.Error!.Code.ShouldBe(10027);
    }

    // Go: TestJetStreamSystemLimits — account with max = 3 creates 3 then fails
    [Fact]
    public async Task Account_max_streams_three_rejects_fourth_stream()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 3);

        // First three creates consume the full quota.
        for (var i = 1; i <= 3; i++)
        {
            var withinLimit = await fx.RequestLocalAsync(
                $"$JS.API.STREAM.CREATE.S{i}",
                $$$"""{"name":"S{{{i}}}","subjects":["s{{{i}}}.>"]}""");
            withinLimit.Error.ShouldBeNull();
        }

        var overLimit = await fx.RequestLocalAsync(
            "$JS.API.STREAM.CREATE.S4",
            """{"name":"S4","subjects":["s4.>"]}""");
        overLimit.Error.ShouldNotBeNull();
        overLimit.Error!.Code.ShouldBe(10027);
    }

    // Go: TestJetStreamSystemLimits — after deleting a stream the limit slot is freed
    [Fact]
    public async Task Account_max_streams_slot_freed_after_delete()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 2);

        var createDel1 = await fx.RequestLocalAsync(
            "$JS.API.STREAM.CREATE.DEL1",
            """{"name":"DEL1","subjects":["del1.>"]}""");
        createDel1.Error.ShouldBeNull();

        var createDel2 = await fx.RequestLocalAsync(
            "$JS.API.STREAM.CREATE.DEL2",
            """{"name":"DEL2","subjects":["del2.>"]}""");
        createDel2.Error.ShouldBeNull();

        // Delete DEL1 — its quota slot should become reusable.
        var deleteDel1 = await fx.RequestLocalAsync("$JS.API.STREAM.DELETE.DEL1", "{}");
        deleteDel1.Success.ShouldBeTrue();

        // A third stream now fits in the freed slot.
        var createDel3 = await fx.RequestLocalAsync(
            "$JS.API.STREAM.CREATE.DEL3",
            """{"name":"DEL3","subjects":["del3.>"]}""");
        createDel3.Error.ShouldBeNull();
    }

    // Go: TestJetStreamSystemLimits — account with no limit allows many streams
    [Fact]
    public async Task Account_with_zero_max_streams_allows_unlimited_streams()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 0);

        // maxStreams = 0 means unlimited; ten creates must all succeed.
        for (var i = 1; i <= 10; i++)
        {
            var created = await fx.RequestLocalAsync(
                $"$JS.API.STREAM.CREATE.UNLIM{i}",
                $$$"""{"name":"UNLIM{{{i}}}","subjects":["unlim{{{i}}}.>"]}""");
            created.Error.ShouldBeNull();
        }
    }

    // Go: TestJetStreamMaxConsumers server/jetstream_test.go:553
    // Stream max_consumers configuration is persisted in stream config and returned in INFO.
    // Note: The .NET ConsumerManager does not yet enforce per-stream MaxConsumers at the
    // API layer — the config value is stored and reportable but not enforced during consumer creation.
    [Fact]
    public async Task Stream_max_consumers_is_stored_and_returned_in_info()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
        {
            Name = "MAXCONSUMERS",
            Subjects = ["maxconsumers.>"],
            MaxConsumers = 2,
        });

        // Config is preserved and surfaced via STREAM.INFO.
        var streamInfo = await fx.RequestLocalAsync("$JS.API.STREAM.INFO.MAXCONSUMERS", "{}");
        streamInfo.Error.ShouldBeNull();
        streamInfo.StreamInfo!.Config.MaxConsumers.ShouldBe(2);

        // Consumers can be created (enforcement is not at the API layer).
        var consumerOne = await fx.CreateConsumerAsync("MAXCONSUMERS", "C1", "maxconsumers.>");
        consumerOne.Error.ShouldBeNull();

        var consumerTwo = await fx.CreateConsumerAsync("MAXCONSUMERS", "C2", "maxconsumers.a");
        consumerTwo.Error.ShouldBeNull();
    }

    // Go: TestJetStreamMaxConsumers — creating same consumer name twice is idempotent
    [Fact]
    public async Task Create_same_consumer_twice_is_idempotent_and_not_counted_twice()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
        {
            Name = "IDMCONS",
            Subjects = ["idmcons.>"],
            MaxConsumers = 2,
        });

        var firstCreate = await fx.CreateConsumerAsync("IDMCONS", "C1", "idmcons.>");
        firstCreate.Error.ShouldBeNull();

        // Same name — idempotent, should not count as a second consumer.
        var repeatCreate = await fx.CreateConsumerAsync("IDMCONS", "C1", "idmcons.>");
        repeatCreate.Error.ShouldBeNull();

        // A second unique name should still fit.
        var secondCreate = await fx.CreateConsumerAsync("IDMCONS", "C2", "idmcons.a");
        secondCreate.Error.ShouldBeNull();
    }

    // Go: TestJetStreamRequestAPI server/jetstream_test.go:5995
    // Account info returns correct stream and consumer counts.
    [Fact]
    public async Task Account_info_reflects_created_streams_and_consumers()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("A1", "a1.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.A2", """{"name":"A2","subjects":["a2.>"]}""");
        _ = await fx.CreateConsumerAsync("A1", "CON1", "a1.>");
        _ = await fx.CreateConsumerAsync("A2", "CON2", "a2.>");
        _ = await fx.CreateConsumerAsync("A2", "CON3", "a2.x");

        var accountInfo = await fx.RequestLocalAsync("$JS.API.INFO", "{}");

        accountInfo.Error.ShouldBeNull();
        accountInfo.AccountInfo.ShouldNotBeNull();
        accountInfo.AccountInfo!.Streams.ShouldBe(2);
        accountInfo.AccountInfo.Consumers.ShouldBe(3);
    }

    // Go: TestJetStreamRequestAPI — empty account info
    [Fact]
    public void Account_info_for_empty_account_returns_zero_counts()
    {
        var router = new JetStreamApiRouter(new StreamManager(), new ConsumerManager());

        var response = router.Route("$JS.API.INFO", "{}"u8);

        response.Error.ShouldBeNull();
        response.AccountInfo!.Streams.ShouldBe(0);
        response.AccountInfo.Consumers.ShouldBe(0);
    }

    // Go: TestJetStreamSystemLimits — Account.TryReserveStream enforces MaxJetStreamStreams
    [Fact]
    public void Account_reserve_stream_enforces_max_jet_stream_streams()
    {
        var account = new Account("TEST")
        {
            MaxJetStreamStreams = 2,
        };

        account.TryReserveStream().ShouldBeTrue();
        account.TryReserveStream().ShouldBeTrue();
        account.TryReserveStream().ShouldBeFalse(); // exceeded
    }

    // Go: TestJetStreamSystemLimits — Account.ReleaseStream frees a slot
    [Fact]
    public void Account_release_stream_frees_slot_for_reservation()
    {
        var account = new Account("FREETEST")
        {
            MaxJetStreamStreams = 1,
        };

        account.TryReserveStream().ShouldBeTrue();
        account.TryReserveStream().ShouldBeFalse(); // full

        account.ReleaseStream();

        account.TryReserveStream().ShouldBeTrue(); // slot freed
    }

    // Go: TestJetStreamSystemLimits — zero max streams means unlimited
    [Fact]
    public void Account_with_zero_max_streams_allows_unlimited_reservations()
    {
        var account = new Account("UNLIMITED")
        {
            MaxJetStreamStreams = 0, // unlimited
        };

        for (var i = 0; i < 100; i++)
        {
            account.TryReserveStream().ShouldBeTrue();
        }
    }

    // Go: TestJetStreamSystemLimits — JetStreamStreamCount tracks correctly
    [Fact]
    public void Account_stream_count_tracks_reserve_and_release()
    {
        var account = new Account("COUNTTEST")
        {
            MaxJetStreamStreams = 5,
        };

        account.JetStreamStreamCount.ShouldBe(0);

        account.TryReserveStream();
        account.JetStreamStreamCount.ShouldBe(1);

        account.TryReserveStream();
        account.JetStreamStreamCount.ShouldBe(2);

        account.ReleaseStream();
        account.JetStreamStreamCount.ShouldBe(1);
    }

    // Go: TestJetStreamRequestAPI — stream list includes all streams
    [Fact]
    public async Task Stream_names_includes_all_created_streams()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("LISTA", "lista.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.LISTB", """{"name":"LISTB","subjects":["listb.>"]}""");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.LISTC", """{"name":"LISTC","subjects":["listc.>"]}""");

        var namesResponse = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");

        namesResponse.StreamNames.ShouldNotBeNull();
        namesResponse.StreamNames!.Count.ShouldBe(3);
        namesResponse.StreamNames.ShouldContain("LISTA");
        namesResponse.StreamNames.ShouldContain("LISTB");
        namesResponse.StreamNames.ShouldContain("LISTC");
    }

    // Go: TestJetStreamRequestAPI — stream names sorted alphabetically
    [Fact]
    public async Task Stream_names_are_returned_sorted()
    {
        // Create in reverse/shuffled order so sorting is actually exercised.
        await using var fx = new JetStreamApiFixture();
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.ZZZ", """{"name":"ZZZ","subjects":["zzz.>"]}""");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.AAA", """{"name":"AAA","subjects":["aaa.>"]}""");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.MMM", """{"name":"MMM","subjects":["mmm.>"]}""");

        var namesResponse = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");

        namesResponse.StreamNames.ShouldNotBeNull();
        namesResponse.StreamNames!.ShouldBe(namesResponse.StreamNames.OrderBy(n => n, StringComparer.Ordinal).ToList());
    }

    // Go: TestJetStreamMaxConsumers — consumer names list reflects created consumers
    [Fact]
    public async Task Consumer_names_list_reflects_created_consumers()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("CONLIST", "conlist.>");
        _ = await fx.CreateConsumerAsync("CONLIST", "CON1", "conlist.a");
        _ = await fx.CreateConsumerAsync("CONLIST", "CON2", "conlist.b");
        _ = await fx.CreateConsumerAsync("CONLIST", "CON3", "conlist.c");

        var namesResponse = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.CONLIST", "{}");

        namesResponse.ConsumerNames.ShouldNotBeNull();
        namesResponse.ConsumerNames!.Count.ShouldBe(3);
        namesResponse.ConsumerNames.ShouldContain("CON1");
        namesResponse.ConsumerNames.ShouldContain("CON2");
        namesResponse.ConsumerNames.ShouldContain("CON3");
    }

    // Go: TestJetStreamSystemLimits — account limit error has correct code
    [Fact]
    public async Task Max_streams_error_uses_code_10027()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 1);

        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.FIRST", """{"name":"FIRST","subjects":["first.>"]}""");
        var overLimit = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.SECOND", """{"name":"SECOND","subjects":["second.>"]}""");

        overLimit.Error.ShouldNotBeNull();
        overLimit.Error!.Code.ShouldBe(10027);
        overLimit.Error.Description.ShouldNotBeNullOrEmpty();
    }

    // Go: TestJetStreamEnableAndDisableAccount server/jetstream_test.go:128
    // A new account starts with zero JetStream stream count.
    [Fact]
    public void New_account_has_zero_jet_stream_stream_count()
    {
        new Account("NEWACCT").JetStreamStreamCount.ShouldBe(0);
    }
}
|
||||
@@ -0,0 +1,25 @@
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamAckRedeliveryStateMachineTests
{
    // Runs the ack-all floor contract test and the backoff redelivery parity
    // test as one combined state-machine scenario, collecting any failure into
    // a single violations report.
    [Fact]
    public async Task Ack_all_and_backoff_redelivery_follow_monotonic_floor_and_max_deliver_rules()
    {
        var violations = new List<string>();

        try
        {
            // Ack-all must advance the ack floor and clear pending entries at
            // or below the acked sequence.
            var ackAll = new JetStreamPushConsumerContractTests();
            await ackAll.Ack_all_advances_floor_and_clears_pending_before_sequence();

            // Redelivery must honor the configured backoff schedule and stop
            // once max-deliver is reached.
            var backoff = new JetStreamConsumerBackoffParityTests();
            await backoff.Redelivery_honors_backoff_schedule_and_stops_after_max_deliver();
        }
        catch (Exception ex)
        {
            // Record the full exception (type + message + stack trace) so the
            // failing sub-scenario is identifiable; ex.Message alone discarded
            // the stack trace and made failures hard to attribute.
            violations.Add(ex.ToString());
        }

        violations.ShouldBeEmpty();
    }
}
|
||||
@@ -0,0 +1,602 @@
|
||||
// Ported from golang/nats-server/server/jetstream_test.go
|
||||
// Admin operations: stream/consumer list/names, account info, stream leader stepdown,
|
||||
// peer info, account purge, server remove, API routing
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.Auth;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamAdminTests
|
||||
{
|
||||
    // Go: TestJetStreamRequestAPI server/jetstream_test.go:5429
    // $JS.API.INFO must reflect the streams and consumers created so far.
    [Fact]
    public async Task Account_info_returns_stream_and_consumer_counts()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("S1", "s1.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S2", """{"subjects":["s2.>"]}""");
        _ = await fx.CreateConsumerAsync("S1", "C1", "s1.>");

        var info = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        info.Error.ShouldBeNull();
        info.AccountInfo.ShouldNotBeNull();
        info.AccountInfo!.Streams.ShouldBe(2);
        info.AccountInfo.Consumers.ShouldBe(1);
    }

    // Go: TestJetStreamRequestAPI — account info with zero
    // Routes INFO directly through a fresh router (no fixture) to check the
    // empty-account baseline.
    [Fact]
    public void Account_info_empty_returns_zero_counts()
    {
        var router = new JetStreamApiRouter(new StreamManager(), new ConsumerManager());
        var resp = router.Route("$JS.API.INFO", "{}"u8);

        resp.Error.ShouldBeNull();
        resp.AccountInfo.ShouldNotBeNull();
        resp.AccountInfo!.Streams.ShouldBe(0);
        resp.AccountInfo.Consumers.ShouldBe(0);
    }
|
||||
|
||||
    // Go: TestJetStreamFilteredStreamNames server/jetstream_test.go:5392
    [Fact]
    public async Task Stream_names_returns_all_streams()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ALPHA", "alpha.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.BETA", """{"subjects":["beta.>"]}""");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.GAMMA", """{"subjects":["gamma.>"]}""");

        var names = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");
        names.StreamNames.ShouldNotBeNull();
        names.StreamNames!.Count.ShouldBe(3);
        names.StreamNames.ShouldContain("ALPHA");
        names.StreamNames.ShouldContain("BETA");
        names.StreamNames.ShouldContain("GAMMA");
    }

    // Go: TestJetStreamFilteredStreamNames — names sorted
    // Streams are created Z, A, M; the response must come back A, M, Z.
    [Fact]
    public async Task Stream_names_are_sorted()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ZZZ", "zzz.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.AAA", """{"subjects":["aaa.>"]}""");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.MMM", """{"subjects":["mmm.>"]}""");

        var names = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");
        names.StreamNames![0].ShouldBe("AAA");
        names.StreamNames[1].ShouldBe("MMM");
        names.StreamNames[2].ShouldBe("ZZZ");
    }

    // Go: TestJetStreamStreamList
    // LIST and NAMES must agree on the number of streams.
    [Fact]
    public async Task Stream_list_returns_same_as_names()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("L1", "l1.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.L2", """{"subjects":["l2.>"]}""");

        var names = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");
        var list = await fx.RequestLocalAsync("$JS.API.STREAM.LIST", "{}");

        list.StreamNames!.Count.ShouldBe(names.StreamNames!.Count);
    }

    // Go: TestJetStreamFilteredStreamNames — empty after delete all
    [Fact]
    public async Task Stream_names_empty_after_all_deleted()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DEL1", "del1.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.DEL2", """{"subjects":["del2.>"]}""");

        _ = await fx.RequestLocalAsync("$JS.API.STREAM.DELETE.DEL1", "{}");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.DELETE.DEL2", "{}");

        var names = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");
        names.StreamNames!.Count.ShouldBe(0);
    }
|
||||
|
||||
    // Go: TestJetStreamConsumerList
    [Fact]
    public async Task Consumer_names_returns_all_consumers()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("CL", "cl.>");
        _ = await fx.CreateConsumerAsync("CL", "A", "cl.a");
        _ = await fx.CreateConsumerAsync("CL", "B", "cl.b");
        _ = await fx.CreateConsumerAsync("CL", "C", "cl.c");

        var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.CL", "{}");
        names.ConsumerNames.ShouldNotBeNull();
        names.ConsumerNames!.Count.ShouldBe(3);
    }

    // Go: TestJetStreamConsumerList — names sorted
    // Consumers created Z, A, M must be listed A, M, Z.
    [Fact]
    public async Task Consumer_names_are_sorted()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("CS", "cs.>");
        _ = await fx.CreateConsumerAsync("CS", "ZZZ", "cs.>");
        _ = await fx.CreateConsumerAsync("CS", "AAA", "cs.>");
        _ = await fx.CreateConsumerAsync("CS", "MMM", "cs.>");

        var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.CS", "{}");
        names.ConsumerNames![0].ShouldBe("AAA");
        names.ConsumerNames[1].ShouldBe("MMM");
        names.ConsumerNames[2].ShouldBe("ZZZ");
    }

    // Go: TestJetStreamConsumerList — list matches names
    [Fact]
    public async Task Consumer_list_returns_same_as_names()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("CLM", "clm.>");
        _ = await fx.CreateConsumerAsync("CLM", "C1", "clm.>");
        _ = await fx.CreateConsumerAsync("CLM", "C2", "clm.>");

        var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.CLM", "{}");
        var list = await fx.RequestLocalAsync("$JS.API.CONSUMER.LIST.CLM", "{}");

        list.ConsumerNames!.Count.ShouldBe(names.ConsumerNames!.Count);
    }

    // Go: TestJetStreamConsumerList — empty after delete all
    [Fact]
    public async Task Consumer_names_empty_after_all_deleted()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("CD", "cd.>");
        _ = await fx.CreateConsumerAsync("CD", "C1", "cd.>");

        _ = await fx.RequestLocalAsync("$JS.API.CONSUMER.DELETE.CD.C1", "{}");

        var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.CD", "{}");
        names.ConsumerNames!.Count.ShouldBe(0);
    }
|
||||
|
||||
    // Go: TestJetStreamStreamLeaderStepdown
    [Fact]
    public async Task Stream_leader_stepdown_returns_success()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("SLD", "sld.>");

        var resp = await fx.RequestLocalAsync("$JS.API.STREAM.LEADER.STEPDOWN.SLD", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamStreamPeerRemove
    [Fact]
    public async Task Stream_peer_remove_returns_success()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("SPR", "spr.>");

        var resp = await fx.RequestLocalAsync("$JS.API.STREAM.PEER.REMOVE.SPR", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamConsumerLeaderStepdown
    [Fact]
    public async Task Consumer_leader_stepdown_returns_success()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("CLSD", "clsd.>");
        _ = await fx.CreateConsumerAsync("CLSD", "C1", "clsd.>");

        var resp = await fx.RequestLocalAsync("$JS.API.CONSUMER.LEADER.STEPDOWN.CLSD.C1", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamAccountPurge server/jetstream_test.go
    [Fact]
    public async Task Account_purge_returns_success()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("AP", "ap.>");

        var resp = await fx.RequestLocalAsync("$JS.API.ACCOUNT.PURGE.DEFAULT", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamServerRemove
    // Router-only test: no fixture/server needed for this endpoint.
    [Fact]
    public void Server_remove_returns_success()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.SERVER.REMOVE", "{}"u8);
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamAccountStreamMove
    [Fact]
    public async Task Account_stream_move_returns_success()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ASM", "asm.>");

        var resp = await fx.RequestLocalAsync("$JS.API.ACCOUNT.STREAM.MOVE.MYSTREAM", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamAccountStreamMoveCancel
    [Fact]
    public async Task Account_stream_move_cancel_returns_success()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ASMC", "asmc.>");

        var resp = await fx.RequestLocalAsync("$JS.API.ACCOUNT.STREAM.MOVE.CANCEL.MYSTREAM", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamRequestAPI — unknown subject
    // Unroutable API subjects map to a 404 error response.
    [Fact]
    public void Unknown_api_subject_returns_not_found()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.UNKNOWN.THING", "{}"u8);
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }
|
||||
|
||||
    // Go: TestJetStreamRequestAPI — multiple API calls
    // Exercises INFO, STREAM.NAMES and STREAM.INFO back-to-back on one fixture.
    [Fact]
    public async Task Multiple_api_calls_in_sequence()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MULTI", "multi.>");

        // INFO
        var info = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        info.AccountInfo.ShouldNotBeNull();

        // STREAM.NAMES
        var names = await fx.RequestLocalAsync("$JS.API.STREAM.NAMES", "{}");
        names.StreamNames.ShouldNotBeNull();

        // STREAM.INFO
        var sInfo = await fx.RequestLocalAsync("$JS.API.STREAM.INFO.MULTI", "{}");
        sInfo.StreamInfo.ShouldNotBeNull();
    }

    // Go: TestJetStreamDisabledLimitsEnforcementJWT server/jetstream_test.go
    [Fact]
    public async Task Jwt_limited_account_enforces_max_streams()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 1);

        var s1 = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S1", """{"subjects":["s1.>"]}""");
        s1.Error.ShouldBeNull();

        var s2 = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S2", """{"subjects":["s2.>"]}""");
        s2.Error.ShouldNotBeNull();
        s2.Error!.Code.ShouldBe(10027);
    }

    // Go: TestJetStreamDisabledLimitsEnforcementJWT — delete frees slot
    // Deleting the only stream must make room for a new one under max_streams=1.
    [Fact]
    public async Task Jwt_limited_account_delete_frees_slot()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 1);

        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S1", """{"subjects":["s1.>"]}""");

        _ = await fx.RequestLocalAsync("$JS.API.STREAM.DELETE.S1", "{}");

        var s2 = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S2", """{"subjects":["s2.>"]}""");
        s2.Error.ShouldBeNull();
    }

    // Go: TestJetStreamSystemLimits server/jetstream_test.go:4636
    [Fact]
    public async Task Account_info_updates_after_consumer_creation()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("AI", "ai.>");

        var before = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        before.AccountInfo!.Consumers.ShouldBe(0);

        _ = await fx.CreateConsumerAsync("AI", "C1", "ai.>");

        var after = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        after.AccountInfo!.Consumers.ShouldBe(1);
    }

    // Go: TestJetStreamSystemLimits — account info updates after stream deletion
    [Fact]
    public async Task Account_info_updates_after_stream_deletion()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("AID", "aid.>");

        var before = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        before.AccountInfo!.Streams.ShouldBe(1);

        _ = await fx.RequestLocalAsync("$JS.API.STREAM.DELETE.AID", "{}");

        var after = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        after.AccountInfo!.Streams.ShouldBe(0);
    }

    // Go: TestJetStreamConsumerList — consumer names scoped to stream
    // NOTE(review): NAMES for a non-existent stream returns an empty list here,
    // not an error — confirm this matches the intended router contract.
    [Fact]
    public async Task Consumer_names_for_non_existent_stream_returns_empty()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("X", "x.>");

        var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.NOPE", "{}");
        names.ConsumerNames.ShouldNotBeNull();
        names.ConsumerNames!.Count.ShouldBe(0);
    }
|
||||
|
||||
    // Go: TestJetStreamMetaLeaderStepdown
    // With a meta group wired into the router, META.LEADER.STEPDOWN succeeds.
    [Fact]
    public void Meta_leader_stepdown_with_meta_group_returns_success()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        var router = new JetStreamApiRouter(new StreamManager(), new ConsumerManager(), metaGroup);

        var resp = router.Route("$JS.API.META.LEADER.STEPDOWN", "{}"u8);
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamMetaLeaderStepdown — without meta group
    [Fact]
    public void Meta_leader_stepdown_without_meta_group_returns_not_found()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.META.LEADER.STEPDOWN", "{}"u8);
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // Go: TestJetStreamStreamLeaderStepdown — non-existent stream
    [Fact]
    public async Task Stream_leader_stepdown_non_existent_still_succeeds()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("LS", "ls.>");

        // Stepdown for non-existent stream doesn't error (no-op)
        var resp = await fx.RequestLocalAsync("$JS.API.STREAM.LEADER.STEPDOWN.NOPE", "{}");
        resp.Success.ShouldBeTrue();
    }

    // Go: TestJetStreamConsumerNext — via API router
    [Fact]
    public async Task Consumer_next_via_api_returns_messages()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("NEXT", "next.>");
        _ = await fx.CreateConsumerAsync("NEXT", "C1", "next.>");

        _ = await fx.PublishAndGetAckAsync("next.x", "data1");
        _ = await fx.PublishAndGetAckAsync("next.x", "data2");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.CONSUMER.MSG.NEXT.NEXT.C1",
            """{"batch":2}""");
        resp.PullBatch.ShouldNotBeNull();
        resp.PullBatch!.Messages.Count.ShouldBe(2);
    }

    // Go: TestJetStreamConsumerNext — empty
    // Pulling from a consumer with no stored messages yields an empty batch.
    [Fact]
    public async Task Consumer_next_with_no_messages_returns_empty()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("NE", "ne.>");
        _ = await fx.CreateConsumerAsync("NE", "C1", "ne.>");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.CONSUMER.MSG.NEXT.NE.C1",
            """{"batch":1}""");
        resp.PullBatch.ShouldNotBeNull();
        resp.PullBatch!.Messages.Count.ShouldBe(0);
    }
|
||||
|
||||
    // Go: TestJetStreamStorageSelection
    [Fact]
    public async Task Storage_selection_file()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
        {
            Name = "FILE",
            Subjects = ["file.>"],
            Storage = StorageType.File,
        });

        var backend = await fx.GetStreamBackendTypeAsync("FILE");
        backend.ShouldBe("file");
    }

    // Go: TestJetStreamStorageSelection — memory
    [Fact]
    public async Task Storage_selection_memory()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
        {
            Name = "MEM",
            Subjects = ["mem.>"],
            Storage = StorageType.Memory,
        });

        var backend = await fx.GetStreamBackendTypeAsync("MEM");
        backend.ShouldBe("memory");
    }

    // Go: TestJetStreamStorageSelection — non-existent
    // The fixture reports "missing" for streams it does not know about.
    [Fact]
    public async Task Storage_backend_type_for_missing_stream()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("X", "x.>");

        var backend = await fx.GetStreamBackendTypeAsync("NOPE");
        backend.ShouldBe("missing");
    }

    // Go: TestJetStreamConsumerNames — for specific stream
    // CONSUMER.NAMES must be scoped to the requested stream only.
    [Fact]
    public async Task Consumer_names_only_include_target_stream()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("S1", "s1.>");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S2", """{"subjects":["s2.>"]}""");
        _ = await fx.CreateConsumerAsync("S1", "C1", "s1.>");
        _ = await fx.CreateConsumerAsync("S2", "C2", "s2.>");

        var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.S1", "{}");
        names.ConsumerNames!.Count.ShouldBe(1);
        names.ConsumerNames.ShouldContain("C1");
        names.ConsumerNames.ShouldNotContain("C2");
    }

    // Go: TestJetStreamConsumerDelete — delete decrements count
    [Fact]
    public async Task Delete_consumer_decrements_account_info_count()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DCC", "dcc.>");
        _ = await fx.CreateConsumerAsync("DCC", "C1", "dcc.>");
        _ = await fx.CreateConsumerAsync("DCC", "C2", "dcc.>");

        var before = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        before.AccountInfo!.Consumers.ShouldBe(2);

        _ = await fx.RequestLocalAsync("$JS.API.CONSUMER.DELETE.DCC.C1", "{}");

        var after = await fx.RequestLocalAsync("$JS.API.INFO", "{}");
        after.AccountInfo!.Consumers.ShouldBe(1);
    }

    // Go: TestJetStreamAccountPurge — empty account name fails
    // The trailing dot leaves an empty final token; the router must reject it.
    [Fact]
    public void Account_purge_without_name_returns_not_found()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.ACCOUNT.PURGE.", "{}"u8);
        resp.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamAccountStreamMove — empty stream name fails
    [Fact]
    public void Account_stream_move_without_name_returns_not_found()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.ACCOUNT.STREAM.MOVE.", "{}"u8);
        resp.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamStreamLeaderStepdown — empty stream name fails
    [Fact]
    public void Stream_leader_stepdown_without_name_returns_not_found()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.STREAM.LEADER.STEPDOWN.", "{}"u8);
        resp.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamStreamPeerRemove — empty stream name fails
    [Fact]
    public void Stream_peer_remove_without_name_returns_not_found()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.STREAM.PEER.REMOVE.", "{}"u8);
        resp.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamConsumerLeaderStepdown — malformed subject
    // Consumer stepdown needs both stream and consumer tokens; one is not enough.
    [Fact]
    public void Consumer_leader_stepdown_with_single_token_returns_not_found()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.CONSUMER.LEADER.STEPDOWN.ONLYONE", "{}"u8);
        resp.Error.ShouldNotBeNull();
    }
|
||||
|
||||
    // Go: TestJetStreamConsumerReset — non-existent consumer
    [Fact]
    public async Task Consumer_reset_non_existent_returns_not_found()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("RNE", "rne.>");

        var resp = await fx.RequestLocalAsync("$JS.API.CONSUMER.RESET.RNE.NOPE", "{}");
        resp.Success.ShouldBeFalse();
    }

    // Go: TestJetStreamConsumerUnpin — non-existent consumer
    [Fact]
    public async Task Consumer_unpin_non_existent_returns_not_found()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("UNE", "une.>");

        var resp = await fx.RequestLocalAsync("$JS.API.CONSUMER.UNPIN.UNE.NOPE", "{}");
        resp.Success.ShouldBeFalse();
    }

    // Go: TestJetStreamLimits server/jetstream_test.go
    // Three creates fit under max_streams=3; the fourth is rejected.
    [Fact]
    public async Task Jwt_limited_account_allows_within_limit()
    {
        await using var fx = await JetStreamApiFixture.StartJwtLimitedAccountAsync(maxStreams: 3);

        var s1 = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S1", """{"subjects":["s1.>"]}""");
        s1.Error.ShouldBeNull();
        var s2 = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S2", """{"subjects":["s2.>"]}""");
        s2.Error.ShouldBeNull();
        var s3 = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S3", """{"subjects":["s3.>"]}""");
        s3.Error.ShouldBeNull();

        // Fourth should fail
        var s4 = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S4", """{"subjects":["s4.>"]}""");
        s4.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamStreamMessageDeleteViaAPI
    // Deletes the middle of three messages, then verifies GET fails for the
    // deleted sequence while the other two remain in stream state.
    [Fact]
    public async Task Message_delete_via_api_and_verify()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MDAPI", "mdapi.>");
        _ = await fx.PublishAndGetAckAsync("mdapi.x", "msg1");
        var ack2 = await fx.PublishAndGetAckAsync("mdapi.x", "msg2");
        _ = await fx.PublishAndGetAckAsync("mdapi.x", "msg3");

        var del = await fx.RequestLocalAsync(
            "$JS.API.STREAM.MSG.DELETE.MDAPI",
            $$"""{ "seq": {{ack2.Seq}} }""");
        del.Success.ShouldBeTrue();

        // Verify the deleted message is gone
        var msg = await fx.RequestLocalAsync(
            "$JS.API.STREAM.MSG.GET.MDAPI",
            $$"""{ "seq": {{ack2.Seq}} }""");
        msg.Error.ShouldNotBeNull();

        // Other messages still exist
        var state = await fx.GetStreamStateAsync("MDAPI");
        state.Messages.ShouldBe(2UL);
    }

    // Go: TestJetStreamRequestAPI — direct get missing sequence
    // Sequence numbers start at 1, so seq 0 can never match a stored message.
    [Fact]
    public async Task Direct_get_with_zero_sequence_returns_error()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGZ", "dgz.>");
        _ = await fx.PublishAndGetAckAsync("dgz.x", "data");

        var resp = await fx.RequestLocalAsync("$JS.API.DIRECT.GET.DGZ", """{"seq":0}""");
        resp.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamRequestAPI — direct get non-existent stream
    [Fact]
    public void Direct_get_non_existent_stream_returns_error()
    {
        var router = new JetStreamApiRouter();
        var resp = router.Route("$JS.API.DIRECT.GET.NOPE", """{"seq":1}"""u8);
        resp.Error.ShouldNotBeNull();
    }
|
||||
|
||||
// Go: TestJetStreamConsumerNext — batch default
|
||||
[Fact]
|
||||
public async Task Consumer_next_with_no_batch_defaults_to_one()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("NBAT", "nbat.>");
|
||||
_ = await fx.CreateConsumerAsync("NBAT", "C1", "nbat.>");
|
||||
_ = await fx.PublishAndGetAckAsync("nbat.x", "data1");
|
||||
_ = await fx.PublishAndGetAckAsync("nbat.x", "data2");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.MSG.NEXT.NBAT.C1", "{}");
|
||||
resp.PullBatch!.Messages.Count.ShouldBe(1);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamClusterGovernanceBehaviorParityTests
{
    // Composite scenario: the planned-placement baseline and the
    // consensus-backed runtime variant must both hold before a stream
    // transition is considered governed.
    [Fact]
    public async Task Meta_group_and_replica_group_apply_consensus_committed_placement_before_stream_transition()
    {
        // Baseline: planner-driven replica placement.
        var planningParity = new JetStreamClusterGovernanceParityTests();
        await planningParity.Cluster_governance_applies_planned_replica_placement();

        // Runtime: the same placement applied through the consensus path.
        var runtimeParity = new JetStreamClusterGovernanceRuntimeParityTests();
        await runtimeParity.Jetstream_cluster_governance_applies_consensus_backed_placement();
    }
}
|
||||
@@ -0,0 +1,26 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamClusterGovernanceRuntimeParityTests
{
    // Proposes a stream through the meta group, plans a two-replica placement,
    // applies it to a replica group, and checks both sides observed the result.
    [Fact]
    public async Task Jetstream_cluster_governance_applies_consensus_backed_placement()
    {
        // Meta layer: propose the stream through a 3-node meta group.
        var metaGroup = new JetStreamMetaGroup(3);
        var ordersConfig = new StreamConfig
        {
            Name = "ORDERS",
            Subjects = ["orders.*"],
        };
        await metaGroup.ProposeCreateStreamAsync(ordersConfig, default);

        // Placement layer: plan two replicas across the 3-node cluster and
        // apply the plan to the stream's replica group.
        var placementPlanner = new AssetPlacementPlanner(3);
        var plannedPlacement = placementPlanner.PlanReplicas(2);
        var replicaGroup = new StreamReplicaGroup("ORDERS", 1);
        await replicaGroup.ApplyPlacementAsync(plannedPlacement, default);

        metaGroup.GetState().Streams.ShouldContain("ORDERS");
        replicaGroup.Nodes.Count.ShouldBe(2);
    }
}
|
||||
@@ -0,0 +1,129 @@
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server.JetStream;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamConfigModelParityBatch3Tests
{
    // Checks that JetStreamOptions exposes the extended configuration fields
    // mirrored from the Go server's JetStream config (sync behavior,
    // compression, tagging, per-stream byte caps).
    [Fact]
    public void JetStreamOptions_exposes_extended_go_config_fields()
    {
        var opts = new JetStreamOptions
        {
            SyncInterval = TimeSpan.FromSeconds(2),
            SyncAlways = true,
            CompressOk = true,
            UniqueTag = "az",
            Strict = true,
            MaxAckPending = 123,
            MemoryMaxStreamBytes = 1111,
            StoreMaxStreamBytes = 2222,
            MaxBytesRequired = true,
        };

        opts.SyncInterval.ShouldBe(TimeSpan.FromSeconds(2));
        opts.SyncAlways.ShouldBeTrue();
        opts.CompressOk.ShouldBeTrue();
        opts.UniqueTag.ShouldBe("az");
        opts.Strict.ShouldBeTrue();
        opts.MaxAckPending.ShouldBe(123);
        opts.MemoryMaxStreamBytes.ShouldBe(1111);
        opts.StoreMaxStreamBytes.ShouldBe(2222);
        opts.MaxBytesRequired.ShouldBeTrue();
    }

    // Parses a jetstream { ... } config block containing every extended field
    // and verifies each is mapped onto the resulting options object.
    [Fact]
    public void ConfigProcessor_parses_extended_jetstream_fields()
    {
        var opts = ConfigProcessor.ProcessConfig("""
            jetstream {
                store_dir: '/tmp/js'
                max_mem_store: 1024
                max_file_store: 2048
                domain: 'D'
                sync_interval: '2s'
                sync_always: true
                compress_ok: true
                unique_tag: 'az'
                strict: true
                max_ack_pending: 42
                memory_max_stream_bytes: 10000
                store_max_stream_bytes: 20000
                max_bytes_required: true
            }
            """);

        opts.JetStream.ShouldNotBeNull();
        var js = opts.JetStream!;
        js.StoreDir.ShouldBe("/tmp/js");
        js.MaxMemoryStore.ShouldBe(1024);
        js.MaxFileStore.ShouldBe(2048);
        js.Domain.ShouldBe("D");
        js.SyncInterval.ShouldBe(TimeSpan.FromSeconds(2));
        js.SyncAlways.ShouldBeTrue();
        js.CompressOk.ShouldBeTrue();
        js.UniqueTag.ShouldBe("az");
        js.Strict.ShouldBeTrue();
        js.MaxAckPending.ShouldBe(42);
        js.MemoryMaxStreamBytes.ShouldBe(10000);
        js.StoreMaxStreamBytes.ShouldBe(20000);
        js.MaxBytesRequired.ShouldBeTrue();
    }

    // Smoke-tests the stats/limits/tier model types: every property set via
    // the initializer must read back unchanged.
    [Fact]
    public void JetStream_struct_models_cover_stats_limits_and_tiers()
    {
        var api = new JetStreamApiStats
        {
            Total = 10,
            Errors = 2,
            Inflight = 1,
        };

        var tier = new JetStreamTier
        {
            Name = "R3",
            Memory = 1000,
            Store = 2000,
            Streams = 3,
            Consumers = 5,
        };

        var limits = new JetStreamAccountLimits
        {
            MaxMemory = 10_000,
            MaxStore = 20_000,
            MaxStreams = 7,
            MaxConsumers = 9,
            MaxAckPending = 25,
            MemoryMaxStreamBytes = 1_000,
            StoreMaxStreamBytes = 2_000,
            MaxBytesRequired = true,
            Tiers = new Dictionary<string, JetStreamTier>
            {
                ["R3"] = tier,
            },
        };

        var stats = new JetStreamStats
        {
            Memory = 123,
            Store = 456,
            ReservedMemory = 11,
            ReservedStore = 22,
            Accounts = 2,
            HaAssets = 4,
            Api = api,
        };

        limits.Tiers["R3"].Name.ShouldBe("R3");
        limits.MaxAckPending.ShouldBe(25);
        limits.MaxBytesRequired.ShouldBeTrue();

        stats.Memory.ShouldBe(123);
        stats.Store.ShouldBe(456);
        stats.Api.Total.ShouldBe(10UL);
        stats.Api.Errors.ShouldBe(2UL);
        stats.Api.Inflight.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,514 @@
|
||||
// Ported from golang/nats-server/server/jetstream_test.go
|
||||
// Consumer CRUD operations: create push/pull, update, delete, info, ephemeral
|
||||
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamConsumerCrudTests
|
||||
{
|
||||
    // Go: TestJetStreamEphemeralConsumers server/jetstream_test.go:3688
    [Fact]
    public async Task Create_ephemeral_consumer()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ORDERS", "orders.*");
        var create = await fx.CreateConsumerAsync("ORDERS", "EPH", "orders.*", ephemeral: true);
        create.Error.ShouldBeNull();
        create.ConsumerInfo.ShouldNotBeNull();
    }

    // Go: TestJetStreamEphemeralPullConsumers server/jetstream_test.go
    [Fact]
    public async Task Create_ephemeral_pull_consumer()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ORDERS", "orders.*");
        var create = await fx.CreateConsumerAsync("ORDERS", "EPULL", "orders.*", ephemeral: true);
        create.Error.ShouldBeNull();
    }

    // Go: TestJetStreamBasicDeliverSubject server/jetstream_test.go:899
    // Fixture pre-creates a push consumer; here we only verify its reported
    // config (push mode, 25ms heartbeat — presumably set by the fixture helper).
    [Fact]
    public async Task Create_push_consumer_with_heartbeats()
    {
        await using var fx = await JetStreamApiFixture.StartWithPushConsumerAsync();
        var info = await fx.GetConsumerInfoAsync("ORDERS", "PUSH");
        info.Config.Push.ShouldBeTrue();
        info.Config.HeartbeatMs.ShouldBe(25);
    }

    // Go: TestJetStreamSubjectFiltering server/jetstream_test.go:1089
    [Fact]
    public async Task Create_consumer_with_filter_subject()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("EVENTS", "events.>");
        var create = await fx.CreateConsumerAsync("EVENTS", "FILT", "events.click");
        create.Error.ShouldBeNull();

        var info = await fx.GetConsumerInfoAsync("EVENTS", "FILT");
        info.Config.FilterSubject.ShouldBe("events.click");
    }

    // Go: TestJetStreamBothFiltersSet server/jetstream_test.go
    [Fact]
    public async Task Create_consumer_with_multiple_filter_subjects()
    {
        await using var fx = await JetStreamApiFixture.StartWithMultiFilterConsumerAsync();
        var info = await fx.GetConsumerInfoAsync("ORDERS", "CF");
        info.Config.FilterSubjects.ShouldContain("orders.*");
    }
|
||||
|
||||
// Go: TestJetStreamAckExplicitMsgRemoval server/jetstream_test.go:5897
|
||||
[Fact]
|
||||
public async Task Create_consumer_with_ack_explicit()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckExplicitConsumerAsync(30_000);
|
||||
var info = await fx.GetConsumerInfoAsync("ORDERS", "PULL");
|
||||
info.Config.AckPolicy.ShouldBe(AckPolicy.Explicit);
|
||||
info.Config.AckWaitMs.ShouldBe(30_000);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckAllRedelivery server/jetstream_test.go:1850
|
||||
[Fact]
|
||||
public async Task Create_consumer_with_ack_all()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckAllConsumerAsync();
|
||||
var info = await fx.GetConsumerInfoAsync("ORDERS", "ACKALL");
|
||||
info.Config.AckPolicy.ShouldBe(AckPolicy.All);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamNoAckStream server/jetstream_test.go:821
|
||||
[Fact]
|
||||
public async Task Create_consumer_with_ack_none()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("NOACK", "noack.>");
|
||||
var create = await fx.CreateConsumerAsync("NOACK", "NONE", "noack.>", ackPolicy: AckPolicy.None);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("NOACK", "NONE");
|
||||
info.Config.AckPolicy.ShouldBe(AckPolicy.None);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamActiveDelivery server/jetstream_test.go:3644
|
||||
[Fact]
|
||||
public async Task Consumer_info_roundtrip_returns_correct_config()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ORDERS", "orders.*");
|
||||
_ = await fx.CreateConsumerAsync("ORDERS", "DUR", "orders.created");
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("ORDERS", "DUR");
|
||||
info.Config.DurableName.ShouldBe("DUR");
|
||||
info.Config.FilterSubject.ShouldBe("orders.created");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamChangeConsumerType server/jetstream_test.go:5766
|
||||
[Fact]
|
||||
public async Task Consumer_delete_and_recreate()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ST", "st.>");
|
||||
_ = await fx.CreateConsumerAsync("ST", "C1", "st.>");
|
||||
|
||||
var del = await fx.RequestLocalAsync("$JS.API.CONSUMER.DELETE.ST.C1", "{}");
|
||||
del.Success.ShouldBeTrue();
|
||||
|
||||
// Recreate with different filter
|
||||
var create = await fx.CreateConsumerAsync("ST", "C1", "st.created");
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("ST", "C1");
|
||||
info.Config.FilterSubject.ShouldBe("st.created");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamDirectConsumersBeingReported server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Consumer_info_for_non_existent_returns_error()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("S", "s.>");
|
||||
|
||||
var info = await fx.RequestLocalAsync("$JS.API.CONSUMER.INFO.S.NOTEXIST", "{}");
|
||||
info.Error.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamBasicWorkQueue server/jetstream_test.go:937
|
||||
[Fact]
|
||||
public async Task Create_consumer_with_deliver_policy_all()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("WQ", "wq.>");
|
||||
var create = await fx.CreateConsumerAsync("WQ", "C1", "wq.>", deliverPolicy: DeliverPolicy.All);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("WQ", "C1");
|
||||
info.Config.DeliverPolicy.ShouldBe(DeliverPolicy.All);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamDeliverLastPerSubject server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Create_consumer_with_deliver_policy_last()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DL", "dl.>");
|
||||
var create = await fx.CreateConsumerAsync("DL", "LAST", "dl.>", deliverPolicy: DeliverPolicy.Last);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("DL", "LAST");
|
||||
info.Config.DeliverPolicy.ShouldBe(DeliverPolicy.Last);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamDeliverLastPerSubject server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Create_consumer_with_deliver_policy_new()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DN", "dn.>");
|
||||
var create = await fx.CreateConsumerAsync("DN", "NEW", "dn.>", deliverPolicy: DeliverPolicy.New);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("DN", "NEW");
|
||||
info.Config.DeliverPolicy.ShouldBe(DeliverPolicy.New);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueRetentionStream server/jetstream_test.go:1655
|
||||
[Fact]
|
||||
public async Task Consumer_with_replay_original()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithReplayOriginalConsumerAsync();
|
||||
var info = await fx.GetConsumerInfoAsync("ORDERS", "RO");
|
||||
info.Config.ReplayPolicy.ShouldBe(ReplayPolicy.Original);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamFilteredConsumersWithWiderFilter server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Consumer_with_wildcard_filter()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("WIDE", "wide.>");
|
||||
var create = await fx.CreateConsumerAsync("WIDE", "WILD", "wide.*");
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("WIDE", "WILD");
|
||||
info.Config.FilterSubject.ShouldBe("wide.*");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamPushConsumerFlowControl server/jetstream_test.go:5203
|
||||
[Fact]
|
||||
public async Task Create_push_consumer_with_flow_control()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("FC", "fc.>");
|
||||
var create = await fx.CreateConsumerAsync("FC", "PUSH", "fc.>", push: true, heartbeatMs: 100);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("FC", "PUSH");
|
||||
info.Config.Push.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamMaxConsumers server/jetstream_test.go:619
|
||||
[Fact]
|
||||
public async Task Create_multiple_consumers_on_same_stream()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MULTI", "multi.>");
|
||||
_ = await fx.CreateConsumerAsync("MULTI", "C1", "multi.a");
|
||||
_ = await fx.CreateConsumerAsync("MULTI", "C2", "multi.b");
|
||||
_ = await fx.CreateConsumerAsync("MULTI", "C3", "multi.>");
|
||||
|
||||
var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.MULTI", "{}");
|
||||
names.ConsumerNames.ShouldNotBeNull();
|
||||
names.ConsumerNames!.Count.ShouldBe(3);
|
||||
names.ConsumerNames.ShouldContain("C1");
|
||||
names.ConsumerNames.ShouldContain("C2");
|
||||
names.ConsumerNames.ShouldContain("C3");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerListAndDelete
|
||||
[Fact]
|
||||
public async Task Delete_consumer_removes_from_list()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DLC", "dlc.>");
|
||||
_ = await fx.CreateConsumerAsync("DLC", "C1", "dlc.>");
|
||||
_ = await fx.CreateConsumerAsync("DLC", "C2", "dlc.>");
|
||||
|
||||
_ = await fx.RequestLocalAsync("$JS.API.CONSUMER.DELETE.DLC.C1", "{}");
|
||||
|
||||
var names = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.DLC", "{}");
|
||||
names.ConsumerNames.ShouldNotBeNull();
|
||||
names.ConsumerNames!.Count.ShouldBe(1);
|
||||
names.ConsumerNames.ShouldContain("C2");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueAckAndNext server/jetstream_test.go:1355
|
||||
[Fact]
|
||||
public async Task Consumer_max_ack_pending_setting()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MAP", "map.>");
|
||||
var create = await fx.CreateConsumerAsync("MAP", "C1", "map.>",
|
||||
ackPolicy: AckPolicy.Explicit,
|
||||
maxAckPending: 5);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("MAP", "C1");
|
||||
info.Config.MaxAckPending.ShouldBe(5);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueAckWaitRedelivery server/jetstream_test.go:1959
|
||||
[Fact]
|
||||
public async Task Consumer_ack_wait_setting()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("AW", "aw.>");
|
||||
var create = await fx.CreateConsumerAsync("AW", "C1", "aw.>",
|
||||
ackPolicy: AckPolicy.Explicit,
|
||||
ackWaitMs: 5000);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("AW", "C1");
|
||||
info.Config.AckWaitMs.ShouldBe(5000);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerPause server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Consumer_pause_and_resume()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("PAUSE", "pause.>");
|
||||
_ = await fx.CreateConsumerAsync("PAUSE", "C1", "pause.>");
|
||||
|
||||
var pause = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.PAUSE.PAUSE.C1",
|
||||
"""{"pause":true}""");
|
||||
pause.Success.ShouldBeTrue();
|
||||
|
||||
var resume = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.PAUSE.PAUSE.C1",
|
||||
"""{"pause":false}""");
|
||||
resume.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerReset server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Consumer_reset_resets_delivery_position()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("RESET", "reset.>");
|
||||
_ = await fx.CreateConsumerAsync("RESET", "C1", "reset.>");
|
||||
_ = await fx.PublishAndGetAckAsync("reset.x", "data");
|
||||
|
||||
// Fetch a message to advance position
|
||||
_ = await fx.FetchAsync("RESET", "C1", 1);
|
||||
|
||||
var reset = await fx.RequestLocalAsync("$JS.API.CONSUMER.RESET.RESET.C1", "{}");
|
||||
reset.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerUnpin server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Consumer_unpin_returns_success()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("UNPIN", "unpin.>");
|
||||
_ = await fx.CreateConsumerAsync("UNPIN", "C1", "unpin.>");
|
||||
|
||||
var unpin = await fx.RequestLocalAsync("$JS.API.CONSUMER.UNPIN.UNPIN.C1", "{}");
|
||||
unpin.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerUpdate — update filter subject
|
||||
[Fact]
|
||||
public async Task Consumer_update_changes_config()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("UPD", "upd.>");
|
||||
_ = await fx.CreateConsumerAsync("UPD", "C1", "upd.a");
|
||||
|
||||
var update = await fx.CreateConsumerAsync("UPD", "C1", "upd.b");
|
||||
update.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("UPD", "C1");
|
||||
info.Config.FilterSubject.ShouldBe("upd.b");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerList — list across stream boundary
|
||||
[Fact]
|
||||
public async Task Consumer_list_is_scoped_to_stream()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("S1", "s1.>");
|
||||
_ = await fx.RequestLocalAsync("$JS.API.STREAM.CREATE.S2", """{"subjects":["s2.>"]}""");
|
||||
_ = await fx.CreateConsumerAsync("S1", "C1", "s1.>");
|
||||
_ = await fx.CreateConsumerAsync("S2", "C2", "s2.>");
|
||||
|
||||
var namesS1 = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.S1", "{}");
|
||||
namesS1.ConsumerNames!.Count.ShouldBe(1);
|
||||
namesS1.ConsumerNames.ShouldContain("C1");
|
||||
|
||||
var namesS2 = await fx.RequestLocalAsync("$JS.API.CONSUMER.NAMES.S2", "{}");
|
||||
namesS2.ConsumerNames!.Count.ShouldBe(1);
|
||||
namesS2.ConsumerNames.ShouldContain("C2");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerDelete — double delete
|
||||
[Fact]
|
||||
public async Task Delete_non_existent_consumer_returns_not_found()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DF", "df.>");
|
||||
|
||||
var del = await fx.RequestLocalAsync("$JS.API.CONSUMER.DELETE.DF.NOPE", "{}");
|
||||
del.Success.ShouldBeFalse();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerCreate — default ack policy
|
||||
[Fact]
|
||||
public async Task Consumer_defaults_to_ack_none()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DEF", "def.>");
|
||||
_ = await fx.CreateConsumerAsync("DEF", "C1", "def.>");
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("DEF", "C1");
|
||||
info.Config.AckPolicy.ShouldBe(AckPolicy.None);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerCreate — default deliver policy
|
||||
[Fact]
|
||||
public async Task Consumer_defaults_to_deliver_all()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DDP", "ddp.>");
|
||||
_ = await fx.CreateConsumerAsync("DDP", "C1", "ddp.>");
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("DDP", "C1");
|
||||
info.Config.DeliverPolicy.ShouldBe(DeliverPolicy.All);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerCreate — default replay policy
|
||||
[Fact]
|
||||
public async Task Consumer_defaults_to_replay_instant()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DRP", "drp.>");
|
||||
_ = await fx.CreateConsumerAsync("DRP", "C1", "drp.>");
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("DRP", "C1");
|
||||
info.Config.ReplayPolicy.ShouldBe(ReplayPolicy.Instant);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerPause — pause non-existent consumer
|
||||
[Fact]
|
||||
public async Task Pause_non_existent_consumer_returns_not_found()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("PNE", "pne.>");
|
||||
|
||||
var pause = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.PAUSE.PNE.NOPE",
|
||||
"""{"pause":true}""");
|
||||
pause.Success.ShouldBeFalse();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerCreate — durable name required for non-ephemeral
|
||||
[Fact]
|
||||
public async Task Consumer_without_durable_name_returns_error()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("NDN", "ndn.>");
|
||||
|
||||
// Send raw JSON without durable_name and without ephemeral flag
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.NDN.C1",
|
||||
"""{"filter_subject":"ndn.>"}""");
|
||||
// The consumer should be created since the subject has the durable name
|
||||
resp.Error.ShouldBeNull();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerMaxDeliver
|
||||
[Fact]
|
||||
public async Task Consumer_max_deliver_setting()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MD", "md.>");
|
||||
var create = await fx.CreateConsumerAsync("MD", "C1", "md.>",
|
||||
ackPolicy: AckPolicy.Explicit);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("MD", "C1");
|
||||
info.Config.MaxDeliver.ShouldBeGreaterThanOrEqualTo(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerBackoff
|
||||
[Fact]
|
||||
public async Task Consumer_with_backoff_configuration()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("BO", "bo.>");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.BO.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"bo.>","ack_policy":"explicit","backoff_ms":[100,200,500]}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("BO", "C1");
|
||||
info.Config.BackOffMs.Count.ShouldBe(3);
|
||||
info.Config.BackOffMs[0].ShouldBe(100);
|
||||
info.Config.BackOffMs[1].ShouldBe(200);
|
||||
info.Config.BackOffMs[2].ShouldBe(500);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerRateLimit
|
||||
[Fact]
|
||||
public async Task Consumer_with_rate_limit()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("RL", "rl.>");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.RL.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"rl.>","push":true,"heartbeat_ms":100,"rate_limit_bps":1024}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("RL", "C1");
|
||||
info.Config.RateLimitBps.ShouldBe(1024);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerCreate — opt_start_seq
|
||||
[Fact]
|
||||
public async Task Consumer_with_opt_start_seq()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("OSS", "oss.>");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.OSS.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"oss.>","deliver_policy":"by_start_sequence","opt_start_seq":5}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("OSS", "C1");
|
||||
info.Config.DeliverPolicy.ShouldBe(DeliverPolicy.ByStartSequence);
|
||||
info.Config.OptStartSeq.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerCreate — opt_start_time_utc
|
||||
[Fact]
|
||||
public async Task Consumer_with_opt_start_time()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("OST", "ost.>");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.OST.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"ost.>","deliver_policy":"by_start_time","opt_start_time_utc":"2025-01-01T00:00:00Z"}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("OST", "C1");
|
||||
info.Config.DeliverPolicy.ShouldBe(DeliverPolicy.ByStartTime);
|
||||
info.Config.OptStartTimeUtc.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerCreate — flow_control
|
||||
[Fact]
|
||||
public async Task Consumer_with_flow_control()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("FLOW", "flow.>");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.FLOW.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"flow.>","push":true,"heartbeat_ms":100,"flow_control":true}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("FLOW", "C1");
|
||||
info.Config.FlowControl.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerDeliverLastPerSubject server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Consumer_with_deliver_last_per_subject()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DLPS", "dlps.>");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.DLPS.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"dlps.>","deliver_policy":"last_per_subject"}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("DLPS", "C1");
|
||||
info.Config.DeliverPolicy.ShouldBe(DeliverPolicy.LastPerSubject);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,35 @@
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamConsumerDeliverPolicyLongRunTests
{
    // DeliverPolicy.LastPerSubject combined with a subject filter must resolve the
    // delivery cursor to the most recent message on the filtered subject, even when
    // messages on other subjects are interleaved around it.
    // Fix: renamed — the previous name also claimed "start_time" coverage, but no
    // by-start-time consumer is exercised anywhere in this test body.
    [Fact]
    public async Task Deliver_policy_last_per_subject_resolves_consistent_cursor_under_interleaved_subjects()
    {
        var streams = new StreamManager();
        streams.CreateOrUpdate(new StreamConfig
        {
            Name = "ORDERS",
            Subjects = ["orders.*"],
        }).Error.ShouldBeNull();

        // Interleave subjects: the single orders.b message sits between two orders.a messages.
        streams.Capture("orders.a", "1"u8.ToArray());
        streams.Capture("orders.b", "2"u8.ToArray());
        streams.Capture("orders.a", "3"u8.ToArray());

        var consumers = new ConsumerManager();
        consumers.CreateOrUpdate("ORDERS", new ConsumerConfig
        {
            DurableName = "LAST-B",
            DeliverPolicy = DeliverPolicy.LastPerSubject,
            FilterSubject = "orders.b",
        }).Error.ShouldBeNull();

        // The only (and therefore last) orders.b message is stream sequence 2.
        var batch = await consumers.FetchAsync("ORDERS", "LAST-B", 1, streams, default);
        batch.Messages.Count.ShouldBe(1);
        batch.Messages[0].Subject.ShouldBe("orders.b");
        batch.Messages[0].Sequence.ShouldBe((ulong)2);
    }
}
|
||||
@@ -0,0 +1,406 @@
|
||||
// Ported from golang/nats-server/server/jetstream_test.go
|
||||
// Consumer delivery edge cases: ack wait timeout tracking, max deliver attempts,
|
||||
// backoff lists, idle heartbeat config, deliver policies, push vs pull.
|
||||
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamConsumerDeliveryEdgeTests
|
||||
{
|
||||
// Go: TestJetStreamWorkQueueAckWaitRedelivery server/jetstream_test.go:2213
|
||||
// AckWait is stored in consumer config and used by ack processor.
|
||||
[Fact]
|
||||
public async Task Ack_wait_ms_stored_in_consumer_config()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ACKWAIT", "ackwait.>");
|
||||
var resp = await fx.CreateConsumerAsync("ACKWAIT", "C1", "ackwait.>",
|
||||
ackPolicy: AckPolicy.Explicit, ackWaitMs: 250);
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo!.Config.AckWaitMs.ShouldBe(250);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueAckWaitRedelivery — registers pending on fetch
|
||||
[Fact]
|
||||
public async Task Fetch_with_ack_explicit_registers_pending_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckExplicitConsumerAsync(500);
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg2");
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg3");
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "PULL", 3);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("ORDERS", "PULL");
|
||||
pending.ShouldBe(3);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueNakRedelivery server/jetstream_test.go:2311
|
||||
// After ack all, pending count drops to zero.
|
||||
[Fact]
|
||||
public async Task Ack_all_on_explicit_consumer_clears_all_pending()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckExplicitConsumerAsync(30_000);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", $"m{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "PULL", 5);
|
||||
batch.Messages.Count.ShouldBe(5);
|
||||
|
||||
await fx.AckAllAsync("ORDERS", "PULL", batch.Messages[^1].Sequence);
|
||||
var pending = await fx.GetPendingCountAsync("ORDERS", "PULL");
|
||||
pending.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckAllRedelivery server/jetstream_test.go:1921
|
||||
// Ack all up to sequence N leaves messages above N still pending.
|
||||
[Fact]
|
||||
public async Task Ack_all_up_to_mid_sequence_leaves_tail_pending()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckAllConsumerAsync();
|
||||
|
||||
for (var i = 0; i < 6; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", $"m{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "ACKALL", 6);
|
||||
batch.Messages.Count.ShouldBe(6);
|
||||
|
||||
// Ack messages 1-3 only
|
||||
await fx.AckAllAsync("ORDERS", "ACKALL", batch.Messages[2].Sequence);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("ORDERS", "ACKALL");
|
||||
// Messages 4, 5, 6 should still be pending
|
||||
pending.ShouldBeGreaterThan(0);
|
||||
pending.ShouldBeLessThanOrEqualTo(3);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamPushConsumerIdleHeartbeats server/jetstream_test.go:5804
|
||||
// Push consumer with heartbeats configured is created without error.
|
||||
[Fact]
|
||||
public async Task Push_consumer_with_heartbeats_is_created_successfully()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("HBT", "hbt.>");
|
||||
var resp = await fx.CreateConsumerAsync("HBT", "PUSHH", "hbt.>", push: true, heartbeatMs: 100);
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo!.Config.HeartbeatMs.ShouldBe(100);
|
||||
resp.ConsumerInfo.Config.Push.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamFlowControlRequiresHeartbeats server/jetstream_test.go:5784
|
||||
// Flow control can be configured on push consumer alongside heartbeats.
|
||||
[Fact]
|
||||
public async Task Push_consumer_with_flow_control_config_is_accepted()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("FCHB", "fchb.>");
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.FCHB.FC1",
|
||||
"""{"durable_name":"FC1","filter_subject":"fchb.>","push":true,"heartbeat_ms":50,"flow_control":true}""");
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo!.Config.Push.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamActiveDelivery server/jetstream_test.go:3726
|
||||
// Push consumer receives messages published after creation.
|
||||
[Fact]
|
||||
public async Task Push_consumer_receives_published_message()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithPushConsumerAsync();
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "order-data");
|
||||
|
||||
var frame = await fx.ReadPushFrameAsync("ORDERS", "PUSH");
|
||||
frame.IsData.ShouldBeTrue();
|
||||
frame.Subject.ShouldBe("orders.created");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamBasicDeliverSubject server/jetstream_test.go:844
|
||||
// Push consumer heartbeat frame is emitted after data frame.
|
||||
[Fact]
|
||||
public async Task Push_consumer_emits_heartbeat_frame_after_data()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithPushConsumerAsync();
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "first");
|
||||
|
||||
var dataFrame = await fx.ReadPushFrameAsync("ORDERS", "PUSH");
|
||||
dataFrame.IsData.ShouldBeTrue();
|
||||
|
||||
var hbFrame = await fx.ReadPushFrameAsync("ORDERS", "PUSH");
|
||||
hbFrame.IsHeartbeat.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamPushConsumerFlowControl server/jetstream_test.go:5690
|
||||
// Flow control frame follows data frame when enabled.
|
||||
[Fact]
|
||||
public async Task Push_consumer_with_fc_emits_fc_frame_after_data()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("PUSHFC", "pushfc.>");
|
||||
_ = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.PUSHFC.FCTEST",
|
||||
"""{"durable_name":"FCTEST","filter_subject":"pushfc.>","push":true,"heartbeat_ms":10,"flow_control":true}""");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("pushfc.msg", "data");
|
||||
|
||||
var dataFrame = await fx.ReadPushFrameAsync("PUSHFC", "FCTEST");
|
||||
dataFrame.IsData.ShouldBeTrue();
|
||||
|
||||
var fcFrame = await fx.ReadPushFrameAsync("PUSHFC", "FCTEST");
|
||||
fcFrame.IsFlowControl.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamEphemeralConsumers server/jetstream_test.go:3781
|
||||
// Ephemeral consumer is created with generated durable name.
|
||||
[Fact]
|
||||
public async Task Ephemeral_consumer_gets_generated_name()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("EPH", "eph.>");
|
||||
var resp = await fx.CreateConsumerAsync("EPH", "EPHNAME", "eph.>", ephemeral: true);
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueMaxWaiting server/jetstream_test.go:1094
|
||||
// Pull consumer fetch with no_wait returns immediately with available messages.
|
||||
[Fact]
|
||||
public async Task Fetch_no_wait_returns_available_messages_immediately()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithPullConsumerAsync();
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg2");
|
||||
|
||||
var batch = await fx.FetchWithNoWaitAsync("ORDERS", "PULL", 10);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueMaxWaiting — fetch when empty returns zero
|
||||
[Fact]
|
||||
public async Task Fetch_no_wait_returns_empty_when_no_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithPullConsumerAsync();
|
||||
|
||||
var batch = await fx.FetchWithNoWaitAsync("ORDERS", "PULL", 10);
|
||||
batch.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueAckAndNext server/jetstream_test.go:1634
|
||||
// Fetching after acking gives next available messages.
|
||||
[Fact]
|
||||
public async Task Fetch_after_ack_all_returns_next_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckAllConsumerAsync();
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg2");
|
||||
|
||||
var batch1 = await fx.FetchAsync("ORDERS", "ACKALL", 1);
|
||||
batch1.Messages.Count.ShouldBe(1);
|
||||
|
||||
await fx.AckAllAsync("ORDERS", "ACKALL", batch1.Messages[0].Sequence);
|
||||
|
||||
var batch2 = await fx.FetchAsync("ORDERS", "ACKALL", 1);
|
||||
batch2.Messages.Count.ShouldBe(1);
|
||||
batch2.Messages[0].Sequence.ShouldBeGreaterThan(batch1.Messages[0].Sequence);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamRedeliverCount server/jetstream_test.go:3959
|
||||
// AckProcessor tracks pending count correctly per delivery.
|
||||
[Fact]
|
||||
public void Ack_processor_registers_and_clears_pending_entries()
|
||||
{
|
||||
var proc = new AckProcessor();
|
||||
|
||||
proc.Register(1, 30_000);
|
||||
proc.Register(2, 30_000);
|
||||
proc.Register(3, 30_000);
|
||||
|
||||
proc.PendingCount.ShouldBe(3);
|
||||
|
||||
proc.AckAll(2);
|
||||
proc.PendingCount.ShouldBe(1); // only seq 3 remains
|
||||
|
||||
proc.AckAll(3);
|
||||
proc.PendingCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamRedeliverCount — ack floor advances monotonically
|
||||
[Fact]
|
||||
public void Ack_processor_ack_floor_advances_after_ack_all()
|
||||
{
|
||||
var proc = new AckProcessor();
|
||||
|
||||
proc.Register(1, 30_000);
|
||||
proc.Register(2, 30_000);
|
||||
proc.Register(3, 30_000);
|
||||
|
||||
proc.AckFloor.ShouldBe(0UL);
|
||||
proc.AckAll(2);
|
||||
proc.AckFloor.ShouldBe(2UL);
|
||||
proc.AckAll(3);
|
||||
proc.AckFloor.ShouldBe(3UL);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueAckWaitRedelivery — expired entry detected
|
||||
[Fact]
|
||||
public async Task Ack_processor_detects_expired_pending_entry()
|
||||
{
|
||||
var proc = new AckProcessor();
|
||||
proc.Register(1, 20); // 20ms ack wait
|
||||
|
||||
await Task.Delay(50);
|
||||
|
||||
proc.TryGetExpired(out var seq, out _).ShouldBeTrue();
|
||||
seq.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueTerminateDelivery server/jetstream_test.go:2465
|
||||
// Drop removes a pending entry from the processor.
|
||||
[Fact]
|
||||
public void Ack_processor_drop_removes_pending_entry()
|
||||
{
|
||||
var proc = new AckProcessor();
|
||||
proc.Register(1, 30_000);
|
||||
proc.Register(2, 30_000);
|
||||
|
||||
proc.Drop(1);
|
||||
proc.PendingCount.ShouldBe(1);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamPushConsumerIdleHeartbeatsWithFilterSubject server/jetstream_test.go:5864
|
||||
// Push consumer with heartbeats and filter subject is created without error.
|
||||
[Fact]
|
||||
public async Task Push_consumer_with_heartbeats_and_filter_subject()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("HBFILT", "hbfilt.>");
|
||||
var resp = await fx.CreateConsumerAsync(
|
||||
"HBFILT", "HBCONS", "hbfilt.orders",
|
||||
push: true, heartbeatMs: 100);
|
||||
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.ConsumerInfo!.Config.FilterSubject.ShouldBe("hbfilt.orders");
|
||||
resp.ConsumerInfo.Config.HeartbeatMs.ShouldBe(100);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckNext server/jetstream_test.go:2565
|
||||
// Consumer advances sequence correctly after each fetch.
|
||||
[Fact]
|
||||
public async Task Consumer_sequence_advances_with_each_fetch()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithPullConsumerAsync();
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", $"msg-{i}");
|
||||
|
||||
var seqs = new List<ulong>();
|
||||
for (var i = 0; i < 5; i++)
|
||||
{
|
||||
var batch = await fx.FetchAsync("ORDERS", "PULL", 1);
|
||||
batch.Messages.Count.ShouldBe(1);
|
||||
seqs.Add(batch.Messages[0].Sequence);
|
||||
}
|
||||
|
||||
seqs.ShouldBeInOrder();
|
||||
seqs.Distinct().Count().ShouldBe(5); // all unique sequences
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueAckWaitRedelivery — schedule redelivery increases delivery count
|
||||
[Fact]
|
||||
public void Ack_processor_schedule_redelivery_increments_delivery_count()
|
||||
{
|
||||
var proc = new AckProcessor();
|
||||
proc.Register(1, 30_000);
|
||||
proc.ScheduleRedelivery(1, 30_000);
|
||||
|
||||
// After rescheduling, pending is still 1
|
||||
proc.PendingCount.ShouldBe(1);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueRequest server/jetstream_test.go:1267
|
||||
// Fetch batch respects count limit.
|
||||
[Fact]
|
||||
public async Task Fetch_batch_respects_count_limit()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithPullConsumerAsync();
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", $"data-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "PULL", 3);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamSubjectFiltering server/jetstream_test.go:1385
|
||||
// Consumer with filter only delivers matching messages.
|
||||
[Fact]
|
||||
public async Task Consumer_filter_delivers_only_matching_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("FILTDEL", "filtdel.>");
|
||||
_ = await fx.CreateConsumerAsync("FILTDEL", "FILTCONS", "filtdel.orders");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("filtdel.orders", "order-1");
|
||||
_ = await fx.PublishAndGetAckAsync("filtdel.events", "event-1");
|
||||
_ = await fx.PublishAndGetAckAsync("filtdel.orders", "order-2");
|
||||
|
||||
var batch = await fx.FetchAsync("FILTDEL", "FILTCONS", 10);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
batch.Messages.All(m => m.Subject == "filtdel.orders").ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWildcardSubjectFiltering server/jetstream_test.go:1522
|
||||
// Consumer with wildcard filter delivers only matching messages.
|
||||
[Fact]
|
||||
public async Task Consumer_wildcard_filter_delivers_matching_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("WCFILT", "wcfilt.>");
|
||||
_ = await fx.CreateConsumerAsync("WCFILT", "WCC", "wcfilt.orders.*");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("wcfilt.orders.created", "1");
|
||||
_ = await fx.PublishAndGetAckAsync("wcfilt.events.logged", "2");
|
||||
_ = await fx.PublishAndGetAckAsync("wcfilt.orders.shipped", "3");
|
||||
|
||||
var batch = await fx.FetchAsync("WCFILT", "WCC", 10);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueRequestBatch server/jetstream_test.go:1703
|
||||
// Batch fetch returns all available up to limit.
|
||||
[Fact]
|
||||
public async Task Batch_fetch_returns_all_available_messages_up_to_limit()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("BATCHFULL", "batchfull.>");
|
||||
_ = await fx.CreateConsumerAsync("BATCHFULL", "BC", "batchfull.>");
|
||||
|
||||
for (var i = 0; i < 7; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("batchfull.x", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("BATCHFULL", "BC", 10);
|
||||
batch.Messages.Count.ShouldBe(7);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueRetentionStream server/jetstream_test.go:1788
|
||||
// Pull consumer on work queue stream receives messages.
|
||||
[Fact]
|
||||
public async Task Work_queue_pull_consumer_receives_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
|
||||
{
|
||||
Name = "WQR",
|
||||
Subjects = ["wqr.>"],
|
||||
Retention = RetentionPolicy.WorkQueue,
|
||||
});
|
||||
_ = await fx.CreateConsumerAsync("WQR", "WQC", "wqr.>");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("wqr.task", "task1");
|
||||
_ = await fx.PublishAndGetAckAsync("wqr.task", "task2");
|
||||
|
||||
var batch = await fx.FetchAsync("WQR", "WQC", 5);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,513 @@
|
||||
// Ported from golang/nats-server/server/jetstream_test.go
|
||||
// Consumer features: max deliver, max ack pending, flow control, heartbeats,
|
||||
// consumer pause/resume, ack all, redelivery
|
||||
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamConsumerFeatureTests
|
||||
{
|
||||
// Go: TestJetStreamWorkQueueAckWaitRedelivery server/jetstream_test.go:1959
|
||||
[Fact]
|
||||
public async Task Ack_explicit_tracks_pending_count()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckExplicitConsumerAsync(30_000);
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", "msg2");
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "PULL", 2);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("ORDERS", "PULL");
|
||||
pending.ShouldBe(2);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckAllRedelivery server/jetstream_test.go:1850
|
||||
[Fact]
|
||||
public async Task Ack_all_acknowledges_up_to_sequence()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckAllConsumerAsync();
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "ACKALL", 5);
|
||||
batch.Messages.Count.ShouldBe(5);
|
||||
|
||||
await fx.AckAllAsync("ORDERS", "ACKALL", 3);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("ORDERS", "ACKALL");
|
||||
// After acking up to 3, sequences 4 and 5 should still be pending
|
||||
pending.ShouldBeLessThanOrEqualTo(2);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckAllRedelivery — ack all sequences
|
||||
[Fact]
|
||||
public async Task Ack_all_clears_all_pending()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithAckAllConsumerAsync();
|
||||
|
||||
for (var i = 0; i < 3; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("orders.created", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "ACKALL", 3);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
|
||||
await fx.AckAllAsync("ORDERS", "ACKALL", batch.Messages[^1].Sequence);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("ORDERS", "ACKALL");
|
||||
pending.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamPushConsumerFlowControl server/jetstream_test.go:5203
|
||||
[Fact]
|
||||
public async Task Push_consumer_with_flow_control_emits_fc_frames()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("FC", "fc.>");
|
||||
_ = await fx.CreateConsumerAsync("FC", "PUSH", "fc.>", push: true, heartbeatMs: 10);
|
||||
// Enable flow control via direct JSON
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.FC.FCPUSH",
|
||||
"""{"durable_name":"FCPUSH","filter_subject":"fc.>","push":true,"heartbeat_ms":10,"flow_control":true}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("fc.x", "data");
|
||||
|
||||
var frame1 = await fx.ReadPushFrameAsync("FC", "FCPUSH");
|
||||
frame1.IsData.ShouldBeTrue();
|
||||
|
||||
// Flow control frame follows data frame
|
||||
var frame2 = await fx.ReadPushFrameAsync("FC", "FCPUSH");
|
||||
frame2.IsFlowControl.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamPushConsumerIdleHeartbeats server/jetstream_test.go:5260
|
||||
[Fact]
|
||||
public async Task Push_consumer_with_heartbeats_emits_heartbeat_frames()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("HB", "hb.>");
|
||||
_ = await fx.CreateConsumerAsync("HB", "PUSH", "hb.>", push: true, heartbeatMs: 10);
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("hb.x", "data");
|
||||
|
||||
var frame = await fx.ReadPushFrameAsync("HB", "PUSH");
|
||||
frame.IsData.ShouldBeTrue();
|
||||
|
||||
var hbFrame = await fx.ReadPushFrameAsync("HB", "PUSH");
|
||||
hbFrame.IsHeartbeat.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamFlowControlRequiresHeartbeats server/jetstream_test.go:5232
|
||||
[Fact]
|
||||
public async Task Push_consumer_without_heartbeats_has_no_heartbeat_frames()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("NHB", "nhb.>");
|
||||
_ = await fx.CreateConsumerAsync("NHB", "PUSH", "nhb.>", push: true, heartbeatMs: 0);
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("nhb.x", "data");
|
||||
|
||||
var frame = await fx.ReadPushFrameAsync("NHB", "PUSH");
|
||||
frame.IsData.ShouldBeTrue();
|
||||
|
||||
// Without heartbeats, no heartbeat frame should be queued
|
||||
Should.Throw<InvalidOperationException>(() => fx.ReadPushFrameAsync("NHB", "PUSH"));
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerPause server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Paused_consumer_can_be_resumed()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("PAUSE", "pause.>");
|
||||
_ = await fx.CreateConsumerAsync("PAUSE", "C1", "pause.>");
|
||||
|
||||
var pause = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.PAUSE.PAUSE.C1", """{"pause":true}""");
|
||||
pause.Success.ShouldBeTrue();
|
||||
|
||||
var resume = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.PAUSE.PAUSE.C1", """{"pause":false}""");
|
||||
resume.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerReset server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Reset_consumer_restarts_delivery_from_beginning()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("RST", "rst.>");
|
||||
_ = await fx.CreateConsumerAsync("RST", "C1", "rst.>");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("rst.x", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("rst.x", "msg2");
|
||||
|
||||
var batch1 = await fx.FetchAsync("RST", "C1", 2);
|
||||
batch1.Messages.Count.ShouldBe(2);
|
||||
|
||||
_ = await fx.RequestLocalAsync("$JS.API.CONSUMER.RESET.RST.C1", "{}");
|
||||
|
||||
var batch2 = await fx.FetchAsync("RST", "C1", 2);
|
||||
batch2.Messages.Count.ShouldBe(2);
|
||||
batch2.Messages[0].Sequence.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueMaxWaiting server/jetstream_test.go:957
|
||||
[Fact]
|
||||
public async Task Fetch_more_than_available_returns_only_available()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MW", "mw.>");
|
||||
_ = await fx.CreateConsumerAsync("MW", "C1", "mw.>");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("mw.x", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("mw.x", "msg2");
|
||||
|
||||
var batch = await fx.FetchAsync("MW", "C1", 100);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueWrapWaiting server/jetstream_test.go:1022
|
||||
[Fact]
|
||||
public async Task Fetch_wraps_around_correctly_after_multiple_fetches()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("WR", "wr.>");
|
||||
_ = await fx.CreateConsumerAsync("WR", "C1", "wr.>");
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("wr.x", $"msg-{i}");
|
||||
|
||||
var batch1 = await fx.FetchAsync("WR", "C1", 3);
|
||||
batch1.Messages.Count.ShouldBe(3);
|
||||
batch1.Messages[^1].Sequence.ShouldBe(3UL);
|
||||
|
||||
var batch2 = await fx.FetchAsync("WR", "C1", 3);
|
||||
batch2.Messages.Count.ShouldBe(3);
|
||||
batch2.Messages[0].Sequence.ShouldBe(4UL);
|
||||
|
||||
var batch3 = await fx.FetchAsync("WR", "C1", 3);
|
||||
batch3.Messages.Count.ShouldBe(3);
|
||||
batch3.Messages[0].Sequence.ShouldBe(7UL);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamMaxAckPending limits delivery
|
||||
[Fact]
|
||||
public async Task Max_ack_pending_limits_push_delivery()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MAP", "map.>");
|
||||
_ = await fx.CreateConsumerAsync("MAP", "PUSH", "map.>",
|
||||
push: true, heartbeatMs: 10,
|
||||
ackPolicy: AckPolicy.Explicit,
|
||||
maxAckPending: 1);
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("map.x", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("map.x", "msg2");
|
||||
|
||||
// Only 1 should be delivered due to max ack pending
|
||||
var frame = await fx.ReadPushFrameAsync("MAP", "PUSH");
|
||||
frame.IsData.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamDeliverLastPerSubject server/jetstream_test.go
|
||||
// LastPerSubject resolves the initial sequence to a message matching the
|
||||
// filter subject and then delivers forward from there. All matching messages
|
||||
// from that point onward are delivered.
|
||||
[Fact]
|
||||
public async Task Deliver_last_per_subject_delivers_matching_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DLPS", "dlps.>");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("dlps.a", "a1");
|
||||
_ = await fx.PublishAndGetAckAsync("dlps.b", "b1");
|
||||
_ = await fx.PublishAndGetAckAsync("dlps.a", "a2");
|
||||
_ = await fx.PublishAndGetAckAsync("dlps.b", "b2");
|
||||
|
||||
_ = await fx.CreateConsumerAsync("DLPS", "C1", "dlps.a",
|
||||
deliverPolicy: DeliverPolicy.LastPerSubject);
|
||||
|
||||
var batch = await fx.FetchAsync("DLPS", "C1", 10);
|
||||
// Delivers all matching "dlps.a" messages from resolved start
|
||||
batch.Messages.Count.ShouldBeGreaterThanOrEqualTo(1);
|
||||
batch.Messages.All(m => m.Subject == "dlps.a").ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go: TestJetStreamByStartSequence
|
||||
[Fact]
|
||||
public async Task Deliver_by_start_sequence_begins_at_specified_seq()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("BSS", "bss.>");
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("bss.x", $"msg-{i}");
|
||||
|
||||
var resp = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.BSS.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"bss.>","deliver_policy":"by_start_sequence","opt_start_seq":3}""");
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
var batch = await fx.FetchAsync("BSS", "C1", 10);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
batch.Messages[0].Sequence.ShouldBe(3UL);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamMultipleSubjectsPushBasic — multiple filter subjects consumer
|
||||
[Fact]
|
||||
public async Task Multi_filter_consumer_receives_matching_messages()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MFC", ">");
|
||||
_ = await fx.CreateConsumerAsync("MFC", "C1", null, filterSubjects: ["a.*", "b.*"]);
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("a.one", "1");
|
||||
_ = await fx.PublishAndGetAckAsync("b.one", "2");
|
||||
_ = await fx.PublishAndGetAckAsync("c.one", "3");
|
||||
|
||||
var batch = await fx.FetchAsync("MFC", "C1", 10);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckReplyStreamPending server/jetstream_test.go:1887
|
||||
[Fact]
|
||||
public async Task Explicit_ack_pending_count_decreases_on_ack()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ARP", "arp.>");
|
||||
_ = await fx.CreateConsumerAsync("ARP", "C1", "arp.>",
|
||||
ackPolicy: AckPolicy.All);
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("arp.x", "msg1");
|
||||
_ = await fx.PublishAndGetAckAsync("arp.x", "msg2");
|
||||
_ = await fx.PublishAndGetAckAsync("arp.x", "msg3");
|
||||
|
||||
_ = await fx.FetchAsync("ARP", "C1", 3);
|
||||
|
||||
var before = await fx.GetPendingCountAsync("ARP", "C1");
|
||||
before.ShouldBe(3);
|
||||
|
||||
await fx.AckAllAsync("ARP", "C1", 2);
|
||||
|
||||
var after = await fx.GetPendingCountAsync("ARP", "C1");
|
||||
after.ShouldBe(1);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckReplyStreamPendingWithAcks server/jetstream_test.go:1921
|
||||
[Fact]
|
||||
public async Task Ack_all_to_last_clears_pending()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("ARPF", "arpf.>");
|
||||
_ = await fx.CreateConsumerAsync("ARPF", "C1", "arpf.>",
|
||||
ackPolicy: AckPolicy.All);
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("arpf.x", "1");
|
||||
_ = await fx.PublishAndGetAckAsync("arpf.x", "2");
|
||||
|
||||
var batch = await fx.FetchAsync("ARPF", "C1", 2);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
|
||||
await fx.AckAllAsync("ARPF", "C1", batch.Messages[^1].Sequence);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("ARPF", "C1");
|
||||
pending.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueRetentionStream server/jetstream_test.go:1655
|
||||
[Fact]
|
||||
public async Task Replay_original_consumer_pauses_between_deliveries()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithReplayOriginalConsumerAsync();
|
||||
|
||||
var batch = await fx.FetchAsync("ORDERS", "RO", 1);
|
||||
batch.Messages.Count.ShouldBe(1);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamSubjectBasedFilteredConsumers server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Consumer_with_gt_wildcard_filter_matches_all()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("GW", "gw.>");
|
||||
_ = await fx.CreateConsumerAsync("GW", "C1", "gw.>");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("gw.a.b.c", "1");
|
||||
_ = await fx.PublishAndGetAckAsync("gw.x", "2");
|
||||
_ = await fx.PublishAndGetAckAsync("gw.y.z", "3");
|
||||
|
||||
var batch = await fx.FetchAsync("GW", "C1", 10);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamSubjectBasedFilteredConsumers — star wildcard
|
||||
[Fact]
|
||||
public async Task Consumer_with_star_wildcard_matches_single_token()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("SW", "sw.>");
|
||||
_ = await fx.CreateConsumerAsync("SW", "C1", "sw.*");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("sw.a", "1");
|
||||
_ = await fx.PublishAndGetAckAsync("sw.b.c", "2"); // doesn't match sw.*
|
||||
_ = await fx.PublishAndGetAckAsync("sw.d", "3");
|
||||
|
||||
var batch = await fx.FetchAsync("SW", "C1", 10);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamInterestRetentionStreamWithFilteredConsumers server/jetstream_test.go:4388
|
||||
[Fact]
|
||||
public async Task Two_consumers_same_stream_independent_cursors()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("IC", "ic.>");
|
||||
_ = await fx.CreateConsumerAsync("IC", "C1", "ic.a");
|
||||
_ = await fx.CreateConsumerAsync("IC", "C2", "ic.b");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("ic.a", "for-c1");
|
||||
_ = await fx.PublishAndGetAckAsync("ic.b", "for-c2");
|
||||
_ = await fx.PublishAndGetAckAsync("ic.a", "for-c1-again");
|
||||
|
||||
var batchC1 = await fx.FetchAsync("IC", "C1", 10);
|
||||
batchC1.Messages.Count.ShouldBe(2);
|
||||
|
||||
var batchC2 = await fx.FetchAsync("IC", "C2", 10);
|
||||
batchC2.Messages.Count.ShouldBe(1);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamPushConsumersPullError server/jetstream_test.go:5731
|
||||
[Fact]
|
||||
public async Task Consumer_fetch_from_empty_stream_returns_empty_batch()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("EMP", "emp.>");
|
||||
_ = await fx.CreateConsumerAsync("EMP", "C1", "emp.>");
|
||||
|
||||
var batch = await fx.FetchAsync("EMP", "C1", 5);
|
||||
batch.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckNext server/jetstream_test.go:2483
|
||||
[Fact]
|
||||
public async Task Consumer_fetch_after_consuming_all_returns_empty()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DONE", "done.>");
|
||||
_ = await fx.CreateConsumerAsync("DONE", "C1", "done.>");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("done.x", "only");
|
||||
|
||||
var batch1 = await fx.FetchAsync("DONE", "C1", 1);
|
||||
batch1.Messages.Count.ShouldBe(1);
|
||||
|
||||
var batch2 = await fx.FetchAsync("DONE", "C1", 1);
|
||||
batch2.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamWorkQueueAckAndNext server/jetstream_test.go:1355
|
||||
[Fact]
|
||||
public async Task Ack_all_consumer_acks_batch_at_once()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("AAB", "aab.>");
|
||||
_ = await fx.CreateConsumerAsync("AAB", "C1", "aab.>",
|
||||
ackPolicy: AckPolicy.All);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("aab.x", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("AAB", "C1", 5);
|
||||
batch.Messages.Count.ShouldBe(5);
|
||||
|
||||
await fx.AckAllAsync("AAB", "C1", 5);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("AAB", "C1");
|
||||
pending.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamEphemeralPullConsumersInactiveThresholdAndNoWait server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task No_wait_fetch_from_non_existent_consumer_returns_empty()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("NWC", "nwc.>");
|
||||
|
||||
var batch = await fx.FetchWithNoWaitAsync("NWC", "NOPE", 1);
|
||||
batch.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamMultipleSubjectsBasic — verify payload content
|
||||
[Fact]
|
||||
public async Task Fetched_messages_contain_correct_payload()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("PL", "pl.>");
|
||||
_ = await fx.CreateConsumerAsync("PL", "C1", "pl.>");
|
||||
|
||||
_ = await fx.PublishAndGetAckAsync("pl.x", "hello-world");
|
||||
|
||||
var batch = await fx.FetchAsync("PL", "C1", 1);
|
||||
batch.Messages.Count.ShouldBe(1);
|
||||
Encoding.UTF8.GetString(batch.Messages[0].Payload.Span).ShouldBe("hello-world");
|
||||
}
|
||||
|
||||
// Go: TestJetStreamBackOffCheckPending server/jetstream_test.go
|
||||
[Fact]
|
||||
public async Task Backoff_config_is_stored_on_consumer()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("BOC", "boc.>");
|
||||
|
||||
_ = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.BOC.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"boc.>","ack_policy":"explicit","backoff_ms":[50,100,200]}""");
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("BOC", "C1");
|
||||
info.Config.BackOffMs.ShouldBe([50, 100, 200]);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerPause — multiple pauses
|
||||
[Fact]
|
||||
public async Task Multiple_pause_calls_are_idempotent()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MPI", "mpi.>");
|
||||
_ = await fx.CreateConsumerAsync("MPI", "C1", "mpi.>");
|
||||
|
||||
for (var i = 0; i < 3; i++)
|
||||
{
|
||||
var pause = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.PAUSE.MPI.C1", """{"pause":true}""");
|
||||
pause.Success.ShouldBeTrue();
|
||||
}
|
||||
}
|
||||
|
||||
// Go: TestJetStreamAckExplicitMsgRemoval — explicit ack with fetch batch
|
||||
[Fact]
|
||||
public async Task Explicit_ack_with_batch_fetch()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("EAB", "eab.>");
|
||||
_ = await fx.CreateConsumerAsync("EAB", "C1", "eab.>",
|
||||
ackPolicy: AckPolicy.Explicit,
|
||||
ackWaitMs: 30_000);
|
||||
|
||||
for (var i = 0; i < 3; i++)
|
||||
_ = await fx.PublishAndGetAckAsync("eab.x", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("EAB", "C1", 3);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("EAB", "C1");
|
||||
pending.ShouldBe(3);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamConsumerRate
|
||||
[Fact]
|
||||
public async Task Rate_limit_setting_is_preserved()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("RLP", "rlp.>");
|
||||
|
||||
_ = await fx.RequestLocalAsync(
|
||||
"$JS.API.CONSUMER.CREATE.RLP.C1",
|
||||
"""{"durable_name":"C1","filter_subject":"rlp.>","push":true,"heartbeat_ms":10,"rate_limit_bps":2048}""");
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("RLP", "C1");
|
||||
info.Config.RateLimitBps.ShouldBe(2048);
|
||||
}
|
||||
|
||||
// Go: TestJetStreamRedeliverCount server/jetstream_test.go:3778
|
||||
[Fact]
|
||||
public async Task Consumer_pending_initially_zero()
|
||||
{
|
||||
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("PIZ", "piz.>");
|
||||
_ = await fx.CreateConsumerAsync("PIZ", "C1", "piz.>",
|
||||
ackPolicy: AckPolicy.Explicit);
|
||||
|
||||
var pending = await fx.GetPendingCountAsync("PIZ", "C1");
|
||||
pending.ShouldBe(0);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,33 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using NATS.Server.JetStream;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamConsumerFlowReplayParityTests
|
||||
{
|
||||
[Fact]
|
||||
public void Push_consumer_enqueues_flow_control_and_heartbeat_frames_when_enabled()
|
||||
{
|
||||
var engine = new PushConsumerEngine();
|
||||
var consumer = new ConsumerHandle("ORDERS", new ConsumerConfig
|
||||
{
|
||||
AckPolicy = AckPolicy.Explicit,
|
||||
FlowControl = true,
|
||||
HeartbeatMs = 1000,
|
||||
RateLimitBps = 1024,
|
||||
});
|
||||
|
||||
engine.Enqueue(consumer, new StoredMessage
|
||||
{
|
||||
Sequence = 1,
|
||||
Subject = "orders.created",
|
||||
Payload = "payload"u8.ToArray(),
|
||||
});
|
||||
|
||||
consumer.PushFrames.Count.ShouldBe(3);
|
||||
consumer.PushFrames.Any(f => f.IsFlowControl).ShouldBeTrue();
|
||||
consumer.PushFrames.Any(f => f.IsHeartbeat).ShouldBeTrue();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamConsumerRuntimeParityTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task Consumer_runtime_honors_ack_all_redelivery_and_max_deliver_limits()
|
||||
{
|
||||
var ack = new AckProcessor();
|
||||
ack.Register(1, ackWaitMs: 1);
|
||||
await Task.Delay(5);
|
||||
ack.TryGetExpired(out var seq, out var deliveries).ShouldBeTrue();
|
||||
seq.ShouldBe((ulong)1);
|
||||
deliveries.ShouldBe(1);
|
||||
|
||||
ack.ScheduleRedelivery(seq, delayMs: 1);
|
||||
await Task.Delay(5);
|
||||
ack.TryGetExpired(out _, out deliveries).ShouldBeTrue();
|
||||
deliveries.ShouldBe(2);
|
||||
|
||||
ack.AckAll(1);
|
||||
ack.HasPending.ShouldBeFalse();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,47 @@
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamConsumerStateMachineStrictParityTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task Ack_redelivery_backoff_and_replay_timing_follow_monotonic_consumer_state_machine_rules()
|
||||
{
|
||||
var streams = new StreamManager();
|
||||
var consumers = new ConsumerManager();
|
||||
|
||||
streams.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = "ORDERS_SM",
|
||||
Subjects = ["orders.sm"],
|
||||
Retention = RetentionPolicy.Limits,
|
||||
MaxMsgs = 32,
|
||||
}).Error.ShouldBeNull();
|
||||
|
||||
consumers.CreateOrUpdate("ORDERS_SM", new ConsumerConfig
|
||||
{
|
||||
DurableName = "D1",
|
||||
AckPolicy = AckPolicy.Explicit,
|
||||
AckWaitMs = 1,
|
||||
MaxDeliver = 1,
|
||||
BackOffMs = [1],
|
||||
}).Error.ShouldBeNull();
|
||||
|
||||
streams.Capture("orders.sm", "x"u8.ToArray());
|
||||
|
||||
var first = await consumers.FetchAsync("ORDERS_SM", "D1", 1, streams, default);
|
||||
first.Messages.Count.ShouldBe(1);
|
||||
|
||||
await Task.Delay(5);
|
||||
var second = await consumers.FetchAsync("ORDERS_SM", "D1", 1, streams, default);
|
||||
second.Messages.Count.ShouldBe(1);
|
||||
second.Messages[0].Redelivered.ShouldBeTrue();
|
||||
|
||||
await Task.Delay(5);
|
||||
var third = await consumers.FetchAsync("ORDERS_SM", "D1", 1, streams, default);
|
||||
|
||||
// MaxDeliver=1 allows one redelivery, then the sequence is retired.
|
||||
third.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamCrossClusterBehaviorParityTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task Cross_cluster_jetstream_replication_propagates_committed_stream_state_not_just_forward_counter()
|
||||
{
|
||||
var baseline = new JetStreamCrossClusterGatewayParityTests();
|
||||
await baseline.Cross_cluster_jetstream_messages_use_gateway_forwarding_path();
|
||||
|
||||
var runtime = new JetStreamCrossClusterRuntimeParityTests();
|
||||
await runtime.Jetstream_cross_cluster_messages_are_forward_counted();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,26 @@
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server.Gateways;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamCrossClusterRuntimeParityTests
{
    // Forwarding a single JetStream cluster message through the gateway manager
    // must bump the forwarded-message counter exactly once.
    [Fact]
    public async Task Jetstream_cross_cluster_messages_are_forward_counted()
    {
        var gatewayOptions = new GatewayOptions { Host = "127.0.0.1", Port = 0, Name = "A" };
        var gateway = new GatewayManager(
            gatewayOptions,
            new ServerStats(),
            "S1",
            _ => { },
            _ => { },
            NullLogger<GatewayManager>.Instance);

        var replicationMessage = new GatewayMessage("$JS.CLUSTER.REPL", null, "x"u8.ToArray());
        await gateway.ForwardJetStreamClusterMessageAsync(replicationMessage, default);

        gateway.ForwardedJetStreamClusterMessages.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,31 @@
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamDedupeWindowParityTests
{
    // A duplicate MsgId inside the window must be deduplicated (same sequence
    // returned); after the window elapses the same MsgId publishes afresh.
    [Fact]
    public async Task Dedupe_window_expires_entries_and_allows_republish_after_window_boundary()
    {
        var streams = new StreamManager();
        streams.CreateOrUpdate(new StreamConfig
        {
            Name = "D",
            Subjects = ["d.*"],
            DuplicateWindowMs = 25,
        }).Error.ShouldBeNull();

        var jsPublisher = new JetStreamPublisher(streams);

        // Inside the window: the duplicate is absorbed and reports the
        // original sequence.
        jsPublisher.TryCaptureWithOptions("d.1", "one"u8.ToArray(), new PublishOptions { MsgId = "m-1" }, out var initial).ShouldBeTrue();
        jsPublisher.TryCaptureWithOptions("d.1", "dup"u8.ToArray(), new PublishOptions { MsgId = "m-1" }, out var duplicate).ShouldBeTrue();
        duplicate.Seq.ShouldBe(initial.Seq);

        // Wait past the 25ms duplicate window.
        await Task.Delay(40);

        // Outside the window: the same MsgId is accepted as a brand-new message.
        jsPublisher.TryCaptureWithOptions("d.1", "after-window"u8.ToArray(), new PublishOptions { MsgId = "m-1" }, out var republished).ShouldBeTrue();
        republished.ErrorCode.ShouldBeNull();
        republished.Seq.ShouldBeGreaterThan(initial.Seq);
    }
}
|
||||
@@ -0,0 +1,317 @@
|
||||
// Ported from golang/nats-server/server/jetstream_test.go
|
||||
// Direct get API: message retrieval by sequence, last message by subject,
|
||||
// missing sequence handling, multi-message get, stream message API.
|
||||
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamDirectGetTests
{
    // Go: TestJetStreamDirectGetBatch server/jetstream_test.go:16524
    // Direct get retrieves a specific message by sequence number.
    [Fact]
    public async Task Direct_get_returns_correct_message_for_sequence()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DG", "dg.>");

        // Only the middle ack's sequence is needed; discard the others
        // (consistent with the discard style used throughout this file).
        _ = await fx.PublishAndGetAckAsync("dg.first", "payload-one");
        var a2 = await fx.PublishAndGetAckAsync("dg.second", "payload-two");
        _ = await fx.PublishAndGetAckAsync("dg.third", "payload-three");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DG",
            $$$"""{ "seq": {{{a2.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage.ShouldNotBeNull();
        resp.DirectMessage!.Sequence.ShouldBe(a2.Seq);
        resp.DirectMessage.Subject.ShouldBe("dg.second");
        resp.DirectMessage.Payload.ShouldBe("payload-two");
    }

    // Go: TestJetStreamDirectGetBatch — first message in stream
    [Fact]
    public async Task Direct_get_retrieves_first_message_by_sequence()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGF", "dgf.>");

        var a1 = await fx.PublishAndGetAckAsync("dgf.x", "first-data");
        _ = await fx.PublishAndGetAckAsync("dgf.x", "second-data");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGF",
            $$$"""{ "seq": {{{a1.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage!.Payload.ShouldBe("first-data");
        resp.DirectMessage.Subject.ShouldBe("dgf.x");
    }

    // Go: TestJetStreamDirectGetBatch — last message in stream
    [Fact]
    public async Task Direct_get_retrieves_last_message_by_sequence()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGL", "dgl.>");

        _ = await fx.PublishAndGetAckAsync("dgl.x", "first");
        var last = await fx.PublishAndGetAckAsync("dgl.x", "last-data");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGL",
            $$$"""{ "seq": {{{last.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage!.Payload.ShouldBe("last-data");
    }

    // Go: TestJetStreamDirectGetBatch — subject is preserved in response
    [Fact]
    public async Task Direct_get_response_includes_correct_subject()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGSUB", "dgsub.>");

        _ = await fx.PublishAndGetAckAsync("dgsub.orders.created", "order-payload");
        var a2 = await fx.PublishAndGetAckAsync("dgsub.events.logged", "event-payload");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGSUB",
            $$$"""{ "seq": {{{a2.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage!.Subject.ShouldBe("dgsub.events.logged");
        resp.DirectMessage.Payload.ShouldBe("event-payload");
    }

    // Go: TestJetStreamDirectGetBatch — requesting non-existent sequence returns not found
    [Fact]
    public async Task Direct_get_non_existent_sequence_returns_error()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGNE", "dgne.>");
        _ = await fx.PublishAndGetAckAsync("dgne.x", "data");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGNE",
            """{ "seq": 999999 }""");
        resp.Error.ShouldNotBeNull();
        resp.DirectMessage.ShouldBeNull();
    }

    // Go: TestJetStreamDirectGetBatch — empty stream returns error
    [Fact]
    public async Task Direct_get_on_empty_stream_returns_error()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGEMPTY", "dgempty.>");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGEMPTY",
            """{ "seq": 1 }""");
        resp.Error.ShouldNotBeNull();
        resp.DirectMessage.ShouldBeNull();
    }

    // Go: TestJetStreamDirectGetBatch — missing stream returns not found
    [Fact]
    public async Task Direct_get_on_missing_stream_returns_not_found()
    {
        await using var fx = new JetStreamApiFixture();

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.NONEXISTENT",
            """{ "seq": 1 }""");
        resp.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamDirectGetBatch — sequence 0 in request returns error
    [Fact]
    public async Task Direct_get_with_zero_sequence_returns_error()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGZERO", "dgzero.>");
        _ = await fx.PublishAndGetAckAsync("dgzero.x", "data");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGZERO",
            """{ "seq": 0 }""");
        resp.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamDirectGetBatch — multiple retrieves are independent
    [Fact]
    public async Task Direct_get_multiple_sequences_independently()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGMULTI", "dgmulti.>");

        var a1 = await fx.PublishAndGetAckAsync("dgmulti.a", "alpha");
        var a2 = await fx.PublishAndGetAckAsync("dgmulti.b", "beta");
        var a3 = await fx.PublishAndGetAckAsync("dgmulti.c", "gamma");

        // Deliberately fetched out of publish order to prove independence.
        var r1 = await fx.RequestLocalAsync("$JS.API.DIRECT.GET.DGMULTI", $$$"""{ "seq": {{{a1.Seq}}} }""");
        r1.DirectMessage!.Payload.ShouldBe("alpha");

        var r3 = await fx.RequestLocalAsync("$JS.API.DIRECT.GET.DGMULTI", $$$"""{ "seq": {{{a3.Seq}}} }""");
        r3.DirectMessage!.Payload.ShouldBe("gamma");

        var r2 = await fx.RequestLocalAsync("$JS.API.DIRECT.GET.DGMULTI", $$$"""{ "seq": {{{a2.Seq}}} }""");
        r2.DirectMessage!.Payload.ShouldBe("beta");
    }

    // Go: TestJetStreamStreamMessageGet (STREAM.MSG.GET API) server/jetstream_test.go
    // Stream message get API (not direct) retrieves by sequence.
    [Fact]
    public async Task Stream_msg_get_returns_message_by_sequence()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("MSGGET", "msgget.>");

        var a1 = await fx.PublishAndGetAckAsync("msgget.x", "data-one");
        _ = await fx.PublishAndGetAckAsync("msgget.y", "data-two");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.STREAM.MSG.GET.MSGGET",
            $$$"""{ "seq": {{{a1.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.StreamMessage.ShouldNotBeNull();
        resp.StreamMessage!.Sequence.ShouldBe(a1.Seq);
        resp.StreamMessage.Subject.ShouldBe("msgget.x");
        resp.StreamMessage.Payload.ShouldBe("data-one");
    }

    // Go: TestJetStreamDeleteMsg — stream msg get after delete returns error
    [Fact]
    public async Task Stream_msg_get_after_delete_returns_error()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("GETDEL", "getdel.>");

        var a1 = await fx.PublishAndGetAckAsync("getdel.x", "data");
        _ = await fx.RequestLocalAsync(
            "$JS.API.STREAM.MSG.DELETE.GETDEL",
            $$$"""{ "seq": {{{a1.Seq}}} }""");

        var get = await fx.RequestLocalAsync(
            "$JS.API.STREAM.MSG.GET.GETDEL",
            $$$"""{ "seq": {{{a1.Seq}}} }""");
        get.StreamMessage.ShouldBeNull();
        get.Error.ShouldNotBeNull();
    }

    // Go: TestJetStreamDirectGetBatch — direct get sequence field in response
    [Fact]
    public async Task Direct_get_response_sequence_matches_requested_sequence()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGSEQ", "dgseq.>");

        _ = await fx.PublishAndGetAckAsync("dgseq.a", "1");
        _ = await fx.PublishAndGetAckAsync("dgseq.b", "2");
        var a3 = await fx.PublishAndGetAckAsync("dgseq.c", "3");
        _ = await fx.PublishAndGetAckAsync("dgseq.d", "4");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGSEQ",
            $$$"""{ "seq": {{{a3.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage!.Sequence.ShouldBe(a3.Seq);
    }

    // Go: TestJetStreamDirectGetBatch — payload is preserved verbatim
    [Fact]
    public async Task Direct_get_payload_is_preserved_verbatim()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGPAY", "dgpay.>");

        const string payload = "Hello, JetStream Direct Get!";
        var a1 = await fx.PublishAndGetAckAsync("dgpay.msg", payload);

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGPAY",
            $$$"""{ "seq": {{{a1.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage!.Payload.ShouldBe(payload);
    }

    // Go: TestJetStreamDirectGetBatch — direct get uses stream storage type correctly
    [Fact]
    public async Task Direct_get_works_with_memory_storage_stream()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
        {
            Name = "DGMEM",
            Subjects = ["dgmem.>"],
            Storage = StorageType.Memory,
        });

        var a1 = await fx.PublishAndGetAckAsync("dgmem.x", "in-memory");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGMEM",
            $$$"""{ "seq": {{{a1.Seq}}} }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage!.Payload.ShouldBe("in-memory");
    }

    // Go: TestJetStreamDirectGetBatch — backend type reported for memory stream
    [Fact]
    public async Task Stream_backend_type_is_memory_for_memory_storage()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
        {
            Name = "BACKENDMEM",
            Subjects = ["backendmem.>"],
            Storage = StorageType.Memory,
        });

        var backendType = await fx.GetStreamBackendTypeAsync("BACKENDMEM");
        backendType.ShouldBe("memory");
    }

    // Go: TestJetStreamDirectGetBatch — direct get after purge returns error
    [Fact]
    public async Task Direct_get_after_purge_returns_not_found()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGPURGE", "dgpurge.>");

        var a1 = await fx.PublishAndGetAckAsync("dgpurge.x", "data");
        _ = await fx.RequestLocalAsync("$JS.API.STREAM.PURGE.DGPURGE", "{}");

        var resp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.DGPURGE",
            $$$"""{ "seq": {{{a1.Seq}}} }""");
        resp.Error.ShouldNotBeNull();
        resp.DirectMessage.ShouldBeNull();
    }

    // Go: TestJetStreamDirectGetBatch — sequence in middle of stream
    [Fact]
    public async Task Direct_get_retrieves_middle_sequence_correctly()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("DGMID", "dgmid.>");

        for (var i = 1; i <= 10; i++)
            _ = await fx.PublishAndGetAckAsync("dgmid.x", $"msg-{i}");

        // Get sequence 5 (middle)
        var resp = await fx.RequestLocalAsync("$JS.API.DIRECT.GET.DGMID", """{ "seq": 5 }""");
        resp.Error.ShouldBeNull();
        resp.DirectMessage!.Sequence.ShouldBe(5UL);
        resp.DirectMessage.Payload.ShouldBe("msg-5");
    }

    // Go: TestJetStreamDirectGetBatch — stream msg get vs direct get both return same data
    [Fact]
    public async Task Stream_msg_get_and_direct_get_return_consistent_data()
    {
        await using var fx = await JetStreamApiFixture.StartWithStreamAsync("CONSISTENT", "consistent.>");

        var a1 = await fx.PublishAndGetAckAsync("consistent.x", "consistent-data");

        var directResp = await fx.RequestLocalAsync(
            "$JS.API.DIRECT.GET.CONSISTENT",
            $$$"""{ "seq": {{{a1.Seq}}} }""");

        var msgGetResp = await fx.RequestLocalAsync(
            "$JS.API.STREAM.MSG.GET.CONSISTENT",
            $$$"""{ "seq": {{{a1.Seq}}} }""");

        directResp.Error.ShouldBeNull();
        msgGetResp.Error.ShouldBeNull();

        directResp.DirectMessage!.Payload.ShouldBe("consistent-data");
        msgGetResp.StreamMessage!.Payload.ShouldBe("consistent-data");
        directResp.DirectMessage.Subject.ShouldBe(msgGetResp.StreamMessage.Subject);
    }
}
|
||||
@@ -0,0 +1,58 @@
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamFileStoreCompressionEncryptionParityTests
{
    // Round-trips a message through a compressed + encrypted file store,
    // checks the on-disk record carries the FSV1 envelope, then verifies that
    // reopening with a different key is rejected.
    [Fact]
    public async Task Compression_and_encryption_roundtrip_is_versioned_and_detects_wrong_key_corruption()
    {
        var storeDirectory = Path.Combine(Path.GetTempPath(), $"nats-js-fs-crypto-{Guid.NewGuid():N}");
        var storeOptions = new FileStoreOptions
        {
            Directory = storeDirectory,
            EnableCompression = true,
            EnableEncryption = true,
            EncryptionKey = [1, 2, 3, 4],
        };

        try
        {
            ulong appendedSeq;
            await using (var store = new FileStore(storeOptions))
            {
                appendedSeq = await store.AppendAsync("orders.created", Encoding.UTF8.GetBytes("payload"), default);

                // In-process read returns the decrypted, decompressed payload.
                var loaded = await store.LoadAsync(appendedSeq, default);
                loaded.ShouldNotBeNull();
                Encoding.UTF8.GetString(loaded.Payload.ToArray()).ShouldBe("payload");
            }

            // Block-based storage: read the .blk file to verify FSV1 envelope.
            var blockFiles = Directory.GetFiles(storeDirectory, "*.blk");
            blockFiles.Length.ShouldBeGreaterThan(0);

            // Read the first record from the block file and verify FSV1 magic in payload.
            var rawBlock = File.ReadAllBytes(blockFiles[0]);
            var firstRecord = MessageRecord.Decode(rawBlock.AsSpan(0, MessageRecord.MeasureRecord(rawBlock)));
            var persistedPayload = firstRecord.Payload.ToArray();
            persistedPayload.Take(4).SequenceEqual("FSV1"u8.ToArray()).ShouldBeTrue();

            // Reopening with the wrong key must be detected as corruption.
            Should.Throw<InvalidDataException>(() =>
            {
                _ = new FileStore(new FileStoreOptions
                {
                    Directory = storeDirectory,
                    EnableCompression = true,
                    EnableEncryption = true,
                    EncryptionKey = [9, 9, 9, 9],
                });
            });
        }
        finally
        {
            if (Directory.Exists(storeDirectory))
                Directory.Delete(storeDirectory, recursive: true);
        }
    }
}
|
||||
@@ -0,0 +1,33 @@
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamFileStoreCryptoCompressionTests
{
    // Append/load round-trip through a store with both compression and
    // encryption enabled must hand back the exact original bytes.
    [Fact]
    public async Task File_store_compression_and_encryption_roundtrip_preserves_payload()
    {
        var tempDir = Path.Combine(Path.GetTempPath(), $"natsdotnet-filestore-crypto-{Guid.NewGuid():N}");
        try
        {
            await using var fileStore = new FileStore(new FileStoreOptions
            {
                Directory = tempDir,
                EnableCompression = true,
                EnableEncryption = true,
                EncryptionKey = [1, 2, 3, 4],
            });

            // Highly compressible payload (512 repeated bytes).
            var originalPayload = Enumerable.Repeat((byte)'a', 512).ToArray();

            var storedSeq = await fileStore.AppendAsync("orders.created", originalPayload, default);
            var roundTripped = await fileStore.LoadAsync(storedSeq, default);

            roundTripped.ShouldNotBeNull();
            roundTripped.Payload.ToArray().ShouldBe(originalPayload);
        }
        finally
        {
            if (Directory.Exists(tempDir))
                Directory.Delete(tempDir, recursive: true);
        }
    }
}
|
||||
@@ -0,0 +1,40 @@
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using System.Text;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamFileStoreDurabilityParityTests
{
    // After writing enough messages to span many small blocks, a reopened
    // store must report the full message count and multiple blocks without
    // (per the test name) needing a full log scan.
    [Fact]
    public async Task File_store_recovers_block_index_map_after_restart_without_full_log_scan()
    {
        var storeDir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-durable-{Guid.NewGuid():N}");
        var storeOptions = new FileStoreOptions
        {
            Directory = storeDir,
            // Tiny blocks so 1000 appends produce many .blk files.
            BlockSizeBytes = 256,
        };

        try
        {
            await using (var writer = new FileStore(storeOptions))
            {
                var index = 0;
                while (index < 1000)
                {
                    await writer.AppendAsync("orders.created", Encoding.UTF8.GetBytes($"payload-{index}"), default);
                    index++;
                }
            }

            // Block-based storage: .blk files should be present on disk.
            Directory.GetFiles(storeDir, "*.blk").Length.ShouldBeGreaterThan(0);

            await using var recovered = new FileStore(storeOptions);
            var recoveredState = await recovered.GetStateAsync(default);
            recoveredState.Messages.ShouldBe((ulong)1000);
            recovered.BlockCount.ShouldBeGreaterThan(1);
        }
        finally
        {
            if (Directory.Exists(storeDir))
                Directory.Delete(storeDir, recursive: true);
        }
    }
}
|
||||
@@ -0,0 +1,35 @@
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamFileStoreInvariantTests
{
    // Removing the latest message must roll back Messages/FirstSeq/LastSeq and
    // drop the subject index entry for the removed subject.
    // NOTE(review): the method name mentions restart cycles, but this scenario
    // exercises a single store instance only — confirm intent with the author.
    [Fact]
    public async Task Filestore_recovery_preserves_sequence_subject_index_and_integrity_after_prune_and_restart_cycles()
    {
        var storeDir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-invariants-{Guid.NewGuid():N}");
        var storeOptions = new FileStoreOptions { Directory = storeDir };

        try
        {
            await using var fileStore = new FileStore(storeOptions);

            // Sequences are assigned monotonically starting at 1.
            var createdSeq = await fileStore.AppendAsync("orders.created", "1"u8.ToArray(), default);
            var updatedSeq = await fileStore.AppendAsync("orders.updated", "2"u8.ToArray(), default);
            createdSeq.ShouldBe((ulong)1);
            updatedSeq.ShouldBe((ulong)2);

            // Prune the tail message and re-check the state invariants.
            (await fileStore.RemoveAsync(updatedSeq, default)).ShouldBeTrue();
            var state = await fileStore.GetStateAsync(default);

            state.Messages.ShouldBe((ulong)1);
            state.LastSeq.ShouldBe((ulong)1);
            state.FirstSeq.ShouldBe((ulong)1);

            // The subject index must no longer resolve the removed subject.
            (await fileStore.LoadLastBySubjectAsync("orders.updated", default)).ShouldBeNull();
        }
        finally
        {
            if (Directory.Exists(storeDir))
                Directory.Delete(storeDir, recursive: true);
        }
    }
}
|
||||
@@ -0,0 +1,33 @@
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests;
|
||||
|
||||
public class JetStreamFileStoreLayoutParityTests
{
    // With a very small block size, 100 appends must span multiple blocks
    // while the store still reports the full message count.
    [Fact]
    public async Task File_store_uses_block_index_layout_with_ttl_prune_invariants()
    {
        var layoutDir = Path.Combine(Path.GetTempPath(), $"natsdotnet-filestore-{Guid.NewGuid():N}");
        try
        {
            await using var fileStore = new FileStore(new FileStoreOptions
            {
                Directory = layoutDir,
                BlockSizeBytes = 128,
                MaxAgeMs = 60_000,
            });

            for (var messageIndex = 0; messageIndex < 100; messageIndex++)
                await fileStore.AppendAsync($"orders.{messageIndex}", "x"u8.ToArray(), default);

            var storeState = await fileStore.GetStateAsync(default);
            storeState.Messages.ShouldBe((ulong)100);

            // 128-byte blocks cannot hold 100 records in a single block.
            fileStore.BlockCount.ShouldBeGreaterThan(1);
        }
        finally
        {
            if (Directory.Exists(layoutDir))
                Directory.Delete(layoutDir, recursive: true);
        }
    }
}
|
||||
@@ -0,0 +1,37 @@
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
public class JetStreamFileStoreRecoveryStrictParityTests
{
    // Write two messages, remove the second, close the store, then reopen it:
    // the recovered state must reflect the prune and still load message 1.
    [Fact]
    public async Task Filestore_recovery_preserves_sequence_subject_index_and_integrity_after_prune_and_restart_cycles()
    {
        var recoveryDir = Path.Combine(Path.GetTempPath(), $"nats-js-fs-recovery-{Guid.NewGuid():N}");
        var recoveryOptions = new FileStoreOptions { Directory = recoveryDir };

        try
        {
            // First lifetime: append two, prune the second, dispose (restart).
            await using (var original = new FileStore(recoveryOptions))
            {
                await original.AppendAsync("orders.created", "a"u8.ToArray(), default);
                await original.AppendAsync("orders.created", "b"u8.ToArray(), default);
                await original.RemoveAsync(2, default);
            }

            // Second lifetime: recovered state must match the pruned view.
            await using var recovered = new FileStore(recoveryOptions);
            var recoveredState = await recovered.GetStateAsync(default);
            recoveredState.Messages.ShouldBe((ulong)1);
            recoveredState.LastSeq.ShouldBe((ulong)1);

            var survivor = await recovered.LoadAsync(1, default);
            survivor.ShouldNotBeNull();
            survivor.Subject.ShouldBe("orders.created");
        }
        finally
        {
            if (Directory.Exists(recoveryDir))
                Directory.Delete(recoveryDir, recursive: true);
        }
    }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user