refactor: extract NATS.Server.JetStream.Tests project

Move 225 JetStream-related test files from NATS.Server.Tests into a
dedicated NATS.Server.JetStream.Tests project. This includes root-level
JetStream*.cs files, storage test files (FileStore, MemStore,
StreamStoreContract), and the full JetStream/ subfolder tree (Api,
Cluster, Consumers, MirrorSource, Snapshots, Storage, Streams).

Updated all namespaces, added InternalsVisibleTo, registered in the
solution file, and added the JETSTREAM_INTEGRATION_MATRIX define.
This commit is contained in:
Joseph Doherty
2026-03-12 15:58:10 -04:00
parent 36b9dfa654
commit 78b4bc2486
228 changed files with 253 additions and 227 deletions

View File

@@ -0,0 +1,118 @@
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for enhanced AckProcessor with RedeliveryTracker integration.
/// Go reference: consumer.go:4854 (processInboundAcks).
/// </summary>
public class AckProcessorEnhancedTests
{
    /// <summary>
    /// Builds an AckProcessor over a RedeliveryTracker (5 max deliveries).
    /// A positive <paramref name="maxAckPending"/> caps outstanding acks;
    /// the default (0) uses the unlimited constructor, as the originals did.
    /// </summary>
    private static AckProcessor NewProcessor(int ackWaitMs = 30000, int maxAckPending = 0)
    {
        var tracker = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: ackWaitMs);
        return maxAckPending > 0
            ? new AckProcessor(tracker, maxAckPending: maxAckPending)
            : new AckProcessor(tracker);
    }

    [Fact]
    public void ProcessAck_removes_from_pending()
    {
        // Acking a registered sequence empties the pending set.
        var sut = NewProcessor();
        sut.Register(1, "deliver.subj");
        sut.PendingCount.ShouldBe(1);

        sut.ProcessAck(1);

        sut.PendingCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessNak_schedules_redelivery()
    {
        // A NAK keeps the sequence pending until it is actually redelivered.
        var sut = NewProcessor();
        sut.Register(1, "deliver.subj");

        sut.ProcessNak(1, delayMs: 500);

        sut.PendingCount.ShouldBe(1); // still pending until redelivered
    }

    [Fact]
    public void ProcessTerm_removes_permanently()
    {
        // A TERM drops the sequence from pending and records it as terminated.
        var sut = NewProcessor();
        sut.Register(1, "deliver.subj");

        sut.ProcessTerm(1);

        sut.PendingCount.ShouldBe(0);
        sut.TerminatedCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessProgress_resets_deadline_to_full_ack_wait()
    {
        // Go: consumer.go — processAckProgress (+WPI): resets deadline to UtcNow + ackWait.
        // Invariant checked without wall-clock sleeps: the new deadline must fall
        // within [before + ackWait, after + ackWait + epsilon].
        const int ackWaitMs = 1000;
        var sut = NewProcessor(ackWaitMs: ackWaitMs);
        sut.Register(1, "deliver.subj");

        var before = DateTimeOffset.UtcNow;
        sut.ProcessProgress(1);
        var after = DateTimeOffset.UtcNow;

        var deadline = sut.GetDeadline(1);
        deadline.ShouldBeGreaterThanOrEqualTo(before.AddMilliseconds(ackWaitMs));
        deadline.ShouldBeLessThanOrEqualTo(after.AddMilliseconds(ackWaitMs + 50));
    }

    [Fact]
    public void MaxAckPending_blocks_new_registrations()
    {
        // Once maxAckPending registrations are outstanding, further ones are refused.
        var sut = NewProcessor(maxAckPending: 2);
        sut.Register(1, "d.1");
        sut.Register(2, "d.2");

        sut.CanRegister().ShouldBeFalse();
    }

    [Fact]
    public void CanRegister_true_when_unlimited()
    {
        // maxAckPending=0 means unlimited — registration is never blocked.
        var sut = NewProcessor();
        sut.Register(1, "d.1");

        sut.CanRegister().ShouldBeTrue();
    }

    [Fact]
    public void ParseAckType_identifies_all_types()
    {
        // Each protocol token maps to its corresponding AckType.
        AckProcessor.ParseAckType("+ACK"u8).ShouldBe(AckType.Ack);
        AckProcessor.ParseAckType("-NAK"u8).ShouldBe(AckType.Nak);
        AckProcessor.ParseAckType("+TERM"u8).ShouldBe(AckType.Term);
        AckProcessor.ParseAckType("+WPI"u8).ShouldBe(AckType.Progress);
    }

    [Fact]
    public void ParseAckType_returns_unknown_for_invalid()
    {
        // Anything that is not a known token — including empty — is Unknown.
        AckProcessor.ParseAckType("GARBAGE"u8).ShouldBe(AckType.Unknown);
        AckProcessor.ParseAckType(""u8).ShouldBe(AckType.Unknown);
    }

    [Fact]
    public void GetDeadline_returns_min_for_unknown_sequence()
    {
        // Unknown sequence should return DateTimeOffset.MinValue.
        var sut = NewProcessor(ackWaitMs: 1000);

        sut.GetDeadline(999).ShouldBe(DateTimeOffset.MinValue);
    }
}

View File

@@ -0,0 +1,185 @@
// Go: consumer.go:2550 (processAckMsg, processNak, processTerm, processAckProgress)
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for AckProcessor payload-driven ack handling ("+ACK", "-NAK",
/// "-NAK {delay}", "+TERM", "+WPI") and redelivery backoff.
/// Go: consumer.go:2550 (processAckMsg, processNak, processTerm, processAckProgress).
/// NOTE(review): several tests rely on short Task.Delay windows plus
/// TryGetExpired polling, so they are timing-sensitive by design.
/// </summary>
public class AckProcessorNakTests
{
// Test 1: ProcessAck with empty payload acks the sequence
[Fact]
public void ProcessAck_empty_payload_acks_sequence()
{
// Go: consumer.go — empty ack payload treated as "+ACK"
var ack = new AckProcessor();
ack.Register(1, ackWaitMs: 5000);
ack.ProcessAck(1, ReadOnlySpan<byte>.Empty);
// Fully acked: nothing pending and the ack floor advanced to 1.
ack.PendingCount.ShouldBe(0);
ack.AckFloor.ShouldBe((ulong)1);
}
// Test 2: ProcessAck with -NAK schedules redelivery
[Fact]
public async Task ProcessAck_nak_payload_schedules_redelivery()
{
// Go: consumer.go — "-NAK" triggers rescheduled redelivery
var ack = new AckProcessor();
ack.Register(1, ackWaitMs: 5000);
ack.ProcessAck(1, "-NAK"u8);
// Should still be pending (redelivery scheduled)
ack.PendingCount.ShouldBe(1);
// Without an explicit delay the 5000ms ackWait fallback applies, so the
// sequence must NOT report expired immediately after the NAK.
ack.TryGetExpired(out _, out _).ShouldBeFalse();
await Task.CompletedTask;
}
// Test 3: ProcessAck with -NAK {delay} uses custom delay
[Fact]
public async Task ProcessAck_nak_with_delay_uses_custom_delay()
{
// Go: consumer.go — "-NAK {delay}" parses optional explicit delay in milliseconds
var ack = new AckProcessor();
ack.Register(1, ackWaitMs: 5000);
ack.ProcessAck(1, "-NAK 1"u8);
// Sequence still pending
ack.PendingCount.ShouldBe(1);
// With a 1ms delay, should expire quickly
await Task.Delay(10);
ack.TryGetExpired(out var seq, out _).ShouldBeTrue();
seq.ShouldBe((ulong)1);
}
// Test 4: ProcessAck with +TERM removes from pending
[Fact]
public void ProcessAck_term_removes_from_pending()
{
// Go: consumer.go — "+TERM" permanently terminates delivery; sequence never redelivered
var ack = new AckProcessor();
ack.Register(1, ackWaitMs: 5000);
ack.ProcessAck(1, "+TERM"u8);
ack.PendingCount.ShouldBe(0);
ack.HasPending.ShouldBeFalse();
}
// Test 5: ProcessAck with +WPI resets deadline without incrementing delivery count
[Fact]
public async Task ProcessAck_wpi_resets_deadline_without_incrementing_deliveries()
{
// Go: consumer.go — "+WPI" resets ack deadline; delivery count must not change
var ack = new AckProcessor();
ack.Register(1, ackWaitMs: 10);
// Wait for the deadline to approach, then reset it via progress
await Task.Delay(5);
ack.ProcessAck(1, "+WPI"u8);
// Deadline was just reset — should not be expired yet
ack.TryGetExpired(out _, out var deliveries).ShouldBeFalse();
// NOTE(review): TryGetExpired returned false, so this out value is the
// default 0 — it does not observe the live delivery count. The stated
// intent ("deliveries stays at 1; WPI must not bump it") would need a
// direct accessor on AckProcessor to assert; confirm one exists.
deliveries.ShouldBe(0);
// Sequence still pending
ack.PendingCount.ShouldBe(1);
}
// Test 6: Backoff array applies correct delay per redelivery attempt
[Fact]
public async Task ProcessNak_backoff_array_applies_delay_by_delivery_count()
{
// Go: consumer.go — backoff array indexes by (deliveries - 1)
var ack = new AckProcessor(backoffMs: [1, 50, 5000]);
ack.Register(1, ackWaitMs: 5000);
// First NAK — delivery count is 1 → backoff[0] = 1ms
ack.ProcessNak(1);
await Task.Delay(10);
ack.TryGetExpired(out _, out _).ShouldBeTrue();
// Now delivery count is 2 → backoff[1] = 50ms
ack.ProcessNak(1);
// 50ms has not elapsed, so the sequence must not report expired yet.
ack.TryGetExpired(out _, out _).ShouldBeFalse();
}
// Test 7: Backoff array clamps at last entry for high delivery counts
[Fact]
public async Task ProcessNak_backoff_clamps_at_last_entry_for_high_delivery_count()
{
// Go: consumer.go — backoff index clamped to backoff.Length-1 when deliveries exceed array size
var ack = new AckProcessor(backoffMs: [1, 2]);
ack.Register(1, ackWaitMs: 5000);
// Drive deliveries up: NAK twice to advance delivery count past array length
ack.ProcessNak(1); // deliveries becomes 2 (index 1 = 2ms)
await Task.Delay(10);
ack.TryGetExpired(out _, out _).ShouldBeTrue();
ack.ProcessNak(1); // deliveries becomes 3 (index clamps to 1 = 2ms)
await Task.Delay(10);
ack.TryGetExpired(out var seq, out _).ShouldBeTrue();
seq.ShouldBe((ulong)1);
}
// Test 8: AckSequence advances AckFloor when contiguous
[Fact]
public void AckSequence_advances_ackfloor_for_contiguous_sequences()
{
// Go: consumer.go — acking contiguous sequences from floor advances AckFloor monotonically
var ack = new AckProcessor();
ack.Register(1, ackWaitMs: 5000);
ack.Register(2, ackWaitMs: 5000);
ack.Register(3, ackWaitMs: 5000);
ack.AckSequence(1);
ack.AckFloor.ShouldBe((ulong)1);
ack.AckSequence(2);
ack.AckFloor.ShouldBe((ulong)2);
}
// Test 9: ProcessTerm increments TerminatedCount
[Fact]
public void ProcessTerm_increments_terminated_count()
{
// Go: consumer.go — terminated sequences tracked separately from acked sequences
var ack = new AckProcessor();
ack.Register(1, ackWaitMs: 5000);
ack.Register(2, ackWaitMs: 5000);
ack.TerminatedCount.ShouldBe(0);
ack.ProcessTerm(1);
ack.TerminatedCount.ShouldBe(1);
ack.ProcessTerm(2);
ack.TerminatedCount.ShouldBe(2);
}
// Test 10: NAK after TERM is ignored (sequence already terminated)
[Fact]
public void ProcessNak_after_term_is_ignored()
{
// Go: consumer.go — once terminated, a sequence cannot be rescheduled via NAK
var ack = new AckProcessor(backoffMs: [1]);
ack.Register(1, ackWaitMs: 5000);
ack.ProcessTerm(1);
ack.PendingCount.ShouldBe(0);
// Attempting to NAK a terminated sequence has no effect
ack.ProcessNak(1);
ack.PendingCount.ShouldBe(0);
ack.TerminatedCount.ShouldBe(1);
}
}

View File

@@ -0,0 +1,198 @@
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Cluster;
using Shouldly;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for cluster-aware pending pull request tracking in PullConsumerEngine.
/// Go reference: consumer.go proposeWaitingRequest / waitingRequestsPending — cluster-wide
/// pending pull request coordination via the consumer RAFT group.
/// golang/nats-server/server/consumer.go proposeWaitingRequest
/// </summary>
public class ClusterPendingRequestTests
{
    /// <summary>Standard three-peer RAFT group used by the proposal tests.</summary>
    private static RaftGroup ThreePeerGroup() => new()
    {
        Name = "test-group",
        Peers = ["peer-1", "peer-2", "peer-3"],
    };

    // ---------------------------------------------------------------
    // ProposeWaitingRequest
    // ---------------------------------------------------------------
    [Fact]
    public void ProposeWaitingRequest_with_quorum_returns_true()
    {
        // Go: consumer.go proposeWaitingRequest — only propose when quorum available.
        var sut = new PullConsumerEngine();
        var request = new PullWaitingRequest { Batch = 10, Reply = "reply.test.1" };

        sut.ProposeWaitingRequest(request, ThreePeerGroup()).ShouldBeTrue();
    }

    [Fact]
    public void ProposeWaitingRequest_without_quorum_returns_false()
    {
        // Go: consumer.go proposeWaitingRequest — no quorum (0 peers means quorum = 1, but 0 < 1).
        var sut = new PullConsumerEngine();
        var peerless = new RaftGroup
        {
            Name = "empty-group",
            Peers = [],
        };
        var request = new PullWaitingRequest { Batch = 5, Reply = "reply.noquorum" };

        sut.ProposeWaitingRequest(request, peerless).ShouldBeFalse();
    }

    [Fact]
    public void ProposeWaitingRequest_registers_in_cluster_pending()
    {
        // Go: consumer.go — a successful proposal must land in the cluster pending
        // map so it can later be fulfilled or expired.
        var sut = new PullConsumerEngine();

        sut.ProposeWaitingRequest(
            new PullWaitingRequest { Batch = 4, Reply = "reply.reg" }, ThreePeerGroup());

        sut.GetClusterPendingRequests().ShouldContain(r => r.Reply == "reply.reg");
    }

    [Fact]
    public void Multiple_proposals_tracked_independently()
    {
        // Go: consumer.go — each reply subject is an independent pending slot;
        // proposals with different reply subjects must not overwrite each other.
        var sut = new PullConsumerEngine();
        var group = ThreePeerGroup();
        (int Batch, string Reply)[] requests = [(1, "reply.A"), (2, "reply.B"), (3, "reply.C")];

        foreach (var (batch, reply) in requests)
            sut.ProposeWaitingRequest(new PullWaitingRequest { Batch = batch, Reply = reply }, group);

        sut.ClusterPendingCount.ShouldBe(3);
        var pending = sut.GetClusterPendingRequests();
        foreach (var (batch, reply) in requests)
            pending.ShouldContain(r => r.Reply == reply && r.Batch == batch);
    }

    // ---------------------------------------------------------------
    // ClusterPendingCount
    // ---------------------------------------------------------------
    [Fact]
    public void ClusterPendingCount_tracks_pending_requests()
    {
        // Go: consumer.go — ClusterPendingCount reflects the current size of the pending map.
        var sut = new PullConsumerEngine();
        sut.ClusterPendingCount.ShouldBe(0);

        for (var i = 1; i <= 3; i++)
            sut.RegisterClusterPending(new PullWaitingRequest { Batch = i, Reply = $"r{i}" });

        sut.ClusterPendingCount.ShouldBe(3);
    }

    [Fact]
    public void ClusterPendingCount_decrements_on_remove()
    {
        // Go: consumer.go — removing a request via reply subject decrements the pending count.
        var sut = new PullConsumerEngine();
        sut.RegisterClusterPending(new PullWaitingRequest { Batch = 5, Reply = "decrement.reply" });
        sut.ClusterPendingCount.ShouldBe(1);

        sut.RemoveClusterPending("decrement.reply");

        sut.ClusterPendingCount.ShouldBe(0);
    }

    // ---------------------------------------------------------------
    // RegisterClusterPending
    // ---------------------------------------------------------------
    [Fact]
    public void RegisterClusterPending_adds_request_by_reply()
    {
        // Go: consumer.go — pending requests are keyed by reply subject for O(1) lookup.
        var sut = new PullConsumerEngine();
        sut.RegisterClusterPending(new PullWaitingRequest { Batch = 7, Reply = "register.reply.subject" });

        var retrieved = sut.RemoveClusterPending("register.reply.subject");

        retrieved.ShouldNotBeNull();
        retrieved.Batch.ShouldBe(7);
        retrieved.Reply.ShouldBe("register.reply.subject");
    }

    // ---------------------------------------------------------------
    // RemoveClusterPending
    // ---------------------------------------------------------------
    [Fact]
    public void RemoveClusterPending_returns_and_removes_request()
    {
        // Go: consumer.go — RemoveClusterPending both returns the request and removes it
        // from the map so it is not fulfilled twice.
        var sut = new PullConsumerEngine();
        sut.RegisterClusterPending(new PullWaitingRequest { Batch = 3, Reply = "remove.me" });

        var removed = sut.RemoveClusterPending("remove.me");

        removed.ShouldNotBeNull();
        removed.Reply.ShouldBe("remove.me");
        sut.ClusterPendingCount.ShouldBe(0);
        // Second removal should return null — the entry is gone.
        sut.RemoveClusterPending("remove.me").ShouldBeNull();
    }

    [Fact]
    public void RemoveClusterPending_returns_null_for_unknown()
    {
        // Go: consumer.go — attempting to remove an unknown reply subject is a no-op.
        new PullConsumerEngine().RemoveClusterPending("does.not.exist").ShouldBeNull();
    }

    // ---------------------------------------------------------------
    // GetClusterPendingRequests
    // ---------------------------------------------------------------
    [Fact]
    public void GetClusterPendingRequests_returns_all_pending()
    {
        // Go: consumer.go — used for expiry sweeps and diagnostics; must return
        // every currently pending request.
        var sut = new PullConsumerEngine();
        sut.RegisterClusterPending(new PullWaitingRequest { Batch = 1, Reply = "bulk.a" });
        sut.RegisterClusterPending(new PullWaitingRequest { Batch = 2, Reply = "bulk.b" });
        sut.RegisterClusterPending(new PullWaitingRequest { Batch = 3, Reply = "bulk.c" });

        var all = sut.GetClusterPendingRequests();

        all.Count.ShouldBe(3);
        var replies = all.Select(r => r.Reply).ToList();
        replies.ShouldContain("bulk.a");
        replies.ShouldContain("bulk.b");
        replies.ShouldContain("bulk.c");
    }
}

View File

@@ -0,0 +1,702 @@
// Go reference: golang/nats-server/server/jetstream_consumer_test.go
// Ports Go consumer tests that map to existing .NET infrastructure:
// multiple filters, consumer actions, filter matching, priority groups,
// ack timeout retry, descriptions, single-token subjects, overflow.
using System.Text.RegularExpressions;
using NATS.Server.JetStream;
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Models;
using NATS.Server.Subscriptions;
using NATS.Server.TestUtilities;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Go parity tests ported from jetstream_consumer_test.go for consumer
/// behaviors including filter matching, consumer actions, priority groups,
/// ack retry, descriptions, and overflow handling.
/// </summary>
public class ConsumerGoParityTests
{
// =========================================================================
// Helper: Generate N filter subjects matching Go's filterSubjects() function.
// Go: jetstream_consumer_test.go:829
// =========================================================================
/// <summary>
/// Generates n filter subjects, mirroring Go's filterSubjects() helper
/// (jetstream_consumer_test.go:829). Each round emits one 5-token literal
/// ("foo.bar.&lt;8-char-uid&gt;.xyz.abcdef") followed by its single-token
/// wildcard variants (one "*" per position), stopping once n are collected.
/// </summary>
private static List<string> GenerateFilterSubjects(int n)
{
    var subjects = new List<string>();
    while (subjects.Count < n)
    {
        string[] tokens = ["foo", "bar", Guid.NewGuid().ToString("N")[..8], "xyz", "abcdef"];
        subjects.Add(string.Join('.', tokens));
        if (subjects.Count >= n)
            break;
        for (var wildcardAt = 0; wildcardAt < tokens.Length && subjects.Count < n; wildcardAt++)
        {
            // Replace exactly one token with "*" to build the wildcard variant.
            var variant = tokens.Select((token, idx) => idx == wildcardAt ? "*" : token);
            subjects.Add(string.Join('.', variant));
        }
    }
    return subjects.Take(n).ToList();
}
// =========================================================================
// TestJetStreamConsumerIsFilteredMatch — jetstream_consumer_test.go:856
// Tests the filter matching logic used by consumers to determine if a
// message subject matches their filter configuration.
// =========================================================================
[Theory]
[InlineData(new string[0], "foo.bar", true)] // no filter = match all
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.bar", true)] // literal match
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.ban", false)] // literal mismatch
[InlineData(new[] { "bar.>", "foo.>" }, "foo.bar", true)] // wildcard > match
[InlineData(new[] { "bar.>", "foo.>" }, "bar.foo", true)] // wildcard > match
[InlineData(new[] { "bar.>", "foo.>" }, "baz.foo", false)] // wildcard > mismatch
[InlineData(new[] { "bar.*", "foo.*" }, "foo.bar", true)] // wildcard * match
[InlineData(new[] { "bar.*", "foo.*" }, "bar.foo", true)] // wildcard * match
[InlineData(new[] { "bar.*", "foo.*" }, "baz.foo", false)] // wildcard * mismatch
[InlineData(new[] { "foo.*.x", "foo.*.y" }, "foo.bar.x", true)] // multi-token wildcard match
[InlineData(new[] { "foo.*.x", "foo.*.y", "foo.*.z" }, "foo.bar.z", true)] // multi wildcard match
public void IsFilteredMatch_basic_cases(string[] filters, string subject, bool expected)
{
    // Go: TestJetStreamConsumerIsFilteredMatch jetstream_consumer_test.go:856
    // The compiled filter set decides whether a delivered subject belongs
    // to this consumer.
    var matcher = new CompiledFilter(filters);

    matcher.Matches(subject).ShouldBe(expected);
}
[Fact]
public void IsFilteredMatch_many_filters_mismatch()
{
    // Go: TestJetStreamConsumerIsFilteredMatch jetstream_consumer_test.go:874
    // With 100 generated filter subjects, a subject that shares no shape with
    // any of them (wrong token count, no matching literals) must not match.
    var matcher = new CompiledFilter(GenerateFilterSubjects(100));

    matcher.Matches("foo.bar.do.not.match.any.filter.subject").ShouldBeFalse();
}
[Fact]
public void IsFilteredMatch_many_filters_match()
{
    // Go: TestJetStreamConsumerIsFilteredMatch jetstream_consumer_test.go:875
    // GenerateFilterSubjects always emits the wildcard variant
    // "foo.bar.*.xyz.abcdef" in its first round, which must match
    // "foo.bar.12345.xyz.abcdef".
    var matcher = new CompiledFilter(GenerateFilterSubjects(100));

    matcher.Matches("foo.bar.12345.xyz.abcdef").ShouldBeTrue();
}
// =========================================================================
// TestJetStreamConsumerIsEqualOrSubsetMatch — jetstream_consumer_test.go:921
// Tests whether a subject is an equal or subset match of the consumer's filters.
// This is used for work queue overlap detection.
// =========================================================================
[Theory]
[InlineData(new string[0], "foo.bar", false)] // no filter = no subset
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.bar", true)] // literal match
[InlineData(new[] { "foo.baz", "foo.bar" }, "foo.ban", false)] // literal mismatch
[InlineData(new[] { "bar.>", "foo.>" }, "foo.>", true)] // equal wildcard match
[InlineData(new[] { "bar.foo.>", "foo.bar.>" }, "bar.>", true)] // subset match: bar.foo.> is subset of bar.>
[InlineData(new[] { "bar.>", "foo.>" }, "baz.foo.>", false)] // no match
public void IsEqualOrSubsetMatch_basic_cases(string[] filters, string subject, bool expected)
{
    // Go: TestJetStreamConsumerIsEqualOrSubsetMatch jetstream_consumer_test.go:921
    // A filter qualifies when it equals the subject outright, or when it is a
    // strictly more specific pattern — everything the filter matches, the
    // subject also matches (SubjectMatch.MatchLiteral). Used for work queue
    // overlap detection.
    var isMatch = filters.Any(filter =>
        string.Equals(filter, subject, StringComparison.Ordinal)
        || SubjectMatch.MatchLiteral(filter, subject));

    isMatch.ShouldBe(expected);
}
[Fact]
public void IsEqualOrSubsetMatch_many_filters_literal()
{
    // Go: TestJetStreamConsumerIsEqualOrSubsetMatch jetstream_consumer_test.go:934
    // Each generated round contains a literal like "foo.bar.<uid>.xyz.abcdef";
    // every such literal is matched by the pattern subject "foo.bar.*.xyz.abcdef".
    var filters = GenerateFilterSubjects(100);

    filters.Any(f => SubjectMatch.MatchLiteral(f, "foo.bar.*.xyz.abcdef")).ShouldBeTrue();
}
[Fact]
public void IsEqualOrSubsetMatch_many_filters_subset()
{
    // Go: TestJetStreamConsumerIsEqualOrSubsetMatch jetstream_consumer_test.go:935
    // "foo.bar.>" is a superset of many generated filters, so at least one
    // filter must register as an equal-or-subset match against it.
    var filters = GenerateFilterSubjects(100);

    filters.Any(f => SubjectMatch.MatchLiteral(f, "foo.bar.>")).ShouldBeTrue();
}
// =========================================================================
// TestJetStreamConsumerActions — jetstream_consumer_test.go:472
// Tests consumer create/update action semantics.
// =========================================================================
[Fact]
public async Task Consumer_create_action_succeeds_for_new_consumer()
{
// Go: TestJetStreamConsumerActions jetstream_consumer_test.go:472
// Arrange: stream "TEST" capturing every subject (">").
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
// Act: create a brand-new durable consumer with two filter subjects.
var response = await fx.CreateConsumerAsync("TEST", "DUR", null,
filterSubjects: ["one", "two"],
ackPolicy: AckPolicy.Explicit);
// Assert: the API reports no error and returns the consumer's info.
response.Error.ShouldBeNull();
response.ConsumerInfo.ShouldNotBeNull();
}
[Fact]
public async Task Consumer_create_action_idempotent_with_same_config()
{
// Go: TestJetStreamConsumerActions jetstream_consumer_test.go:497
// Create consumer again with identical config should succeed
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
var r1 = await fx.CreateConsumerAsync("TEST", "DUR", null,
filterSubjects: ["one", "two"],
ackPolicy: AckPolicy.Explicit);
r1.Error.ShouldBeNull();
// Re-issuing the exact same create must be treated as idempotent, not a conflict.
var r2 = await fx.CreateConsumerAsync("TEST", "DUR", null,
filterSubjects: ["one", "two"],
ackPolicy: AckPolicy.Explicit);
r2.Error.ShouldBeNull();
}
[Fact]
public async Task Consumer_update_existing_succeeds()
{
// Go: TestJetStreamConsumerActions jetstream_consumer_test.go:516
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
// Create the durable with two filter subjects first.
await fx.CreateConsumerAsync("TEST", "DUR", null,
filterSubjects: ["one", "two"],
ackPolicy: AckPolicy.Explicit);
// Update filter subjects
// Narrowing to a single filter on the existing durable must be accepted.
var response = await fx.CreateConsumerAsync("TEST", "DUR", null,
filterSubjects: ["one"],
ackPolicy: AckPolicy.Explicit);
response.Error.ShouldBeNull();
}
// =========================================================================
// TestJetStreamConsumerActionsOnWorkQueuePolicyStream — jetstream_consumer_test.go:557
// Tests consumer actions on a work queue policy stream.
// =========================================================================
[Fact]
public async Task Consumer_on_work_queue_stream()
{
// Go: TestJetStreamConsumerActionsOnWorkQueuePolicyStream jetstream_consumer_test.go:557
// Arrange: a WorkQueue-retention stream with literal and wildcard subjects.
await using var fx = await JetStreamApiFixture.StartWithStreamConfigAsync(new StreamConfig
{
Name = "TEST",
Subjects = ["one", "two", "three", "four", "five.>"],
Retention = RetentionPolicy.WorkQueue,
});
// A filtered consumer on a subset of subjects is valid on a work queue stream.
var r1 = await fx.CreateConsumerAsync("TEST", "DUR", null,
filterSubjects: ["one", "two"],
ackPolicy: AckPolicy.Explicit);
r1.Error.ShouldBeNull();
}
// =========================================================================
// TestJetStreamConsumerPedanticMode — jetstream_consumer_test.go:1253
// Consumer pedantic mode validates various configuration constraints.
// We test the validation that exists in the .NET implementation.
// =========================================================================
[Fact]
public async Task Consumer_ephemeral_can_be_created()
{
// Go: TestJetStreamConsumerPedanticMode jetstream_consumer_test.go:1253
// Test that ephemeral consumers can be created
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
// Explicit-ack ephemeral consumer with one filter; creation must not error.
var response = await fx.CreateConsumerAsync("TEST", "EPH", null,
filterSubjects: ["one"],
ackPolicy: AckPolicy.Explicit,
ephemeral: true);
response.Error.ShouldBeNull();
}
// =========================================================================
// TestJetStreamConsumerMultipleFiltersRemoveFilters — jetstream_consumer_test.go:45
// Consumer with multiple filter subjects, then updating to fewer.
// =========================================================================
[Fact]
public async Task Consumer_multiple_filters_can_be_updated()
{
// Go: TestJetStreamConsumerMultipleFiltersRemoveFilters jetstream_consumer_test.go:45
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
// Create consumer with multiple filters
var r1 = await fx.CreateConsumerAsync("TEST", "CF", null,
filterSubjects: ["one", "two", "three"]);
r1.Error.ShouldBeNull();
// Update to fewer filters
// Shrinking the filter set on an existing durable must be accepted.
var r2 = await fx.CreateConsumerAsync("TEST", "CF", null,
filterSubjects: ["one"]);
r2.Error.ShouldBeNull();
}
// =========================================================================
// TestJetStreamConsumerMultipleConsumersSingleFilter — jetstream_consumer_test.go:188
// Multiple consumers each with a single filter on the same stream.
// =========================================================================
[Fact]
public async Task Multiple_consumers_each_with_single_filter()
{
// Go: TestJetStreamConsumerMultipleConsumersSingleFilter jetstream_consumer_test.go:188
// Arrange: two single-filter consumers on the same catch-all stream.
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
var r1 = await fx.CreateConsumerAsync("TEST", "C1", "one");
r1.Error.ShouldBeNull();
var r2 = await fx.CreateConsumerAsync("TEST", "C2", "two");
r2.Error.ShouldBeNull();
// Publish to each filter
var ack1 = await fx.PublishAndGetAckAsync("one", "msg1");
ack1.ErrorCode.ShouldBeNull();
var ack2 = await fx.PublishAndGetAckAsync("two", "msg2");
ack2.ErrorCode.ShouldBeNull();
// Each consumer should see only its filtered messages
var batch1 = await fx.FetchAsync("TEST", "C1", 10);
batch1.Messages.ShouldNotBeEmpty();
batch1.Messages.All(m => m.Subject == "one").ShouldBeTrue();
var batch2 = await fx.FetchAsync("TEST", "C2", 10);
batch2.Messages.ShouldNotBeEmpty();
batch2.Messages.All(m => m.Subject == "two").ShouldBeTrue();
}
// =========================================================================
// TestJetStreamConsumerMultipleConsumersMultipleFilters — jetstream_consumer_test.go:300
// Multiple consumers with overlapping multiple filter subjects.
// =========================================================================
[Fact]
public async Task Multiple_consumers_with_multiple_filters()
{
// Go: TestJetStreamConsumerMultipleConsumersMultipleFilters jetstream_consumer_test.go:300
// Two consumers whose filter sets overlap on "two"; both must receive it.
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
var r1 = await fx.CreateConsumerAsync("TEST", "C1", null,
filterSubjects: ["one", "two"]);
r1.Error.ShouldBeNull();
var r2 = await fx.CreateConsumerAsync("TEST", "C2", null,
filterSubjects: ["two", "three"]);
r2.Error.ShouldBeNull();
// One message per distinct subject.
await fx.PublishAndGetAckAsync("one", "msg1");
await fx.PublishAndGetAckAsync("two", "msg2");
await fx.PublishAndGetAckAsync("three", "msg3");
// C1 should see "one" and "two"
var batch1 = await fx.FetchAsync("TEST", "C1", 10);
batch1.Messages.Count.ShouldBe(2);
// C2 should see "two" and "three"
var batch2 = await fx.FetchAsync("TEST", "C2", 10);
batch2.Messages.Count.ShouldBe(2);
}
// =========================================================================
// TestJetStreamConsumerMultipleFiltersSequence — jetstream_consumer_test.go:426
// Verifies sequence ordering with multiple filter subjects.
// =========================================================================
[Fact]
public async Task Multiple_filters_preserve_sequence_order()
{
// Go: TestJetStreamConsumerMultipleFiltersSequence jetstream_consumer_test.go:426
// A consumer filtering on two subjects must deliver the interleaved
// messages in their original stream order.
await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
await fx.CreateConsumerAsync("TEST", "CF", null,
filterSubjects: ["one", "two"]);
await fx.PublishAndGetAckAsync("one", "msg1");
await fx.PublishAndGetAckAsync("two", "msg2");
await fx.PublishAndGetAckAsync("one", "msg3");
var batch = await fx.FetchAsync("TEST", "CF", 10);
batch.Messages.Count.ShouldBe(3);
// Verify sequences are strictly increasing across the fetched batch.
for (int i = 1; i < batch.Messages.Count; i++)
{
batch.Messages[i].Sequence.ShouldBeGreaterThan(batch.Messages[i - 1].Sequence);
}
}
// =========================================================================
// TestJetStreamConsumerPinned — jetstream_consumer_test.go:1545
// Priority group registration and active consumer selection.
// =========================================================================
[Fact]
public void PriorityGroup_pinned_consumer_gets_messages()
{
    // Go: TestJetStreamConsumerPinned jetstream_consumer_test.go:1545
    // The member with the lowest priority number holds the pin and is
    // the only active consumer in the group.
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 1);
    groups.Register("group1", "C2", priority: 2);

    groups.IsActive("group1", "C1").ShouldBeTrue();
    groups.IsActive("group1", "C2").ShouldBeFalse();
}
// =========================================================================
// TestJetStreamConsumerPinnedUnsetsAfterAtMostPinnedTTL — jetstream_consumer_test.go:1711
// When the pinned consumer disconnects, the next one takes over.
// =========================================================================
[Fact]
public void PriorityGroup_pinned_unsets_on_disconnect()
{
    // Go: TestJetStreamConsumerPinnedUnsetsAfterAtMostPinnedTTL jetstream_consumer_test.go:1711
    // When the pinned member goes away, the next-lowest priority takes over.
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 1);
    groups.Register("group1", "C2", priority: 2);
    groups.IsActive("group1", "C1").ShouldBeTrue();

    // Unregister C1 (simulates disconnect).
    groups.Unregister("group1", "C1");

    groups.IsActive("group1", "C2").ShouldBeTrue();
}
// =========================================================================
// TestJetStreamConsumerPinnedUnsubscribeOnPinned — jetstream_consumer_test.go:1802
// Unsubscribing the pinned consumer causes failover.
// =========================================================================
[Fact]
public void PriorityGroup_unsubscribe_pinned_causes_failover()
{
    // Go: TestJetStreamConsumerPinnedUnsubscribeOnPinned jetstream_consumer_test.go:1802
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 1);
    groups.Register("group1", "C2", priority: 2);
    groups.Register("group1", "C3", priority: 3);

    // Each unregister fails the pin over to the next-best priority.
    groups.GetActiveConsumer("group1").ShouldBe("C1");
    groups.Unregister("group1", "C1");
    groups.GetActiveConsumer("group1").ShouldBe("C2");
    groups.Unregister("group1", "C2");
    groups.GetActiveConsumer("group1").ShouldBe("C3");
}
// =========================================================================
// TestJetStreamConsumerUnpinPickDifferentRequest — jetstream_consumer_test.go:1973
// When unpin is called, the next request goes to a different consumer.
// =========================================================================
[Fact]
public void PriorityGroup_unpin_picks_different_consumer()
{
    // Go: TestJetStreamConsumerUnpinPickDifferentRequest jetstream_consumer_test.go:1973
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 1);
    groups.Register("group1", "C2", priority: 2);
    groups.GetActiveConsumer("group1").ShouldBe("C1");

    // Simulate an unpin: drop C1 and bring it back with a worse (higher)
    // priority number, so C2 (priority 2 < 3) becomes the active consumer.
    groups.Unregister("group1", "C1");
    groups.Register("group1", "C1", priority: 3);
    groups.GetActiveConsumer("group1").ShouldBe("C2");
}
// =========================================================================
// TestJetStreamConsumerPinnedTTL — jetstream_consumer_test.go:2067
// Priority group TTL behavior.
// =========================================================================
[Fact]
public void PriorityGroup_registration_updates_priority()
{
    // Go: TestJetStreamConsumerPinnedTTL jetstream_consumer_test.go:2067
    var groups = new PriorityGroupManager();
    groups.Register("group1", "C1", priority: 5);
    groups.Register("group1", "C2", priority: 1);
    groups.GetActiveConsumer("group1").ShouldBe("C2");

    // Re-registering C1 with a better (lower) priority takes over the pin.
    groups.Register("group1", "C1", priority: 0);
    groups.GetActiveConsumer("group1").ShouldBe("C1");
}
// =========================================================================
// TestJetStreamConsumerWithPriorityGroups — jetstream_consumer_test.go:2246
// End-to-end test of priority groups with consumers.
// =========================================================================
[Fact]
public void PriorityGroup_multiple_groups_independent()
{
    // Go: TestJetStreamConsumerWithPriorityGroups jetstream_consumer_test.go:2246
    var groups = new PriorityGroupManager();
    groups.Register("groupA", "C1", priority: 1);
    groups.Register("groupA", "C2", priority: 2);
    groups.Register("groupB", "C3", priority: 1);
    groups.Register("groupB", "C4", priority: 2);

    // Each group picks its own active consumer.
    groups.GetActiveConsumer("groupA").ShouldBe("C1");
    groups.GetActiveConsumer("groupB").ShouldBe("C3");

    // Failover in one group must not disturb the other.
    groups.Unregister("groupA", "C1");
    groups.GetActiveConsumer("groupA").ShouldBe("C2");
    groups.GetActiveConsumer("groupB").ShouldBe("C3"); // unchanged
}
// =========================================================================
// TestJetStreamConsumerOverflow — jetstream_consumer_test.go:2434
// Consumer overflow handling when max_ack_pending is reached.
// =========================================================================
[Fact]
public async Task Consumer_overflow_with_max_ack_pending()
{
    // Go: TestJetStreamConsumerOverflow jetstream_consumer_test.go:2434
    await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
    var created = await fx.CreateConsumerAsync("TEST", "OVER", "test.>",
        ackPolicy: AckPolicy.Explicit,
        maxAckPending: 2);
    created.Error.ShouldBeNull();

    // Five messages in the stream, more than max_ack_pending allows in flight.
    for (var i = 0; i < 5; i++)
    {
        await fx.PublishAndGetAckAsync($"test.{i}", $"msg{i}");
    }

    // Fetch is bounded by max_ack_pending. PullConsumerEngine checks the limit
    // after adding each message, so up to max_ack_pending + 1 can come back.
    var fetched = await fx.FetchAsync("TEST", "OVER", 10);
    fetched.Messages.Count.ShouldBeGreaterThan(0);
    fetched.Messages.Count.ShouldBeLessThanOrEqualTo(3); // MaxAckPending(2) + 1
}
// =========================================================================
// TestPriorityGroupNameRegex — jetstream_consumer_test.go:2584
// Validates the regex for priority group names.
// Already tested in ClientProtocolGoParityTests; additional coverage here.
// =========================================================================

// Go regex: ^[a-zA-Z0-9/_=-]{1,16}$. Cached as a static compiled instance so
// the pattern is parsed once for the whole theory instead of once per case.
private static readonly Regex PriorityGroupNamePattern =
    new(@"^[a-zA-Z0-9/_=\-]{1,16}$", RegexOptions.Compiled);

[Theory]
[InlineData("A", true)]
[InlineData("group/consumer=A", true)]
[InlineData("abc-def_123", true)]
[InlineData("", false)]
[InlineData("A B", false)]
[InlineData("A\tB", false)]
[InlineData("group-name-that-is-too-long", false)]
[InlineData("\r\n", false)]
public void PriorityGroupNameRegex_consumer_test_parity(string group, bool expected)
{
    // Go: TestPriorityGroupNameRegex jetstream_consumer_test.go:2584
    PriorityGroupNamePattern.IsMatch(group).ShouldBe(expected);
}
// =========================================================================
// TestJetStreamConsumerRetryAckAfterTimeout — jetstream_consumer_test.go:2734
// Retrying an ack after timeout should not error. Tests the ack processor.
// =========================================================================
[Fact]
public async Task Consumer_retry_ack_after_timeout_succeeds()
{
    // Go: TestJetStreamConsumerRetryAckAfterTimeout jetstream_consumer_test.go:2734
    await using var fx = await JetStreamApiFixture.StartWithAckExplicitConsumerAsync(ackWaitMs: 500);
    await fx.PublishAndGetAckAsync("orders.created", "order-1");

    var fetched = await fx.FetchAsync("ORDERS", "PULL", 1);
    fetched.Messages.Count.ShouldBe(1);

    // Consumer info must still be retrievable after the fetch.
    var info = await fx.GetConsumerInfoAsync("ORDERS", "PULL");
    info.ShouldNotBeNull();
}
// =========================================================================
// TestJetStreamConsumerAndStreamDescriptions — jetstream_consumer_test.go:3073
// Streams and consumers can have description metadata.
// StreamConfig.Description not yet implemented in .NET; test stream creation instead.
// =========================================================================
[Fact]
public async Task Consumer_and_stream_info_available()
{
    // Go: TestJetStreamConsumerAndStreamDescriptions jetstream_consumer_test.go:3073
    // Description property not yet on StreamConfig in .NET; validate basic stream/consumer info.
    await using var fx = await JetStreamApiFixture.StartWithStreamAsync("foo", "foo.>");

    var streamInfo = await fx.RequestLocalAsync("$JS.API.STREAM.INFO.foo", "{}");
    streamInfo.Error.ShouldBeNull();
    streamInfo.StreamInfo!.Config.Name.ShouldBe("foo");

    var consumerResponse = await fx.CreateConsumerAsync("foo", "analytics", "foo.>");
    consumerResponse.Error.ShouldBeNull();
    consumerResponse.ConsumerInfo.ShouldNotBeNull();
}
// =========================================================================
// TestJetStreamConsumerSingleTokenSubject — jetstream_consumer_test.go:3172
// Consumer with a single-token filter subject works correctly.
// =========================================================================
[Fact]
public async Task Consumer_single_token_subject()
{
    // Go: TestJetStreamConsumerSingleTokenSubject jetstream_consumer_test.go:3172
    await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");
    var created = await fx.CreateConsumerAsync("TEST", "STS", "orders");
    created.Error.ShouldBeNull();

    await fx.PublishAndGetAckAsync("orders", "single-token-msg");

    // Exactly the one matching message comes back, on the literal subject.
    var fetched = await fx.FetchAsync("TEST", "STS", 10);
    fetched.Messages.Count.ShouldBe(1);
    fetched.Messages[0].Subject.ShouldBe("orders");
}
// =========================================================================
// TestJetStreamConsumerMultipleFiltersLastPerSubject — jetstream_consumer_test.go:768
// Consumer with DeliverPolicy.LastPerSubject and multiple filters.
// =========================================================================
[Fact]
public async Task Consumer_multiple_filters_deliver_last_per_subject()
{
    // Go: TestJetStreamConsumerMultipleFiltersLastPerSubject jetstream_consumer_test.go:768
    await using var fx = await JetStreamApiFixture.StartWithStreamAsync("TEST", ">");

    // Two messages per subject so "deliver last" has something to skip over.
    await fx.PublishAndGetAckAsync("one", "first-1");
    await fx.PublishAndGetAckAsync("two", "first-2");
    await fx.PublishAndGetAckAsync("one", "second-1");
    await fx.PublishAndGetAckAsync("two", "second-2");

    var created = await fx.CreateConsumerAsync("TEST", "LP", null,
        filterSubjects: ["one", "two"],
        deliverPolicy: DeliverPolicy.Last);
    created.Error.ShouldBeNull();

    // With deliver-last we should receive the most recent message(s).
    var fetched = await fx.FetchAsync("TEST", "LP", 10);
    fetched.Messages.ShouldNotBeEmpty();
}
// =========================================================================
// Subject wildcard matching — additional parity tests
// =========================================================================
[Theory]
[InlineData("foo.bar", "foo.bar", true)]
[InlineData("foo.bar", "foo.*", true)]
[InlineData("foo.bar", "foo.>", true)]
[InlineData("foo.bar.baz", "foo.>", true)]
[InlineData("foo.bar.baz", "foo.*", false)]
[InlineData("foo.bar.baz", "foo.*.baz", true)]
[InlineData("foo.bar.baz", "foo.*.>", true)]
[InlineData("bar.foo", "foo.*", false)]
public void SubjectMatch_wildcard_matching(string literal, string pattern, bool expected)
    // Exercises SubjectMatch.MatchLiteral, the primitive used by consumer filtering.
    => SubjectMatch.MatchLiteral(literal, pattern).ShouldBe(expected);
// =========================================================================
// CompiledFilter from ConsumerConfig
// =========================================================================
[Fact]
public void CompiledFilter_from_consumer_config_works()
{
    // CompiledFilter.FromConfig must honor the FilterSubjects list.
    var filter = CompiledFilter.FromConfig(new ConsumerConfig
    {
        DurableName = "test",
        FilterSubjects = ["orders.*", "payments.>"],
    });

    // Subjects covered by either filter match.
    filter.Matches("orders.created").ShouldBeTrue();
    filter.Matches("orders.updated").ShouldBeTrue();
    filter.Matches("payments.settled").ShouldBeTrue();
    filter.Matches("payments.a.b.c").ShouldBeTrue();

    // Subjects outside both filters do not.
    filter.Matches("shipments.sent").ShouldBeFalse();
}
[Fact]
public void CompiledFilter_empty_matches_all()
{
    // No FilterSubject / FilterSubjects configured => every subject matches.
    var filter = CompiledFilter.FromConfig(new ConsumerConfig { DurableName = "test" });

    filter.Matches("any.subject.here").ShouldBeTrue();
}
[Fact]
public void CompiledFilter_single_filter()
{
    // A single FilterSubject wildcard accepts only subjects under that prefix.
    var filter = CompiledFilter.FromConfig(new ConsumerConfig
    {
        DurableName = "test",
        FilterSubject = "orders.>",
    });

    filter.Matches("orders.created").ShouldBeTrue();
    filter.Matches("payments.settled").ShouldBeFalse();
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,103 @@
using NATS.Server.JetStream;
using NATS.Server.JetStream.Models;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for consumer pause/resume with auto-resume timer.
/// Go reference: consumer.go (pause/resume).
/// </summary>
public class ConsumerPauseResumeTests
{
    /// <summary>Creates a fresh <see cref="ConsumerManager"/> under test.</summary>
    private static ConsumerManager CreateManager() => new();

    /// <summary>Registers a durable consumer so pause/resume calls have a target.</summary>
    private static void CreateConsumer(ConsumerManager mgr, string stream, string name)
    {
        mgr.CreateOrUpdate(stream, new ConsumerConfig { DurableName = name });
    }

    [Fact]
    public void Pause_with_deadline_sets_paused()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "test-consumer");
        var until = DateTime.UtcNow.AddSeconds(5);

        mgr.Pause("test-stream", "test-consumer", until);

        // Both the paused flag and the stored deadline must reflect the request.
        mgr.IsPaused("test-stream", "test-consumer").ShouldBeTrue();
        mgr.GetPauseUntil("test-stream", "test-consumer").ShouldBe(until);
    }

    [Fact]
    public void Resume_clears_pause()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "test-consumer");
        mgr.Pause("test-stream", "test-consumer", DateTime.UtcNow.AddSeconds(5));

        mgr.Resume("test-stream", "test-consumer");

        // Explicit resume clears both the flag and the deadline.
        mgr.IsPaused("test-stream", "test-consumer").ShouldBeFalse();
        mgr.GetPauseUntil("test-stream", "test-consumer").ShouldBeNull();
    }

    [Fact]
    public async Task Pause_auto_resumes_after_deadline()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "test-consumer");
        // Use a semaphore to synchronize on the actual timer callback rather than a blind delay.
        // Unbounded on purpose: a bounded SemaphoreSlim(0, 1) would throw
        // SemaphoreFullException from inside the event handler if OnAutoResumed
        // ever fired more than once, crashing on a timer thread.
        using var resumed = new SemaphoreSlim(0);
        mgr.OnAutoResumed += (_, _) => resumed.Release();

        mgr.Pause("test-stream", "test-consumer", DateTime.UtcNow.AddMilliseconds(100));

        var signalled = await resumed.WaitAsync(TimeSpan.FromSeconds(5));
        signalled.ShouldBeTrue("auto-resume timer did not fire within 5 seconds");
        mgr.IsPaused("test-stream", "test-consumer").ShouldBeFalse();
    }

    [Fact]
    public void IsPaused_returns_false_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.IsPaused("unknown", "unknown").ShouldBeFalse();
    }

    [Fact]
    public void GetPauseUntil_returns_null_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.GetPauseUntil("unknown", "unknown").ShouldBeNull();
    }

    [Fact]
    public void Resume_returns_false_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.Resume("unknown", "unknown").ShouldBeFalse();
    }

    [Fact]
    public void Pause_returns_false_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.Pause("unknown", "unknown", DateTime.UtcNow.AddSeconds(5)).ShouldBeFalse();
    }

    [Fact]
    public void IsPaused_auto_resumes_expired_deadline()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "c1");

        // Pause with a deadline already in the past.
        mgr.Pause("test-stream", "c1", DateTime.UtcNow.AddMilliseconds(-100));

        // IsPaused must detect the expired deadline and auto-resume lazily.
        mgr.IsPaused("test-stream", "c1").ShouldBeFalse();
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,244 @@
// Go reference: consumer.go:4241 (processResetReq)
using NATS.Server.JetStream;
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Models;
using NATS.Server.JetStream.Storage;
using System.Text;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for consumer reset-to-sequence (Gap 3.12) and AckProcessor.ClearAll / SetAckFloor.
/// Go reference: consumer.go:4241 processResetReq.
/// </summary>
public class ConsumerResetTests
{
    // Fresh manager per test — no shared state between facts.
    private static ConsumerManager CreateManager()
    {
        return new ConsumerManager();
    }

    // Registers a durable consumer, optionally letting the caller tweak its config.
    private static void CreateConsumer(ConsumerManager mgr, string stream, string name,
        Action<ConsumerConfig>? configure = null)
    {
        var cfg = new ConsumerConfig { DurableName = name };
        configure?.Invoke(cfg);
        mgr.CreateOrUpdate(stream, cfg);
    }

    // -------------------------------------------------------------------------
    // ResetToSequence
    // -------------------------------------------------------------------------

    // Go: consumer.go:4241 — processResetReq sets consumer.sseq to the
    // requested sequence so the next fetch starts there.
    [Fact]
    public void ResetToSequence_updates_next_sequence()
    {
        var manager = CreateManager();
        CreateConsumer(manager, "ORDERS", "oc1");

        // Advance the consumer so NextSequence is no longer 1.
        manager.TryGet("ORDERS", "oc1", out var preReset);
        preReset.NextSequence = 10;

        manager.ResetToSequence("ORDERS", "oc1", 5);

        manager.TryGet("ORDERS", "oc1", out var postReset);
        postReset.NextSequence.ShouldBe(5UL);
    }

    // Go: consumer.go:4241 — reset clears the pending ack map so stale ack
    // tokens from before the reset cannot be accepted.
    [Fact]
    public void ResetToSequence_clears_pending_acks()
    {
        var manager = CreateManager();
        CreateConsumer(manager, "ORDERS", "oc2");
        manager.TryGet("ORDERS", "oc2", out var consumer);

        consumer.AckProcessor.Register(3, ackWaitMs: 5000);
        consumer.AckProcessor.Register(7, ackWaitMs: 5000);
        consumer.AckProcessor.PendingCount.ShouldBe(2);

        manager.ResetToSequence("ORDERS", "oc2", 1);

        consumer.AckProcessor.PendingCount.ShouldBe(0);
    }

    // Go: consumer.go:4241 — pendingBytes must be zeroed on reset so the idle
    // heartbeat header is correct afterwards.
    [Fact]
    public void ResetToSequence_clears_pending_bytes()
    {
        var manager = CreateManager();
        CreateConsumer(manager, "ORDERS", "oc3");
        manager.TryGet("ORDERS", "oc3", out var consumer);
        consumer.PendingBytes = 12345;

        manager.ResetToSequence("ORDERS", "oc3", 1);

        consumer.PendingBytes.ShouldBe(0L);
    }

    // Go: consumer.go:4241 — returns false when the consumer does not exist
    // (unknown stream or durable name).
    [Fact]
    public void ResetToSequence_returns_false_for_missing_consumer()
    {
        CreateManager().ResetToSequence("NO-STREAM", "NO-CONSUMER", 1).ShouldBeFalse();
    }

    // Go: consumer.go:4241 — returns true when the consumer exists and the
    // reset is applied.
    [Fact]
    public void ResetToSequence_returns_true_for_existing_consumer()
    {
        var manager = CreateManager();
        CreateConsumer(manager, "ORDERS", "oc4");

        manager.ResetToSequence("ORDERS", "oc4", 42).ShouldBeTrue();
    }

    // Go: consumer.go:4241 — consumer config (subject filters, ack policy,
    // etc.) is immutable during reset; only positional/tracking state clears.
    [Fact]
    public void ResetToSequence_preserves_config()
    {
        var manager = CreateManager();
        CreateConsumer(manager, "ORDERS", "oc5", cfg =>
        {
            cfg.FilterSubject = "orders.>";
            cfg.AckPolicy = AckPolicy.Explicit;
        });

        manager.ResetToSequence("ORDERS", "oc5", 1);

        manager.TryGet("ORDERS", "oc5", out var consumer);
        consumer.Config.FilterSubject.ShouldBe("orders.>");
        consumer.Config.AckPolicy.ShouldBe(AckPolicy.Explicit);
    }

    // Go: consumer.go:4241 — after reset the push engine can re-enqueue
    // messages starting at the reset sequence.
    [Fact]
    public void ResetToSequence_allows_re_delivery_from_sequence()
    {
        var manager = CreateManager();
        CreateConsumer(manager, "ORDERS", "oc6", cfg =>
        {
            cfg.Push = true;
            cfg.DeliverSubject = "deliver.test";
        });
        manager.TryGet("ORDERS", "oc6", out var consumer);
        consumer.NextSequence = 50;

        manager.ResetToSequence("ORDERS", "oc6", 10);

        // The consumer now reads from sequence 10.
        consumer.NextSequence.ShouldBe(10UL);

        // Re-enqueue a message at that sequence via OnPublished…
        manager.OnPublished("ORDERS", new StoredMessage
        {
            Sequence = 10,
            Subject = "orders.new",
            Payload = Encoding.UTF8.GetBytes("data"),
            TimestampUtc = DateTime.UtcNow,
        });

        // …and it must land in the push frame queue.
        consumer.PushFrames.Count.ShouldBeGreaterThan(0);
    }

    // -------------------------------------------------------------------------
    // AckProcessor.ClearAll
    // -------------------------------------------------------------------------

    // Go: consumer.go processResetReq — pending ack map cleared.
    [Fact]
    public void ClearAll_clears_pending()
    {
        var acks = new AckProcessor();
        acks.Register(1, ackWaitMs: 5000);
        acks.Register(2, ackWaitMs: 5000);
        acks.Register(3, ackWaitMs: 5000);
        acks.PendingCount.ShouldBe(3);

        acks.ClearAll();

        acks.PendingCount.ShouldBe(0);
    }

    // Go: consumer.go processResetReq — terminated set cleared.
    [Fact]
    public void ClearAll_clears_terminated()
    {
        var acks = new AckProcessor();
        acks.Register(1, ackWaitMs: 5000);
        acks.Register(2, ackWaitMs: 5000);
        acks.ProcessTerm(1);
        acks.ProcessTerm(2);
        acks.TerminatedCount.ShouldBe(2);

        acks.ClearAll();

        acks.TerminatedCount.ShouldBe(0);
    }

    // Go: consumer.go processResetReq — ack floor reset to 0.
    [Fact]
    public void ClearAll_resets_ack_floor()
    {
        var acks = new AckProcessor();
        acks.Register(1, ackWaitMs: 5000);
        acks.Register(2, ackWaitMs: 5000);
        acks.AckSequence(1);
        acks.AckSequence(2);
        acks.AckFloor.ShouldBeGreaterThan(0UL);

        acks.ClearAll();

        acks.AckFloor.ShouldBe(0UL);
    }

    // -------------------------------------------------------------------------
    // AckProcessor.SetAckFloor
    // -------------------------------------------------------------------------

    // Go: consumer.go processResetReq — ack floor can be set to a specific
    // sequence to reflect the stream state after reset.
    [Fact]
    public void SetAckFloor_updates_floor()
    {
        var acks = new AckProcessor();

        acks.SetAckFloor(99);

        acks.AckFloor.ShouldBe(99UL);
    }

    // Go: consumer.go processResetReq — pending sequences below the new floor
    // are irrelevant (delivered before the floor) and must be pruned to avoid
    // ghost acks.
    [Fact]
    public void SetAckFloor_removes_entries_below_floor()
    {
        var acks = new AckProcessor();
        acks.Register(1, ackWaitMs: 5000);
        acks.Register(2, ackWaitMs: 5000);
        acks.Register(5, ackWaitMs: 5000);
        acks.Register(10, ackWaitMs: 5000);
        acks.PendingCount.ShouldBe(4);

        acks.SetAckFloor(5);

        // 1, 2 and 5 are at or below the floor and are removed;
        // 10 is above the floor and must remain.
        acks.PendingCount.ShouldBe(1);
        acks.HasPending.ShouldBeTrue();
    }
}

View File

@@ -0,0 +1,207 @@
// Go: consumer.go hasDeliveryInterest, deleteNotActive
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class DeliveryInterestTests
{
    // Go: consumer.go hasDeliveryInterest — interest exists once at least one
    // client subscribes to the push consumer's deliver subject.
    [Fact]
    public void HasInterest_true_after_subscribe()
    {
        var interest = new DeliveryInterestTracker();

        interest.OnSubscribe();

        interest.HasInterest.ShouldBeTrue();
    }

    // Go: consumer.go — a freshly created consumer has no delivery subscribers.
    [Fact]
    public void HasInterest_false_initially()
    {
        var interest = new DeliveryInterestTracker();

        interest.HasInterest.ShouldBeFalse();
    }

    // Go: consumer.go hasDeliveryInterest — interest disappears when the
    // subscription count drops back to zero.
    [Fact]
    public void HasInterest_false_after_all_unsubscribe()
    {
        var interest = new DeliveryInterestTracker();

        interest.OnSubscribe();
        interest.OnSubscribe();
        interest.OnUnsubscribe();
        interest.OnUnsubscribe();

        interest.HasInterest.ShouldBeFalse();
    }

    // Go: consumer.go — the count must mirror the exact number of active
    // push-consumer delivery subscriptions.
    [Fact]
    public void SubscriberCount_tracks_multiple_subscribers()
    {
        var interest = new DeliveryInterestTracker();
        interest.SubscriberCount.ShouldBe(0);

        interest.OnSubscribe();
        interest.SubscriberCount.ShouldBe(1);
        interest.OnSubscribe();
        interest.SubscriberCount.ShouldBe(2);
        interest.OnSubscribe();
        interest.SubscriberCount.ShouldBe(3);
        interest.OnUnsubscribe();
        interest.SubscriberCount.ShouldBe(2);
    }

    // Go: consumer.go deleteNotActive — stray unsubscribe events must not push
    // the count negative and corrupt later interest checks.
    [Fact]
    public void OnUnsubscribe_floors_at_zero()
    {
        var interest = new DeliveryInterestTracker();

        interest.OnUnsubscribe();
        interest.OnUnsubscribe();

        interest.SubscriberCount.ShouldBe(0);
        interest.HasInterest.ShouldBeFalse();
    }

    // Go: consumer.go deleteNotActive — ephemeral cleanup only triggers when
    // no active subscribers remain.
    [Fact]
    public void ShouldDelete_false_when_has_interest()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(1));

        interest.OnSubscribe();

        interest.ShouldDelete.ShouldBeFalse();
    }

    // Go: consumer.go deleteNotActive — the inactive timeout must fully elapse
    // before the consumer becomes eligible for deletion.
    [Fact]
    public void ShouldDelete_false_immediately_after_unsubscribe()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromSeconds(30));

        interest.OnSubscribe();
        interest.OnUnsubscribe();

        // Checked without waiting — the 30s window has not elapsed.
        interest.ShouldDelete.ShouldBeFalse();
    }

    // Go: consumer.go deleteNotActive — once the inactive threshold passes with
    // zero subscribers, the ephemeral consumer is scheduled for removal.
    [SlopwatchSuppress("SW004", "Intentional timeout test: ShouldDelete requires real wall-clock elapsed time to observe the inactive threshold firing; no synchronisation primitive can replace this")]
    [Fact]
    public async Task ShouldDelete_true_after_timeout()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(50));
        interest.OnSubscribe();
        interest.OnUnsubscribe();

        // Let the 50ms inactive window pass on the wall clock.
        await Task.Delay(100);

        interest.ShouldDelete.ShouldBeTrue();
    }

    // Go: consumer.go — Reset reinitialises tracking when a consumer is
    // re-attached or recreated: count and inactivity timer both clear.
    [SlopwatchSuppress("SW004", "Intentional timeout test: must let the inactive threshold elapse to confirm Reset clears the inactivity timer; no synchronisation primitive can replace this")]
    [Fact]
    public async Task Reset_clears_all_state()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(50));
        interest.OnSubscribe();
        interest.OnSubscribe();
        interest.OnUnsubscribe();
        interest.OnUnsubscribe();

        // Let the timeout elapse so a stale timer would be observable.
        await Task.Delay(100);

        interest.Reset();

        interest.SubscriberCount.ShouldBe(0);
        interest.HasInterest.ShouldBeFalse();
        interest.ShouldDelete.ShouldBeFalse(); // inactivity timer cleared too
    }

    // Go: consumer.go hasDeliveryInterest — a re-subscription resets the
    // inactive-since timestamp, so ShouldDelete stays false even after the
    // original timeout window passes.
    [SlopwatchSuppress("SW004", "Intentional timeout test: must let the original inactive window pass after re-subscribe to confirm the inactivity timer was cleared; no synchronisation primitive can replace this")]
    [Fact]
    public async Task Subscribe_clears_inactivity_timer()
    {
        var interest = new DeliveryInterestTracker(inactiveTimeout: TimeSpan.FromMilliseconds(50));
        interest.OnSubscribe();
        interest.OnUnsubscribe();

        // Re-subscribe before the timeout elapses…
        interest.OnSubscribe();

        // …then let the original window pass.
        await Task.Delay(100);

        interest.HasInterest.ShouldBeTrue();
        interest.ShouldDelete.ShouldBeFalse();
    }
}

View File

@@ -0,0 +1,427 @@
// Go: consumer.go:1400 (loopAndGatherMsgs) — gather loop polls the store for new messages,
// dispatches them through the send delegate, respects filter subjects,
// advances NextSequence, handles deleted/null entries, and exits on cancellation.
using System.Text;
using NATS.Server.JetStream;
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Models;
using NATS.Server.JetStream.Storage;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class DeliveryLoopTests
{
// -----------------------------------------------------------------------
// Helpers
// -----------------------------------------------------------------------

// Builds a handle on a fixed test stream with the supplied config.
private static ConsumerHandle MakeConsumer(ConsumerConfig config)
{
    return new ConsumerHandle("TEST-STREAM", config);
}
/// <summary>
/// Builds a send delegate that appends each delivered message's original subject
/// to <paramref name="deliveredOriginalSubjects"/> and releases <paramref name="sem"/>
/// once per delivery, so tests can await exact delivery counts.
/// </summary>
private static Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask>
    MakeSemaphoreSend(List<string> deliveredOriginalSubjects, SemaphoreSlim sem)
{
    return (_, originalSubject, _, _, _) =>
    {
        // The list is shared with the test thread; guard it.
        lock (deliveredOriginalSubjects)
        {
            deliveredOriginalSubjects.Add(originalSubject);
        }
        sem.Release();
        return ValueTask.CompletedTask;
    };
}
// -----------------------------------------------------------------------
// Test 1 — GatherLoop polls store for new messages
//
// Go reference: consumer.go:1560 — poll store for messages from nextSeq to
// LastSeq. Three messages appended before the loop starts; the loop must
// dispatch all three.
// -----------------------------------------------------------------------
[Fact]
public async Task GatherLoop_polls_store_for_new_messages()
{
    var store = new MemStore();
    await store.AppendAsync("foo", "msg1"u8.ToArray(), default);
    await store.AppendAsync("foo", "msg2"u8.ToArray(), default);
    await store.AppendAsync("foo", "msg3"u8.ToArray(), default);

    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "POLL" });
    var received = new List<string>();
    var gate = new SemaphoreSlim(0);
    var send = MakeSemaphoreSend(received, gate);

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);

    // One release per dispatched message — wait for exactly three.
    for (var i = 0; i < 3; i++)
    {
        await gate.WaitAsync(cts.Token);
    }
    engine.StopGatherLoop();

    lock (received)
    {
        received.Count.ShouldBe(3);
    }
}
// -----------------------------------------------------------------------
// Test 2 — GatherLoop respects FilterSubject
//
// Go reference: consumer.go:1569 — ShouldDeliver skips messages whose
// subject does not match cfg.FilterSubject.
// -----------------------------------------------------------------------
[Fact]
public async Task GatherLoop_respects_filter_subject()
{
    var store = new MemStore();
    await store.AppendAsync("orders.us", "o1"u8.ToArray(), default);
    await store.AppendAsync("events.x", "e1"u8.ToArray(), default);
    await store.AppendAsync("orders.eu", "o2"u8.ToArray(), default);

    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FILTERED",
        FilterSubject = "orders.>",
    });
    var received = new List<string>();
    var gate = new SemaphoreSlim(0);
    var send = MakeSemaphoreSend(received, gate);

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);

    // Only "orders.us" and "orders.eu" pass the filter — wait for exactly two.
    for (var i = 0; i < 2; i++)
    {
        await gate.WaitAsync(cts.Token);
    }
    engine.StopGatherLoop();

    lock (received)
    {
        received.Count.ShouldBe(2);
        received.ShouldContain("orders.us");
        received.ShouldContain("orders.eu");
        received.ShouldNotContain("events.x");
    }
}
// -----------------------------------------------------------------------
// Test 3 — ShouldDeliver with no filter delivers all subjects
//
// Go reference: consumer.go — empty FilterSubject + empty FilterSubjects → deliver all.
// -----------------------------------------------------------------------
[Fact]
public void ShouldDeliver_with_no_filter_delivers_all()
{
    var cfg = new ConsumerConfig { DurableName = "ANY" };

    // Without any filter configured, every subject is deliverable.
    string[] subjects = ["orders.new", "events.x", "telemetry.cpu.host1"];
    foreach (var subject in subjects)
        PushConsumerEngine.ShouldDeliverPublic(cfg, subject).ShouldBeTrue();
}
// -----------------------------------------------------------------------
// Test 4 — ShouldDeliver with single FilterSubject
//
// Go reference: consumer.go — FilterSubject is matched via SubjectMatch.MatchLiteral.
// -----------------------------------------------------------------------
[Fact]
public void ShouldDeliver_with_single_filter()
{
    var cfg = new ConsumerConfig
    {
        DurableName = "SINGLE",
        FilterSubject = "orders.us",
    };

    // Only the literal filter subject should be deliverable.
    foreach (var (subject, expected) in new[]
    {
        ("orders.us", true),
        ("orders.eu", false),
        ("events.x", false),
    })
    {
        PushConsumerEngine.ShouldDeliverPublic(cfg, subject).ShouldBe(expected);
    }
}
// -----------------------------------------------------------------------
// Test 5 — ShouldDeliver with multiple filters (FilterSubjects list)
//
// Go reference: consumer.go — FilterSubjects: any match → deliver.
// -----------------------------------------------------------------------
[Fact]
public void ShouldDeliver_with_multiple_filters()
{
    var cfg = new ConsumerConfig
    {
        DurableName = "MULTI",
        FilterSubjects = ["orders.us", "events.created"],
    };

    // A subject matching either filter entry is deliverable; all others are not.
    foreach (var (subject, expected) in new[]
    {
        ("orders.us", true),
        ("events.created", true),
        ("orders.eu", false),
        ("events.deleted", false),
    })
    {
        PushConsumerEngine.ShouldDeliverPublic(cfg, subject).ShouldBe(expected);
    }
}
// -----------------------------------------------------------------------
// Test 6 — ShouldDeliver with wildcard filter
//
// Go reference: consumer.go — wildcard matching via SubjectMatch.MatchLiteral.
// "orders.*" matches exactly one suffix token; "orders.us.new" has two, so it
// must not match.
// -----------------------------------------------------------------------
[Fact]
public void ShouldDeliver_with_wildcard_filter()
{
    var cfg = new ConsumerConfig
    {
        DurableName = "WILDCARD",
        FilterSubject = "orders.*",
    };

    foreach (var (subject, expected) in new[]
    {
        ("orders.new", true),
        ("orders.old", true),
        ("orders.us.new", false), // two suffix tokens — '*' covers only one
        ("events.x", false),
    })
    {
        PushConsumerEngine.ShouldDeliverPublic(cfg, subject).ShouldBe(expected);
    }
}
// -----------------------------------------------------------------------
// Test 7 — Signal(NewMessage) wakes the gather loop
//
// Go reference: consumer.go:1620 — channel send wakes the loop so it does
// not have to wait the full 250ms poll timeout.
// -----------------------------------------------------------------------
[Fact]
public async Task GatherLoop_signal_wakes_loop()
{
    var store = new MemStore();
    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "SIGNAL" });
    // deliveredSem is released once per dispatched message; capacity 1 because
    // this test expects at most a single delivery.
    var deliveredSem = new SemaphoreSlim(0, 1);
    var delivered = new List<string>();
    Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
        (_, origSubj, _, _, _) =>
        {
            lock (delivered) delivered.Add(origSubj);
            deliveredSem.Release();
            return ValueTask.CompletedTask;
        };
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);
    // Give the freshly started loop a chance to enter its first wait cycle.
    // The store is still empty, so GatheredCount stays 0 and this spin runs
    // the full 5,000 yields — it is a cooperative "let the loop start" pause
    // (yield only, no sleeping, to avoid SW004), not a condition we expect
    // to see flip. NOTE(review): a dedicated loop-started signal from the
    // engine would make this deterministic — confirm none exists.
    var spins = 0;
    while (engine.GatheredCount == 0 && spins < 5_000)
    {
        await Task.Yield();
        spins++;
    }
    // Append a message and signal — delivery should arrive well before the
    // 500ms cap, i.e. without waiting out a full 250ms poll interval.
    await store.AppendAsync("foo", "hello"u8.ToArray(), default);
    engine.Signal(ConsumerSignal.NewMessage);
    var received = await deliveredSem.WaitAsync(TimeSpan.FromMilliseconds(500), cts.Token);
    engine.StopGatherLoop();
    received.ShouldBeTrue("expected delivery within 500ms after Signal(NewMessage)");
    lock (delivered) delivered.Count.ShouldBeGreaterThanOrEqualTo(1);
}
// -----------------------------------------------------------------------
// Test 8 — GatherLoop advances NextSequence
//
// Go reference: consumer.go:1600 — nextSeq++ and consumer.NextSequence = nextSeq.
// -----------------------------------------------------------------------
[Fact]
public async Task GatherLoop_advances_next_sequence()
{
    var store = new MemStore();
    foreach (var payload in new[] { "a", "b", "c" })
        await store.AppendAsync("foo", Encoding.UTF8.GetBytes(payload), default);
    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "SEQ" });

    var dispatched = new List<string>();
    var gate = new SemaphoreSlim(0);
    var send = MakeSemaphoreSend(dispatched, gate);

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);

    // Await all three dispatches before inspecting consumer state.
    for (var i = 0; i < 3; i++)
        await gate.WaitAsync(cts.Token);
    engine.StopGatherLoop();

    // Having delivered sequences 1..3, the next sequence to load is 4.
    consumer.NextSequence.ShouldBe((ulong)4);
}
// -----------------------------------------------------------------------
// Test 9 — GatherLoop skips deleted/null messages
//
// Go reference: consumer.go:1572 — LoadAsync returning null means the
// message was deleted; the gather loop simply advances past it.
// -----------------------------------------------------------------------
[Fact]
public async Task GatherLoop_skips_deleted_messages()
{
    var store = new MemStore();
    await store.AppendAsync("foo", "first"u8.ToArray(), default);  // seq 1
    await store.AppendAsync("foo", "second"u8.ToArray(), default); // seq 2
    await store.AppendAsync("foo", "third"u8.ToArray(), default);  // seq 3
    // Remove seq 2 so LoadAsync returns null for it.
    await store.RemoveAsync(2, default);

    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "SKIP-DEL" });
    var payloads = new List<string>();
    var gate = new SemaphoreSlim(0);
    Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
        (_, _, _, body, _) =>
        {
            lock (payloads) payloads.Add(Encoding.UTF8.GetString(body.Span));
            gate.Release();
            return ValueTask.CompletedTask;
        };

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);

    // Exactly two messages survive the removal of seq 2.
    for (var i = 0; i < 2; i++)
        await gate.WaitAsync(cts.Token);
    engine.StopGatherLoop();

    lock (payloads)
    {
        payloads.Count.ShouldBe(2);
        payloads.ShouldContain("first");
        payloads.ShouldContain("third");
        payloads.ShouldNotContain("second");
    }
}
// -----------------------------------------------------------------------
// Test 10 — GatherLoop increments GatheredCount
//
// Go reference: consumer.go:1400 loopAndGatherMsgs — GatheredCount tracks
// every message dispatched to the subscriber.
// -----------------------------------------------------------------------
[Fact]
public async Task GatherLoop_increments_gathered_count()
{
    var store = new MemStore();
    await store.AppendAsync("foo", "x"u8.ToArray(), default);
    await store.AppendAsync("foo", "y"u8.ToArray(), default);
    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "COUNT" });

    var dispatched = new List<string>();
    var gate = new SemaphoreSlim(0);
    var send = MakeSemaphoreSend(dispatched, gate);

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);

    // Await both dispatches, then check the engine's own counter.
    for (var i = 0; i < 2; i++)
        await gate.WaitAsync(cts.Token);
    engine.StopGatherLoop();

    engine.GatheredCount.ShouldBe(2);
}
// -----------------------------------------------------------------------
// Test 11 — GatherLoop stops on cancellation
//
// Go reference: consumer.go — the goroutine exits when the quit channel closes,
// which maps to CancellationToken cancellation here.
//
// Fix: removed an unused `loopRunning` TaskCompletionSource (declared but
// never set or awaited, with a stale comment describing it) and disposed the
// CancellationTokenSource with a `using` declaration.
// -----------------------------------------------------------------------
[Fact]
public async Task GatherLoop_stops_on_cancellation()
{
    var store = new MemStore();
    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "CANCEL" });

    // Count every dispatch; nothing is ever appended, so this must stay 0.
    var deliveredCount = 0;
    Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
        (_, _, _, _, _) =>
        {
            Interlocked.Increment(ref deliveredCount);
            return ValueTask.CompletedTask;
        };

    using var cts = new CancellationTokenSource();
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);

    // Cancel without appending anything; the loop must exit cleanly.
    await cts.CancelAsync();
    engine.StopGatherLoop();

    // Yield a few times to let any in-flight dispatch complete.
    for (var i = 0; i < 10; i++) await Task.Yield();
    deliveredCount.ShouldBe(0);
}
// -----------------------------------------------------------------------
// Test 12 — GatherLoop handles an empty store
//
// Go reference: consumer.go:1620 — when no messages exist the loop waits
// on the signal channel with a 250ms timeout rather than busy-spinning.
// We verify it does NOT deliver anything when the store remains empty,
// and that it exits cleanly when cancelled.
// -----------------------------------------------------------------------
[Fact]
[SlopwatchSuppress("SW004", "Negative timing assertion: verifying the gather loop does NOT deliver from an empty store requires a real wall-clock window; no synchronisation primitive can replace observing the absence of delivery")]
public async Task GatherLoop_handles_empty_store()
{
    var store = new MemStore(); // deliberately left empty
    var consumer = MakeConsumer(new ConsumerConfig { DurableName = "EMPTY" });

    var deliveries = 0;
    var firstDelivery = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    Func<string, string, ReadOnlyMemory<byte>, ReadOnlyMemory<byte>, CancellationToken, ValueTask> send =
        (_, _, _, _, _) =>
        {
            Interlocked.Increment(ref deliveries);
            firstDelivery.TrySetResult(true);
            return ValueTask.CompletedTask;
        };

    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var engine = new PushConsumerEngine();
    engine.StartGatherLoop(consumer, store, send, cts.Token);

    // The send delegate must never fire: race the first-delivery task against
    // a short wall-clock window and require the window to win.
    var winner = await Task.WhenAny(firstDelivery.Task, Task.Delay(150, cts.Token));
    engine.StopGatherLoop();

    (winner == firstDelivery.Task).ShouldBeFalse("gather loop must not deliver from an empty store");
    deliveries.ShouldBe(0);
}
}

View File

@@ -0,0 +1,245 @@
// Go: consumer.go isFilteredMatch, skipMsgs tracking
// FilterSkipTracker tests — verifies NATS token-based filter matching
// and skip sequence gap tracking.
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class FilterSkipTests
{
    // Test 1 — no configured filter: every subject is deliverable.
    // Go: consumer.go isFilteredMatch — absent filters mean deliver-all.
    [Fact]
    public void ShouldDeliver_no_filter_always_matches()
    {
        var sut = new FilterSkipTracker();

        sut.ShouldDeliver("orders.us").ShouldBeTrue();
        sut.ShouldDeliver("events.payment").ShouldBeTrue();
        sut.ShouldDeliver("anything").ShouldBeTrue();
    }

    // Test 2 — a literal filter accepts exactly the matching subject.
    // Go: consumer.go isFilteredMatch — literal subject match.
    [Fact]
    public void ShouldDeliver_single_filter_exact_match()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("orders.us").ShouldBeTrue();
    }

    // Test 3 — a literal filter rejects any other subject.
    // Go: consumer.go isFilteredMatch — non-matching subjects are skipped.
    [Fact]
    public void ShouldDeliver_single_filter_no_match()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("events.x").ShouldBeFalse();
    }

    // Test 4 — '*' matches exactly one token: "orders.*" accepts "orders.us"
    // but rejects "orders.us.east" (two remaining tokens).
    // Go: consumer.go isFilteredMatch via SubjectMatch.MatchLiteral.
    [Fact]
    public void ShouldDeliver_wildcard_star()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.*");

        sut.ShouldDeliver("orders.us").ShouldBeTrue();
        sut.ShouldDeliver("orders.us.east").ShouldBeFalse();
    }

    // Test 5 — '>' matches one or more remaining tokens.
    // Go: consumer.go isFilteredMatch.
    [Fact]
    public void ShouldDeliver_wildcard_gt()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.>");

        sut.ShouldDeliver("orders.us.east").ShouldBeTrue();
        sut.ShouldDeliver("orders.eu").ShouldBeTrue();
        sut.ShouldDeliver("events.x").ShouldBeFalse();
    }

    // Test 6 — with FilterSubjects populated, matching any entry delivers.
    // Go: consumer.go isFilteredMatch.
    [Fact]
    public void ShouldDeliver_multiple_filters()
    {
        var sut = new FilterSkipTracker(filterSubjects: ["orders.>", "events.>"]);

        sut.ShouldDeliver("orders.us").ShouldBeTrue();
        sut.ShouldDeliver("events.payment").ShouldBeTrue();
        sut.ShouldDeliver("metrics.cpu").ShouldBeFalse();
    }

    // Test 7 — each matching delivery bumps MatchCount, never SkipCount.
    // Go: consumer.go — consumer tracks matched message counts.
    [Fact]
    public void MatchCount_increments_on_match()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("orders.us");
        sut.ShouldDeliver("orders.us");

        sut.MatchCount.ShouldBe(2L);
        sut.SkipCount.ShouldBe(0L);
    }

    // Test 8 — each non-matching subject bumps SkipCount, never MatchCount.
    // Go: consumer.go skipMsgs — non-matching messages are counted.
    [Fact]
    public void SkipCount_increments_on_skip()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.ShouldDeliver("events.x");
        sut.ShouldDeliver("events.y");

        sut.SkipCount.ShouldBe(2L);
        sut.MatchCount.ShouldBe(0L);
    }

    // Test 9 — RecordSkip remembers each gap sequence for later resolution.
    // Go: consumer.go skipMsgs.
    [Fact]
    public void RecordSkip_tracks_sequence()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");

        sut.RecordSkip(5UL);
        sut.RecordSkip(7UL);

        sut.SkippedSequenceCount.ShouldBe(2);
    }

    // Test 10 — NextUnskippedSequence walks past every recorded gap.
    // Go: consumer.go — finding the next deliverable sequence after filter gaps.
    [Fact]
    public void NextUnskippedSequence_skips_recorded()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");
        sut.RecordSkip(2UL);
        sut.RecordSkip(3UL);

        sut.NextUnskippedSequence(1UL).ShouldBe(1UL); // 1 was never skipped
        sut.NextUnskippedSequence(2UL).ShouldBe(4UL); // hops over 2 and 3
        sut.NextUnskippedSequence(4UL).ShouldBe(4UL); // 4 was never skipped
    }

    // Test 11 — PurgeBelow drops entries strictly below the floor so the skip
    // set cannot grow without bound as the ack floor advances.
    // Go: consumer.go — ack floor advancement purges old skip entries.
    [Fact]
    public void PurgeBelow_removes_old_entries()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");
        sut.RecordSkip(1UL);
        sut.RecordSkip(3UL);
        sut.RecordSkip(5UL);
        sut.RecordSkip(7UL);

        sut.PurgeBelow(5UL);

        sut.SkippedSequenceCount.ShouldBe(2);         // 1 and 3 purged; 5 and 7 kept
        sut.NextUnskippedSequence(5UL).ShouldBe(6UL); // 5 still skipped
        sut.NextUnskippedSequence(1UL).ShouldBe(1UL); // 1 was purged
    }

    // Test 12 — HasFilter is off when nothing was configured.
    // Go: consumer.go — no filter means deliver all messages.
    [Fact]
    public void HasFilter_false_when_empty()
    {
        new FilterSkipTracker().HasFilter.ShouldBeFalse();
    }

    // Test 13 — a single FilterSubject turns HasFilter on.
    // Go: consumer.go — FilterSubject set means selective delivery.
    [Fact]
    public void HasFilter_true_with_single_filter()
    {
        new FilterSkipTracker(filterSubject: "orders.us").HasFilter.ShouldBeTrue();
    }

    // Test 14 — Reset wipes counters and the skipped-sequence set.
    // Go: consumer.go — consumer state reset on reconfiguration.
    [Fact]
    public void Reset_clears_all_state()
    {
        var sut = new FilterSkipTracker(filterSubject: "orders.us");
        sut.ShouldDeliver("orders.us");
        sut.ShouldDeliver("events.x");
        sut.RecordSkip(10UL);
        sut.RecordSkip(11UL);

        sut.Reset();

        sut.MatchCount.ShouldBe(0L);
        sut.SkipCount.ShouldBe(0L);
        sut.SkippedSequenceCount.ShouldBe(0);
    }
}

View File

@@ -0,0 +1,511 @@
// Go reference: golang/nats-server/server/consumer.go
// sendIdleHeartbeat ~line 5222, sendFlowControl ~line 5495
//
// Tests for idle heartbeat pending-count headers (Nats-Pending-Messages,
// Nats-Pending-Bytes) and flow control stall detection.
using System.Collections.Concurrent;
using System.Text;
using NATS.Server.JetStream;
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Models;
using NATS.Server.JetStream.Storage;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class IdleHeartbeatTests
{
// Helper: build a ConsumerHandle bound to the fixed test stream name.
private static ConsumerHandle MakeConsumer(ConsumerConfig config)
{
    return new ConsumerHandle("TEST-STREAM", config);
}
// Helper: build a minimal StoredMessage with a UTF-8 payload and a fresh timestamp.
private static StoredMessage MakeMessage(ulong seq, string subject = "test.subject", string payload = "hello")
{
    return new StoredMessage
    {
        Sequence = seq,
        Subject = subject,
        Payload = Encoding.UTF8.GetBytes(payload),
        TimestampUtc = DateTime.UtcNow,
    };
}
// Helper: extract a single header's value from a raw NATS header block,
// e.g. "42" out of "Nats-Pending-Messages: 42\r\n".
// Case-insensitive on the header name; returns null when the header is absent.
private static string? ParseHeaderValue(string headers, string headerName)
{
    var needle = headerName + ": ";
    var idx = headers.IndexOf(needle, StringComparison.OrdinalIgnoreCase);
    if (idx < 0)
        return null;

    var valueStart = idx + needle.Length;
    var valueEnd = headers.IndexOf('\r', valueStart);
    var raw = valueEnd < 0 ? headers[valueStart..] : headers[valueStart..valueEnd];
    return raw.Trim();
}
// =========================================================================
// Test 1 — Heartbeat includes Nats-Pending-Messages header
//
// Go reference: consumer.go:5222 — sendIdleHeartbeat includes pending message
// count in the Nats-Pending-Messages header.
// =========================================================================
[Fact]
public async Task Heartbeat_includes_pending_messages_header()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-PENDING",
        Push = true,
        DeliverSubject = "deliver.hb",
        HeartbeatMs = 50,
        AckPolicy = AckPolicy.Explicit,
        AckWaitMs = 30_000,
    });
    // Register 3 pending acks so PendingCount == 3.
    consumer.AckProcessor.Register(1, 30_000);
    consumer.AckProcessor.Register(2, 30_000);
    consumer.AckProcessor.Register(3, 30_000);
    ReadOnlyMemory<byte>? capturedHeartbeat = null;
    // RunContinuationsAsynchronously keeps the awaiting test continuation off
    // the delivery-loop thread that completes the TCS (consistent with the
    // other TCS usages in this suite).
    var heartbeatReceived = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            var text = Encoding.ASCII.GetString(headers.Span);
            if (text.Contains("Idle Heartbeat") && !heartbeatReceived.Task.IsCompleted)
            {
                capturedHeartbeat = headers;
                heartbeatReceived.TrySetResult(true);
            }
            await ValueTask.CompletedTask;
        },
        cts.Token);
    await heartbeatReceived.Task.WaitAsync(TimeSpan.FromSeconds(5));
    engine.StopDeliveryLoop();
    capturedHeartbeat.ShouldNotBeNull();
    var headerText = Encoding.ASCII.GetString(capturedHeartbeat!.Value.Span);
    headerText.ShouldContain("Nats-Pending-Messages:");
    var pendingMsgs = ParseHeaderValue(headerText, "Nats-Pending-Messages");
    pendingMsgs.ShouldBe("3");
}
// =========================================================================
// Test 2 — Heartbeat includes Nats-Pending-Bytes header
//
// Go reference: consumer.go:5222 — sendIdleHeartbeat includes pending byte
// count in the Nats-Pending-Bytes header.
// =========================================================================
[Fact]
public async Task Heartbeat_includes_pending_bytes_header()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-BYTES",
        Push = true,
        DeliverSubject = "deliver.hb2",
        HeartbeatMs = 50,
    });
    // Set pending bytes explicitly so the header has a non-trivial value.
    consumer.PendingBytes = 4096;
    ReadOnlyMemory<byte>? capturedHeartbeat = null;
    // RunContinuationsAsynchronously keeps the awaiting test continuation off
    // the delivery-loop thread that completes the TCS (consistent with the
    // other TCS usages in this suite).
    var heartbeatReceived = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            var text = Encoding.ASCII.GetString(headers.Span);
            if (text.Contains("Idle Heartbeat") && !heartbeatReceived.Task.IsCompleted)
            {
                capturedHeartbeat = headers;
                heartbeatReceived.TrySetResult(true);
            }
            await ValueTask.CompletedTask;
        },
        cts.Token);
    await heartbeatReceived.Task.WaitAsync(TimeSpan.FromSeconds(5));
    engine.StopDeliveryLoop();
    capturedHeartbeat.ShouldNotBeNull();
    var headerText = Encoding.ASCII.GetString(capturedHeartbeat!.Value.Span);
    headerText.ShouldContain("Nats-Pending-Bytes:");
    var pendingBytes = ParseHeaderValue(headerText, "Nats-Pending-Bytes");
    pendingBytes.ShouldBe("4096");
}
// =========================================================================
// Test 3 — Heartbeat is sent after the idle period elapses
//
// Go reference: consumer.go:5222 — the idle heartbeat timer fires after
// HeartbeatMs milliseconds of inactivity.
//
// Fix: elapsed time is now measured with a monotonic Stopwatch instead of
// DateTime.UtcNow, which can jump under clock adjustment and make the
// lower-bound assertion flaky; the TCS also gets
// RunContinuationsAsynchronously for consistency with the rest of the suite.
// =========================================================================
[Fact]
public async Task Heartbeat_sent_after_idle_period()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-TIMER",
        Push = true,
        DeliverSubject = "deliver.timer",
        HeartbeatMs = 50,
    });
    var heartbeatReceived = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    // Stopwatch is monotonic — safe for measuring a wall-clock lower bound.
    var clock = System.Diagnostics.Stopwatch.StartNew();
    TimeSpan? receivedAfter = null;
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    // Start loop with no messages — only the timer can fire a heartbeat.
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            var text = Encoding.ASCII.GetString(headers.Span);
            if (text.Contains("Idle Heartbeat") && !heartbeatReceived.Task.IsCompleted)
            {
                receivedAfter = clock.Elapsed;
                heartbeatReceived.TrySetResult(true);
            }
            await ValueTask.CompletedTask;
        },
        cts.Token);
    await heartbeatReceived.Task.WaitAsync(TimeSpan.FromSeconds(5));
    engine.StopDeliveryLoop();
    receivedAfter.ShouldNotBeNull();
    // The heartbeat timer is 50ms; it must have fired some time after that.
    // The 20ms lower bound tolerates coarse timer resolution.
    receivedAfter!.Value.TotalMilliseconds.ShouldBeGreaterThan(20);
}
// =========================================================================
// Test 4 — Heartbeat counter increments on each idle heartbeat sent
//
// Go reference: consumer.go:5222 — each sendIdleHeartbeat call increments
// the idle heartbeat counter.
// =========================================================================
[Fact]
public async Task Heartbeat_counter_increments()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-COUNT",
        Push = true,
        DeliverSubject = "deliver.count",
        HeartbeatMs = 40,
    });
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var heartbeatsReceived = 0;
    // Each heartbeat arrival releases the gate once, so the test can await
    // deliveries explicitly instead of sleeping.
    var heartbeatGate = new SemaphoreSlim(0);
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            if (Encoding.ASCII.GetString(headers.Span).Contains("Idle Heartbeat"))
            {
                Interlocked.Increment(ref heartbeatsReceived);
                heartbeatGate.Release();
            }
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // Block until the delegate has observed two heartbeat deliveries.
    for (var i = 0; i < 2; i++)
        await heartbeatGate.WaitAsync(cts.Token);
    engine.StopDeliveryLoop();
    // IdleHeartbeatsSent increments after sendMessage returns, so it may lag
    // the delegate count by at most one — assert >=2 on the delegate count
    // and only >=1 on the engine counter.
    heartbeatsReceived.ShouldBeGreaterThanOrEqualTo(2);
    engine.IdleHeartbeatsSent.ShouldBeGreaterThanOrEqualTo(1);
}
// =========================================================================
// Test 5 — Heartbeat shows zero pending when no acks are outstanding
//
// Go reference: consumer.go:5222 — when no messages are pending ack,
// Nats-Pending-Messages should be 0 and Nats-Pending-Bytes should be 0.
// =========================================================================
[Fact]
public async Task Heartbeat_zero_pending_when_no_acks()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-ZERO",
        Push = true,
        DeliverSubject = "deliver.zero",
        HeartbeatMs = 50,
    });
    // No acks registered, PendingBytes stays 0.
    ReadOnlyMemory<byte>? capturedHeartbeat = null;
    // RunContinuationsAsynchronously keeps the awaiting test continuation off
    // the delivery-loop thread that completes the TCS (consistent with the
    // other TCS usages in this suite).
    var heartbeatReceived = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            var text = Encoding.ASCII.GetString(headers.Span);
            if (text.Contains("Idle Heartbeat") && !heartbeatReceived.Task.IsCompleted)
            {
                capturedHeartbeat = headers;
                heartbeatReceived.TrySetResult(true);
            }
            await ValueTask.CompletedTask;
        },
        cts.Token);
    await heartbeatReceived.Task.WaitAsync(TimeSpan.FromSeconds(5));
    engine.StopDeliveryLoop();
    capturedHeartbeat.ShouldNotBeNull();
    var headerText = Encoding.ASCII.GetString(capturedHeartbeat!.Value.Span);
    var pendingMsgs = ParseHeaderValue(headerText, "Nats-Pending-Messages");
    var pendingBytes = ParseHeaderValue(headerText, "Nats-Pending-Bytes");
    pendingMsgs.ShouldBe("0");
    pendingBytes.ShouldBe("0");
}
// =========================================================================
// Test 6 — Heartbeat reset on data delivery (timer should not fire early)
//
// Go reference: consumer.go:5222 — the idle heartbeat timer is reset on every
// data delivery so that it only fires after a true idle period.
// =========================================================================
// Task.Delay(50) is intentional: this is a negative-timing assertion that
// verifies no heartbeat fires within 50ms of a 200ms timer reset. There is
// no synchronisation primitive that can assert an event does NOT occur within
// a wall-clock window; the delay is the only correct approach here.
[SlopwatchSuppress("SW004", "Negative timing assertion: verifying heartbeat does NOT fire within 50ms window after 200ms timer reset requires real wall-clock elapsed time")]
[Fact]
public async Task Heartbeat_reset_on_data_delivery()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB-RESET",
        Push = true,
        DeliverSubject = "deliver.reset",
        HeartbeatMs = 200, // longer interval for this test
    });
    var messages = new ConcurrentBag<string>();
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    // RunContinuationsAsynchronously keeps the awaiting test continuation off
    // the delivery-loop thread that completes the TCS (consistent with the
    // other TCS usages in this suite).
    var dataDelivered = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            var text = Encoding.ASCII.GetString(headers.Span);
            messages.Add(text);
            // A data frame carries NATS/1.0 headers without the heartbeat status.
            if (text.Contains("NATS/1.0\r\n") && !text.Contains("Idle Heartbeat"))
                dataDelivered.TrySetResult(true);
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // Enqueue a data message — this resets the heartbeat timer.
    engine.Enqueue(consumer, MakeMessage(1));
    await dataDelivered.Task.WaitAsync(TimeSpan.FromSeconds(5));
    // Record how many heartbeats exist right after data delivery.
    var heartbeatsAfterData = messages.Count(m => m.Contains("Idle Heartbeat"));
    // Wait a short period — the 200ms heartbeat timer should NOT fire within 50ms.
    await Task.Delay(50);
    var heartbeatsShortWait = messages.Count(m => m.Contains("Idle Heartbeat"));
    engine.StopDeliveryLoop();
    // The timer reset should mean no NEW timer heartbeat fired within 50ms
    // (the 200ms interval means we'd need to wait ~200ms after the last data delivery).
    heartbeatsShortWait.ShouldBe(heartbeatsAfterData);
}
// =========================================================================
// Test 7 — Flow control pending count increments on each FC frame sent
//
// Go reference: consumer.go:5495 — each flow control frame sent increments
// the pending count for stall detection.
// =========================================================================
[Fact]
public async Task FlowControl_pending_count_increments()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FC-INC",
        Push = true,
        DeliverSubject = "deliver.fc",
        FlowControl = true,
    });
    // One release per FC frame observed on the wire.
    var fcSignal = new SemaphoreSlim(0);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            if (Encoding.ASCII.GetString(headers.Span).Contains("FlowControl"))
                fcSignal.Release();
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // With FlowControl=true every enqueued message is followed by an FC frame.
    engine.Enqueue(consumer, MakeMessage(1));
    engine.Enqueue(consumer, MakeMessage(2));
    // Block until the delivery loop has emitted both FC frames.
    for (var i = 0; i < 2; i++)
        await fcSignal.WaitAsync(cts.Token);
    engine.StopDeliveryLoop();
    // One increment per enqueued message → at least 2.
    engine.FlowControlPendingCount.ShouldBeGreaterThanOrEqualTo(2);
}
// =========================================================================
// Test 8 — AcknowledgeFlowControl decrements the pending count
//
// Go reference: consumer.go:5495 — when the subscriber sends a flow control
// acknowledgement, the pending count is decremented.
// =========================================================================
[Fact]
public async Task FlowControl_acknowledge_decrements_count()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FC-DEC",
        Push = true,
        DeliverSubject = "deliver.fc2",
        FlowControl = true,
    });
    var fcSignal = new SemaphoreSlim(0);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            if (Encoding.ASCII.GetString(headers.Span).Contains("FlowControl"))
                fcSignal.Release();
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // Three enqueued messages queue three FC frames.
    for (var seq = 1UL; seq <= 3; seq++)
        engine.Enqueue(consumer, MakeMessage(seq));
    // Block until the delivery loop has emitted all three FC frames.
    for (var i = 0; i < 3; i++)
        await fcSignal.WaitAsync(cts.Token);
    engine.StopDeliveryLoop();
    var pendingBefore = engine.FlowControlPendingCount;
    pendingBefore.ShouldBeGreaterThan(0);
    // A single acknowledgement removes exactly one pending entry.
    engine.AcknowledgeFlowControl();
    engine.FlowControlPendingCount.ShouldBe(pendingBefore - 1);
}
// =========================================================================
// Test 9 — IsFlowControlStalled returns true when pending >= MaxFlowControlPending
//
// Go reference: consumer.go:5495 — stall detection triggers when the subscriber
// falls too far behind in acknowledging flow control messages.
// =========================================================================
[Fact]
public async Task FlowControl_stalled_when_pending_exceeds_max()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FC-STALL",
        Push = true,
        DeliverSubject = "deliver.stall",
        FlowControl = true,
    });
    var fcSignal = new SemaphoreSlim(0);
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            if (Encoding.ASCII.GetString(headers.Span).Contains("FlowControl"))
                fcSignal.Release();
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // Enqueue exactly enough messages to reach the stall threshold.
    var threshold = PushConsumerEngine.MaxFlowControlPending;
    for (var i = 1; i <= threshold; i++)
        engine.Enqueue(consumer, MakeMessage((ulong)i));
    // Block until every FC frame has been sent by the delivery loop.
    for (var i = 0; i < threshold; i++)
        await fcSignal.WaitAsync(cts.Token);
    engine.StopDeliveryLoop();
    engine.FlowControlPendingCount.ShouldBeGreaterThanOrEqualTo(threshold);
    engine.IsFlowControlStalled.ShouldBeTrue();
}
// =========================================================================
// Test 10 — AcknowledgeFlowControl never goes below zero
//
// Go reference: consumer.go:5495 — the pending count should never be negative;
// calling AcknowledgeFlowControl when count is 0 must be a no-op.
// =========================================================================
[Fact]
public void FlowControl_pending_never_negative()
{
    var engine = new PushConsumerEngine();
    // The pending count starts at zero.
    engine.FlowControlPendingCount.ShouldBe(0);
    // A first acknowledgement against an empty count must be a no-op…
    engine.AcknowledgeFlowControl();
    engine.FlowControlPendingCount.ShouldBe(0);
    // …and so must any number of further acknowledgements.
    engine.AcknowledgeFlowControl();
    engine.AcknowledgeFlowControl();
    engine.FlowControlPendingCount.ShouldBe(0);
}
}

View File

@@ -0,0 +1,184 @@
// Go: consumer.go maxDeliver config — max delivery enforcement and advisory generation
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class MaxDeliveriesTests
{
    // Test 1: MaxDeliver=0 means unlimited; many redeliveries never terminate.
    [Fact]
    public void MaxDeliver_zero_means_unlimited()
    {
        // Go: consumer.go — maxDeliver=0 disables the limit entirely.
        var processor = new AckProcessor { MaxDeliver = 0 };
        processor.Register(1, ackWaitMs: 5000);
        for (var attempt = 0; attempt < 100; attempt++)
            processor.ScheduleRedelivery(1, delayMs: 1);
        processor.PendingCount.ShouldBe(1);
        processor.ExceededCount.ShouldBe(0);
        processor.TerminatedCount.ShouldBe(0);
    }

    // Test 2: MaxDeliver=3 terminates on the 4th delivery attempt.
    [Fact]
    public void MaxDeliver_terminates_when_exceeded()
    {
        // Go: consumer.go — maxDeliver is enforced inside ScheduleRedelivery;
        // Deliveries > maxDeliver → terminate.
        var processor = new AckProcessor { MaxDeliver = 3 };
        processor.Register(1, ackWaitMs: 5000);
        // Register leaves Deliveries at 1; three more bumps take it to 4.
        processor.ScheduleRedelivery(1, delayMs: 1); // Deliveries = 2 (ok)
        processor.ScheduleRedelivery(1, delayMs: 1); // Deliveries = 3 (ok, at limit)
        processor.ScheduleRedelivery(1, delayMs: 1); // Deliveries = 4 (exceeded)
        processor.PendingCount.ShouldBe(0);
        processor.TerminatedCount.ShouldBe(1);
    }

    // Test 3: an exceeded sequence lands in the exceeded list.
    [Fact]
    public void Exceeded_sequence_added_to_list()
    {
        // Go: consumer.go — exceeded sequences are collected for advisory events.
        var processor = new AckProcessor { MaxDeliver = 1 };
        processor.Register(1, ackWaitMs: 5000);
        // Deliveries starts at 1; the next bump makes it 2, past maxDeliver=1.
        processor.ScheduleRedelivery(1, delayMs: 1);
        processor.ExceededCount.ShouldBe(1);
        processor.GetExceededSequences().ShouldContain(1UL);
    }

    // Test 4: DrainExceeded empties the list after returning the sequences.
    [Fact]
    public void DrainExceeded_clears_list()
    {
        // Go: consumer.go — drain after sending advisories to avoid duplicates.
        var processor = new AckProcessor { MaxDeliver = 1 };
        processor.Register(1, ackWaitMs: 5000);
        processor.ScheduleRedelivery(1, delayMs: 1);
        processor.ExceededCount.ShouldBe(1);
        processor.DrainExceeded();
        processor.ExceededCount.ShouldBe(0);
        processor.GetExceededSequences().ShouldBeEmpty();
    }

    // Test 5: an exceeded sequence moves into the terminated set.
    [Fact]
    public void Exceeded_message_is_terminated()
    {
        // Go: consumer.go — an exceeded sequence enters the terminated set
        // and can no longer be redelivered.
        var processor = new AckProcessor { MaxDeliver = 2 };
        processor.Register(1, ackWaitMs: 5000);
        processor.ScheduleRedelivery(1, delayMs: 1); // Deliveries = 2 (at limit)
        processor.ScheduleRedelivery(1, delayMs: 1); // Deliveries = 3 (exceeded)
        // Removed from pending; no longer redeliverable.
        processor.PendingCount.ShouldBe(0);
        processor.TerminatedCount.ShouldBe(1);
    }

    // Test 6: ProcessNak routes through ScheduleRedelivery's maxDeliver check.
    [Fact]
    public void ProcessNak_triggers_redelivery_check()
    {
        // Go: consumer.go — processNak calls ScheduleRedelivery, which
        // enforces maxDeliver.
        var processor = new AckProcessor { MaxDeliver = 2 };
        processor.Register(1, ackWaitMs: 5000);
        processor.ProcessNak(1); // Deliveries = 2 (at limit)
        processor.PendingCount.ShouldBe(1);
        processor.ExceededCount.ShouldBe(0);
        processor.ProcessNak(1); // Deliveries = 3 (exceeded)
        processor.PendingCount.ShouldBe(0);
        processor.ExceededCount.ShouldBe(1);
    }

    // Test 7: ScheduleRedelivery enforces maxDeliver regardless of call site.
    [Fact]
    public void TryGetExpired_respects_max_deliver()
    {
        // Go: consumer.go — the expiry redelivery loop also funnels through
        // ScheduleRedelivery, so maxDeliver is enforced there too. We call
        // ScheduleRedelivery directly — exactly what the expiry dispatch loop
        // does — rather than depending on wall-clock time.
        var processor = new AckProcessor { MaxDeliver = 1 };
        processor.Register(1, ackWaitMs: 5000);
        // Deliveries starts at 1; one bump makes it 2, past maxDeliver=1.
        processor.ScheduleRedelivery(1, delayMs: 1);
        processor.PendingCount.ShouldBe(0);
        processor.ExceededCount.ShouldBe(1);
    }

    // Test 8: MaxDeliver=-1 is treated as unlimited (negatives clamp to 0).
    [Fact]
    public void MaxDeliver_negative_treated_as_unlimited()
    {
        // Go: consumer.go — any non-positive maxDeliver means unlimited.
        var processor = new AckProcessor { MaxDeliver = -1 };
        processor.Register(1, ackWaitMs: 5000);
        for (var attempt = 0; attempt < 50; attempt++)
            processor.ScheduleRedelivery(1, delayMs: 1);
        processor.PendingCount.ShouldBe(1);
        processor.ExceededCount.ShouldBe(0);
        processor.MaxDeliver.ShouldBe(0); // -1 clamped to 0
    }

    // Test 9: ExceededPolicy defaults to Drop.
    [Fact]
    public void DeliveryExceededPolicy_defaults_to_drop()
    {
        // Go: consumer.go — exceeded messages are dropped by default.
        new AckProcessor().ExceededPolicy.ShouldBe(DeliveryExceededPolicy.Drop);
    }

    // Test 10: independent sequences can each exceed max deliveries.
    [Fact]
    public void Multiple_sequences_can_exceed()
    {
        // Go: consumer.go — each sequence is tracked independently; several
        // can exceed within the same window.
        var processor = new AckProcessor { MaxDeliver = 1 };
        foreach (var seq in new ulong[] { 1, 2, 3 })
            processor.Register(seq, ackWaitMs: 5000);
        // Each sequence starts at Deliveries=1; one bump each exceeds maxDeliver=1.
        foreach (var seq in new ulong[] { 1, 2, 3 })
            processor.ScheduleRedelivery(seq, delayMs: 1);
        processor.ExceededCount.ShouldBe(3);
        processor.PendingCount.ShouldBe(0);
        processor.TerminatedCount.ShouldBe(3);
        var exceeded = processor.GetExceededSequences();
        exceeded.ShouldContain(1UL);
        exceeded.ShouldContain(2UL);
        exceeded.ShouldContain(3UL);
    }
}

View File

@@ -0,0 +1,84 @@
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for priority group pin ID management.
/// Go reference: consumer.go (setPinnedTimer, assignNewPinId).
/// </summary>
public class PriorityGroupPinningTests
{
    // Helper: a manager with one consumer registered in the given group.
    private static PriorityGroupManager MakeManagerWithGroup(string group, string consumer)
    {
        var mgr = new PriorityGroupManager();
        mgr.Register(group, consumer, priority: 0);
        return mgr;
    }

    [Fact]
    public void AssignPinId_generates_unique_ids()
    {
        var mgr = MakeManagerWithGroup("group-1", "consumer-a");
        var first = mgr.AssignPinId("group-1", "consumer-a");
        var second = mgr.AssignPinId("group-1", "consumer-a");
        first.ShouldNotBeNullOrEmpty();
        second.ShouldNotBeNullOrEmpty();
        // Every assignment must mint a fresh pin ID.
        second.ShouldNotBe(first);
    }

    [Fact]
    public void ValidatePinId_accepts_current()
    {
        var mgr = MakeManagerWithGroup("group-1", "consumer-a");
        var current = mgr.AssignPinId("group-1", "consumer-a");
        mgr.ValidatePinId("group-1", current).ShouldBeTrue();
    }

    [Fact]
    public void ValidatePinId_rejects_expired()
    {
        var mgr = MakeManagerWithGroup("group-1", "consumer-a");
        var stale = mgr.AssignPinId("group-1", "consumer-a");
        var fresh = mgr.AssignPinId("group-1", "consumer-a"); // supersedes stale
        mgr.ValidatePinId("group-1", stale).ShouldBeFalse();
        mgr.ValidatePinId("group-1", fresh).ShouldBeTrue();
    }

    [Fact]
    public void UnassignPinId_clears()
    {
        var mgr = MakeManagerWithGroup("group-1", "consumer-a");
        var pin = mgr.AssignPinId("group-1", "consumer-a");
        mgr.UnassignPinId("group-1");
        mgr.ValidatePinId("group-1", pin).ShouldBeFalse();
    }

    [Fact]
    public void ValidatePinId_returns_false_for_unknown_group()
    {
        new PriorityGroupManager().ValidatePinId("unknown", "any-pin").ShouldBeFalse();
    }

    [Fact]
    public void UnassignPinId_noop_for_unknown_group()
    {
        var mgr = new PriorityGroupManager();
        // Unassigning a group that was never registered must not throw.
        Should.NotThrow(() => mgr.UnassignPinId("unknown"));
    }

    [Fact]
    public void PinId_is_22_chars()
    {
        var mgr = MakeManagerWithGroup("g1", "c1");
        mgr.AssignPinId("g1", "c1").Length.ShouldBe(22);
    }
}

View File

@@ -0,0 +1,237 @@
// Go: consumer.go:500-600 — Priority group tests for sticky consumer assignment.
// Validates that the lowest-priority-numbered consumer is "active" and that
// failover occurs correctly when consumers register/unregister.
using System.Collections.Concurrent;
using System.Text;
using NATS.Server.JetStream;
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Models;
using NATS.Server.JetStream.Storage;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class PriorityGroupTests
{
    // -------------------------------------------------------------------------
    // Test 1 — Single consumer registered is active
    //
    // Go reference: consumer.go:500 — with a single consumer in a priority
    // group, it is unconditionally the active consumer.
    // -------------------------------------------------------------------------
    [Fact]
    public void Register_SingleConsumer_IsActive()
    {
        var groups = new PriorityGroupManager();
        groups.Register("group1", "consumer-a", priority: 1);
        groups.GetActiveConsumer("group1").ShouldBe("consumer-a");
        groups.IsActive("group1", "consumer-a").ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 2 — Multiple consumers: lowest priority number wins
    //
    // Go reference: consumer.go:510 — the consumer holding the lowest priority
    // number is active. Priority 1 < priority 5, so 1 wins.
    // -------------------------------------------------------------------------
    [Fact]
    public void Register_MultipleConsumers_LowestPriorityIsActive()
    {
        var groups = new PriorityGroupManager();
        groups.Register("group1", "consumer-high", priority: 5);
        groups.Register("group1", "consumer-low", priority: 1);
        groups.Register("group1", "consumer-mid", priority: 3);
        groups.GetActiveConsumer("group1").ShouldBe("consumer-low");
        groups.IsActive("group1", "consumer-low").ShouldBeTrue();
        groups.IsActive("group1", "consumer-high").ShouldBeFalse();
        groups.IsActive("group1", "consumer-mid").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 3 — Unregister active consumer: next takes over
    //
    // Go reference: consumer.go:530 — when the active consumer disconnects,
    // the next-lowest-priority consumer becomes active (failover).
    // -------------------------------------------------------------------------
    [Fact]
    public void Unregister_ActiveConsumer_NextTakesOver()
    {
        var groups = new PriorityGroupManager();
        groups.Register("group1", "consumer-a", priority: 1);
        groups.Register("group1", "consumer-b", priority: 2);
        groups.Register("group1", "consumer-c", priority: 3);
        groups.GetActiveConsumer("group1").ShouldBe("consumer-a");
        // Drop the active consumer; the next lowest priority must take over.
        groups.Unregister("group1", "consumer-a");
        groups.GetActiveConsumer("group1").ShouldBe("consumer-b");
        groups.IsActive("group1", "consumer-b").ShouldBeTrue();
        groups.IsActive("group1", "consumer-a").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 4 — Unregister non-active consumer: active unchanged
    //
    // Go reference: consumer.go:540 — removing a non-active consumer does not
    // change the active assignment.
    // -------------------------------------------------------------------------
    [Fact]
    public void Unregister_NonActiveConsumer_ActiveUnchanged()
    {
        var groups = new PriorityGroupManager();
        groups.Register("group1", "consumer-a", priority: 1);
        groups.Register("group1", "consumer-b", priority: 2);
        groups.GetActiveConsumer("group1").ShouldBe("consumer-a");
        groups.Unregister("group1", "consumer-b");
        groups.GetActiveConsumer("group1").ShouldBe("consumer-a");
        groups.IsActive("group1", "consumer-a").ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 5 — Same priority: first registered wins
    //
    // Go reference: consumer.go:520 — when two consumers share the same
    // priority, the first to register is the active one.
    // -------------------------------------------------------------------------
    [Fact]
    public void Register_SamePriority_FirstRegisteredWins()
    {
        var groups = new PriorityGroupManager();
        groups.Register("group1", "consumer-first", priority: 1);
        groups.Register("group1", "consumer-second", priority: 1);
        groups.GetActiveConsumer("group1").ShouldBe("consumer-first");
        groups.IsActive("group1", "consumer-first").ShouldBeTrue();
        groups.IsActive("group1", "consumer-second").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 6 — Empty group returns null
    //
    // Go reference: consumer.go:550 — querying an empty or nonexistent group
    // yields nil (null).
    // -------------------------------------------------------------------------
    [Fact]
    public void GetActiveConsumer_EmptyGroup_ReturnsNull()
    {
        var groups = new PriorityGroupManager();
        groups.GetActiveConsumer("nonexistent").ShouldBeNull();
        groups.IsActive("nonexistent", "any-consumer").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 7 — Idle heartbeat sent after timeout
    //
    // Go reference: consumer.go:5222 — sendIdleHeartbeat is invoked by a
    // background timer when no data frames are delivered within HeartbeatMs.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task IdleHeartbeat_SentAfterTimeout()
    {
        var engine = new PushConsumerEngine();
        var consumer = new ConsumerHandle("TEST-STREAM", new ConsumerConfig
        {
            DurableName = "HB-CONSUMER",
            Push = true,
            DeliverSubject = "deliver.hb",
            HeartbeatMs = 50, // 50ms heartbeat interval
        });
        var sent = new ConcurrentBag<(string Subject, string ReplyTo, byte[] Headers, byte[] Payload)>();
        using var cts = new CancellationTokenSource();
        engine.StartDeliveryLoop(consumer,
            (subject, replyTo, headers, payload, _) =>
            {
                sent.Add((subject, replyTo, headers.ToArray(), payload.ToArray()));
                return ValueTask.CompletedTask;
            },
            cts.Token);
        // Sleep past several heartbeat intervals with no data flowing.
        await Task.Delay(200);
        engine.StopDeliveryLoop();
        engine.IdleHeartbeatsSent.ShouldBeGreaterThan(0);
        // Every heartbeat frame must have gone out on the deliver subject.
        var heartbeats = sent
            .Where(s => Encoding.ASCII.GetString(s.Headers).Contains("Idle Heartbeat"))
            .ToList();
        heartbeats.Count.ShouldBeGreaterThan(0);
        heartbeats.ShouldAllBe(m => m.Subject == "deliver.hb");
    }

    // -------------------------------------------------------------------------
    // Test 8 — Idle heartbeat resets on data delivery
    //
    // Go reference: consumer.go:5222 — the idle heartbeat timer is reset
    // whenever a data frame is delivered, so heartbeats only fire during
    // periods of inactivity.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task IdleHeartbeat_ResetOnDataDelivery()
    {
        var engine = new PushConsumerEngine();
        var consumer = new ConsumerHandle("TEST-STREAM", new ConsumerConfig
        {
            DurableName = "HB-RESET",
            Push = true,
            DeliverSubject = "deliver.hbreset",
            HeartbeatMs = 100, // 100ms heartbeat interval
        });
        var dataFrames = new ConcurrentBag<string>();
        var heartbeatFrames = new ConcurrentBag<string>();
        using var cts = new CancellationTokenSource();
        engine.StartDeliveryLoop(consumer,
            (subject, _, headers, _, _) =>
            {
                var headerText = Encoding.ASCII.GetString(headers.Span);
                if (headerText.Contains("Idle Heartbeat"))
                    heartbeatFrames.Add(subject);
                else
                    dataFrames.Add(subject);
                return ValueTask.CompletedTask;
            },
            cts.Token);
        // Keep data flowing faster than the 100ms heartbeat interval so the
        // idle timer is reset on every delivery.
        for (var seq = 1UL; seq <= 5; seq++)
        {
            engine.Enqueue(consumer, new StoredMessage
            {
                Sequence = seq,
                Subject = "test.data",
                Payload = Encoding.UTF8.GetBytes($"msg-{seq - 1}"),
                TimestampUtc = DateTime.UtcNow,
            });
            await Task.Delay(30); // well inside the 100ms heartbeat window
        }
        // Allow a short grace period after the last message.
        await Task.Delay(50);
        engine.StopDeliveryLoop();
        // Data frames went out...
        dataFrames.Count.ShouldBeGreaterThan(0);
        // ...but the idle-timer heartbeat counter stayed at zero: data kept
        // arriving within each heartbeat interval, resetting the timer.
        // (Queue-based heartbeat frames from Enqueue may still appear; only
        // the timer-driven counter is asserted here.)
        engine.IdleHeartbeatsSent.ShouldBe(0);
    }
}

View File

@@ -0,0 +1,196 @@
// Go: consumer.go — Pull consumer timeout enforcement and compiled filter tests.
// ExpiresMs support per consumer.go pull request handling.
// CompiledFilter optimizes multi-subject filter matching for consumers.
using System.Text;
using NATS.Server.JetStream;
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Models;
using NATS.Server.JetStream.Storage;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class PullConsumerTimeoutTests
{
    // Helper: a stream over the given store with the standard test config.
    private static StreamHandle MakeStream(MemStore store)
    {
        var config = new StreamConfig { Name = "TEST", Subjects = ["test.>"] };
        return new StreamHandle(config, store);
    }

    // Helper: a consumer handle, defaulting to durable "C1".
    private static ConsumerHandle MakeConsumer(ConsumerConfig? config = null)
    {
        return new ConsumerHandle("TEST", config ?? new ConsumerConfig { DurableName = "C1" });
    }

    // -------------------------------------------------------------------------
    // Test 1 — ExpiresMs returns partial batch when timeout fires
    //
    // Go reference: consumer.go — a pull fetch with expires hands back whatever
    // messages are available when the timeout fires, even if the batch is
    // not full.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task FetchAsync_ExpiresMs_ReturnsPartialBatch()
    {
        var store = new MemStore();
        var stream = MakeStream(store);
        // Two stored messages against a requested batch of 10.
        await store.AppendAsync("test.a", Encoding.UTF8.GetBytes("msg1"), CancellationToken.None);
        await store.AppendAsync("test.b", Encoding.UTF8.GetBytes("msg2"), CancellationToken.None);
        var engine = new PullConsumerEngine();
        var request = new PullFetchRequest { Batch = 10, ExpiresMs = 100 };
        var result = await engine.FetchAsync(stream, MakeConsumer(), request, CancellationToken.None);
        // The partial batch contains exactly the two available messages.
        result.Messages.Count.ShouldBe(2);
        result.Messages[0].Subject.ShouldBe("test.a");
        result.Messages[1].Subject.ShouldBe("test.b");
    }

    // -------------------------------------------------------------------------
    // Test 2 — ExpiresMs sets TimedOut = true on partial result
    //
    // Go reference: consumer.go — when a pull request expires before the batch
    // is filled, the response signals that a timeout occurred.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task FetchAsync_ExpiresMs_ReturnsTimedOutTrue()
    {
        // An empty store: the fetch can only expire with no messages.
        var stream = MakeStream(new MemStore());
        var engine = new PullConsumerEngine();
        var request = new PullFetchRequest { Batch = 5, ExpiresMs = 50 };
        var result = await engine.FetchAsync(stream, MakeConsumer(), request, CancellationToken.None);
        result.TimedOut.ShouldBeTrue();
        result.Messages.Count.ShouldBe(0);
    }

    // -------------------------------------------------------------------------
    // Test 3 — No ExpiresMs waits for full batch (returns what's available)
    //
    // Go reference: consumer.go — without expires, the fetch returns available
    // messages up to batch size with no timeout constraint.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task FetchAsync_NoExpires_WaitsForFullBatch()
    {
        var store = new MemStore();
        var stream = MakeStream(store);
        foreach (var (subject, body) in new[] { ("test.a", "msg1"), ("test.b", "msg2"), ("test.c", "msg3") })
            await store.AppendAsync(subject, Encoding.UTF8.GetBytes(body), CancellationToken.None);
        var engine = new PullConsumerEngine();
        var request = new PullFetchRequest { Batch = 3, ExpiresMs = 0 }; // no timeout
        var result = await engine.FetchAsync(stream, MakeConsumer(), request, CancellationToken.None);
        result.Messages.Count.ShouldBe(3);
        result.TimedOut.ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 4 — CompiledFilter with no filters matches everything
    //
    // Go reference: consumer.go — a consumer without filter subjects receives
    // every message in the stream.
    // -------------------------------------------------------------------------
    [Fact]
    public void CompiledFilter_NoFilters_MatchesEverything()
    {
        var matchAll = new CompiledFilter([]);
        matchAll.Matches("test.a").ShouldBeTrue();
        matchAll.Matches("foo.bar.baz").ShouldBeTrue();
        matchAll.Matches("anything").ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 5 — CompiledFilter with single exact filter matches only that subject
    //
    // Go reference: consumer.go — single filter_subject matches via MatchLiteral.
    // -------------------------------------------------------------------------
    [Fact]
    public void CompiledFilter_SingleFilter_MatchesExact()
    {
        var exact = new CompiledFilter(["test.specific"]);
        exact.Matches("test.specific").ShouldBeTrue();
        exact.Matches("test.other").ShouldBeFalse();
        exact.Matches("test").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 6 — CompiledFilter with single wildcard filter
    //
    // Go reference: consumer.go — wildcard filter_subject supports * (single
    // token) and > (multi-token) wildcards via MatchLiteral.
    // -------------------------------------------------------------------------
    [Fact]
    public void CompiledFilter_SingleWildcard_MatchesPattern()
    {
        // '*' matches exactly one token.
        var singleToken = new CompiledFilter(["test.*"]);
        singleToken.Matches("test.a").ShouldBeTrue();
        singleToken.Matches("test.b").ShouldBeTrue();
        singleToken.Matches("test.a.b").ShouldBeFalse();
        singleToken.Matches("other.a").ShouldBeFalse();
        // '>' matches one or more trailing tokens.
        var multiToken = new CompiledFilter(["test.>"]);
        multiToken.Matches("test.a").ShouldBeTrue();
        multiToken.Matches("test.a.b").ShouldBeTrue();
        multiToken.Matches("test.a.b.c").ShouldBeTrue();
        multiToken.Matches("other.a").ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 7 — CompiledFilter with multiple filters matches any
    //
    // Go reference: consumer.go — filter_subjects (plural) matches if ANY of
    // the patterns match; exact subjects go through a HashSet, wildcards
    // through MatchLiteral.
    // -------------------------------------------------------------------------
    [Fact]
    public void CompiledFilter_MultipleFilters_MatchesAny()
    {
        var filter = new CompiledFilter(["orders.us", "orders.eu", "events.>"]);
        // Exact matches
        filter.Matches("orders.us").ShouldBeTrue();
        filter.Matches("orders.eu").ShouldBeTrue();
        // Wildcard matches
        filter.Matches("events.created").ShouldBeTrue();
        filter.Matches("events.updated.v2").ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 8 — CompiledFilter with multiple filters rejects non-matching
    //
    // Go reference: consumer.go — subjects matching none of the filter
    // patterns are excluded from delivery.
    // -------------------------------------------------------------------------
    [Fact]
    public void CompiledFilter_MultipleFilters_RejectsNonMatching()
    {
        var filter = new CompiledFilter(["orders.us", "orders.eu", "events.>"]);
        filter.Matches("orders.jp").ShouldBeFalse();
        filter.Matches("billing.us").ShouldBeFalse();
        filter.Matches("events").ShouldBeFalse(); // ">" requires at least one token after
        filter.Matches("random.subject").ShouldBeFalse();
    }
}

View File

@@ -0,0 +1,317 @@
// Go: consumer.go (dispatchToDeliver ~line 5040, sendFlowControl ~line 5495,
// sendIdleHeartbeat ~line 5222, rate-limit logic ~line 5120)
using System.Collections.Concurrent;
using System.Text;
using NATS.Server.JetStream;
using NATS.Server.JetStream.Consumers;
using NATS.Server.JetStream.Models;
using NATS.Server.JetStream.Storage;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
public class PushConsumerDeliveryTests
{
// Helper: wrap the given config in a ConsumerHandle bound to the test stream.
private static ConsumerHandle MakeConsumer(ConsumerConfig config)
{
    return new ConsumerHandle("TEST-STREAM", config);
}
// Helper: build a minimal StoredMessage for the given sequence number.
private static StoredMessage MakeMessage(ulong seq, string subject = "test.subject", string payload = "hello")
{
    return new StoredMessage
    {
        Sequence = seq,
        Subject = subject,
        Payload = Encoding.UTF8.GetBytes(payload),
        TimestampUtc = DateTime.UtcNow,
    };
}
// -------------------------------------------------------------------------
// Test 1 — Delivery loop sends messages in FIFO order
//
// Go reference: consumer.go:5040 — dispatchToDeliver processes the outbound
// queue sequentially; messages must arrive in the order they were enqueued.
// -------------------------------------------------------------------------
[Fact]
public async Task DeliveryLoop_sends_messages_in_FIFO_order()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "PUSH",
        Push = true,
        DeliverSubject = "deliver.test",
    });
    engine.Enqueue(consumer, MakeMessage(1, payload: "first"));
    engine.Enqueue(consumer, MakeMessage(2, payload: "second"));
    engine.Enqueue(consumer, MakeMessage(3, payload: "third"));
    var received = new ConcurrentQueue<(string subject, ReadOnlyMemory<byte> payload)>();
    // FIX: dispose the CancellationTokenSource (it was previously leaked);
    // every other test in this file uses a `using` declaration for its CTS.
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (subj, _, _, payload, ct) =>
        {
            received.Enqueue((subj, payload));
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // Poll until all three messages are delivered. On the 5s timeout,
    // Task.Delay(..., cts.Token) throws OperationCanceledException, failing
    // the test fast instead of asserting against a partial queue.
    while (received.Count < 3 && !cts.IsCancellationRequested)
        await Task.Delay(5, cts.Token);
    engine.StopDeliveryLoop();
    received.Count.ShouldBe(3);
    var items = received.ToArray();
    // FIFO: payloads must come back in enqueue order.
    Encoding.UTF8.GetString(items[0].payload.Span).ShouldBe("first");
    Encoding.UTF8.GetString(items[1].payload.Span).ShouldBe("second");
    Encoding.UTF8.GetString(items[2].payload.Span).ShouldBe("third");
}
// -------------------------------------------------------------------------
// Test 2 — Rate limiting delays delivery
//
// Go reference: consumer.go:5120 — the rate limiter delays sending when
// AvailableAtUtc is in the future. A frame whose AvailableAtUtc is 150ms
// ahead must not be delivered until that deadline has passed.
// The delivery loop honours frame.AvailableAtUtc directly; this test
// injects a frame with a known future timestamp to verify that behaviour.
// -------------------------------------------------------------------------
[Fact]
public async Task DeliveryLoop_rate_limiting_delays_delivery()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "RATE",
        Push = true,
        DeliverSubject = "deliver.rate",
    });
    // Inject a frame with AvailableAtUtc 150ms in the future to simulate
    // what Enqueue() computes when RateLimitBps produces a delay.
    var msg = MakeMessage(1);
    consumer.PushFrames.Enqueue(new PushFrame
    {
        IsData = true,
        Message = msg,
        AvailableAtUtc = DateTime.UtcNow.AddMilliseconds(150),
    });
    var delivered = new TaskCompletionSource<DateTime>();
    // FIX: dispose the CancellationTokenSource (it was previously leaked);
    // matches the `using` pattern used by the other tests in this file.
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    var startedAt = DateTime.UtcNow;
    engine.StartDeliveryLoop(consumer,
        async (_, _, _, _, _) =>
        {
            delivered.TrySetResult(DateTime.UtcNow);
            await ValueTask.CompletedTask;
        },
        cts.Token);
    var deliveredAt = await delivered.Task.WaitAsync(TimeSpan.FromSeconds(5));
    engine.StopDeliveryLoop();
    // The loop must have waited at least ~100ms for AvailableAtUtc to pass
    // (150ms scheduled; the 100ms lower bound leaves slack for timer coarseness).
    var elapsed = deliveredAt - startedAt;
    elapsed.TotalMilliseconds.ShouldBeGreaterThan(100);
}
// -------------------------------------------------------------------------
// Test 3 — Heartbeat frames are sent
//
// Go reference: consumer.go:5222 — sendIdleHeartbeat emits a
// "NATS/1.0 100 Idle Heartbeat" status frame on the deliver subject.
// -------------------------------------------------------------------------
[Fact]
public async Task DeliveryLoop_sends_heartbeat_frames()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "HB",
        Push = true,
        DeliverSubject = "deliver.hb",
        HeartbeatMs = 100,
    });
    // Enqueue one data message; HeartbeatMs > 0 causes Enqueue to also
    // append a heartbeat frame immediately after.
    engine.Enqueue(consumer, MakeMessage(1));
    var headerSnapshots = new ConcurrentBag<ReadOnlyMemory<byte>>();
    // using: a CTS constructed with a timeout owns a timer — dispose it.
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            headerSnapshots.Add(headers);
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // Wait for both the data frame and the heartbeat frame. Task.Delay is
    // deliberately not given cts.Token: on timeout the assertions below
    // report the shortfall instead of throwing TaskCanceledException.
    while (headerSnapshots.Count < 2 && !cts.IsCancellationRequested)
        await Task.Delay(5);
    engine.StopDeliveryLoop();
    headerSnapshots.Count.ShouldBeGreaterThanOrEqualTo(2);
    // At least one frame must contain "Idle Heartbeat"
    var anyHeartbeat = headerSnapshots.Any(h =>
        Encoding.ASCII.GetString(h.Span).Contains("Idle Heartbeat"));
    anyHeartbeat.ShouldBeTrue();
}
// -------------------------------------------------------------------------
// Test 4 — Flow control frames are sent
//
// Go reference: consumer.go:5495 — sendFlowControl sends a status frame
// "NATS/1.0 100 FlowControl Request" to the deliver subject.
// -------------------------------------------------------------------------
[Fact]
public async Task DeliveryLoop_sends_flow_control_frames()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "FC",
        Push = true,
        DeliverSubject = "deliver.fc",
        FlowControl = true,
        HeartbeatMs = 100, // Go requires heartbeat when flow control is on
    });
    engine.Enqueue(consumer, MakeMessage(1));
    var headerSnapshots = new ConcurrentBag<ReadOnlyMemory<byte>>();
    // using: a CTS constructed with a timeout owns a timer — dispose it.
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            headerSnapshots.Add(headers);
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // data + flow-control + heartbeat = 3 frames. Task.Delay deliberately
    // does not take cts.Token so a timeout surfaces as a failed assertion
    // below, not as TaskCanceledException from the wait loop.
    while (headerSnapshots.Count < 3 && !cts.IsCancellationRequested)
        await Task.Delay(5);
    engine.StopDeliveryLoop();
    var anyFlowControl = headerSnapshots.Any(h =>
        Encoding.ASCII.GetString(h.Span).Contains("FlowControl"));
    anyFlowControl.ShouldBeTrue();
}
// -------------------------------------------------------------------------
// Test 5 — Delivery stops on cancellation
//
// Go reference: consumer.go — the delivery goroutine exits when the qch
// (quit channel) is signalled, which maps to CancellationToken here.
// -------------------------------------------------------------------------
[Fact]
public async Task DeliveryLoop_stops_on_cancellation()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "CANCEL",
        Push = true,
        DeliverSubject = "deliver.cancel",
    });
    var deliveryCount = 0;
    // using: dispose the CTS when the test completes.
    using var cts = new CancellationTokenSource();
    engine.StartDeliveryLoop(consumer,
        async (_, _, _, _, _) =>
        {
            Interlocked.Increment(ref deliveryCount);
            await ValueTask.CompletedTask;
        },
        cts.Token);
    // Cancel immediately — nothing enqueued so delivery count must stay 0
    await cts.CancelAsync();
    engine.StopDeliveryLoop();
    // Brief settle — no messages were queued so nothing should have been delivered
    await Task.Delay(20);
    // Volatile.Read pairs with the Interlocked writes on the loop thread.
    Volatile.Read(ref deliveryCount).ShouldBe(0);
}
// -------------------------------------------------------------------------
// Test 6 — Data frame headers contain JetStream metadata
//
// Go reference: stream.go:586 — JSSequence = "Nats-Sequence",
// JSTimeStamp = "Nats-Time-Stamp", JSSubject = "Nats-Subject"
// -------------------------------------------------------------------------
[Fact]
public async Task DeliveryLoop_data_frame_headers_contain_jetstream_metadata()
{
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "META",
        Push = true,
        DeliverSubject = "deliver.meta",
    });
    var msg = MakeMessage(42, subject: "events.created");
    engine.Enqueue(consumer, msg);
    ReadOnlyMemory<byte>? capturedHeaders = null;
    // using: a CTS constructed with a timeout owns a timer — dispose it.
    using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    // RunContinuationsAsynchronously keeps the awaiting test body off the
    // delivery-loop thread when TrySetResult fires.
    var tcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    engine.StartDeliveryLoop(consumer,
        async (_, _, headers, _, _) =>
        {
            capturedHeaders = headers;
            tcs.TrySetResult(true);
            await ValueTask.CompletedTask;
        },
        cts.Token);
    await tcs.Task.WaitAsync(TimeSpan.FromSeconds(5));
    engine.StopDeliveryLoop();
    capturedHeaders.ShouldNotBeNull();
    var headerText = Encoding.ASCII.GetString(capturedHeaders!.Value.Span);
    headerText.ShouldContain("Nats-Sequence: 42");
    headerText.ShouldContain("Nats-Subject: events.created");
    headerText.ShouldContain("Nats-Time-Stamp:");
}
// -------------------------------------------------------------------------
// Test 7 — DeliverSubject property is set when StartDeliveryLoop is called
//
// Go reference: consumer.go:1131 — dsubj is set from cfg.DeliverSubject.
// -------------------------------------------------------------------------
[Fact]
public void DeliverSubject_property_is_set_from_consumer_config()
{
    const string subject = "my.deliver.subject";
    var engine = new PushConsumerEngine();
    var consumer = MakeConsumer(new ConsumerConfig
    {
        DurableName = "DS",
        Push = true,
        DeliverSubject = subject,
    });
    using var cts = new CancellationTokenSource();

    // A no-op delivery callback — only subject propagation is under test.
    engine.StartDeliveryLoop(consumer,
        static (_, _, _, _, _) => ValueTask.CompletedTask,
        cts.Token);

    engine.DeliverSubject.ShouldBe(subject);
    engine.StopDeliveryLoop();
}
}

View File

@@ -0,0 +1,113 @@
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for the new PriorityQueue-based RedeliveryTracker features.
/// Go reference: consumer.go (rdq redelivery queue).
/// </summary>
public class RedeliveryTrackerPriorityQueueTests
{
    [Fact]
    public void Schedule_and_get_due_returns_expired()
    {
        var rt = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);
        // One deadline already in the past, one far in the future.
        rt.Schedule(1, DateTimeOffset.UtcNow.AddMilliseconds(-100));
        rt.Schedule(2, DateTimeOffset.UtcNow.AddSeconds(60));

        var expired = rt.GetDue(DateTimeOffset.UtcNow).ToList();

        // Only the past-deadline sequence is surfaced.
        expired.ShouldBe(new[] { 1UL });
    }

    [Fact]
    public void Acknowledge_removes_from_queue()
    {
        var rt = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);
        rt.Schedule(1, DateTimeOffset.UtcNow.AddMilliseconds(-100));

        rt.Acknowledge(1);

        // An acked sequence never comes back, even past its deadline.
        rt.GetDue(DateTimeOffset.UtcNow).ToList().ShouldBeEmpty();
    }

    [Fact]
    public void IsMaxDeliveries_returns_true_at_threshold()
    {
        var rt = new RedeliveryTracker(maxDeliveries: 3, ackWaitMs: 1000);

        // Two deliveries: still below the threshold of 3.
        for (var i = 0; i < 2; i++)
            rt.IncrementDeliveryCount(1);
        rt.IsMaxDeliveries(1).ShouldBeFalse();

        // The third delivery reaches the threshold.
        rt.IncrementDeliveryCount(1);
        rt.IsMaxDeliveries(1).ShouldBeTrue();
    }

    [Fact]
    public void Backoff_schedule_uses_delivery_count()
    {
        var schedule = new long[] { 100, 500, 2000 };
        var rt = new RedeliveryTracker(maxDeliveries: 10, ackWaitMs: 1000, backoffMs: schedule);

        // Delivery counts 1 and 2 index directly into the backoff array;
        // counts past the end clamp to the last entry.
        rt.GetBackoffDelay(deliveryCount: 1).ShouldBe(100L);
        rt.GetBackoffDelay(deliveryCount: 2).ShouldBe(500L);
        rt.GetBackoffDelay(deliveryCount: 4).ShouldBe(2000L);
    }

    [Fact]
    public void GetDue_returns_in_deadline_order()
    {
        var rt = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);
        var now = DateTimeOffset.UtcNow;
        // Scheduled out of order on purpose; GetDue must order by deadline.
        rt.Schedule(3, now.AddMilliseconds(-300));
        rt.Schedule(1, now.AddMilliseconds(-100));
        rt.Schedule(2, now.AddMilliseconds(-200));

        var due = rt.GetDue(now).ToList();

        // Earliest deadline first: 3, then 2, then 1.
        due.ShouldBe(new[] { 3UL, 2UL, 1UL });
    }

    [Fact]
    public void GetBackoffDelay_with_no_backoff_returns_ackWait()
    {
        var rt = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 2000);

        // Without a backoff schedule every delivery count maps to ackWaitMs.
        rt.GetBackoffDelay(1).ShouldBe(2000L);
        rt.GetBackoffDelay(5).ShouldBe(2000L);
    }

    [Fact]
    public void IncrementDeliveryCount_for_untracked_seq_starts_at_one()
    {
        var rt = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 1000);

        rt.IncrementDeliveryCount(42);

        // The first increment yields a count of 1 — well below the max of 5.
        rt.IsMaxDeliveries(42).ShouldBeFalse();
    }

    [Fact]
    public void Acknowledge_also_clears_delivery_count()
    {
        var rt = new RedeliveryTracker(maxDeliveries: 3, ackWaitMs: 1000);
        rt.IncrementDeliveryCount(1);
        rt.IncrementDeliveryCount(1);

        rt.Acknowledge(1);

        // Ack wipes the per-sequence delivery counter as well.
        rt.IsMaxDeliveries(1).ShouldBeFalse();
    }
}

View File

@@ -0,0 +1,198 @@
// Go: consumer.go (trackPending ~line 5540, processNak, rdq/rdc map,
// addToRedeliverQueue, maxdeliver check)
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for RedeliveryTracker's backoff-array scheduling surface.
/// Go reference: consumer.go (trackPending ~line 5540, processNak, rdq/rdc map,
/// addToRedeliverQueue, maxdeliver check).
/// NOTE(review): this class drives a backoff-array ctor and
/// Schedule(seq, deliveryCount) overloads, while RedeliveryTrackerPriorityQueueTests
/// uses (maxDeliveries, ackWaitMs) and Schedule(seq, deadline) — confirm both
/// overload sets exist on the tracker.
/// </summary>
public class RedeliveryTrackerTests
{
    // -------------------------------------------------------------------------
    // Test 1 — Backoff array clamping at last entry for high delivery counts
    //
    // Go reference: consumer.go — backoff index = min(deliveries-1, len(backoff)-1)
    // so that sequences with delivery counts past the array length use the last
    // backoff value rather than going out of bounds.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Schedule_clamps_backoff_at_last_entry_for_high_delivery_count()
    {
        var tracker = new RedeliveryTracker([1, 5000]);
        // delivery 1 → backoff[0] = 1ms
        tracker.Schedule(seq: 1, deliveryCount: 1);
        await Task.Delay(10);
        tracker.GetDue().ShouldContain(1UL);
        tracker.Acknowledge(1);
        // delivery 3 → index clamps to 1 → backoff[1] = 5000ms; checking it is
        // NOT due immediately is race-free because 5s cannot elapse here.
        tracker.Schedule(seq: 1, deliveryCount: 3);
        tracker.GetDue().ShouldNotContain(1UL);
    }

    // -------------------------------------------------------------------------
    // Test 2 — GetDue returns only entries whose deadline has passed
    //
    // Go reference: consumer.go — rdq items are eligible for redelivery only
    // once their scheduled deadline has elapsed.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task GetDue_returns_only_expired_entries()
    {
        var tracker = new RedeliveryTracker([1, 5000]);
        // 5000ms backoff → cannot expire within the test window, so asserting
        // it is not yet due is race-free. (The original version also asserted
        // that the 1ms-backoff entry was not due immediately after scheduling,
        // which flaked whenever >1ms elapsed before the check.)
        tracker.Schedule(seq: 20, deliveryCount: 2);
        tracker.GetDue().ShouldNotContain(20UL);
        // 1ms backoff → expires almost immediately
        tracker.Schedule(seq: 10, deliveryCount: 1);
        await Task.Delay(15);
        var due = tracker.GetDue();
        due.ShouldContain(10UL);
        due.ShouldNotContain(20UL);
    }

    // -------------------------------------------------------------------------
    // Test 3 — Acknowledge removes the sequence from tracking
    //
    // Go reference: consumer.go — acking a sequence removes it from pending map
    // so it is never surfaced by GetDue again.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Acknowledge_removes_sequence_from_tracking()
    {
        var tracker = new RedeliveryTracker([1]);
        tracker.Schedule(seq: 5, deliveryCount: 1);
        await Task.Delay(10);
        tracker.GetDue().ShouldContain(5UL);
        tracker.Acknowledge(5);
        tracker.IsTracking(5).ShouldBeFalse();
        tracker.GetDue().ShouldNotContain(5UL);
        tracker.TrackedCount.ShouldBe(0);
    }

    // -------------------------------------------------------------------------
    // Test 4 — IsMaxDeliveries returns true when threshold is reached
    //
    // Go reference: consumer.go — when rdc[sseq] >= MaxDeliver the sequence is
    // dropped from redelivery and never surfaced again.
    // -------------------------------------------------------------------------
    [Fact]
    public void IsMaxDeliveries_returns_true_when_delivery_count_meets_threshold()
    {
        var tracker = new RedeliveryTracker([100]);
        tracker.Schedule(seq: 7, deliveryCount: 3);
        tracker.IsMaxDeliveries(7, maxDeliver: 3).ShouldBeTrue();
        tracker.IsMaxDeliveries(7, maxDeliver: 4).ShouldBeFalse();
        tracker.IsMaxDeliveries(7, maxDeliver: 2).ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 5 — IsMaxDeliveries returns false when maxDeliver is 0 (unlimited)
    //
    // Go reference: consumer.go — MaxDeliver <= 0 means unlimited redeliveries.
    // -------------------------------------------------------------------------
    [Fact]
    public void IsMaxDeliveries_returns_false_when_maxDeliver_is_zero()
    {
        var tracker = new RedeliveryTracker([100]);
        tracker.Schedule(seq: 99, deliveryCount: 1000);
        tracker.IsMaxDeliveries(99, maxDeliver: 0).ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 6 — Empty backoff falls back to ackWait
    //
    // Go reference: consumer.go — when BackOff is empty the ack-wait duration is
    // used as the redelivery delay.
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Schedule_with_empty_backoff_falls_back_to_ackWait()
    {
        // Empty backoff array → fall back to ackWaitMs
        var tracker = new RedeliveryTracker([]);
        tracker.Schedule(seq: 1, deliveryCount: 1, ackWaitMs: 1);
        await Task.Delay(10);
        tracker.GetDue().ShouldContain(1UL);
    }

    // -------------------------------------------------------------------------
    // Test 7 — Empty backoff with large ackWait does not expire prematurely
    // -------------------------------------------------------------------------
    [Fact]
    public void Schedule_with_empty_backoff_and_large_ackWait_does_not_expire()
    {
        var tracker = new RedeliveryTracker([]);
        // 5000ms ack wait → race-free to assert "not yet due" immediately.
        tracker.Schedule(seq: 2, deliveryCount: 1, ackWaitMs: 5000);
        tracker.GetDue().ShouldNotContain(2UL);
    }

    // -------------------------------------------------------------------------
    // Test 8 — Schedule returns the deadline UTC time
    //
    // Go reference: consumer.go:5540 — trackPending stores the computed deadline.
    // -------------------------------------------------------------------------
    [Fact]
    public void Schedule_returns_deadline_in_the_future()
    {
        var tracker = new RedeliveryTracker([100]);
        var before = DateTime.UtcNow;
        var deadline = tracker.Schedule(seq: 3, deliveryCount: 1);
        var after = DateTime.UtcNow;
        deadline.ShouldBeGreaterThanOrEqualTo(before);
        // Deadline (now + 100ms backoff) should still be ahead of 'after';
        // holds as long as <100ms elapse between the two statements above.
        (deadline - after).TotalMilliseconds.ShouldBeGreaterThan(0);
    }

    // -------------------------------------------------------------------------
    // Test 9 — Multiple sequences tracked independently
    // -------------------------------------------------------------------------
    [Fact]
    public async Task Multiple_sequences_are_tracked_independently()
    {
        var tracker = new RedeliveryTracker([1, 5000]);
        tracker.Schedule(seq: 1, deliveryCount: 1); // 1ms → expires soon
        tracker.Schedule(seq: 2, deliveryCount: 2); // 5000ms → won't expire
        tracker.TrackedCount.ShouldBe(2);
        await Task.Delay(15);
        var due = tracker.GetDue();
        due.ShouldContain(1UL);
        due.ShouldNotContain(2UL);
        tracker.Acknowledge(1);
        tracker.TrackedCount.ShouldBe(1);
    }

    // -------------------------------------------------------------------------
    // Test 10 — IsMaxDeliveries returns false for untracked sequence
    // -------------------------------------------------------------------------
    [Fact]
    public void IsMaxDeliveries_returns_false_for_untracked_sequence()
    {
        var tracker = new RedeliveryTracker([100]);
        tracker.IsMaxDeliveries(999, maxDeliver: 1).ShouldBeFalse();
    }
}

View File

@@ -0,0 +1,174 @@
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for SampleTracker: sample frequency parsing and stochastic latency sampling.
/// Go reference: consumer.go sampleFrequency, shouldSample, parseSampleFrequency.
/// </summary>
public class SampleModeTests
{
    // --- ParseSampleFrequency ---

    [Fact]
    public void ParseSampleFrequency_one_percent()
        => SampleTracker.ParseSampleFrequency("1%").ShouldBe(0.01, 1e-9);

    [Fact]
    public void ParseSampleFrequency_fifty_percent()
        => SampleTracker.ParseSampleFrequency("50%").ShouldBe(0.5, 1e-9);

    [Fact]
    public void ParseSampleFrequency_hundred_percent()
        => SampleTracker.ParseSampleFrequency("100%").ShouldBe(1.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_zero()
        => SampleTracker.ParseSampleFrequency("0%").ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_no_percent_sign()
        => SampleTracker.ParseSampleFrequency("25").ShouldBe(0.25, 1e-9);

    [Fact]
    public void ParseSampleFrequency_empty_string()
        => SampleTracker.ParseSampleFrequency("").ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_null()
        => SampleTracker.ParseSampleFrequency(null).ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_invalid()
        => SampleTracker.ParseSampleFrequency("abc").ShouldBe(0.0, 1e-9);

    [Fact]
    public void ParseSampleFrequency_over_100_clamped()
        => SampleTracker.ParseSampleFrequency("200%").ShouldBe(1.0, 1e-9);

    // --- ShouldSample ---

    [Fact]
    public void ShouldSample_rate_100_always_samples()
    {
        var always = new SampleTracker(1.0);
        foreach (var _ in Enumerable.Range(0, 20))
            always.ShouldSample().ShouldBeTrue();
    }

    [Fact]
    public void ShouldSample_rate_0_never_samples()
    {
        var never = new SampleTracker(0.0);
        foreach (var _ in Enumerable.Range(0, 20))
            never.ShouldSample().ShouldBeFalse();
    }

    [Fact]
    public void ShouldSample_increments_total_deliveries()
    {
        var st = new SampleTracker(0.5);
        st.TotalDeliveries.ShouldBe(0L);
        // Every call bumps the delivery counter regardless of sampling outcome.
        for (var expected = 1L; expected <= 3L; expected++)
        {
            st.ShouldSample();
            st.TotalDeliveries.ShouldBe(expected);
        }
    }

    [Fact]
    public void ShouldSample_stochastic_with_seeded_random()
    {
        // Seed the tracker's RNG and a mirror RNG identically so the sampling
        // decisions are fully deterministic and can be predicted exactly.
        var st = new SampleTracker(0.5, new Random(42));
        var mirror = new Random(42);

        var expected = Enumerable.Range(0, 10)
            .Select(_ => mirror.NextDouble() < 0.5)
            .ToArray();
        var actual = Enumerable.Range(0, 10)
            .Select(_ => st.ShouldSample())
            .ToArray();

        actual.ShouldBe(expected);
        st.TotalDeliveries.ShouldBe(10L);
    }

    [Fact]
    public void RecordLatency_captures_all_fields()
    {
        const ulong seq = 7UL;
        const string subject = "orders.new";
        var st = new SampleTracker(1.0);
        var latency = TimeSpan.FromMilliseconds(42);

        var lowerBound = DateTime.UtcNow;
        var sample = st.RecordLatency(latency, seq, subject);
        var upperBound = DateTime.UtcNow;

        sample.Sequence.ShouldBe(seq);
        sample.Subject.ShouldBe(subject);
        sample.DeliveryLatency.ShouldBe(latency);
        // The timestamp must fall inside the window bracketing the call.
        sample.SampledAtUtc.ShouldBeGreaterThanOrEqualTo(lowerBound);
        sample.SampledAtUtc.ShouldBeLessThanOrEqualTo(upperBound);
    }

    [Fact]
    public void SampleCount_tracks_sampled_only()
    {
        // Rate 1.0 — every delivery is also a sample.
        var allSampled = new SampleTracker(1.0);
        foreach (var _ in Enumerable.Range(0, 5))
            allSampled.ShouldSample();
        allSampled.SampleCount.ShouldBe(5L);
        allSampled.TotalDeliveries.ShouldBe(5L);

        // Rate 0.0 — deliveries are counted but never sampled.
        var noneSampled = new SampleTracker(0.0);
        foreach (var _ in Enumerable.Range(0, 5))
            noneSampled.ShouldSample();
        noneSampled.SampleCount.ShouldBe(0L);
        noneSampled.TotalDeliveries.ShouldBe(5L);
    }
}

View File

@@ -0,0 +1,209 @@
// Go: consumer.go (rateLimitBps config, rate limiting in consumer delivery)
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for TokenBucketRateLimiter.
/// Go: consumer.go (rateLimitBps config, rate limiting in consumer delivery).
/// Several tests read <c>AvailableTokens</c> shortly after consuming; their
/// bounds include headroom for the real-time refill that happens between
/// statements.
/// </summary>
public class TokenBucketTests
{
    // -------------------------------------------------------------------------
    // Test 1 — TryConsume succeeds when enough tokens are available
    //
    // Go reference: consumer.go — rate limiter allows delivery when token
    // bucket has sufficient capacity for the message payload size.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_succeeds_when_tokens_available()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);
        // Full bucket — consume 100 bytes should succeed
        var result = limiter.TryConsume(100);
        result.ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 2 — TryConsume fails when insufficient tokens remain
    //
    // Go reference: consumer.go — delivery is gated when bucket is drained.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_fails_when_insufficient_tokens()
    {
        // Burst = 2x rate = 200 bytes
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 100);
        // Drain all tokens (200 byte burst)
        limiter.TryConsume(200).ShouldBeTrue();
        // Next consume should fail — refill is only 0.1 byte/ms, so no full
        // token can have accumulated within this test.
        var result = limiter.TryConsume(1);
        result.ShouldBeFalse();
    }

    // -------------------------------------------------------------------------
    // Test 3 — TryConsume always returns true when rate is zero (unlimited)
    //
    // Go reference: consumer.go — rateLimitBps=0 means no rate limiting.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_unlimited_when_rate_zero()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 0);
        // Should always succeed regardless of size
        limiter.TryConsume(1_000_000).ShouldBeTrue();
        limiter.TryConsume(1_000_000).ShouldBeTrue();
        limiter.TryConsume(long.MaxValue / 2).ShouldBeTrue();
    }

    // -------------------------------------------------------------------------
    // Test 4 — AvailableTokens starts at burst size
    //
    // Go reference: consumer.go — bucket starts full so initial burst is allowed.
    // (A full bucket cannot refill past its cap, so the 1-token tolerance is safe.)
    // -------------------------------------------------------------------------
    [Fact]
    public void AvailableTokens_starts_at_burst_size()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000, burstSize: 500);
        limiter.AvailableTokens.ShouldBe(500.0, tolerance: 1.0);
    }

    // -------------------------------------------------------------------------
    // Test 5 — AvailableTokens refills over time
    //
    // Go reference: consumer.go — token bucket refills at configured bytes/sec
    // so that a drained bucket recovers proportionally with elapsed time.
    // -------------------------------------------------------------------------
    [SlopwatchSuppress("SW004", "Token bucket refill is driven by real elapsed wall-clock time; no synchronisation primitive can replace observing time-based token accumulation")]
    [Fact]
    public async Task AvailableTokens_refills_over_time()
    {
        // 10,000 bytes/sec = 10 bytes/ms; burst = 20,000 bytes
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 10_000);
        // Drain entire bucket
        limiter.TryConsume(20_000).ShouldBeTrue();
        // Right after draining the bucket is near-empty. The bound of 100
        // tokens (≈10ms of refill headroom) replaces the original bound of
        // 1 token, which allowed only ≈0.1ms and flaked under GC/scheduler
        // pauses.
        limiter.AvailableTokens.ShouldBeLessThan(100.0);
        // Wait 50ms — should refill ~500 bytes (10 bytes/ms * 50ms)
        await Task.Delay(50);
        limiter.AvailableTokens.ShouldBeGreaterThan(100.0);
    }

    // -------------------------------------------------------------------------
    // Test 6 — EstimateWait returns zero when tokens are available
    //
    // Go reference: consumer.go — no delay when bucket has capacity.
    // -------------------------------------------------------------------------
    [Fact]
    public void EstimateWait_returns_zero_when_tokens_available()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);
        var wait = limiter.EstimateWait(100);
        wait.ShouldBe(TimeSpan.Zero);
    }

    // -------------------------------------------------------------------------
    // Test 7 — EstimateWait returns positive duration when tokens are insufficient
    //
    // Go reference: consumer.go — delivery delay calculated from deficit / refill rate.
    // -------------------------------------------------------------------------
    [Fact]
    public void EstimateWait_returns_positive_when_insufficient()
    {
        // 100 bytes/sec = 0.1 bytes/ms; burst = 200 bytes
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 100);
        // Drain all tokens
        limiter.TryConsume(200).ShouldBeTrue();
        // Requesting 50 more bytes — must wait
        var wait = limiter.EstimateWait(50);
        wait.ShouldBeGreaterThan(TimeSpan.Zero);
    }

    // -------------------------------------------------------------------------
    // Test 8 — UpdateRate changes the effective rate dynamically
    //
    // Go reference: consumer.go — rate can be updated via config reload.
    // -------------------------------------------------------------------------
    [Fact]
    public void UpdateRate_changes_rate_dynamically()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);
        limiter.UpdateRate(500);
        limiter.BytesPerSecond.ShouldBe(500L);
    }

    // -------------------------------------------------------------------------
    // Test 9 — UpdateRate caps existing tokens at new max
    //
    // Go reference: consumer.go — when burst is reduced, current tokens are
    // clamped to not exceed the new maximum.
    // -------------------------------------------------------------------------
    [Fact]
    public void UpdateRate_caps_tokens_at_new_max()
    {
        // Start with rate=1000, burst=2000
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000);
        // Reduce to rate=100, burst=200 — existing tokens (2000) must be capped
        limiter.UpdateRate(100);
        limiter.AvailableTokens.ShouldBeLessThanOrEqualTo(200.0 + 1.0); // +1 for refill epsilon
    }

    // -------------------------------------------------------------------------
    // Test 10 — TryConsume partial consumption leaves correct remainder
    //
    // Go reference: consumer.go — each delivery subtracts exactly payload bytes
    // from the bucket.
    // -------------------------------------------------------------------------
    [Fact]
    public void TryConsume_partial_consumption()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 1000, burstSize: 200);
        limiter.TryConsume(100).ShouldBeTrue();
        // ~100 tokens remain. Refill is 1 token/ms, so the upper bound of 110
        // allows ~10ms between the consume and this read; the original bound
        // of 101 tolerated only 1ms of drift and was flaky under load.
        limiter.AvailableTokens.ShouldBeInRange(99.0, 110.0);
    }

    // -------------------------------------------------------------------------
    // Test 11 — Default burst size is 2x the bytes-per-second rate
    //
    // Go reference: consumer.go — default burst allows two seconds worth of data.
    // -------------------------------------------------------------------------
    [Fact]
    public void Default_burst_is_2x_rate()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 500);
        // Bucket starts full at burst = 2 * 500 = 1000
        limiter.AvailableTokens.ShouldBe(1000.0, tolerance: 1.0);
    }

    // -------------------------------------------------------------------------
    // Test 12 — Custom burst size overrides the default 2x calculation
    //
    // Go reference: consumer.go — explicit burst size gives precise control
    // over maximum allowed burst traffic.
    // -------------------------------------------------------------------------
    [Fact]
    public void Custom_burst_size()
    {
        var limiter = new TokenBucketRateLimiter(bytesPerSecond: 500, burstSize: 750);
        limiter.AvailableTokens.ShouldBe(750.0, tolerance: 1.0);
    }
}

View File

@@ -0,0 +1,116 @@
using NATS.Server.JetStream.Consumers;
namespace NATS.Server.JetStream.Tests.JetStream.Consumers;
/// <summary>
/// Tests for WaitingRequestQueue FIFO queue with expiry and batch/byte tracking.
/// Go reference: consumer.go processNextMsgRequest.
/// </summary>
public class WaitingRequestQueueTests
{
    // Builds a pull request whose expiry is an offset (in ms) from now;
    // positive offsets are live requests, negative ones are already expired.
    private static PullRequest Request(string replyTo, int batch, long maxBytes, double expiresInMs, bool noWait)
        => new(replyTo, Batch: batch, MaxBytes: maxBytes,
               Expires: DateTimeOffset.UtcNow.AddMilliseconds(expiresInMs), NoWait: noWait);

    [Fact]
    public void Enqueue_and_dequeue_fifo()
    {
        var q = new WaitingRequestQueue();
        q.Enqueue(Request("reply.1", batch: 10, maxBytes: 0, expiresInMs: 60_000, noWait: false));
        q.Enqueue(Request("reply.2", batch: 5, maxBytes: 0, expiresInMs: 60_000, noWait: false));

        q.Count.ShouldBe(2);
        var head = q.TryDequeue();
        head.ShouldNotBeNull();
        // The first request enqueued must come out first.
        head.ReplyTo.ShouldBe("reply.1");
    }

    [Fact]
    public void TryDequeue_returns_null_when_empty()
    {
        var q = new WaitingRequestQueue();

        q.TryDequeue().ShouldBeNull();
        q.IsEmpty.ShouldBeTrue();
    }

    [Fact]
    public void Expired_requests_are_removed()
    {
        var q = new WaitingRequestQueue();
        q.Enqueue(Request("expired", batch: 10, maxBytes: 0, expiresInMs: -100, noWait: false));
        q.Enqueue(Request("valid", batch: 10, maxBytes: 0, expiresInMs: 60_000, noWait: false));

        q.RemoveExpired(DateTimeOffset.UtcNow);

        q.Count.ShouldBe(1);
        q.TryDequeue()!.ReplyTo.ShouldBe("valid");
    }

    [Fact]
    public void NoWait_request_returns_immediately_when_empty()
    {
        var q = new WaitingRequestQueue();
        q.Enqueue(Request("nowait", batch: 10, maxBytes: 0, expiresInMs: 60_000, noWait: true));

        var req = q.TryDequeue();

        req.ShouldNotBeNull();
        req.NoWait.ShouldBeTrue();
    }

    [Fact]
    public void MaxBytes_tracks_accumulation()
    {
        var q = new WaitingRequestQueue();
        q.Enqueue(Request("mb", batch: 100, maxBytes: 1024, expiresInMs: 60_000, noWait: false));
        var req = q.TryDequeue()!;

        req.MaxBytes.ShouldBe(1024L);
        req.RemainingBytes.ShouldBe(1024L);

        // Consuming below the limit leaves the request live …
        req.ConsumeBytes(256);
        req.RemainingBytes.ShouldBe(768L);
        req.IsExhausted.ShouldBeFalse();

        // … and going past the limit exhausts it.
        req.ConsumeBytes(800);
        req.IsExhausted.ShouldBeTrue();
    }

    [Fact]
    public void Batch_decrements_on_delivery()
    {
        var q = new WaitingRequestQueue();
        q.Enqueue(Request("batch", batch: 3, maxBytes: 0, expiresInMs: 60_000, noWait: false));
        var req = q.TryDequeue()!;

        req.RemainingBatch.ShouldBe(3);
        req.ConsumeBatch();
        req.RemainingBatch.ShouldBe(2);

        // Draining the remaining two deliveries exhausts the request.
        req.ConsumeBatch();
        req.ConsumeBatch();
        req.IsExhausted.ShouldBeTrue();
    }

    [Fact]
    public void RemoveExpired_handles_all_expired()
    {
        var q = new WaitingRequestQueue();
        q.Enqueue(Request("a", batch: 1, maxBytes: 0, expiresInMs: -100, noWait: false));
        q.Enqueue(Request("b", batch: 1, maxBytes: 0, expiresInMs: -50, noWait: false));

        q.RemoveExpired(DateTimeOffset.UtcNow);

        q.Count.ShouldBe(0);
        q.IsEmpty.ShouldBeTrue();
    }

    [Fact]
    public void PinId_is_stored()
    {
        var q = new WaitingRequestQueue();
        // Constructed directly (not via the helper) to exercise PinId.
        q.Enqueue(new PullRequest("pin", Batch: 1, MaxBytes: 0,
            Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false, PinId: "pin-123"));

        q.TryDequeue()!.PinId.ShouldBe("pin-123");
    }
}