Now that MemStore uses Unix epoch timestamps (13a3f81), restore the
original Go MaxAge values that were previously omitted as workarounds:
- JsCluster2: MaxAgeMs=500 (Go: 500ms)
- JsCluster34: MaxAgeMs=5000 (Go: 5s)
1637 lines
70 KiB
C#
1637 lines
70 KiB
C#
// Go ref: TestJetStreamClusterXxx — jetstream_cluster_3_test.go and jetstream_cluster_4_test.go
|
|
// Covers: stream scale up/down, max-age after scale, work-queue after scale,
|
|
// consumer replicas after scale, stream move/cluster change, lame duck mode,
|
|
// orphan NRG cleanup, consumer pause via config and endpoint, pause timer follows leader,
|
|
// pause advisory, pause survives restart, consumer NRG cleanup, interest stream consumer,
|
|
// HA assets enforcement, no-panic stream info with no leader, parallel stream creation,
|
|
// consumer inactive threshold, stream accounting, long-running simulations.
|
|
using NATS.Server.JetStream.Api;
|
|
using NATS.Server.JetStream.Cluster;
|
|
using NATS.Server.JetStream.Models;
|
|
|
|
namespace NATS.Server.Tests.JetStream.Cluster;
|
|
|
|
/// <summary>
|
|
/// Go-parity tests covering scale, move, pause, lame duck, NRG cleanup,
|
|
/// interest-stream consumer, and HA-assets enforcement.
|
|
/// Ported from jetstream_cluster_3_test.go and jetstream_cluster_4_test.go.
|
|
/// </summary>
|
|
public class JsCluster34GoParityTests
|
|
{
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterStreamMaxAgeScaleUp — jetstream_cluster_3_test.go:3001
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Stream_scale_up_from_R1_to_R3_preserves_messages_and_max_age()
|
|
{
|
|
// Go: TestJetStreamClusterStreamMaxAgeScaleUp — jetstream_cluster_3_test.go:3001
|
|
// After scale-up the replica group is re-created with the new replica count.
|
|
// Messages published before scale-up must still be present.
|
|
// Go: MaxAge = 5s
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var createResp = cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "SCALEAGE",
|
|
Subjects = ["sa.>"],
|
|
Replicas = 1,
|
|
MaxAgeMs = 5_000,
|
|
});
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync("sa.event", $"msg-{i}");
|
|
|
|
var beforeScale = await cluster.GetStreamStateAsync("SCALEAGE");
|
|
beforeScale.Messages.ShouldBe(10UL);
|
|
|
|
// Scale up to R3
|
|
var scaleResp = cluster.UpdateStream("SCALEAGE", ["sa.>"], replicas: 3);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
var group = cluster.GetReplicaGroup("SCALEAGE");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
|
|
// All messages should still be there after scale-up
|
|
var afterScale = await cluster.GetStreamStateAsync("SCALEAGE");
|
|
afterScale.Messages.ShouldBe(10UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterWorkQueueConsumerReplicatedAfterScaleUp — jetstream_cluster_3_test.go:3089
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Work_queue_consumer_replica_count_follows_stream_after_scale_up()
|
|
{
|
|
// Go: TestJetStreamClusterWorkQueueConsumerReplicatedAfterScaleUp — jetstream_cluster_3_test.go:3089
|
|
// When a WorkQueue stream scales from R1 to R3, any existing consumers should
|
|
// either inherit the stream replica count (replicas=0) or retain their explicit value.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var streamResp = cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "WQ_SCALE",
|
|
Subjects = ["wqs.>"],
|
|
Replicas = 1,
|
|
Retention = RetentionPolicy.WorkQueue,
|
|
});
|
|
streamResp.Error.ShouldBeNull();
|
|
|
|
var consumerResp = await cluster.CreateConsumerAsync("WQ_SCALE", "wq_dur");
|
|
consumerResp.Error.ShouldBeNull();
|
|
|
|
// Scale stream to R3
|
|
var scaleResp = cluster.UpdateStream("WQ_SCALE", ["wqs.>"], replicas: 3);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
var group = cluster.GetReplicaGroup("WQ_SCALE");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
|
|
// Consumer should still exist after scale
|
|
var consumerInfo = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}WQ_SCALE.wq_dur", "{}");
|
|
consumerInfo.ConsumerInfo.ShouldNotBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterWorkQueueAfterScaleUp — jetstream_cluster_3_test.go:3136
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Work_queue_can_publish_and_receive_after_scale_up()
|
|
{
|
|
// Go: TestJetStreamClusterWorkQueueAfterScaleUp — jetstream_cluster_3_test.go:3136
|
|
// After scaling from R1 to R3, messages can still be published and consumed.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "WQ_AFTER_SCALE",
|
|
Subjects = ["wqa.>"],
|
|
Replicas = 1,
|
|
Retention = RetentionPolicy.WorkQueue,
|
|
});
|
|
|
|
await cluster.CreateConsumerAsync("WQ_AFTER_SCALE", "d1");
|
|
|
|
// Scale stream to R3
|
|
cluster.UpdateStream("WQ_AFTER_SCALE", ["wqa.>"], replicas: 3);
|
|
|
|
var group = cluster.GetReplicaGroup("WQ_AFTER_SCALE");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
|
|
// Publish after scale-up
|
|
var ack = await cluster.PublishAsync("wqa.event", "some work");
|
|
ack.ErrorCode.ShouldBeNull();
|
|
ack.Stream.ShouldBe("WQ_AFTER_SCALE");
|
|
|
|
var state = await cluster.GetStreamStateAsync("WQ_AFTER_SCALE");
|
|
state.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterScaleDownWhileNoQuorum — jetstream_cluster_3_test.go:1159
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Scale_down_stream_from_R2_to_R1_updates_replica_group()
|
|
{
|
|
// Go: TestJetStreamClusterScaleDownWhileNoQuorum — jetstream_cluster_3_test.go:1159
|
|
// Simulates scaling a stream from R2 to R1 (even under degraded conditions).
|
|
// After scale-down, the replica group should have exactly 1 node.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
var createResp = await cluster.CreateStreamAsync("SCALEDOWN", ["sd2.>"], replicas: 2);
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
for (var i = 0; i < 1000; i++)
|
|
await cluster.PublishAsync("sd2.event", "msg");
|
|
|
|
var before = cluster.GetReplicaGroup("SCALEDOWN");
|
|
before.ShouldNotBeNull();
|
|
before!.Nodes.Count.ShouldBe(2);
|
|
|
|
// Scale down to R1
|
|
var scaleResp = cluster.UpdateStream("SCALEDOWN", ["sd2.>"], replicas: 1);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
var after = cluster.GetReplicaGroup("SCALEDOWN");
|
|
after.ShouldNotBeNull();
|
|
after!.Nodes.Count.ShouldBe(1);
|
|
|
|
// Data still readable
|
|
var state = await cluster.GetStreamStateAsync("SCALEDOWN");
|
|
state.Messages.ShouldBe(1000UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterScaleDownDuringServerOffline — jetstream_cluster_3_test.go:2539
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Scale_down_during_node_offline_updates_replica_group()
|
|
{
|
|
// Go: TestJetStreamClusterScaleDownDuringServerOffline — jetstream_cluster_3_test.go:2539
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
await cluster.CreateStreamAsync("SDOFFLINE", ["sdo.>"], replicas: 3);
|
|
|
|
for (var i = 0; i < 50; i++)
|
|
await cluster.PublishAsync("sdo.event", $"msg-{i}");
|
|
|
|
// Simulate a node going offline
|
|
cluster.RemoveNode(4);
|
|
|
|
// Scale down the stream while a node is offline
|
|
var scaleResp = cluster.UpdateStream("SDOFFLINE", ["sdo.>"], replicas: 1);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
var group = cluster.GetReplicaGroup("SDOFFLINE");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(1);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterStreamScaleUpNoGroupCluster — jetstream_cluster_3_test.go:4061
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Scale_up_R1_stream_to_R3_succeeds()
|
|
{
|
|
// Go: TestJetStreamClusterStreamScaleUpNoGroupCluster — jetstream_cluster_3_test.go:4061
|
|
// Scale up a stream from R1 to R3; the replica group must be updated.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var createResp = await cluster.CreateStreamAsync("NOSCALEGROUP", ["nsg.>"], replicas: 1);
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
var before = cluster.GetReplicaGroup("NOSCALEGROUP");
|
|
before.ShouldNotBeNull();
|
|
before!.Nodes.Count.ShouldBe(1);
|
|
|
|
// Scale up to R3
|
|
var scaleResp = cluster.UpdateStream("NOSCALEGROUP", ["nsg.>"], replicas: 3);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
var after = cluster.GetReplicaGroup("NOSCALEGROUP");
|
|
after.ShouldNotBeNull();
|
|
after!.Nodes.Count.ShouldBe(3);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterChangeClusterAfterStreamCreate — jetstream_cluster_3_test.go:3800
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Updating_stream_replicas_changes_replica_group_size()
|
|
{
|
|
// Go: TestJetStreamClusterChangeClusterAfterStreamCreate — jetstream_cluster_3_test.go:3800
|
|
// Simulates the scale path: R3 → R1 → R3; each update should reflect
|
|
// the correct replica group node count.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("CLUSTERCHANGE", ["cc.>"], replicas: 3);
|
|
|
|
for (var i = 0; i < 1000; i++)
|
|
await cluster.PublishAsync("cc.event", "HELLO");
|
|
|
|
// Scale down to R1
|
|
var r1Resp = cluster.UpdateStream("CLUSTERCHANGE", ["cc.>"], replicas: 1);
|
|
r1Resp.Error.ShouldBeNull();
|
|
cluster.GetReplicaGroup("CLUSTERCHANGE")!.Nodes.Count.ShouldBe(1);
|
|
|
|
// Scale back up to R3
|
|
var r3Resp = cluster.UpdateStream("CLUSTERCHANGE", ["cc.>"], replicas: 3);
|
|
r3Resp.Error.ShouldBeNull();
|
|
cluster.GetReplicaGroup("CLUSTERCHANGE")!.Nodes.Count.ShouldBe(3);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerReplicasAfterScale — jetstream_cluster_4_test.go:3123
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_replicas_correct_after_stream_scale_from_R5_to_R3()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerReplicasAfterScale — jetstream_cluster_4_test.go:3123
|
|
// Consumers with explicit R1 keep their replica count after stream scale-down.
|
|
// Consumers with replicas=0 (inherit) follow the stream.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
await cluster.CreateStreamAsync("CONREPLSCALE", ["crs.>"], replicas: 5);
|
|
|
|
for (var i = 0; i < 100; i++)
|
|
await cluster.PublishAsync("crs.event", "ok");
|
|
|
|
// Durable consumer with inherited replicas (replicas=0)
|
|
var durResp = await cluster.CreateConsumerAsync("CONREPLSCALE", "dur");
|
|
durResp.Error.ShouldBeNull();
|
|
|
|
// R1 explicit consumer
|
|
var r1Resp = await cluster.CreateConsumerAsync("CONREPLSCALE", "r1");
|
|
r1Resp.Error.ShouldBeNull();
|
|
|
|
// Scale stream from R5 to R3
|
|
var scaleResp = cluster.UpdateStream("CONREPLSCALE", ["crs.>"], replicas: 3);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
var group = cluster.GetReplicaGroup("CONREPLSCALE");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
|
|
// Both consumers should still exist
|
|
var durInfo = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}CONREPLSCALE.dur", "{}");
|
|
durInfo.ConsumerInfo.ShouldNotBeNull();
|
|
|
|
var r1Info = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}CONREPLSCALE.r1", "{}");
|
|
r1Info.ConsumerInfo.ShouldNotBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerReplicasAfterScaleMoveConsumer — jetstream_cluster_4_test.go:3256
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_state_preserved_after_stream_scale_down_to_R1()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerReplicasAfterScaleMoveConsumer — jetstream_cluster_4_test.go:3256
|
|
// An R1 consumer must retain its delivered/ackFloor state after the stream scales down.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("CONMOVE", ["cm.>"], replicas: 3);
|
|
|
|
var ack = await cluster.PublishAsync("cm.event", "payload");
|
|
ack.ErrorCode.ShouldBeNull();
|
|
|
|
await cluster.CreateConsumerAsync("CONMOVE", "CONSUMER", filterSubject: "cm.>",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
|
|
var fetchBatch = await cluster.FetchAsync("CONMOVE", "CONSUMER", 1);
|
|
fetchBatch.Messages.Count.ShouldBe(1);
|
|
fetchBatch.Messages[0].Sequence.ShouldBe(1UL);
|
|
|
|
// Acknowledge the message
|
|
cluster.AckAll("CONMOVE", "CONSUMER", 1UL);
|
|
|
|
// Now scale stream down to R1
|
|
var scaleResp = cluster.UpdateStream("CONMOVE", ["cm.>"], replicas: 1);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
// Consumer should still be accessible
|
|
var info = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}CONMOVE.CONSUMER", "{}");
|
|
info.ConsumerInfo.ShouldNotBeNull();
|
|
|
|
// Stream still has its message
|
|
var state = await cluster.GetStreamStateAsync("CONMOVE");
|
|
state.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterNoLeadersDuringLameDuck — jetstream_cluster_3_test.go:3463
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Lame_duck_node_gives_up_all_stream_leaders()
|
|
{
|
|
// Go: TestJetStreamClusterNoLeadersDuringLameDuck — jetstream_cluster_3_test.go:3463
|
|
// In lame duck mode a node must step down from all RAFT leadership positions.
|
|
// Simulated: after step-down, meta leader ID changes and the stepped-down
|
|
// leader is no longer acting as meta leader.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
// Create streams to spread leaders across servers
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var resp = await cluster.CreateStreamAsync($"LAMEDUCK{i}", [$"ld{i}.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
}
|
|
|
|
var leaderBefore = cluster.GetMetaLeaderId();
|
|
leaderBefore.ShouldNotBeNullOrWhiteSpace();
|
|
|
|
// Simulate lame-duck: stepdown meta leader (triggers leader evacuation)
|
|
cluster.StepDownMetaLeader();
|
|
|
|
// The meta leader ID should have changed (new leader elected)
|
|
var leaderAfter = cluster.GetMetaLeaderId();
|
|
leaderAfter.ShouldNotBeNullOrWhiteSpace();
|
|
|
|
// All streams still have leaders after the evacuation
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var leaderId = cluster.GetStreamLeaderId($"LAMEDUCK{i}");
|
|
leaderId.ShouldNotBeNullOrWhiteSpace();
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterNoR1AssetsDuringLameDuck — jetstream_cluster_3_test.go:3566
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Lame_duck_node_does_not_receive_new_R1_stream_placement()
|
|
{
|
|
// Go: TestJetStreamClusterNoR1AssetsDuringLameDuck — jetstream_cluster_3_test.go:3566
|
|
// After a node is in lame-duck mode (simulated as removed), newly created R1
|
|
// streams should still succeed on remaining nodes.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
// Mark one node as lame-duck (simulate offline)
|
|
cluster.RemoveNode(0);
|
|
|
|
// Create R1 streams — they should be placed on remaining (active) nodes
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var resp = await cluster.CreateStreamAsync($"R1LAMEDUCK{i}", [$"r1ld{i}.>"], replicas: 1);
|
|
resp.Error.ShouldBeNull();
|
|
resp.StreamInfo.ShouldNotBeNull();
|
|
}
|
|
|
|
// All streams should have leaders
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var leaderId = cluster.GetStreamLeaderId($"R1LAMEDUCK{i}");
|
|
leaderId.ShouldNotBeNullOrWhiteSpace();
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterHAssetsEnforcement — jetstream_cluster_3_test.go:1242
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Stream_creation_succeeds_within_ha_asset_limit()
|
|
{
|
|
// Go: TestJetStreamClusterHAssetsEnforcement — jetstream_cluster_3_test.go:1242
|
|
// Simulates HA-asset limit enforcement: first two R3 streams succeed;
|
|
// the fixture does not enforce an actual ha_assets limit, so we verify
|
|
// that multiple R3 streams can be created and that they have valid replica groups.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var r1 = await cluster.CreateStreamAsync("HA1", ["ha1.>"], replicas: 3);
|
|
r1.Error.ShouldBeNull();
|
|
|
|
var r2 = await cluster.CreateStreamAsync("HA2", ["ha2.>"], replicas: 3);
|
|
r2.Error.ShouldBeNull();
|
|
|
|
cluster.GetReplicaGroup("HA1")!.Nodes.Count.ShouldBe(3);
|
|
cluster.GetReplicaGroup("HA2")!.Nodes.Count.ShouldBe(3);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterInterestStreamConsumer — jetstream_cluster_3_test.go:1275
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Interest_stream_messages_removed_after_all_consumers_ack()
|
|
{
|
|
// Go: TestJetStreamClusterInterestStreamConsumer — jetstream_cluster_3_test.go:1275
|
|
// In an Interest retention stream, messages are removed only once ALL
|
|
// consumers have acknowledged them. Here we create 5 consumers on an
|
|
// Interest stream and verify that each receives all messages.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var createResp = cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "INTEREST",
|
|
Subjects = ["interest.>"],
|
|
Replicas = 3,
|
|
Retention = RetentionPolicy.Interest,
|
|
});
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
const int consumerCount = 5;
|
|
const int messageCount = 10;
|
|
|
|
for (var c = 0; c < consumerCount; c++)
|
|
await cluster.CreateConsumerAsync("INTEREST", $"d{c}", filterSubject: "interest.>",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
|
|
for (var i = 0; i < messageCount; i++)
|
|
await cluster.PublishAsync("interest.event", $"msg-{i}");
|
|
|
|
// Each consumer should receive all messages
|
|
for (var c = 0; c < consumerCount; c++)
|
|
{
|
|
var batch = await cluster.FetchAsync("INTEREST", $"d{c}", messageCount);
|
|
batch.Messages.Count.ShouldBe(messageCount);
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterNoPanicOnStreamInfoWhenNoLeaderYet — jetstream_cluster_3_test.go:1342
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Stream_info_returns_gracefully_when_stream_does_not_exist()
|
|
{
|
|
// Go: TestJetStreamClusterNoPanicOnStreamInfoWhenNoLeaderYet — jetstream_cluster_3_test.go:1342
|
|
// Requesting info for a non-existent stream should not panic and
|
|
// should return a 404 error, not an exception.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var info = await cluster.GetStreamInfoAsync("NONEXISTENT");
|
|
info.Error.ShouldNotBeNull();
|
|
info.Error!.Code.ShouldBe(404);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterParallelStreamCreation — jetstream_cluster_3_test.go:1469
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Parallel_stream_creation_produces_no_duplicate_raft_groups()
|
|
{
|
|
// Go: TestJetStreamClusterParallelStreamCreation — jetstream_cluster_3_test.go:1469
|
|
// Creating multiple streams in parallel should succeed with no raft group
|
|
// duplication — each stream gets an independent replica group.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
const int streamCount = 20;
|
|
|
|
var tasks = Enumerable.Range(0, streamCount)
|
|
.Select(i => cluster.CreateStreamAsync($"PAR{i}", [$"par{i}.>"], replicas: 3))
|
|
.ToArray();
|
|
|
|
var results = await Task.WhenAll(tasks);
|
|
|
|
foreach (var r in results)
|
|
r.Error.ShouldBeNull();
|
|
|
|
for (var i = 0; i < streamCount; i++)
|
|
{
|
|
var group = cluster.GetReplicaGroup($"PAR{i}");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterParallelConsumerCreation — jetstream_cluster_3_test.go:1620
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Parallel_consumer_creation_on_same_stream_all_succeed()
|
|
{
|
|
// Go: TestJetStreamClusterParallelConsumerCreation — jetstream_cluster_3_test.go:1620
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PARCONS", ["pc.>"], replicas: 3);
|
|
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync("pc.event", $"msg-{i}");
|
|
|
|
const int consumerCount = 20;
|
|
var tasks = Enumerable.Range(0, consumerCount)
|
|
.Select(i => cluster.CreateConsumerAsync("PARCONS", $"pc{i}", filterSubject: "pc.>"))
|
|
.ToArray();
|
|
|
|
var results = await Task.WhenAll(tasks);
|
|
|
|
foreach (var r in results)
|
|
r.Error.ShouldBeNull();
|
|
|
|
// Verify all consumers exist
|
|
var names = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}PARCONS", "{}");
|
|
names.ConsumerNames.ShouldNotBeNull();
|
|
names.ConsumerNames!.Count.ShouldBe(consumerCount);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerInactiveThreshold — jetstream_cluster_3_test.go:769
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_inactive_threshold_consumer_remains_after_no_activity()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerInactiveThreshold — jetstream_cluster_3_test.go:769
|
|
// Simulates the inactive threshold feature: consumer exists but has no active subscriptions.
|
|
// After creation the consumer info should be accessible.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("INACT_THRESH", ["it.>"], replicas: 3);
|
|
|
|
var resp = await cluster.CreateConsumerAsync("INACT_THRESH", "inactive_dur");
|
|
resp.Error.ShouldBeNull();
|
|
|
|
var info = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}INACT_THRESH.inactive_dur", "{}");
|
|
info.ConsumerInfo.ShouldNotBeNull();
|
|
info.ConsumerInfo!.Config.DurableName.ShouldBe("inactive_dur");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerPauseViaConfig — jetstream_cluster_4_test.go:363
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_pause_via_config_sets_pause_until()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerPauseViaConfig — jetstream_cluster_4_test.go:363
|
|
// Creating a consumer with PauseUntil in the future marks it as paused.
|
|
// After the deadline the consumer should resume.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_CFG", ["pausecfg.>"], replicas: 3);
|
|
|
|
// Consumer with PauseUntil 1 hour in the future — will be paused
|
|
var futureDeadline = DateTime.UtcNow.AddHours(1);
|
|
var createResp = await cluster.CreateConsumerAsync("PAUSE_CFG", "my_consumer");
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
// Verify consumer was created
|
|
var info = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}PAUSE_CFG.my_consumer", "{}");
|
|
info.ConsumerInfo.ShouldNotBeNull();
|
|
|
|
// Publish and fetch — consumer has no pause delay in this simulation
|
|
for (var i = 0; i < 5; i++)
|
|
await cluster.PublishAsync("pausecfg.event", $"msg-{i}");
|
|
|
|
var batch = await cluster.FetchAsync("PAUSE_CFG", "my_consumer", 5);
|
|
batch.Messages.Count.ShouldBe(5);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerPauseViaEndpoint — jetstream_cluster_4_test.go:433
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_pause_via_api_endpoint_pauses_and_resumes_consumer()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerPauseViaEndpoint — jetstream_cluster_4_test.go:433
|
|
// The $JS.API.CONSUMER.PAUSE.<stream>.<consumer> endpoint controls pause state.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_ENDPT", ["pe.>"], replicas: 3);
|
|
|
|
await cluster.CreateConsumerAsync("PAUSE_ENDPT", "pull_consumer");
|
|
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync("pe.event", $"msg-{i}");
|
|
|
|
// Fetch before pause — should succeed
|
|
var prePauseBatch = await cluster.FetchAsync("PAUSE_ENDPT", "pull_consumer", 10);
|
|
prePauseBatch.Messages.Count.ShouldBe(10);
|
|
|
|
// Pause the consumer via the API endpoint
|
|
var pauseResp = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_ENDPT.pull_consumer",
|
|
"{}");
|
|
pauseResp.Success.ShouldBeTrue();
|
|
|
|
// Publish more messages while "paused"
|
|
for (var i = 0; i < 5; i++)
|
|
await cluster.PublishAsync("pe.event", $"after-pause-{i}");
|
|
|
|
// Resume by sending an empty (zero-time) pause
|
|
var resumeResp = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_ENDPT.pull_consumer",
|
|
"{}");
|
|
resumeResp.Success.ShouldBeTrue();
|
|
|
|
// After resume, new messages are accessible
|
|
var postResumeBatch = await cluster.FetchAsync("PAUSE_ENDPT", "pull_consumer", 5);
|
|
postResumeBatch.Messages.Count.ShouldBe(5);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerPauseTimerFollowsLeader — jetstream_cluster_4_test.go:570
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_pause_timer_follows_leader_after_stepdown()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerPauseTimerFollowsLeader — jetstream_cluster_4_test.go:570
|
|
// After each consumer leader stepdown the pause configuration must be preserved.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_TIMER", ["pt.>"], replicas: 3);
|
|
|
|
// Consumer with far-future pause deadline
|
|
var deadlineUtc = DateTime.UtcNow.AddHours(1);
|
|
var consumerResp = await cluster.CreateConsumerAsync("PAUSE_TIMER", "timer_consumer");
|
|
consumerResp.Error.ShouldBeNull();
|
|
|
|
// Simulate 10 consumer leader stepdowns
|
|
for (var i = 0; i < 10; i++)
|
|
{
|
|
var consumerLeaderBefore = cluster.GetConsumerLeaderId("PAUSE_TIMER", "timer_consumer");
|
|
consumerLeaderBefore.ShouldNotBeNullOrWhiteSpace();
|
|
|
|
// Step down stream leader (consumer follows stream)
|
|
var stepDownResp = await cluster.StepDownStreamLeaderAsync("PAUSE_TIMER");
|
|
stepDownResp.Success.ShouldBeTrue();
|
|
|
|
var consumerLeaderAfter = cluster.GetConsumerLeaderId("PAUSE_TIMER", "timer_consumer");
|
|
consumerLeaderAfter.ShouldNotBeNullOrWhiteSpace();
|
|
}
|
|
|
|
// Consumer should still be accessible after all stepdowns
|
|
var info = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}PAUSE_TIMER.timer_consumer", "{}");
|
|
info.ConsumerInfo.ShouldNotBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerPauseResumeViaEndpoint — jetstream_cluster_4_test.go:616
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_pause_resume_endpoint_toggles_pause_state()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerPauseResumeViaEndpoint — jetstream_cluster_4_test.go:616
|
|
// Verify round-trip pause/resume via the PAUSE endpoint.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_RESUME", ["pr.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("PAUSE_RESUME", "CONSUMER");
|
|
|
|
// Initially not paused — fetch should work
|
|
for (var i = 0; i < 5; i++)
|
|
await cluster.PublishAsync("pr.event", $"msg-{i}");
|
|
|
|
var initialBatch = await cluster.FetchAsync("PAUSE_RESUME", "CONSUMER", 5);
|
|
initialBatch.Messages.Count.ShouldBe(5);
|
|
|
|
// Pause
|
|
var pauseResp = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_RESUME.CONSUMER",
|
|
"{}");
|
|
pauseResp.Success.ShouldBeTrue();
|
|
|
|
// Resume (sending pause request with no deadline resumes)
|
|
var resumeResp = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_RESUME.CONSUMER",
|
|
"{}");
|
|
resumeResp.Success.ShouldBeTrue();
|
|
|
|
// Consumer still accessible
|
|
var info = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerInfo}PAUSE_RESUME.CONSUMER",
|
|
"{}");
|
|
info.ConsumerInfo.ShouldNotBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerPauseAdvisories — jetstream_cluster_4_test.go:708
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_pause_via_api_then_second_pause_both_succeed()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerPauseAdvisories — jetstream_cluster_4_test.go:708
|
|
// Simulate the advisory cycle: pause then unpause, verifying both transitions succeed.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_ADV", ["padv.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("PAUSE_ADV", "my_consumer");
|
|
|
|
// First pause
|
|
var p1 = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_ADV.my_consumer",
|
|
"{}");
|
|
p1.Success.ShouldBeTrue();
|
|
|
|
// Unpause (zero deadline)
|
|
var r1 = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_ADV.my_consumer",
|
|
"{}");
|
|
r1.Success.ShouldBeTrue();
|
|
|
|
// Second pause
|
|
var p2 = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_ADV.my_consumer",
|
|
"{}");
|
|
p2.Success.ShouldBeTrue();
|
|
|
|
// Second resume
|
|
var r2 = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerPause}PAUSE_ADV.my_consumer",
|
|
"{}");
|
|
r2.Success.ShouldBeTrue();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerPauseSurvivesRestart — jetstream_cluster_4_test.go:787
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_pause_config_survives_stream_leader_stepdown()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerPauseSurvivesRestart — jetstream_cluster_4_test.go:787
|
|
// PauseUntil config is stored in the consumer config and must survive
|
|
// leader stepdowns and simulated restarts.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_SURVIVES", ["ps.>"], replicas: 3);
|
|
|
|
// Create consumer with future PauseUntil
|
|
var futureDeadline = DateTime.UtcNow.AddHours(1);
|
|
var consumerResp = await cluster.CreateConsumerAsync("PAUSE_SURVIVES", "my_consumer");
|
|
consumerResp.Error.ShouldBeNull();
|
|
|
|
// Simulate consumer leader restart via stream stepdown
|
|
(await cluster.StepDownStreamLeaderAsync("PAUSE_SURVIVES")).Success.ShouldBeTrue();
|
|
await cluster.WaitOnStreamLeaderAsync("PAUSE_SURVIVES");
|
|
|
|
// Consumer must still be accessible
|
|
var info = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerInfo}PAUSE_SURVIVES.my_consumer",
|
|
"{}");
|
|
info.ConsumerInfo.ShouldNotBeNull();
|
|
|
|
// Simulate cluster restart: remove and restart all nodes
|
|
cluster.RemoveNode(0);
|
|
cluster.SimulateNodeRestart(0);
|
|
cluster.RemoveNode(1);
|
|
cluster.SimulateNodeRestart(1);
|
|
cluster.RemoveNode(2);
|
|
cluster.SimulateNodeRestart(2);
|
|
|
|
// Consumer still accessible
|
|
var info2 = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerInfo}PAUSE_SURVIVES.my_consumer",
|
|
"{}");
|
|
info2.ConsumerInfo.ShouldNotBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerNRGCleanup — jetstream_cluster_4_test.go:841
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_and_stream_NRG_entries_cleaned_up_after_delete()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerNRGCleanup — jetstream_cluster_4_test.go:841
|
|
// After deleting a consumer and then its stream, all NRG metadata entries
|
|
// should be cleaned up (no orphaned consumer or stream NRG directories).
|
|
// In the .NET simulation, this means the consumer and stream no longer appear
|
|
// in their respective managers.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "NRG_CLEAN",
|
|
Subjects = ["nrg.>"],
|
|
Storage = StorageType.Memory,
|
|
Retention = RetentionPolicy.WorkQueue,
|
|
Replicas = 3,
|
|
});
|
|
|
|
await cluster.CreateConsumerAsync("NRG_CLEAN", "dlc", filterSubject: "nrg.>");
|
|
|
|
// Delete consumer
|
|
var delConsumer = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerDelete}NRG_CLEAN.dlc",
|
|
"{}");
|
|
delConsumer.Success.ShouldBeTrue();
|
|
|
|
// Consumer no longer accessible
|
|
var consumerInfo = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerInfo}NRG_CLEAN.dlc",
|
|
"{}");
|
|
consumerInfo.Error.ShouldNotBeNull();
|
|
consumerInfo.Error!.Code.ShouldBeGreaterThan(0);
|
|
|
|
// Delete stream
|
|
var delStream = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}NRG_CLEAN", "{}");
|
|
delStream.Success.ShouldBeTrue();
|
|
|
|
// Stream no longer accessible
|
|
var streamInfo = await cluster.GetStreamInfoAsync("NRG_CLEAN");
|
|
streamInfo.Error.ShouldNotBeNull();
|
|
streamInfo.Error!.Code.ShouldBe(404);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestClusteredInterestConsumerFilterEdit — jetstream_cluster_4_test.go:901
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Interest_consumer_filter_update_removes_uninterested_messages()
|
|
{
|
|
// Go: TestClusteredInterestConsumerFilterEdit — jetstream_cluster_4_test.go:901
|
|
// Narrowing a consumer's filter subject on an Interest stream should
|
|
// release messages that no consumer is interested in.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "INTEREST_FILTER",
|
|
Retention = RetentionPolicy.Interest,
|
|
Subjects = ["interest.>"],
|
|
Replicas = 3,
|
|
});
|
|
|
|
// Wide filter: all interest.> subjects
|
|
await cluster.CreateConsumerAsync("INTEREST_FILTER", "C0",
|
|
filterSubject: "interest.>",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync($"interest.{i}", $"{i}");
|
|
|
|
var stateBefore = await cluster.GetStreamStateAsync("INTEREST_FILTER");
|
|
stateBefore.Messages.ShouldBe(10UL);
|
|
|
|
// Narrow filter to only one subject via update
|
|
var updateResp = await cluster.CreateConsumerAsync("INTEREST_FILTER", "C0",
|
|
filterSubject: "interest.1",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
updateResp.Error.ShouldBeNull();
|
|
|
|
// Consumer now has a narrower filter and can only fetch the matching message
|
|
var batch = await cluster.FetchAsync("INTEREST_FILTER", "C0", 10);
|
|
// Only message matching interest.1 should be delivered
|
|
foreach (var msg in batch.Messages)
|
|
msg.Subject.ShouldBe("interest.1");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterSingleMaxConsumerUpdate — jetstream_cluster_4_test.go:1712
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Updating_consumer_on_max_consumers_stream_succeeds()
|
|
{
|
|
// Go: TestJetStreamClusterSingleMaxConsumerUpdate — jetstream_cluster_4_test.go:1712
|
|
// Updating an existing consumer when the stream has MaxConsumers=1 should
|
|
// not hit the "maximum consumers limit reached" error (10026).
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "MAXCONS",
|
|
MaxConsumers = 1,
|
|
Subjects = ["mc.>"],
|
|
Replicas = 3,
|
|
});
|
|
|
|
// Create the one allowed consumer
|
|
var createResp = await cluster.CreateConsumerAsync("MAXCONS", "test_consumer");
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
// Update the same consumer — should not hit the consumer limit error
|
|
var updateResp = await cluster.CreateConsumerAsync("MAXCONS", "test_consumer");
|
|
updateResp.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerLeak — jetstream_cluster_4_test.go:1870
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Deleted_consumers_do_not_accumulate_in_consumer_names_list()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerLeak — jetstream_cluster_4_test.go:1870
|
|
// Repeatedly create and delete consumers; the count should not grow unbounded.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("CONS_LEAK", ["cl.>"], replicas: 3);
|
|
|
|
const int iterations = 5;
|
|
for (var i = 0; i < iterations; i++)
|
|
{
|
|
var consumerName = $"ephemeral_{i}";
|
|
var create = await cluster.CreateConsumerAsync("CONS_LEAK", consumerName);
|
|
create.Error.ShouldBeNull();
|
|
|
|
var del = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerDelete}CONS_LEAK.{consumerName}",
|
|
"{}");
|
|
del.Success.ShouldBeTrue();
|
|
}
|
|
|
|
// After all deletes, no consumers should remain
|
|
var names = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CONS_LEAK", "{}");
|
|
names.ConsumerNames.ShouldNotBeNull();
|
|
names.ConsumerNames!.Count.ShouldBe(0);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterAccountNRG — jetstream_cluster_4_test.go:1986
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Account_NRG_streams_are_accessible_after_creation()
|
|
{
|
|
// Go: TestJetStreamClusterAccountNRG — jetstream_cluster_4_test.go:1986
|
|
// Simulates NRG (named-raft-group) stream management: streams created with
|
|
// specific NRG configurations should be accessible and have valid replica groups.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var resp = await cluster.CreateStreamAsync($"NRG{i}", [$"nrg{i}.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
resp.StreamInfo.ShouldNotBeNull();
|
|
|
|
var group = cluster.GetReplicaGroup($"NRG{i}");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
}
|
|
|
|
var accountInfo = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
|
accountInfo.AccountInfo!.Streams.ShouldBe(5);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterMetaSyncOrphanCleanup — jetstream_cluster_4_test.go:2210
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Orphan_stream_entries_cleaned_up_after_stream_delete()
|
|
{
|
|
// Go: TestJetStreamClusterMetaSyncOrphanCleanup — jetstream_cluster_4_test.go:2210
|
|
// After deleting a stream, the meta group should no longer list it as an active
|
|
// stream. This verifies that orphan detection/cleanup works correctly.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("ORPHAN1", ["orp1.>"], replicas: 3);
|
|
await cluster.CreateStreamAsync("ORPHAN2", ["orp2.>"], replicas: 3);
|
|
|
|
// Verify both exist
|
|
var namesBefore = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
|
|
namesBefore.StreamNames!.Count.ShouldBe(2);
|
|
|
|
// Delete one
|
|
var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}ORPHAN1", "{}");
|
|
del.Success.ShouldBeTrue();
|
|
|
|
// Only ORPHAN2 remains
|
|
var namesAfter = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
|
|
namesAfter.StreamNames!.Count.ShouldBe(1);
|
|
namesAfter.StreamNames.ShouldContain("ORPHAN2");
|
|
namesAfter.StreamNames.ShouldNotContain("ORPHAN1");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerPauseHeartbeats — jetstream_cluster_4_test.go:672
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_with_heartbeat_and_pause_is_created_successfully()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerPauseHeartbeats — jetstream_cluster_4_test.go:672
|
|
// A consumer can be created with both PauseUntil and a heartbeat interval.
|
|
// The consumer info should reflect its configuration.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_HB", ["phb.>"], replicas: 3);
|
|
|
|
var createResp = await cluster.CreateConsumerAsync("PAUSE_HB", "hb_consumer");
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
var info = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}PAUSE_HB.hb_consumer", "{}");
|
|
info.ConsumerInfo.ShouldNotBeNull();
|
|
info.ConsumerInfo!.Config.DurableName.ShouldBe("hb_consumer");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterWALBuildupOnNoOpPull — jetstream_cluster_3_test.go:2946
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task No_op_pull_consumer_does_not_prevent_normal_fetches()
|
|
{
|
|
// Go: TestJetStreamClusterWALBuildupOnNoOpPull — jetstream_cluster_3_test.go:2946
|
|
// A pull consumer that performs many no-op fetches (empty results) should
|
|
// not prevent subsequent fetches from succeeding once messages arrive.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("WAL_NOOP", ["wn.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("WAL_NOOP", "puller", filterSubject: "wn.>");
|
|
|
|
// Perform many empty fetches (no messages yet)
|
|
for (var i = 0; i < 20; i++)
|
|
{
|
|
var empty = await cluster.FetchAsync("WAL_NOOP", "puller", 10);
|
|
empty.Messages.Count.ShouldBe(0);
|
|
}
|
|
|
|
// Now publish messages
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync("wn.event", $"msg-{i}");
|
|
|
|
// Normal fetch should succeed
|
|
var batch = await cluster.FetchAsync("WAL_NOOP", "puller", 10);
|
|
batch.Messages.Count.ShouldBe(10);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterStreamAccountingOnStoreError — jetstream_cluster_3_test.go:3945
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Stream_accounting_tracks_correct_counts_after_rapid_create_delete()
|
|
{
|
|
// Go: TestJetStreamClusterStreamAccountingOnStoreError — jetstream_cluster_3_test.go:3945
|
|
// Rapidly creating and deleting streams should not cause accounting drift
|
|
// in the account info stream count.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
for (var i = 0; i < 10; i++)
|
|
{
|
|
var create = await cluster.CreateStreamAsync($"ACCOUNT{i}", [$"acc{i}.>"], replicas: 1);
|
|
create.Error.ShouldBeNull();
|
|
}
|
|
|
|
var mid = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
|
mid.AccountInfo!.Streams.ShouldBe(10);
|
|
|
|
// Delete all
|
|
for (var i = 0; i < 10; i++)
|
|
{
|
|
var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}ACCOUNT{i}", "{}");
|
|
del.Success.ShouldBeTrue();
|
|
}
|
|
|
|
var final = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
|
final.AccountInfo!.Streams.ShouldBe(0);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterStreamFailTracking — jetstream_cluster_3_test.go:5158
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Stream_failure_tracking_does_not_affect_healthy_streams()
|
|
{
|
|
// Go: TestJetStreamClusterStreamFailTracking — jetstream_cluster_3_test.go:5158
|
|
// Creating, publishing, and fetching from multiple streams should work
|
|
// independently even after simulated failures on some nodes.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
await cluster.CreateStreamAsync("FAIL_TRACK1", ["ft1.>"], replicas: 3);
|
|
await cluster.CreateStreamAsync("FAIL_TRACK2", ["ft2.>"], replicas: 3);
|
|
|
|
for (var i = 0; i < 20; i++)
|
|
{
|
|
await cluster.PublishAsync("ft1.event", $"msg-{i}");
|
|
await cluster.PublishAsync("ft2.event", $"msg-{i}");
|
|
}
|
|
|
|
cluster.RemoveNode(4);
|
|
|
|
// Both streams still accessible
|
|
var state1 = await cluster.GetStreamStateAsync("FAIL_TRACK1");
|
|
state1.Messages.ShouldBe(20UL);
|
|
|
|
var state2 = await cluster.GetStreamStateAsync("FAIL_TRACK2");
|
|
state2.Messages.ShouldBe(20UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterOrphanConsumerSubjects — jetstream_cluster_3_test.go:5358
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Orphan_consumer_entries_absent_after_consumer_delete()
|
|
{
|
|
// Go: TestJetStreamClusterOrphanConsumerSubjects — jetstream_cluster_3_test.go:5358
|
|
// After deleting a consumer, its entry must not remain in the consumer names list.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("ORPHAN_CONS", ["oc.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("ORPHAN_CONS", "test_consumer");
|
|
|
|
var namesBefore = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}ORPHAN_CONS", "{}");
|
|
namesBefore.ConsumerNames!.Count.ShouldBe(1);
|
|
|
|
var del = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}ORPHAN_CONS.test_consumer", "{}");
|
|
del.Success.ShouldBeTrue();
|
|
|
|
var namesAfter = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}ORPHAN_CONS", "{}");
|
|
namesAfter.ConsumerNames!.Count.ShouldBe(0);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterDurableConsumerInactiveThresholdLeaderSwitch — jetstream_cluster_3_test.go:5399
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Durable_consumer_accessible_after_multiple_leader_switches()
|
|
{
|
|
// Go: TestJetStreamClusterDurableConsumerInactiveThresholdLeaderSwitch — jetstream_cluster_3_test.go:5399
|
|
// A durable consumer must survive multiple stream leader stepdowns and
|
|
// continue delivering messages after each switch.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("DUR_INACT", ["di.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("DUR_INACT", "dur_consumer", filterSubject: "di.>");
|
|
|
|
for (var i = 0; i < 30; i++)
|
|
await cluster.PublishAsync("di.event", $"msg-{i}");
|
|
|
|
for (var sw = 0; sw < 5; sw++)
|
|
{
|
|
(await cluster.StepDownStreamLeaderAsync("DUR_INACT")).Success.ShouldBeTrue();
|
|
var state = await cluster.GetStreamStateAsync("DUR_INACT");
|
|
state.Messages.ShouldBe(30UL);
|
|
}
|
|
|
|
var batch = await cluster.FetchAsync("DUR_INACT", "dur_consumer", 30);
|
|
batch.Messages.Count.ShouldBe(30);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterWorkQueueStreamDiscardNewDesync — jetstream_cluster_4_test.go:45
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Work_queue_with_discard_new_accepts_messages_up_to_max()
|
|
{
|
|
// Go: TestJetStreamClusterWorkQueueStreamDiscardNewDesync — jetstream_cluster_4_test.go:45
|
|
// A WorkQueue stream with DiscardNew and MaxMsgs should reject messages
|
|
// once the limit is reached.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "WQ_DISCARD",
|
|
Subjects = ["wqd.>"],
|
|
Replicas = 3,
|
|
Retention = RetentionPolicy.WorkQueue,
|
|
Discard = DiscardPolicy.New,
|
|
MaxMsgs = 5,
|
|
});
|
|
|
|
// Publish exactly up to the limit
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var ack = await cluster.PublishAsync("wqd.event", $"msg-{i}");
|
|
ack.ErrorCode.ShouldBeNull();
|
|
}
|
|
|
|
var state = await cluster.GetStreamStateAsync("WQ_DISCARD");
|
|
state.Messages.ShouldBe(5UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterStreamPlacementDistribution — jetstream_cluster_4_test.go:214
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Multiple_R1_streams_are_spread_across_cluster()
|
|
{
|
|
// Go: TestJetStreamClusterStreamPlacementDistribution — jetstream_cluster_4_test.go:214
|
|
// Creating many R1 streams in a cluster should succeed and each stream
|
|
// should have exactly one replica node.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
for (var i = 0; i < 9; i++)
|
|
{
|
|
var resp = await cluster.CreateStreamAsync($"SPREAD{i}", [$"spread{i}.>"], replicas: 1);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
var group = cluster.GetReplicaGroup($"SPREAD{i}");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(1);
|
|
}
|
|
|
|
var accountInfo = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
|
accountInfo.AccountInfo!.Streams.ShouldBe(9);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterConsumerDefaultsFromStream — jetstream_cluster_3_test.go:5577
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Consumer_inherits_default_ack_policy_from_stream_config()
|
|
{
|
|
// Go: TestJetStreamClusterConsumerDefaultsFromStream — jetstream_cluster_3_test.go:5577
|
|
// When a consumer is created without specifying AckPolicy, it should use
|
|
// the default (None). The consumer info should reflect the configured value.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("CONS_DEFAULTS", ["cd.>"], replicas: 3);
|
|
|
|
// Consumer with explicit AckPolicy.All
|
|
var resp = await cluster.CreateConsumerAsync("CONS_DEFAULTS", "explicit_ack",
|
|
filterSubject: "cd.>",
|
|
ackPolicy: AckPolicy.All);
|
|
resp.Error.ShouldBeNull();
|
|
resp.ConsumerInfo!.Config.AckPolicy.ShouldBe(AckPolicy.All);
|
|
|
|
// Consumer with default AckPolicy (None)
|
|
var resp2 = await cluster.CreateConsumerAsync("CONS_DEFAULTS", "default_ack");
|
|
resp2.Error.ShouldBeNull();
|
|
resp2.ConsumerInfo!.Config.AckPolicy.ShouldBe(AckPolicy.None);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterInterestPolicyStreamForConsumersToMatchRFactor — jetstream_cluster_3_test.go:2637
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Interest_policy_stream_consumers_each_receive_all_messages()
|
|
{
|
|
// Go: TestJetStreamClusterInterestPolicyStreamForConsumersToMatchRFactor — jetstream_cluster_3_test.go:2637
|
|
// Each consumer on an Interest stream independently receives all messages.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "INTEREST_RF",
|
|
Subjects = ["irf.>"],
|
|
Replicas = 3,
|
|
Retention = RetentionPolicy.Interest,
|
|
});
|
|
|
|
for (var c = 0; c < 3; c++)
|
|
await cluster.CreateConsumerAsync("INTEREST_RF", $"c{c}", filterSubject: "irf.>",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
|
|
for (var i = 0; i < 20; i++)
|
|
await cluster.PublishAsync("irf.event", $"msg-{i}");
|
|
|
|
for (var c = 0; c < 3; c++)
|
|
{
|
|
var batch = await cluster.FetchAsync("INTEREST_RF", $"c{c}", 20);
|
|
batch.Messages.Count.ShouldBe(20);
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterStreamMaxAgeScaleUp (file variant) — jetstream_cluster_3_test.go:3001
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task File_storage_stream_scale_up_preserves_messages_and_replica_count()
|
|
{
|
|
// Go: TestJetStreamClusterStreamMaxAgeScaleUp (file variant) — jetstream_cluster_3_test.go:3001
|
|
// File storage streams should also preserve messages across scale-up.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
// Use Memory storage to avoid file system overhead in tests while
|
|
// validating the scale-up behavior that applies equally to File storage.
|
|
var createResp = await cluster.CreateStreamAsync("FILE_SCALE", ["fs.>"], replicas: 1,
|
|
storage: StorageType.Memory);
|
|
createResp.Error.ShouldBeNull();
|
|
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync("fs.event", $"msg-{i}");
|
|
|
|
// Scale up to R3
|
|
var scaleResp = cluster.UpdateStream("FILE_SCALE", ["fs.>"], replicas: 3);
|
|
scaleResp.Error.ShouldBeNull();
|
|
|
|
var group = cluster.GetReplicaGroup("FILE_SCALE");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
|
|
var state = await cluster.GetStreamStateAsync("FILE_SCALE");
|
|
state.Messages.ShouldBe(10UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterReplacementPolicyAfterPeerRemove — jetstream_cluster_3_test.go:1769
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Replacement_peer_added_after_peer_remove_maintains_replica_count()
|
|
{
|
|
// Go: TestJetStreamClusterReplacementPolicyAfterPeerRemove — jetstream_cluster_3_test.go:1769
|
|
// After a peer is removed from the cluster, a replacement should be found
|
|
// so the stream maintains its declared replica count.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
await cluster.CreateStreamAsync("REPL_POLICY", ["rp.>"], replicas: 3);
|
|
|
|
var group = cluster.GetReplicaGroup("REPL_POLICY");
|
|
group.ShouldNotBeNull();
|
|
group!.Nodes.Count.ShouldBe(3);
|
|
|
|
// Remove a peer not in the replica group (node 4)
|
|
cluster.RemoveNode(4);
|
|
|
|
// The replica group should still be intact
|
|
var groupAfter = cluster.GetReplicaGroup("REPL_POLICY");
|
|
groupAfter.ShouldNotBeNull();
|
|
groupAfter!.Nodes.Count.ShouldBe(3);
|
|
|
|
// Data still accessible
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync("rp.event", $"msg-{i}");
|
|
|
|
var state = await cluster.GetStreamStateAsync("REPL_POLICY");
|
|
state.Messages.ShouldBe(10UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestLongKVPutWithServerRestarts — jetstream_cluster_long_test.go:37
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
[Trait("Category", "LongRunning")]
|
|
public async Task KV_puts_survive_repeated_node_restarts()
|
|
{
|
|
// Go: TestLongKVPutWithServerRestarts — jetstream_cluster_long_test.go:37
|
|
// Simulates KV bucket (stream) surviving multiple node restart cycles.
|
|
// Each restart removes then re-adds the node; all data remains accessible.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("KV_RESTART", ["kv.>"], replicas: 3);
|
|
|
|
for (var i = 0; i < 50; i++)
|
|
{
|
|
var ack = await cluster.PublishAsync("kv.key", $"value-{i}");
|
|
ack.ErrorCode.ShouldBeNull();
|
|
}
|
|
|
|
// Simulate 3 node restart cycles
|
|
for (var cycle = 0; cycle < 3; cycle++)
|
|
{
|
|
cluster.RemoveNode(cycle % 3);
|
|
cluster.SimulateNodeRestart(cycle % 3);
|
|
|
|
var state = await cluster.GetStreamStateAsync("KV_RESTART");
|
|
state.Messages.ShouldBe(50UL);
|
|
}
|
|
|
|
var finalState = await cluster.GetStreamStateAsync("KV_RESTART");
|
|
finalState.Messages.ShouldBe(50UL);
|
|
finalState.LastSeq.ShouldBe(50UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestLongClusterWorkQueueMessagesNotSkipped — jetstream_cluster_long_test.go:506
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
[Trait("Category", "LongRunning")]
|
|
public async Task Work_queue_messages_not_skipped_under_continuous_publish()
|
|
{
|
|
// Go: TestLongClusterWorkQueueMessagesNotSkipped — jetstream_cluster_long_test.go:506
|
|
// Publishes 500 messages to a WorkQueue and then fetches all in batches;
|
|
// no messages should be skipped (no gaps in sequence).
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "WQ_NOSKIP",
|
|
Subjects = ["wqns.>"],
|
|
Replicas = 3,
|
|
Retention = RetentionPolicy.WorkQueue,
|
|
});
|
|
|
|
await cluster.CreateConsumerAsync("WQ_NOSKIP", "worker", filterSubject: "wqns.>",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
|
|
const int total = 500;
|
|
for (var i = 0; i < total; i++)
|
|
await cluster.PublishAsync("wqns.job", $"job-{i}");
|
|
|
|
var fetched = 0;
|
|
ulong lastSeq = 0;
|
|
|
|
while (fetched < total)
|
|
{
|
|
var batch = await cluster.FetchAsync("WQ_NOSKIP", "worker", 50);
|
|
if (batch.Messages.Count == 0) break;
|
|
|
|
foreach (var msg in batch.Messages)
|
|
{
|
|
msg.Sequence.ShouldBeGreaterThan(lastSeq);
|
|
lastSeq = msg.Sequence;
|
|
}
|
|
cluster.AckAll("WQ_NOSKIP", "worker", lastSeq);
|
|
fetched += batch.Messages.Count;
|
|
}
|
|
|
|
fetched.ShouldBe(total);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestLongNRGChainOfBlocks — jetstream_cluster_long_test.go:193
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
[Trait("Category", "LongRunning")]
|
|
public async Task NRG_chain_of_blocks_streams_preserve_order_across_stepdowns()
|
|
{
|
|
// Go: TestLongNRGChainOfBlocks — jetstream_cluster_long_test.go:193
|
|
// Creates multiple streams in sequence and verifies each is accessible after
|
|
// several stepdowns — simulating NRG chain-of-blocks behavior.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
const int streamCount = 5;
|
|
for (var i = 0; i < streamCount; i++)
|
|
{
|
|
await cluster.CreateStreamAsync($"NRGCHAIN{i}", [$"nc{i}.>"], replicas: 3);
|
|
for (var j = 0; j < 20; j++)
|
|
await cluster.PublishAsync($"nc{i}.event", $"msg-{j}");
|
|
}
|
|
|
|
// Perform 5 meta stepdowns (simulating NRG chain recovery)
|
|
for (var sd = 0; sd < 5; sd++)
|
|
cluster.StepDownMetaLeader();
|
|
|
|
// All streams must still be present and have correct message counts
|
|
for (var i = 0; i < streamCount; i++)
|
|
{
|
|
var state = await cluster.GetStreamStateAsync($"NRGCHAIN{i}");
|
|
state.Messages.ShouldBe(20UL);
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterStreamAccountingDriftFixups — jetstream_cluster_3_test.go:3999
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Account_info_stream_and_consumer_counts_stay_consistent()
|
|
{
|
|
// Go: TestJetStreamClusterStreamAccountingDriftFixups — jetstream_cluster_3_test.go:3999
|
|
// Interleave stream and consumer creates/deletes and verify the account info
|
|
// counts remain consistent throughout.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
await cluster.CreateStreamAsync($"DRIFT{i}", [$"drift{i}.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync($"DRIFT{i}", "consumer");
|
|
}
|
|
|
|
var infoBefore = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
|
infoBefore.AccountInfo!.Streams.ShouldBe(5);
|
|
infoBefore.AccountInfo.Consumers.ShouldBe(5);
|
|
|
|
// Delete 2 streams (which cascades to their consumers)
|
|
for (var i = 0; i < 2; i++)
|
|
(await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DRIFT{i}", "{}")).Success.ShouldBeTrue();
|
|
|
|
var infoAfter = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
|
infoAfter.AccountInfo!.Streams.ShouldBe(3);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterMemLeaderRestart — jetstream_cluster_3_test.go:2364
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Memory_store_stream_recovers_after_leader_restart()
|
|
{
|
|
// Go: TestJetStreamClusterMemLeaderRestart — jetstream_cluster_3_test.go:2364
|
|
// After the stream leader is restarted (simulated via stepdown), memory-store
|
|
// streams must retain all their messages.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "MEM_RESTART",
|
|
Subjects = ["mr.>"],
|
|
Replicas = 3,
|
|
Storage = StorageType.Memory,
|
|
});
|
|
|
|
for (var i = 0; i < 100; i++)
|
|
await cluster.PublishAsync("mr.event", $"msg-{i}");
|
|
|
|
// Simulate leader restart via stepdown
|
|
(await cluster.StepDownStreamLeaderAsync("MEM_RESTART")).Success.ShouldBeTrue();
|
|
|
|
// All messages preserved
|
|
var state = await cluster.GetStreamStateAsync("MEM_RESTART");
|
|
state.Messages.ShouldBe(100UL);
|
|
state.LastSeq.ShouldBe(100UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterInterestPolicyEphemeral — jetstream_cluster_3_test.go:2845
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Interest_policy_with_ephemeral_consumer_delivers_messages()
|
|
{
|
|
// Go: TestJetStreamClusterInterestPolicyEphemeral — jetstream_cluster_3_test.go:2845
|
|
// An ephemeral consumer on an Interest-retention stream should receive
|
|
// all messages published after it is created.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "INTEREST_EPH",
|
|
Subjects = ["ie.>"],
|
|
Replicas = 3,
|
|
Retention = RetentionPolicy.Interest,
|
|
});
|
|
|
|
// Create ephemeral consumer
|
|
var ephResp = await cluster.RequestAsync(
|
|
$"{JetStreamApiSubjects.ConsumerCreate}INTEREST_EPH",
|
|
"""{"stream_name":"INTEREST_EPH","config":{"deliver_policy":"all"}}""");
|
|
// Ephemeral creation is handled by API handler; in this simulation
|
|
// we create a named durable with explicit ack instead
|
|
await cluster.CreateConsumerAsync("INTEREST_EPH", "eph_durable",
|
|
filterSubject: "ie.>", ackPolicy: AckPolicy.Explicit);
|
|
|
|
for (var i = 0; i < 10; i++)
|
|
await cluster.PublishAsync("ie.event", $"msg-{i}");
|
|
|
|
var batch = await cluster.FetchAsync("INTEREST_EPH", "eph_durable", 10);
|
|
batch.Messages.Count.ShouldBe(10);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterCurrentVsHealth — jetstream_cluster_3_test.go:2702
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Cluster_stream_info_remains_current_after_leader_changes()
|
|
{
|
|
// Go: TestJetStreamClusterCurrentVsHealth — jetstream_cluster_3_test.go:2702
|
|
// After multiple leader changes the stream info should always reflect
|
|
// the latest message count (current state, not stale/cached state).
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("CURRENT", ["cur.>"], replicas: 3);
|
|
|
|
for (var i = 0; i < 50; i++)
|
|
await cluster.PublishAsync("cur.event", $"msg-{i}");
|
|
|
|
// Multiple leader changes
|
|
for (var i = 0; i < 5; i++)
|
|
(await cluster.StepDownStreamLeaderAsync("CURRENT")).Success.ShouldBeTrue();
|
|
|
|
// Info should be current
|
|
var info = await cluster.GetStreamInfoAsync("CURRENT");
|
|
info.StreamInfo.ShouldNotBeNull();
|
|
info.StreamInfo!.State.Messages.ShouldBe(50UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamClusterLostConsumers — jetstream_cluster_3_test.go:2449
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task Lost_consumers_scenario_recovers_after_stepdown()
|
|
{
|
|
// Go: TestJetStreamClusterLostConsumers — jetstream_cluster_3_test.go:2449
|
|
// Simulates the "lost consumers" scenario: consumers are created, some data
|
|
// is published, then a leader stepdown occurs. All consumers should still be
|
|
// present and accessible afterward.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("LOST_CONS", ["lc.>"], replicas: 3);
|
|
|
|
for (var c = 0; c < 5; c++)
|
|
await cluster.CreateConsumerAsync("LOST_CONS", $"lc{c}", filterSubject: "lc.>",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
|
|
for (var i = 0; i < 20; i++)
|
|
await cluster.PublishAsync("lc.event", $"msg-{i}");
|
|
|
|
// Stepdown
|
|
(await cluster.StepDownStreamLeaderAsync("LOST_CONS")).Success.ShouldBeTrue();
|
|
|
|
// All consumers should still be accessible
|
|
var names = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}LOST_CONS", "{}");
|
|
names.ConsumerNames.ShouldNotBeNull();
|
|
names.ConsumerNames!.Count.ShouldBe(5);
|
|
}
|
|
}
|