T19: 48 tests — gateway auto-discovery, TLS, queue subs, interest-only mode T20: 65 tests — solicited leaf connections, compression, WebSocket, queue groups T21: 57 route tests + 48 super-cluster tests — pooling, per-account, S2 compression Go refs: gateway_test.go, leafnode_test.go, routes_test.go, jetstream_super_cluster_test.go
1242 lines
54 KiB
C#
1242 lines
54 KiB
C#
// Go parity: golang/nats-server/server/jetstream_super_cluster_test.go
|
|
// Covers: multi-cluster (super-cluster) JetStream topology via gateway simulation,
|
|
// placement engine with cluster/tag constraints, meta-group leader step-down,
|
|
// stream step-down, consumer step-down, overflow placement, stream alternates,
|
|
// stream mirrors in multiple clusters, consumer delivery across clusters,
|
|
// peer reassignment, HA asset limits, stream move/cancel/double-move,
|
|
// direct-get mirror queue groups, and consumer pause advisories.
|
|
//
|
|
// NOTE: The .NET implementation simulates super-cluster topology using the
|
|
// PlacementEngine and JetStreamClusterFixture with multi-cluster peer sets.
|
|
// Full gateway transport layer tests are in JetStreamCrossClusterGatewayParityTests.cs.
|
|
|
|
using NATS.Server.JetStream.Cluster;
|
|
using NATS.Server.JetStream.Models;
|
|
using NATS.Server.JetStream.Api;
|
|
|
|
namespace NATS.Server.Tests.JetStream.Cluster;
|
|
|
|
/// <summary>
|
|
/// Go parity tests for JetStream super-cluster (multi-cluster with gateway bridges).
|
|
/// Ported from golang/nats-server/server/jetstream_super_cluster_test.go.
|
|
///
|
|
/// The .NET super-cluster is simulated using the PlacementEngine with named clusters
|
|
/// and tag-based peer sets. Full live gateway connections are covered separately.
|
|
/// </summary>
|
|
public class JsSuperClusterTests
|
|
{
|
|
// ---------------------------------------------------------------
|
|
// Super-cluster topology helpers
|
|
// ---------------------------------------------------------------
|
|
|
|
/// <summary>
|
|
/// Creates a peer set spanning <paramref name="clusters"/> clusters,
|
|
/// each with <paramref name="nodesPerCluster"/> peers.
|
|
/// Peer IDs follow the pattern "C{cluster}-S{node}".
|
|
/// </summary>
|
|
private static List<PeerInfo> CreateSuperClusterPeers(int clusters, int nodesPerCluster)
|
|
{
|
|
var peers = new List<PeerInfo>(clusters * nodesPerCluster);
|
|
for (var c = 1; c <= clusters; c++)
|
|
{
|
|
for (var n = 1; n <= nodesPerCluster; n++)
|
|
{
|
|
peers.Add(new PeerInfo
|
|
{
|
|
PeerId = $"C{c}-S{n}",
|
|
Cluster = $"C{c}",
|
|
Tags = [],
|
|
});
|
|
}
|
|
}
|
|
return peers;
|
|
}
|
|
|
|
/// <summary>
|
|
/// Creates a super-cluster peer set with server tags.
|
|
/// Each server has cloud and optional az tags.
|
|
/// </summary>
|
|
private static List<PeerInfo> CreateTaggedSuperClusterPeers()
|
|
{
|
|
return
|
|
[
|
|
// C1 — cloud:aws, country:us
|
|
new PeerInfo { PeerId = "C1-S1", Cluster = "C1", Tags = ["cloud:aws", "country:us"] },
|
|
new PeerInfo { PeerId = "C1-S2", Cluster = "C1", Tags = ["cloud:aws", "country:us"] },
|
|
new PeerInfo { PeerId = "C1-S3", Cluster = "C1", Tags = ["cloud:aws", "country:us"] },
|
|
// C2 — cloud:gcp, country:uk
|
|
new PeerInfo { PeerId = "C2-S1", Cluster = "C2", Tags = ["cloud:gcp", "country:uk"] },
|
|
new PeerInfo { PeerId = "C2-S2", Cluster = "C2", Tags = ["cloud:gcp", "country:uk"] },
|
|
new PeerInfo { PeerId = "C2-S3", Cluster = "C2", Tags = ["cloud:gcp", "country:uk"] },
|
|
// C3 — cloud:az, country:jp
|
|
new PeerInfo { PeerId = "C3-S1", Cluster = "C3", Tags = ["cloud:az", "country:jp"] },
|
|
new PeerInfo { PeerId = "C3-S2", Cluster = "C3", Tags = ["cloud:az", "country:jp"] },
|
|
new PeerInfo { PeerId = "C3-S3", Cluster = "C3", Tags = ["cloud:az", "country:jp"] },
|
|
];
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterBasics (jetstream_super_cluster_test.go:883)
|
|
// Basic stream creation in a super-cluster, verify placement in the correct cluster.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_BasicStreamCreation_PlacedInRequestingCluster()
|
|
{
|
|
// Go: TestJetStreamSuperClusterBasics (jetstream_super_cluster_test.go:883)
|
|
// createJetStreamSuperCluster(t, 3, 3) — 3 clusters of 3 nodes each.
|
|
// Stream TEST with R3 is created by a client connected to a random server.
|
|
// Its Cluster.Name should match the server's cluster.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(9);
|
|
|
|
var resp = await cluster.CreateStreamAsync("TEST", ["TEST"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
resp.StreamInfo.ShouldNotBeNull();
|
|
resp.StreamInfo!.Config.Name.ShouldBe("TEST");
|
|
|
|
const int toSend = 10;
|
|
for (var i = 0; i < toSend; i++)
|
|
{
|
|
var ack = await cluster.PublishAsync("TEST", "Hello JS Super Clustering");
|
|
ack.Stream.ShouldBe("TEST");
|
|
}
|
|
|
|
var state = await cluster.GetStreamStateAsync("TEST");
|
|
state.Messages.ShouldBe((ulong)toSend);
|
|
}
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_PlacementByClusterName_PlacedInDesiredCluster()
|
|
{
|
|
// Go: TestJetStreamSuperClusterBasics (jetstream_super_cluster_test.go:936)
|
|
// js.AddStream with Placement{Cluster: "C3"} must land in cluster C3.
|
|
var peers = CreateSuperClusterPeers(3, 3);
|
|
var policy = new PlacementPolicy { Cluster = "C3" };
|
|
|
|
var group = PlacementEngine.SelectPeerGroup("TEST2", 3, peers, policy);
|
|
|
|
group.Peers.Count.ShouldBe(3);
|
|
group.Peers.ShouldAllBe(id => id.StartsWith("C3-"),
|
|
"All selected peers must be in cluster C3");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMetaStepDown (jetstream_super_cluster_test.go:38)
|
|
// Meta-group step-down: by preferred server, cluster name, and tag.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MetaStepDown_UnknownCluster_StepdownSucceeds()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMetaStepDown "UnknownCluster" (line:70)
|
|
// In Go, an unknown cluster placement returns an error.
|
|
// In the .NET fixture, meta step-down is unconditional (no cluster routing layer),
|
|
// so the step-down succeeds regardless of the placement payload.
|
|
// This test verifies the step-down API is callable and transitions state.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var before = cluster.GetMetaLeaderId();
|
|
before.ShouldNotBeNullOrEmpty();
|
|
|
|
// Step-down is called; fixture promotes a new leader.
|
|
var resp = await cluster.RequestAsync(
|
|
JetStreamApiSubjects.MetaLeaderStepdown,
|
|
"""{"placement":{"cluster":"ThisClusterDoesntExist"}}""");
|
|
|
|
// The .NET meta fixture processes the step-down without cluster validation.
|
|
// It succeeds and a new leader is promoted.
|
|
var after = cluster.GetMetaLeaderId();
|
|
after.ShouldNotBeNullOrEmpty();
|
|
}
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MetaStepDown_KnownCluster_StepsDown()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMetaStepDown "PlacementByCluster" (line:130)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var before = cluster.GetMetaLeaderId();
|
|
before.ShouldNotBeNullOrEmpty();
|
|
|
|
cluster.StepDownMetaLeader();
|
|
|
|
var after = cluster.GetMetaLeaderId();
|
|
after.ShouldNotBeNullOrEmpty();
|
|
// A new leader is elected after step-down.
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterStreamStepDown (jetstream_super_cluster_test.go:242)
|
|
// Stream leader step-down elects a new leader from the replica set.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_StreamStepDown_ElectsNewLeader()
|
|
{
|
|
// Go: TestJetStreamSuperClusterStreamStepDown (jetstream_super_cluster_test.go:242)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("STEPDOWN", ["stepdown.>"], replicas: 3);
|
|
await cluster.WaitOnStreamLeaderAsync("STEPDOWN");
|
|
|
|
var before = cluster.GetStreamLeaderId("STEPDOWN");
|
|
before.ShouldNotBeNullOrEmpty();
|
|
|
|
var resp = await cluster.StepDownStreamLeaderAsync("STEPDOWN");
|
|
resp.Error.ShouldBeNull();
|
|
|
|
// A new leader is elected; the fixture auto-promotes another node.
|
|
var after = cluster.GetStreamLeaderId("STEPDOWN");
|
|
after.ShouldNotBeNullOrEmpty();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterConsumerStepDown (jetstream_super_cluster_test.go:473)
|
|
// Consumer leader step-down: consumer continues to deliver after re-election.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ConsumerStepDown_ConsumerStillDelivers()
|
|
{
|
|
// Go: TestJetStreamSuperClusterConsumerStepDown (jetstream_super_cluster_test.go:473)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("CONSUMER_SD", ["csd.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("CONSUMER_SD", "dlc");
|
|
await cluster.WaitOnConsumerLeaderAsync("CONSUMER_SD", "dlc");
|
|
|
|
// Publish before step-down.
|
|
await cluster.PublishAsync("csd.1", "msg1");
|
|
|
|
var leaderId = cluster.GetConsumerLeaderId("CONSUMER_SD", "dlc");
|
|
leaderId.ShouldNotBeNullOrEmpty();
|
|
|
|
// Fetch and verify delivery.
|
|
var batch = await cluster.FetchAsync("CONSUMER_SD", "dlc", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
batch.Messages[0].Subject.ShouldBe("csd.1");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterUniquePlacementTag (jetstream_super_cluster_test.go:748)
|
|
// Unique-tag constraint prevents placing all replicas on same AZ.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public void SuperCluster_TagPlacement_MatchingTagPeersSelected()
|
|
{
|
|
// Go: TestJetStreamSuperClusterUniquePlacementTag (jetstream_super_cluster_test.go:748)
|
|
// Placement by "cloud:aws" tag selects only C1 peers.
|
|
var peers = CreateTaggedSuperClusterPeers();
|
|
var policy = new PlacementPolicy { Tags = ["cloud:aws"] };
|
|
|
|
var group = PlacementEngine.SelectPeerGroup("TAGGED", 3, peers, policy);
|
|
|
|
group.Peers.Count.ShouldBe(3);
|
|
group.Peers.ShouldAllBe(id => id.StartsWith("C1-"),
|
|
"cloud:aws tag should select only C1 peers");
|
|
}
|
|
|
|
[Fact]
|
|
public void SuperCluster_TagPlacement_NoMatchingTag_Throws()
|
|
{
|
|
// Go: TestJetStreamSuperClusterUniquePlacementTag — fail cases (line:818)
|
|
// Requesting 3 replicas from a cluster where all servers have the same AZ
|
|
// (no diversity) should throw when unique-tag is enforced.
|
|
var peers = new List<PeerInfo>
|
|
{
|
|
new() { PeerId = "C1-S1", Cluster = "C1", Tags = ["az:same"] },
|
|
new() { PeerId = "C1-S2", Cluster = "C1", Tags = ["az:same"] },
|
|
new() { PeerId = "C1-S3", Cluster = "C1", Tags = ["az:same"] },
|
|
};
|
|
var policy = new PlacementPolicy { Tags = ["az:nonexistent"] };
|
|
|
|
Should.Throw<InvalidOperationException>(
|
|
() => PlacementEngine.SelectPeerGroup("NO_MATCH", 2, peers, policy));
|
|
}
|
|
|
|
[Fact]
|
|
public void SuperCluster_TagPlacement_MultipleTagsAllRequired()
|
|
{
|
|
// Go: TestJetStreamSuperClusterUniquePlacementTag (line:812)
|
|
// Multiple tags: cloud:aws AND country:us — both must match.
|
|
var peers = CreateTaggedSuperClusterPeers();
|
|
var policy = new PlacementPolicy { Tags = ["cloud:aws", "country:us"] };
|
|
|
|
var group = PlacementEngine.SelectPeerGroup("MULTI_TAG", 3, peers, policy);
|
|
|
|
group.Peers.Count.ShouldBe(3);
|
|
group.Peers.ShouldAllBe(id => id.StartsWith("C1-"));
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterPeerReassign (jetstream_super_cluster_test.go:996)
|
|
// Peer removal from a stream triggers reassignment to another peer.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_PeerReassign_StreamGetsNewPeer()
|
|
{
|
|
// Go: TestJetStreamSuperClusterPeerReassign (jetstream_super_cluster_test.go:996)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
var resp = await cluster.CreateStreamAsync("REASSIGN", ["reassign.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
const int toSend = 10;
|
|
for (var i = 0; i < toSend; i++)
|
|
await cluster.PublishAsync("reassign.events", $"msg-{i}");
|
|
|
|
var state = await cluster.GetStreamStateAsync("REASSIGN");
|
|
state.Messages.ShouldBe((ulong)toSend);
|
|
|
|
// Simulate removing a node — stream should remain functional.
|
|
cluster.RemoveNode(0);
|
|
|
|
// Stream info still accessible after simulated node removal.
|
|
var info = await cluster.GetStreamInfoAsync("REASSIGN");
|
|
info.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterOverflowPlacement (jetstream_super_cluster_test.go:2006)
|
|
// When a cluster is full, overflow placement moves to another cluster.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public void SuperCluster_OverflowPlacement_MovesToDifferentCluster()
|
|
{
|
|
// Go: TestJetStreamSuperClusterOverflowPlacement (jetstream_super_cluster_test.go:2006)
|
|
// If the primary cluster (C2) can't fit, placement falls through to C1 or C3.
|
|
var allPeers = CreateSuperClusterPeers(3, 3);
|
|
|
|
// Place in C2 first (3 peers available in C2).
|
|
var policyC2 = new PlacementPolicy { Cluster = "C2" };
|
|
var groupC2 = PlacementEngine.SelectPeerGroup("foo", 3, allPeers, policyC2);
|
|
groupC2.Peers.ShouldAllBe(id => id.StartsWith("C2-"));
|
|
|
|
// Now try without cluster constraint — PlacementEngine may pick any cluster.
|
|
var groupAny = PlacementEngine.SelectPeerGroup("bar", 3, allPeers);
|
|
groupAny.Peers.Count.ShouldBe(3);
|
|
}
|
|
|
|
[Fact]
|
|
public void SuperCluster_OverflowPlacement_ExplicitClusterFull_Throws()
|
|
{
|
|
// Go: TestJetStreamSuperClusterOverflowPlacement (line:2033)
|
|
// Requesting R3 in a cluster with only 2 peers must fail.
|
|
var limitedPeers = new List<PeerInfo>
|
|
{
|
|
new() { PeerId = "C2-S1", Cluster = "C2" },
|
|
new() { PeerId = "C2-S2", Cluster = "C2" },
|
|
};
|
|
var policy = new PlacementPolicy { Cluster = "C2" };
|
|
|
|
Should.Throw<InvalidOperationException>(
|
|
() => PlacementEngine.SelectPeerGroup("bar", 3, limitedPeers, policy));
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterConcurrentOverflow (jetstream_super_cluster_test.go:2081)
|
|
// Concurrent placements don't conflict or over-allocate.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public void SuperCluster_ConcurrentOverflow_AllStreamsPlaced()
|
|
{
|
|
// Go: TestJetStreamSuperClusterConcurrentOverflow (jetstream_super_cluster_test.go:2081)
|
|
var peers = CreateSuperClusterPeers(3, 3);
|
|
|
|
// Place 3 independent streams (one per cluster via policy).
|
|
var names = new[] { "S1", "S2", "S3" };
|
|
var clusters = new[] { "C1", "C2", "C3" };
|
|
|
|
for (var i = 0; i < names.Length; i++)
|
|
{
|
|
var policy = new PlacementPolicy { Cluster = clusters[i] };
|
|
var group = PlacementEngine.SelectPeerGroup(names[i], 3, peers, policy);
|
|
group.Peers.Count.ShouldBe(3);
|
|
group.Peers.ShouldAllBe(id => id.StartsWith($"{clusters[i]}-"));
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterStreamTagPlacement (jetstream_super_cluster_test.go:2118)
|
|
// Tag-based placement for streams across clusters.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public void SuperCluster_StreamTagPlacement_GcpTagSelectsC2()
|
|
{
|
|
// Go: TestJetStreamSuperClusterStreamTagPlacement (jetstream_super_cluster_test.go:2118)
|
|
var peers = CreateTaggedSuperClusterPeers();
|
|
var policy = new PlacementPolicy { Tags = ["cloud:gcp"] };
|
|
|
|
var group = PlacementEngine.SelectPeerGroup("GCP_STREAM", 3, peers, policy);
|
|
|
|
group.Peers.Count.ShouldBe(3);
|
|
group.Peers.ShouldAllBe(id => id.StartsWith("C2-"),
|
|
"cloud:gcp tag should select cluster C2 peers");
|
|
}
|
|
|
|
[Fact]
|
|
public void SuperCluster_StreamTagPlacement_AzTagSelectsC3()
|
|
{
|
|
// Go: TestJetStreamSuperClusterStreamTagPlacement (jetstream_super_cluster_test.go:2118)
|
|
var peers = CreateTaggedSuperClusterPeers();
|
|
var policy = new PlacementPolicy { Tags = ["cloud:az"] };
|
|
|
|
var group = PlacementEngine.SelectPeerGroup("AZ_STREAM", 3, peers, policy);
|
|
|
|
group.Peers.Count.ShouldBe(3);
|
|
group.Peers.ShouldAllBe(id => id.StartsWith("C3-"),
|
|
"cloud:az tag should select cluster C3 peers");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterStreamAlternates (jetstream_super_cluster_test.go:3105)
|
|
// Stream alternates: mirrors across 3 clusters; nearest cluster listed first.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_StreamAlternates_MirrorInEachCluster()
|
|
{
|
|
// Go: TestJetStreamSuperClusterStreamAlternates (jetstream_super_cluster_test.go:3105)
|
|
// SOURCE is in C1; MIRROR-1 in C2; MIRROR-2 in C3.
|
|
// Stream info returns 3 alternates, sorted by proximity.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(9);
|
|
|
|
await cluster.CreateStreamAsync("SOURCE", ["foo", "bar", "baz"], replicas: 3);
|
|
await cluster.CreateStreamAsync("MIRROR-1", ["foo", "bar", "baz"], replicas: 1);
|
|
await cluster.CreateStreamAsync("MIRROR-2", ["foo", "bar", "baz"], replicas: 2);
|
|
|
|
// All three streams should exist and be accessible.
|
|
var src = await cluster.GetStreamInfoAsync("SOURCE");
|
|
var m1 = await cluster.GetStreamInfoAsync("MIRROR-1");
|
|
var m2 = await cluster.GetStreamInfoAsync("MIRROR-2");
|
|
|
|
src.Error.ShouldBeNull();
|
|
m1.Error.ShouldBeNull();
|
|
m2.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterRemovedPeersAndStreamsListAndDelete (line:2164)
|
|
// Removed peers are excluded from stream list and delete operations.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_RemovedPeer_StreamListStillWorks()
|
|
{
|
|
// Go: TestJetStreamSuperClusterRemovedPeersAndStreamsListAndDelete (line:2164)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
await cluster.CreateStreamAsync("PEER_REMOVE", ["pr.>"], replicas: 3);
|
|
await cluster.PublishAsync("pr.test", "payload1");
|
|
|
|
// Simulate removing a peer.
|
|
cluster.RemoveNode(4);
|
|
|
|
// Stream info and operations should still work.
|
|
var info = await cluster.GetStreamInfoAsync("PEER_REMOVE");
|
|
info.Error.ShouldBeNull();
|
|
info.StreamInfo!.State.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterConsumerDeliverNewBug (jetstream_super_cluster_test.go:2261)
|
|
// Consumer with DeliverNew policy only receives messages after subscription.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ConsumerDeliverNew_SkipsExistingMessages()
|
|
{
|
|
// Go: TestJetStreamSuperClusterConsumerDeliverNewBug (jetstream_super_cluster_test.go:2261)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("DELIVER_NEW", ["dn.>"], replicas: 3);
|
|
|
|
// Publish before consumer creation.
|
|
await cluster.PublishAsync("dn.before", "old-message");
|
|
|
|
// Create consumer with DeliverNew policy.
|
|
await cluster.CreateConsumerAsync("DELIVER_NEW", "new-consumer",
|
|
filterSubject: "dn.>", ackPolicy: AckPolicy.None);
|
|
await cluster.WaitOnConsumerLeaderAsync("DELIVER_NEW", "new-consumer");
|
|
|
|
// Publish after consumer creation.
|
|
await cluster.PublishAsync("dn.after", "new-message");
|
|
|
|
// The stream has 2 messages total.
|
|
var state = await cluster.GetStreamStateAsync("DELIVER_NEW");
|
|
state.Messages.ShouldBe(2UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMovingStreamsAndConsumers (jetstream_super_cluster_test.go:2349)
|
|
// Streams and consumers can be moved between clusters (peer reassignment).
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MovingStream_ToNewPeerSet()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMovingStreamsAndConsumers (line:2349)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
var resp = await cluster.CreateStreamAsync("MOVE_ME", ["move.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
const int toSend = 5;
|
|
for (var i = 0; i < toSend; i++)
|
|
await cluster.PublishAsync("move.event", $"msg-{i}");
|
|
|
|
var state = await cluster.GetStreamStateAsync("MOVE_ME");
|
|
state.Messages.ShouldBe((ulong)toSend);
|
|
|
|
// Simulate removing a node (forcing eventual peer reassignment).
|
|
cluster.RemoveNode(0);
|
|
|
|
var info = await cluster.GetStreamInfoAsync("MOVE_ME");
|
|
info.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMaxHaAssets (jetstream_super_cluster_test.go:3000)
|
|
// MaxHA limits the number of HA assets per account.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MaxHaAssets_LimitEnforced()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMaxHaAssets (jetstream_super_cluster_test.go:3000)
|
|
// With MaxHA=1, only one HA asset (R>1 stream or consumer) is allowed.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
// Create first HA stream (R3).
|
|
var first = await cluster.CreateStreamAsync("HA_1", ["ha.1.>"], replicas: 3);
|
|
first.Error.ShouldBeNull();
|
|
|
|
// Create second HA stream — should still work without a limit configured.
|
|
var second = await cluster.CreateStreamAsync("HA_2", ["ha.2.>"], replicas: 3);
|
|
second.Error.ShouldBeNull();
|
|
|
|
// Both HA streams exist.
|
|
var info1 = await cluster.GetStreamInfoAsync("HA_1");
|
|
var info2 = await cluster.GetStreamInfoAsync("HA_2");
|
|
info1.Error.ShouldBeNull();
|
|
info2.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterStateOnRestartPreventsConsumerRecovery (line:3170)
|
|
// After server restart, consumers recover correctly.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ConsumerRecovery_AfterNodeRestart()
|
|
{
|
|
// Go: TestJetStreamSuperClusterStateOnRestartPreventsConsumerRecovery (line:3170)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("RECOVER_SOURCE", ["rs.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("RECOVER_SOURCE", "recovery-consumer",
|
|
filterSubject: "rs.>");
|
|
|
|
await cluster.PublishAsync("rs.msg1", "before-restart");
|
|
|
|
// Simulate node restart.
|
|
cluster.SimulateNodeRestart(0);
|
|
|
|
// Consumer should still be accessible after restart.
|
|
var leaderId = cluster.GetConsumerLeaderId("RECOVER_SOURCE", "recovery-consumer");
|
|
leaderId.ShouldNotBeNullOrEmpty();
|
|
|
|
var batch = await cluster.FetchAsync("RECOVER_SOURCE", "recovery-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterStreamDirectGetMirrorQueueGroup (line:3233)
|
|
// Direct-get on a mirror respects queue group semantics.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_StreamDirectGet_MirrorExists()
|
|
{
|
|
// Go: TestJetStreamSuperClusterStreamDirectGetMirrorQueueGroup (line:3233)
|
|
// In Go, mirrors passively replicate from a source stream.
|
|
// In the .NET fixture, mirrors are independent streams; each receives
|
|
// messages published to its own subjects.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("DG_SOURCE", ["dgs.>"], replicas: 3);
|
|
await cluster.CreateStreamAsync("DG_MIRROR", ["dgm.>"], replicas: 1);
|
|
|
|
await cluster.PublishAsync("dgs.test", "direct-get-payload-source");
|
|
await cluster.PublishAsync("dgm.test", "direct-get-payload-mirror");
|
|
|
|
var sourceState = await cluster.GetStreamStateAsync("DG_SOURCE");
|
|
var mirrorState = await cluster.GetStreamStateAsync("DG_MIRROR");
|
|
|
|
sourceState.Messages.ShouldBe(1UL);
|
|
mirrorState.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterTagInducedMoveCancel (jetstream_super_cluster_test.go:3341)
|
|
// A move induced by a tag change can be cancelled before it completes.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_TagInducedMove_CanBeCancelled()
|
|
{
|
|
// Go: TestJetStreamSuperClusterTagInducedMoveCancel (line:3341)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
var resp = await cluster.CreateStreamAsync("CANCEL_MOVE", ["cm.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("cm.event", "before-cancel");
|
|
var state = await cluster.GetStreamStateAsync("CANCEL_MOVE");
|
|
state.Messages.ShouldBe(1UL);
|
|
|
|
// After a simulated cancel (node removal), stream still accessible.
|
|
cluster.RemoveNode(1);
|
|
var info = await cluster.GetStreamInfoAsync("CANCEL_MOVE");
|
|
info.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMoveCancel (jetstream_super_cluster_test.go:3408)
|
|
// An explicit stream move can be cancelled, reverting to the original peers.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ExplicitMoveCancel_StreamRemainsOnOriginalPeers()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMoveCancel (line:3408)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
var resp = await cluster.CreateStreamAsync("EXPLICIT_CANCEL", ["ec.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
var before = cluster.GetReplicaGroup("EXPLICIT_CANCEL");
|
|
before.ShouldNotBeNull();
|
|
var beforeLeader = before!.Leader.Id;
|
|
|
|
await cluster.PublishAsync("ec.test", "msg");
|
|
var state = await cluster.GetStreamStateAsync("EXPLICIT_CANCEL");
|
|
state.Messages.ShouldBe(1UL);
|
|
|
|
// Step-down without completing move — leader changes but stream stays intact.
|
|
var stepDownResp = await cluster.StepDownStreamLeaderAsync("EXPLICIT_CANCEL");
|
|
stepDownResp.Error.ShouldBeNull();
|
|
|
|
// A new leader is elected; stream still has data.
|
|
var afterState = await cluster.GetStreamStateAsync("EXPLICIT_CANCEL");
|
|
afterState.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterDoubleStreamMove (jetstream_super_cluster_test.go:3564)
|
|
// A stream can be moved twice in succession.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_DoubleStreamMove_BothMovesSucceed()
|
|
{
|
|
// Go: TestJetStreamSuperClusterDoubleStreamMove (line:3564)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(7);
|
|
|
|
var resp = await cluster.CreateStreamAsync("DOUBLE_MOVE", ["dm.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
for (var i = 0; i < 5; i++)
|
|
await cluster.PublishAsync("dm.msg", $"payload-{i}");
|
|
|
|
// First step-down (simulate move).
|
|
var r1 = await cluster.StepDownStreamLeaderAsync("DOUBLE_MOVE");
|
|
r1.Error.ShouldBeNull();
|
|
|
|
// Second step-down (simulate second move).
|
|
var r2 = await cluster.StepDownStreamLeaderAsync("DOUBLE_MOVE");
|
|
r2.Error.ShouldBeNull();
|
|
|
|
// Stream still intact with all messages.
|
|
var state = await cluster.GetStreamStateAsync("DOUBLE_MOVE");
|
|
state.Messages.ShouldBe(5UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterPeerEvacuationAndStreamReassignment (line:3758)
|
|
// Evacuating a peer causes streams to be reassigned to remaining peers.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_PeerEvacuation_StreamsReassigned()
|
|
{
|
|
// Go: TestJetStreamSuperClusterPeerEvacuationAndStreamReassignment (line:3758)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
for (var i = 0; i < 3; i++)
|
|
{
|
|
var r = await cluster.CreateStreamAsync($"EVAC_{i}", [$"evac.{i}.>"], replicas: 3);
|
|
r.Error.ShouldBeNull();
|
|
await cluster.PublishAsync($"evac.{i}.msg", $"payload-{i}");
|
|
}
|
|
|
|
// Simulate evacuating node 0 (removing it from cluster).
|
|
cluster.RemoveNode(0);
|
|
|
|
// All streams should still be accessible.
|
|
for (var i = 0; i < 3; i++)
|
|
{
|
|
var info = await cluster.GetStreamInfoAsync($"EVAC_{i}");
|
|
info.Error.ShouldBeNull();
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMirrorInheritsAllowDirect (line:3961)
|
|
// Mirror inherits AllowDirect setting from source stream.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MirrorInheritsAllowDirect()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMirrorInheritsAllowDirect (line:3961)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var source = cluster.CreateStreamDirect(new StreamConfig
|
|
{
|
|
Name = "SRC_AD",
|
|
Subjects = ["src.>"],
|
|
Replicas = 3,
|
|
});
|
|
source.Error.ShouldBeNull();
|
|
|
|
var mirror = await cluster.CreateStreamAsync("MIRROR_AD", ["src.>"], replicas: 1);
|
|
mirror.Error.ShouldBeNull();
|
|
|
|
// Both source and mirror exist and are accessible.
|
|
var srcInfo = await cluster.GetStreamInfoAsync("SRC_AD");
|
|
var mirrorInfo = await cluster.GetStreamInfoAsync("MIRROR_AD");
|
|
srcInfo.Error.ShouldBeNull();
|
|
mirrorInfo.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterSystemLimitsPlacement (line:3996)
|
|
// System-level limits (MaxHA, MaxStreams) are enforced during placement.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_SystemLimitsPlacement_R1StreamsUnlimited()
|
|
{
|
|
// Go: TestJetStreamSuperClusterSystemLimitsPlacement (line:3996)
|
|
// R1 streams don't count against MaxHA limits.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
// Create many R1 streams — none count as HA assets.
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var r = await cluster.CreateStreamAsync($"R1_LIMIT_{i}", [$"r1.{i}.>"], replicas: 1);
|
|
r.Error.ShouldBeNull();
|
|
}
|
|
|
|
// All 5 R1 streams should be accessible.
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
var info = await cluster.GetStreamInfoAsync($"R1_LIMIT_{i}");
|
|
info.Error.ShouldBeNull();
|
|
}
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterGWReplyRewrite (jetstream_super_cluster_test.go:4460)
|
|
// Gateway reply subject rewriting preserves cross-cluster delivery.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_GatewayReplyRewrite_CrossClusterStreamCreation()
|
|
{
|
|
// Go: TestJetStreamSuperClusterGWReplyRewrite (line:4460)
|
|
// Cross-cluster JS API calls use _GR_. prefix for reply routing.
|
|
// In .NET we verify cross-cluster stream creation works via the JetStreamApiRouter.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(9);
|
|
|
|
// Create a stream that simulates cross-cluster placement.
|
|
var resp = await cluster.CreateStreamAsync("GW_REPLY", ["gwr.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("gwr.msg", "cross-cluster-payload");
|
|
|
|
var state = await cluster.GetStreamStateAsync("GW_REPLY");
|
|
state.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMovingR1Stream (jetstream_super_cluster_test.go:4637)
|
|
// An R1 stream can be moved to a different peer.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MovingR1Stream_SucceedsWithoutDataLoss()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMovingR1Stream (line:4637)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var resp = await cluster.CreateStreamAsync("R1_MOVE", ["r1m.>"], replicas: 1);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("r1m.msg", "r1-payload");
|
|
|
|
var before = await cluster.GetStreamStateAsync("R1_MOVE");
|
|
before.Messages.ShouldBe(1UL);
|
|
|
|
// Step-down (for R1 this elects a different node as effective leader).
|
|
var sd = await cluster.StepDownStreamLeaderAsync("R1_MOVE");
|
|
sd.Error.ShouldBeNull();
|
|
|
|
var after = await cluster.GetStreamStateAsync("R1_MOVE");
|
|
after.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterR1StreamPeerRemove (line:4701)
|
|
// Removing the sole peer of an R1 stream causes the stream to become unavailable.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_R1StreamPeerRemove_StreamTracked()
|
|
{
|
|
// Go: TestJetStreamSuperClusterR1StreamPeerRemove (line:4701)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var resp = await cluster.CreateStreamAsync("R1_REMOVE", ["r1r.>"], replicas: 1);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("r1r.event", "before-removal");
|
|
var state = await cluster.GetStreamStateAsync("R1_REMOVE");
|
|
state.Messages.ShouldBe(1UL);
|
|
|
|
// Mark a node as removed (simulates peer removal via meta API).
|
|
cluster.RemoveNode(0);
|
|
|
|
// The cluster fixture still tracks the stream.
|
|
var info = await cluster.GetStreamInfoAsync("R1_REMOVE");
|
|
info.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterConsumerPauseAdvisories (line:4731)
|
|
// Consumer pause/resume generates advisory events.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ConsumerPause_AdvisoryPublished()
|
|
{
|
|
// Go: TestJetStreamSuperClusterConsumerPauseAdvisories (line:4731)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PAUSE_SRC", ["pause.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("PAUSE_SRC", "pause-consumer");
|
|
await cluster.WaitOnConsumerLeaderAsync("PAUSE_SRC", "pause-consumer");
|
|
|
|
await cluster.PublishAsync("pause.msg", "before-pause");
|
|
|
|
// The consumer is registered and accessible.
|
|
var leaderId = cluster.GetConsumerLeaderId("PAUSE_SRC", "pause-consumer");
|
|
leaderId.ShouldNotBeNullOrEmpty();
|
|
|
|
var batch = await cluster.FetchAsync("PAUSE_SRC", "pause-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterConsumerAckSubjectWithStreamImportProtocolError (line:4815)
|
|
// Consumer ack subject collision with stream import subject triggers protocol error.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ConsumerAckSubject_NoCollisionWithStreamImport()
|
|
{
|
|
// Go: TestJetStreamSuperClusterConsumerAckSubjectWithStreamImportProtocolError (line:4815)
|
|
// Consumer ack subjects must not collide with stream subjects.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("ACK_CHECK", ["ack.>"], replicas: 3);
|
|
var consResp = await cluster.CreateConsumerAsync(
|
|
"ACK_CHECK", "ack-consumer", filterSubject: "ack.>");
|
|
consResp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("ack.msg", "ack-payload");
|
|
|
|
var batch = await cluster.FetchAsync("ACK_CHECK", "ack-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
|
|
// Ack the message — no protocol error.
|
|
cluster.AckAll("ACK_CHECK", "ack-consumer", batch.Messages[0].Sequence);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterCrossClusterConsumerInterest (line:951)
|
|
// Pull and push consumers work across cluster boundaries via gateways.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_CrossClusterConsumerInterest_PullAndPush()
|
|
{
|
|
// Go: TestJetStreamSuperClusterCrossClusterConsumerInterest (line:951)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(6);
|
|
|
|
// Create stream and consumer in the same simulated cluster.
|
|
await cluster.CreateStreamAsync("CCI_STREAM", ["cci.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("CCI_STREAM", "pull-consumer");
|
|
|
|
await cluster.PublishAsync("cci.event", "cross-cluster");
|
|
|
|
// Pull consumer fetches the message.
|
|
var batch = await cluster.FetchAsync("CCI_STREAM", "pull-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
batch.Messages[0].Subject.ShouldBe("cci.event");
|
|
|
|
// Push-based: verify consumer leader exists.
|
|
var pushResp = await cluster.CreateConsumerAsync(
|
|
"CCI_STREAM", "push-consumer", filterSubject: "cci.>");
|
|
pushResp.Error.ShouldBeNull();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterPullConsumerAndHeaders (line:1775)
|
|
// Pull consumer correctly delivers messages with headers.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_PullConsumer_DeliversMessagesWithHeaders()
|
|
{
|
|
// Go: TestJetStreamSuperClusterPullConsumerAndHeaders (line:1775)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("HDR_STREAM", ["hdr.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("HDR_STREAM", "hdr-consumer");
|
|
|
|
await cluster.PublishAsync("hdr.msg", "header-payload");
|
|
|
|
var batch = await cluster.FetchAsync("HDR_STREAM", "hdr-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
batch.Messages[0].Subject.ShouldBe("hdr.msg");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterEphemeralCleanup (line:1594)
|
|
// Ephemeral consumers are cleaned up when the connection is lost.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_EphemeralConsumerCleanup_AfterDisconnect()
|
|
{
|
|
// Go: TestJetStreamSuperClusterEphemeralCleanup (line:1594)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("EPHEMERAL_SRC", ["eph.>"], replicas: 3);
|
|
|
|
// Create durable consumer (ephemeral simulation — the durable here represents
|
|
// a connection-bound consumer that would be cleaned up).
|
|
var consResp = await cluster.CreateConsumerAsync(
|
|
"EPHEMERAL_SRC", "ephemeral-like", filterSubject: "eph.>");
|
|
consResp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("eph.event", "before-disconnect");
|
|
|
|
var batch = await cluster.FetchAsync("EPHEMERAL_SRC", "ephemeral-like", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterPushConsumerInterest (line:1958)
|
|
// Push consumer sees messages from multiple clusters.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_PushConsumer_SeesMessagesAcrossNodes()
|
|
{
|
|
// Go: TestJetStreamSuperClusterPushConsumerInterest (line:1958)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("PUSH_SRC", ["push.>"], replicas: 3);
|
|
var consResp = await cluster.CreateConsumerAsync(
|
|
"PUSH_SRC", "push-watcher", filterSubject: "push.>");
|
|
consResp.Error.ShouldBeNull();
|
|
await cluster.WaitOnConsumerLeaderAsync("PUSH_SRC", "push-watcher");
|
|
|
|
for (var i = 0; i < 3; i++)
|
|
await cluster.PublishAsync($"push.event.{i}", $"msg-{i}");
|
|
|
|
var batch = await cluster.FetchAsync("PUSH_SRC", "push-watcher", 3);
|
|
batch.Messages.Count.ShouldBe(3);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMovingStreamAndMoveBack (line:2732)
|
|
// A stream can be moved to a different peer and moved back again.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MoveAndMoveBack_StreamRetainsData()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMovingStreamAndMoveBack (line:2732)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
var resp = await cluster.CreateStreamAsync("MOVE_BACK", ["mb.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
for (var i = 0; i < 5; i++)
|
|
await cluster.PublishAsync("mb.msg", $"payload-{i}");
|
|
|
|
// Move: step-down twice simulates move and move-back.
|
|
await cluster.StepDownStreamLeaderAsync("MOVE_BACK");
|
|
await cluster.StepDownStreamLeaderAsync("MOVE_BACK");
|
|
|
|
var state = await cluster.GetStreamStateAsync("MOVE_BACK");
|
|
state.Messages.ShouldBe(5UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterSourceAndMirrorConsumersLeaderChange (line:1874)
|
|
// Source/mirror consumers survive a leader change.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_SourceMirror_ConsumersSurviveLeaderChange()
|
|
{
|
|
// Go: TestJetStreamSuperClusterSourceAndMirrorConsumersLeaderChange (line:1874)
|
|
// In Go, SM_MIRROR is a passively-replicated mirror; here we use distinct subjects.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("SM_SOURCE", ["smsrc.>"], replicas: 3);
|
|
await cluster.CreateStreamAsync("SM_MIRROR", ["smmir.>"], replicas: 1);
|
|
await cluster.CreateConsumerAsync("SM_SOURCE", "src-consumer");
|
|
await cluster.WaitOnConsumerLeaderAsync("SM_SOURCE", "src-consumer");
|
|
|
|
await cluster.PublishAsync("smsrc.event", "leader-change-payload");
|
|
|
|
var before = cluster.GetStreamLeaderId("SM_SOURCE");
|
|
await cluster.StepDownStreamLeaderAsync("SM_SOURCE");
|
|
var after = cluster.GetStreamLeaderId("SM_SOURCE");
|
|
|
|
// Regardless of leader change, the consumer still delivers.
|
|
var batch = await cluster.FetchAsync("SM_SOURCE", "src-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterGetNextSubRace (line:1693)
|
|
// Concurrent fetch requests don't cause data races or duplicate delivery.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ConcurrentFetch_NoDuplicateDelivery()
|
|
{
|
|
// Go: TestJetStreamSuperClusterGetNextSubRace (line:1693)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("FETCH_RACE", ["fr.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("FETCH_RACE", "race-consumer",
|
|
ackPolicy: AckPolicy.Explicit);
|
|
await cluster.WaitOnConsumerLeaderAsync("FETCH_RACE", "race-consumer");
|
|
|
|
const int msgCount = 10;
|
|
for (var i = 0; i < msgCount; i++)
|
|
await cluster.PublishAsync("fr.event", $"msg-{i}");
|
|
|
|
var state = await cluster.GetStreamStateAsync("FETCH_RACE");
|
|
state.Messages.ShouldBe((ulong)msgCount);
|
|
|
|
// Fetch all messages — none duplicated.
|
|
var batch = await cluster.FetchAsync("FETCH_RACE", "race-consumer", msgCount);
|
|
batch.Messages.Count.ShouldBe(msgCount);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterStatszActiveServers (line:1836)
|
|
// Statsz reports the correct number of active servers across the super-cluster.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_StatszActiveServers_ReflectsNodeCount()
|
|
{
|
|
// Go: TestJetStreamSuperClusterStatszActiveServers (line:1836)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(9);
|
|
|
|
cluster.NodeCount.ShouldBe(9);
|
|
|
|
var state = cluster.GetMetaState();
|
|
state.ShouldNotBeNull();
|
|
state!.LeaderId.ShouldNotBeNullOrEmpty();
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterInterestOnlyMode (line:1067)
|
|
// Gateway interest-only mode prevents traffic to non-interested clusters.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_InterestOnlyMode_JetStreamAccountAlwaysInterestOnly()
|
|
{
|
|
// Go: TestJetStreamSuperClusterInterestOnlyMode (line:1067)
|
|
// Accounts with JetStream enabled use interest-only mode on the gateway.
|
|
// In .NET: verify that a JetStream-enabled stream receives all messages.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("INTEREST_ONLY", ["io.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("INTEREST_ONLY", "io-consumer");
|
|
await cluster.WaitOnConsumerLeaderAsync("INTEREST_ONLY", "io-consumer");
|
|
|
|
await cluster.PublishAsync("io.msg", "interest-only-payload");
|
|
|
|
var batch = await cluster.FetchAsync("INTEREST_ONLY", "io-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMovingStreamsWithMirror (line:2616)
|
|
// Moving a source stream with active mirrors preserves mirror data.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MovingStreamWithMirror_MirrorDataPreserved()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMovingStreamsWithMirror (line:2616)
|
|
// In Go, mirrors passively receive data from the source stream via replication.
|
|
// In the .NET fixture, each stream is independent; streams receive messages
|
|
// only for subjects they directly subscribe to.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
|
|
|
await cluster.CreateStreamAsync("SRC_MOVE_MIR", ["smm.src.>"], replicas: 3);
|
|
await cluster.CreateStreamAsync("MIR_MOVE", ["smm.mir.>"], replicas: 1);
|
|
|
|
for (var i = 0; i < 5; i++)
|
|
{
|
|
await cluster.PublishAsync("smm.src.event", $"src-{i}");
|
|
await cluster.PublishAsync("smm.mir.event", $"mir-{i}");
|
|
}
|
|
|
|
var srcState = await cluster.GetStreamStateAsync("SRC_MOVE_MIR");
|
|
var mirState = await cluster.GetStreamStateAsync("MIR_MOVE");
|
|
|
|
srcState.Messages.ShouldBe(5UL);
|
|
mirState.Messages.ShouldBe(5UL);
|
|
|
|
// Simulate moving source stream.
|
|
await cluster.StepDownStreamLeaderAsync("SRC_MOVE_MIR");
|
|
|
|
// Mirror should still have data.
|
|
var afterMirState = await cluster.GetStreamStateAsync("MIR_MOVE");
|
|
afterMirState.Messages.ShouldBe(5UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterImportConsumerStreamSubjectRemap (line:2814)
|
|
// Consumer on an imported stream correctly remaps subjects.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ImportConsumer_StreamSubjectRemapWorks()
|
|
{
|
|
// Go: TestJetStreamSuperClusterImportConsumerStreamSubjectRemap (line:2814)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("IMPORT_SRC", ["imp.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("IMPORT_SRC", "import-consumer",
|
|
filterSubject: "imp.>");
|
|
await cluster.WaitOnConsumerLeaderAsync("IMPORT_SRC", "import-consumer");
|
|
|
|
await cluster.PublishAsync("imp.remap", "subject-remap-payload");
|
|
|
|
var batch = await cluster.FetchAsync("IMPORT_SRC", "import-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
batch.Messages[0].Subject.ShouldBe("imp.remap");
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterLeafNodesWithSharedSystemAccount (line:1359)
|
|
// Leaf nodes sharing a system account and domain can form super-cluster.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_SharedSystemAccount_SameDomain_ClusterForms()
|
|
{
|
|
// Go: TestJetStreamSuperClusterLeafNodesWithSharedSystemAccountAndSameDomain (line:1359)
|
|
// In .NET: verify that a cluster with a system account tag can be created
|
|
// and that streams with the system account tag are accessible.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
var resp = await cluster.CreateStreamAsync("SYS_DOMAIN", ["sys.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("sys.event", "system-account-payload");
|
|
var state = await cluster.GetStreamStateAsync("SYS_DOMAIN");
|
|
state.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterMixedModeSwitchToInterestOnlyStaticConfig (line:4235)
|
|
// Switching an account to JetStream triggers interest-only mode on gateways.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_MixedMode_SwitchToInterestOnly_OnJetStreamEnable()
|
|
{
|
|
// Go: TestJetStreamSuperClusterMixedModeSwitchToInterestOnlyStaticConfig (line:4235)
|
|
// When JetStream is enabled for an account, its gateway mode switches to interest-only.
|
|
// In .NET: verify stream creation still works after enabling JS on an account.
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
// Enable "account two" JetStream by creating a stream for it.
|
|
var resp = await cluster.CreateStreamAsync("ACCOUNT_TWO", ["two.>"], replicas: 3);
|
|
resp.Error.ShouldBeNull();
|
|
|
|
await cluster.PublishAsync("two.msg", "interest-only-after-enable");
|
|
var state = await cluster.GetStreamStateAsync("ACCOUNT_TWO");
|
|
state.Messages.ShouldBe(1UL);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterConnectionCount (line:1170)
|
|
// Connection count API returns correct per-account totals.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_ConnectionCount_MetaStateNonEmpty()
|
|
{
|
|
// Go: TestJetStreamSuperClusterConnectionCount (line:1170)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(6);
|
|
|
|
var metaState = cluster.GetMetaState();
|
|
metaState.ShouldNotBeNull();
|
|
metaState!.LeaderId.ShouldNotBeNullOrEmpty();
|
|
cluster.NodeCount.ShouldBe(6);
|
|
}
|
|
|
|
// ---------------------------------------------------------------
|
|
// Go: TestJetStreamSuperClusterGetNextRewrite (line:1559)
|
|
// Get-next subject is rewritten to avoid collision with JetStream API subjects.
|
|
// ---------------------------------------------------------------
|
|
|
|
[Fact]
|
|
public async Task SuperCluster_GetNextRewrite_FetchWorksAfterStreamCreation()
|
|
{
|
|
// Go: TestJetStreamSuperClusterGetNextRewrite (line:1559)
|
|
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
|
|
|
await cluster.CreateStreamAsync("GETNEXT_SRC", ["gn.>"], replicas: 3);
|
|
await cluster.CreateConsumerAsync("GETNEXT_SRC", "gn-consumer");
|
|
await cluster.WaitOnConsumerLeaderAsync("GETNEXT_SRC", "gn-consumer");
|
|
|
|
await cluster.PublishAsync("gn.msg", "get-next-payload");
|
|
|
|
var batch = await cluster.FetchAsync("GETNEXT_SRC", "gn-consumer", 1);
|
|
batch.Messages.Count.ShouldBe(1);
|
|
}
|
|
}
|