refactor: extract NATS.Server.JetStream.Tests project
Move 225 JetStream-related test files from NATS.Server.Tests into a dedicated NATS.Server.JetStream.Tests project. This includes root-level JetStream*.cs files, storage test files (FileStore, MemStore, StreamStoreContract), and the full JetStream/ subfolder tree (Api, Cluster, Consumers, MirrorSource, Snapshots, Storage, Streams). Updated all namespaces, added InternalsVisibleTo, registered in the solution file, and added the JETSTREAM_INTEGRATION_MATRIX define.
This commit is contained in:
@@ -0,0 +1,367 @@
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for AssignmentCodec: binary serialization for stream and consumer assignments
|
||||
/// with optional S2/Snappy compression for large payloads.
|
||||
/// Go reference: jetstream_cluster.go:8703-9246 (encodeAddStreamAssignment,
|
||||
/// encodeAddConsumerAssignment, decodeStreamAssignment, decodeConsumerAssignment,
|
||||
/// encodeAddConsumerAssignmentCompressed, decodeConsumerAssignmentCompressed).
|
||||
/// </summary>
|
||||
public class AssignmentCodecTests
{
    // ---------------------------------------------------------------
    // StreamAssignment round-trip
    // Go reference: jetstream_cluster.go:8703 encodeAddStreamAssignment /
    // 8733 decodeStreamAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void Encode_decode_stream_assignment_round_trip()
    {
        // Go reference: jetstream_cluster.go:8703 encodeAddStreamAssignment + 8733 decodeStreamAssignment
        // Populates every field (including optional ones like Preferred and Reassigning)
        // so the round-trip proves full-fidelity serialization, not just the happy subset.
        var created = new DateTime(2025, 3, 15, 9, 0, 0, DateTimeKind.Utc);
        var sa = new StreamAssignment
        {
            StreamName = "orders",
            Group = new RaftGroup
            {
                Name = "rg-orders",
                Peers = ["peer-1", "peer-2", "peer-3"],
                StorageType = "file",
                Cluster = "cluster-east",
                Preferred = "peer-1",
                DesiredReplicas = 3,
            },
            Created = created,
            ConfigJson = """{"subjects":["orders.>"],"storage":"file","replicas":3}""",
            SyncSubject = "$JS.SYNC.orders",
            Responded = true,
            Recovering = false,
            Reassigning = true,
        };

        var encoded = AssignmentCodec.EncodeStreamAssignment(sa);
        encoded.ShouldNotBeEmpty();

        var decoded = AssignmentCodec.DecodeStreamAssignment(encoded);
        decoded.ShouldNotBeNull();
        decoded!.StreamName.ShouldBe("orders");
        decoded.Group.Name.ShouldBe("rg-orders");
        decoded.Group.Peers.ShouldBe(["peer-1", "peer-2", "peer-3"]);
        decoded.Group.StorageType.ShouldBe("file");
        decoded.Group.Cluster.ShouldBe("cluster-east");
        decoded.Group.Preferred.ShouldBe("peer-1");
        decoded.Group.DesiredReplicas.ShouldBe(3);
        decoded.Created.ShouldBe(created);
        decoded.ConfigJson.ShouldBe("""{"subjects":["orders.>"],"storage":"file","replicas":3}""");
        decoded.SyncSubject.ShouldBe("$JS.SYNC.orders");
        decoded.Responded.ShouldBeTrue();
        decoded.Recovering.ShouldBeFalse();
        decoded.Reassigning.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // ConsumerAssignment round-trip
    // Go reference: jetstream_cluster.go:9175 encodeAddConsumerAssignment /
    // 9195 decodeConsumerAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void Encode_decode_consumer_assignment_round_trip()
    {
        // Go reference: jetstream_cluster.go:9175 encodeAddConsumerAssignment + 9195 decodeConsumerAssignment
        var created = new DateTime(2025, 6, 1, 12, 0, 0, DateTimeKind.Utc);
        var ca = new ConsumerAssignment
        {
            ConsumerName = "push-consumer",
            StreamName = "events",
            Group = new RaftGroup
            {
                Name = "rg-push",
                Peers = ["node-a", "node-b"],
                StorageType = "memory",
                DesiredReplicas = 2,
            },
            Created = created,
            ConfigJson = """{"deliver_subject":"push.out","filter_subject":"events.>"}""",
            Responded = true,
            Recovering = true,
        };

        var encoded = AssignmentCodec.EncodeConsumerAssignment(ca);
        encoded.ShouldNotBeEmpty();

        var decoded = AssignmentCodec.DecodeConsumerAssignment(encoded);
        decoded.ShouldNotBeNull();
        decoded!.ConsumerName.ShouldBe("push-consumer");
        decoded.StreamName.ShouldBe("events");
        decoded.Group.Name.ShouldBe("rg-push");
        decoded.Group.Peers.ShouldBe(["node-a", "node-b"]);
        decoded.Group.StorageType.ShouldBe("memory");
        decoded.Group.DesiredReplicas.ShouldBe(2);
        decoded.Created.ShouldBe(created);
        decoded.ConfigJson.ShouldBe("""{"deliver_subject":"push.out","filter_subject":"events.>"}""");
        decoded.Responded.ShouldBeTrue();
        decoded.Recovering.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Error handling
    // Go reference: jetstream_cluster.go:8733 error return on bad unmarshal
    // ---------------------------------------------------------------

    [Fact]
    public void Decode_returns_null_for_invalid_data()
    {
        // Go reference: jetstream_cluster.go:8736 json.Unmarshal error → nil, error
        // The codec's error path maps to a null return rather than throwing.
        var garbage = new byte[] { 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x01, 0x02, 0x03 };
        var result = AssignmentCodec.DecodeStreamAssignment(garbage);
        result.ShouldBeNull();
    }

    [Fact]
    public void Decode_returns_null_for_empty_data()
    {
        // Go reference: jetstream_cluster.go:8733 empty buf → json.Unmarshal fails → nil
        var result = AssignmentCodec.DecodeStreamAssignment(ReadOnlySpan<byte>.Empty);
        result.ShouldBeNull();

        var caResult = AssignmentCodec.DecodeConsumerAssignment(ReadOnlySpan<byte>.Empty);
        caResult.ShouldBeNull();
    }

    // ---------------------------------------------------------------
    // Compression: CompressIfLarge
    // Go reference: jetstream_cluster.go:9226 encodeAddConsumerAssignmentCompressed
    // uses s2.NewWriter for large consumer configs
    // ---------------------------------------------------------------

    [Fact]
    public void CompressIfLarge_compresses_when_above_threshold()
    {
        // Go reference: jetstream_cluster.go:9226 — S2 compression applied to large consumer assignments
        var largeData = Encoding.UTF8.GetBytes(new string('X', 2048));
        var compressed = AssignmentCodec.CompressIfLarge(largeData, threshold: 1024);

        // Above the threshold the codec must produce a new (compressed) buffer,
        // never hand back the caller's array.
        compressed.ShouldNotBeSameAs(largeData);
        // 2 KiB of a single repeated byte is highly compressible, so the output
        // should be strictly shorter than the input.
        compressed.Length.ShouldBeLessThan(largeData.Length);
    }

    [Fact]
    public void CompressIfLarge_no_compress_below_threshold()
    {
        // Go reference: jetstream_cluster.go — small consumer assignments sent uncompressed
        var smallData = Encoding.UTF8.GetBytes("""{"stream_name":"foo"}""");
        var result = AssignmentCodec.CompressIfLarge(smallData, threshold: 1024);

        result.ShouldBe(smallData);
    }

    // ---------------------------------------------------------------
    // Compression: DecompressIfNeeded
    // Go reference: jetstream_cluster.go:9238 decodeConsumerAssignmentCompressed
    // ---------------------------------------------------------------

    [Fact]
    public void DecompressIfNeeded_decompresses_snappy_data()
    {
        // Go reference: jetstream_cluster.go:9238 decodeConsumerAssignmentCompressed
        var original = Encoding.UTF8.GetBytes("""{"stream_name":"test","group":{"name":"rg"}}""");
        var compressed = AssignmentCodec.CompressIfLarge(original, threshold: 0); // force compress

        var decompressed = AssignmentCodec.DecompressIfNeeded(compressed);
        decompressed.ShouldBe(original);
    }

    [Fact]
    public void DecompressIfNeeded_returns_raw_for_non_compressed()
    {
        // Go reference: jetstream_cluster.go:9195 decodeConsumerAssignment (non-compressed path)
        // Plain JSON must pass through unchanged so mixed-version peers interoperate.
        var plainJson = Encoding.UTF8.GetBytes("""{"stream_name":"test"}""");
        var result = AssignmentCodec.DecompressIfNeeded(plainJson);
        result.ShouldBe(plainJson);
    }

    // ---------------------------------------------------------------
    // Consumer preservation in StreamAssignment round-trip
    // Go reference: jetstream_cluster.go streamAssignment.Consumers map serialization
    // ---------------------------------------------------------------

    [Fact]
    public void Stream_assignment_preserves_consumer_assignments()
    {
        // Go reference: jetstream_cluster.go streamAssignment consumers map preserved in encoding
        var sa = new StreamAssignment
        {
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-events", Peers = ["n1", "n2", "n3"] },
            ConfigJson = """{"subjects":["events.>"]}""",
        };

        // Three consumers with distinct flag combinations to catch per-entry field loss.
        sa.Consumers["consumer-alpha"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-alpha",
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-alpha", Peers = ["n1"] },
            ConfigJson = """{"deliver_subject":"out.alpha"}""",
            Responded = true,
        };
        sa.Consumers["consumer-beta"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-beta",
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-beta", Peers = ["n2"] },
            Recovering = true,
        };
        sa.Consumers["consumer-gamma"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-gamma",
            StreamName = "events",
            Group = new RaftGroup { Name = "rg-gamma", Peers = ["n3"] },
        };

        var encoded = AssignmentCodec.EncodeStreamAssignment(sa);
        var decoded = AssignmentCodec.DecodeStreamAssignment(encoded);

        decoded.ShouldNotBeNull();
        decoded!.Consumers.Count.ShouldBe(3);
        decoded.Consumers["consumer-alpha"].ConsumerName.ShouldBe("consumer-alpha");
        decoded.Consumers["consumer-alpha"].Responded.ShouldBeTrue();
        decoded.Consumers["consumer-beta"].Recovering.ShouldBeTrue();
        decoded.Consumers["consumer-gamma"].Group.Name.ShouldBe("rg-gamma");
    }

    // ---------------------------------------------------------------
    // RaftGroup peer list preservation
    // Go reference: jetstream_cluster.go raftGroup.Peers serialization
    // ---------------------------------------------------------------

    [Fact]
    public void Stream_assignment_preserves_raft_group_peers()
    {
        // Go reference: jetstream_cluster.go:154 raftGroup.Peers in assignment encoding
        var sa = new StreamAssignment
        {
            StreamName = "telemetry",
            Group = new RaftGroup
            {
                Name = "rg-telemetry",
                Peers = ["peer-alpha", "peer-beta", "peer-gamma"],
                DesiredReplicas = 3,
            },
        };

        var encoded = AssignmentCodec.EncodeStreamAssignment(sa);
        var decoded = AssignmentCodec.DecodeStreamAssignment(encoded);

        decoded.ShouldNotBeNull();
        decoded!.Group.Peers.Count.ShouldBe(3);
        decoded.Group.Peers.ShouldContain("peer-alpha");
        decoded.Group.Peers.ShouldContain("peer-beta");
        decoded.Group.Peers.ShouldContain("peer-gamma");
        decoded.Group.DesiredReplicas.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Large ConfigJson round-trip through compression
    // Go reference: jetstream_cluster.go:9226 encodeAddConsumerAssignmentCompressed
    // for large consumer configs
    // ---------------------------------------------------------------

    [Fact]
    public void Compress_decompress_round_trip_with_large_config()
    {
        // Go reference: jetstream_cluster.go:9226 — compressed consumer assignment with large config
        // 50 subject entries push the encoded assignment past the 512-byte threshold.
        var largeConfig = """{"subjects":[""" +
            string.Join(",", Enumerable.Range(1, 50).Select(i => $"\"events.topic.{i}.>\"")) +
            """],"storage":"file","replicas":3,"max_msgs":1000000,"max_bytes":1073741824}""";

        var ca = new ConsumerAssignment
        {
            ConsumerName = "large-config-consumer",
            StreamName = "big-stream",
            Group = new RaftGroup
            {
                Name = "rg-large",
                Peers = ["n1", "n2", "n3"],
            },
            ConfigJson = largeConfig,
        };

        var encoded = AssignmentCodec.EncodeConsumerAssignment(ca);
        var compressed = AssignmentCodec.CompressIfLarge(encoded, threshold: 512);

        // Compressed should be present (input is large)
        compressed.Length.ShouldBeGreaterThan(0);

        var decompressed = AssignmentCodec.DecompressIfNeeded(compressed);
        var decoded = AssignmentCodec.DecodeConsumerAssignment(decompressed);

        decoded.ShouldNotBeNull();
        decoded!.ConsumerName.ShouldBe("large-config-consumer");
        decoded.ConfigJson.ShouldBe(largeConfig);
        decoded.Group.Peers.Count.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Golden fixture test: known-good JSON bytes decode correctly
    // Go reference: jetstream_cluster.go decodeStreamAssignment / decodeConsumerAssignment
    // ---------------------------------------------------------------

    [Fact]
    public void Golden_fixture_known_bytes()
    {
        // Go reference: jetstream_cluster.go:8733 decodeStreamAssignment — format stability test.
        // This fixture encodes a specific known StreamAssignment JSON and verifies that
        // the codec can decode it correctly, ensuring the serialization format remains stable.
        //
        // The JSON uses snake_case property names (JsonNamingPolicy.SnakeCaseLower).
        // Created timestamp: 2025-01-15T00:00:00Z = 638724960000000000 ticks
        // (739,265 days since 0001-01-01 × 864,000,000,000 ticks/day).
        const string goldenJson = """
        {
          "stream_name": "golden-stream",
          "group": {
            "name": "rg-golden",
            "peers": ["node-1", "node-2", "node-3"],
            "storage_type": "file",
            "cluster": "us-east",
            "preferred": "node-1",
            "desired_replicas": 3
          },
          "created": "2025-01-15T00:00:00Z",
          "config_json": "{\"subjects\":[\"golden.>\"]}",
          "sync_subject": "$JS.SYNC.golden-stream",
          "responded": true,
          "recovering": false,
          "reassigning": false,
          "consumers": {}
        }
        """;

        var bytes = Encoding.UTF8.GetBytes(goldenJson);
        var decoded = AssignmentCodec.DecodeStreamAssignment(bytes);

        decoded.ShouldNotBeNull();
        decoded!.StreamName.ShouldBe("golden-stream");
        decoded.Group.Name.ShouldBe("rg-golden");
        decoded.Group.Peers.ShouldBe(["node-1", "node-2", "node-3"]);
        decoded.Group.StorageType.ShouldBe("file");
        decoded.Group.Cluster.ShouldBe("us-east");
        decoded.Group.Preferred.ShouldBe("node-1");
        decoded.Group.DesiredReplicas.ShouldBe(3);
        decoded.Created.ShouldBe(new DateTime(2025, 1, 15, 0, 0, 0, DateTimeKind.Utc));
        decoded.ConfigJson.ShouldBe("""{"subjects":["golden.>"]}""");
        decoded.SyncSubject.ShouldBe("$JS.SYNC.golden-stream");
        decoded.Responded.ShouldBeTrue();
        decoded.Recovering.ShouldBeFalse();
        decoded.Reassigning.ShouldBeFalse();
        decoded.Consumers.ShouldBeEmpty();
    }
}
|
||||
@@ -0,0 +1,245 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: RaftGroup quorum calculation, HasQuorum checks, StreamAssignment
|
||||
// and ConsumerAssignment creation, consumer dictionary operations,
|
||||
// Preferred peer tracking.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for ClusterAssignmentTypes: RaftGroup quorum semantics,
|
||||
/// StreamAssignment lifecycle, and ConsumerAssignment defaults.
|
||||
/// Go reference: jetstream_cluster.go:154-266 (raftGroup, streamAssignment, consumerAssignment).
|
||||
/// </summary>
|
||||
public class AssignmentSerializationTests
{
    // ===============================================================
    // RaftGroup.QuorumSize
    // Go reference: jetstream_cluster.go:154-163 raftGroup.quorumNeeded()
    // Quorum is a strict majority: (peers / 2) + 1.
    // ===============================================================

    [Fact]
    public void RaftGroup_quorum_size_for_single_node_is_one()
    {
        var rg = new RaftGroup { Name = "test-r1", Peers = ["peer-1"] };

        rg.QuorumSize.ShouldBe(1);
    }

    [Fact]
    public void RaftGroup_quorum_size_for_three_nodes_is_two()
    {
        var rg = new RaftGroup { Name = "test-r3", Peers = ["p1", "p2", "p3"] };

        rg.QuorumSize.ShouldBe(2);
    }

    [Fact]
    public void RaftGroup_quorum_size_for_five_nodes_is_three()
    {
        var rg = new RaftGroup { Name = "test-r5", Peers = ["p1", "p2", "p3", "p4", "p5"] };

        rg.QuorumSize.ShouldBe(3);
    }

    [Fact]
    public void RaftGroup_quorum_size_for_empty_peers_is_one()
    {
        var rg = new RaftGroup { Name = "test-empty", Peers = [] };

        // Degenerate case: (0 / 2) + 1 = 1.
        rg.QuorumSize.ShouldBe(1);
    }

    // ===============================================================
    // RaftGroup.HasQuorum
    // Go reference: jetstream_cluster.go raftGroup quorum check
    // ===============================================================

    [Fact]
    public void HasQuorum_returns_true_when_acks_meet_quorum()
    {
        var rg = new RaftGroup { Name = "q-test", Peers = ["p1", "p2", "p3"] };

        // Quorum for 3 peers is 2; both 2 and 3 acks satisfy it.
        rg.HasQuorum(2).ShouldBeTrue();
        rg.HasQuorum(3).ShouldBeTrue();
    }

    [Fact]
    public void HasQuorum_returns_false_when_acks_below_quorum()
    {
        var rg = new RaftGroup { Name = "q-test", Peers = ["p1", "p2", "p3"] };

        rg.HasQuorum(1).ShouldBeFalse();
        rg.HasQuorum(0).ShouldBeFalse();
    }

    [Fact]
    public void HasQuorum_single_node_requires_one_ack()
    {
        var rg = new RaftGroup { Name = "q-r1", Peers = ["p1"] };

        rg.HasQuorum(1).ShouldBeTrue();
        rg.HasQuorum(0).ShouldBeFalse();
    }

    [Fact]
    public void HasQuorum_five_nodes_requires_three_acks()
    {
        var rg = new RaftGroup { Name = "q-r5", Peers = ["p1", "p2", "p3", "p4", "p5"] };

        // Boundary at the quorum size of 3.
        rg.HasQuorum(2).ShouldBeFalse();
        rg.HasQuorum(3).ShouldBeTrue();
        rg.HasQuorum(5).ShouldBeTrue();
    }

    // ===============================================================
    // RaftGroup property defaults
    // Go reference: jetstream_cluster.go:154-163
    // ===============================================================

    [Fact]
    public void RaftGroup_defaults_storage_to_file()
    {
        var rg = new RaftGroup { Name = "defaults" };

        rg.StorageType.ShouldBe("file");
    }

    [Fact]
    public void RaftGroup_defaults_cluster_to_empty()
    {
        var rg = new RaftGroup { Name = "defaults" };

        rg.Cluster.ShouldBe(string.Empty);
    }

    [Fact]
    public void RaftGroup_preferred_peer_tracking()
    {
        var rg = new RaftGroup { Name = "pref-test", Peers = ["p1", "p2", "p3"] };

        // No preferred peer until one is assigned.
        rg.Preferred.ShouldBe(string.Empty);

        rg.Preferred = "p2";
        rg.Preferred.ShouldBe("p2");
    }

    // ===============================================================
    // StreamAssignment creation
    // Go reference: jetstream_cluster.go:166-184 streamAssignment
    // ===============================================================

    [Fact]
    public void StreamAssignment_created_with_defaults()
    {
        var rg = new RaftGroup { Name = "sa-group", Peers = ["p1"] };
        var assignment = new StreamAssignment { StreamName = "TEST-STREAM", Group = rg };

        assignment.StreamName.ShouldBe("TEST-STREAM");
        assignment.Group.ShouldBeSameAs(rg);
        assignment.ConfigJson.ShouldBe("{}");
        assignment.SyncSubject.ShouldBe(string.Empty);
        assignment.Responded.ShouldBeFalse();
        assignment.Recovering.ShouldBeFalse();
        assignment.Reassigning.ShouldBeFalse();
        assignment.Consumers.ShouldBeEmpty();
        assignment.Created.ShouldBeGreaterThan(DateTime.MinValue);
    }

    [Fact]
    public void StreamAssignment_consumers_dictionary_operations()
    {
        var streamGroup = new RaftGroup { Name = "sa-cons", Peers = ["p1", "p2", "p3"] };
        var assignment = new StreamAssignment { StreamName = "MY-STREAM", Group = streamGroup };

        var consumerGroup = new RaftGroup { Name = "cons-group", Peers = ["p1"] };
        var consumer = new ConsumerAssignment
        {
            ConsumerName = "durable-1",
            StreamName = "MY-STREAM",
            Group = consumerGroup,
        };

        // Add, read back, then remove.
        assignment.Consumers["durable-1"] = consumer;
        assignment.Consumers.Count.ShouldBe(1);
        assignment.Consumers["durable-1"].ConsumerName.ShouldBe("durable-1");

        assignment.Consumers.Remove("durable-1");
        assignment.Consumers.ShouldBeEmpty();
    }

    // ===============================================================
    // ConsumerAssignment creation
    // Go reference: jetstream_cluster.go:250-266 consumerAssignment
    // ===============================================================

    [Fact]
    public void ConsumerAssignment_created_with_defaults()
    {
        var rg = new RaftGroup { Name = "ca-group", Peers = ["p1"] };
        var consumer = new ConsumerAssignment
        {
            ConsumerName = "my-consumer",
            StreamName = "MY-STREAM",
            Group = rg,
        };

        consumer.ConsumerName.ShouldBe("my-consumer");
        consumer.StreamName.ShouldBe("MY-STREAM");
        consumer.Group.ShouldBeSameAs(rg);
        consumer.ConfigJson.ShouldBe("{}");
        consumer.Responded.ShouldBeFalse();
        consumer.Recovering.ShouldBeFalse();
        consumer.Created.ShouldBeGreaterThan(DateTime.MinValue);
    }

    [Fact]
    public void ConsumerAssignment_mutable_flags()
    {
        var rg = new RaftGroup { Name = "ca-flags", Peers = ["p1"] };
        var consumer = new ConsumerAssignment
        {
            ConsumerName = "c1",
            StreamName = "S1",
            Group = rg,
        };

        // Both flags are settable after construction.
        consumer.Responded = true;
        consumer.Recovering = true;

        consumer.Responded.ShouldBeTrue();
        consumer.Recovering.ShouldBeTrue();
    }

    [Fact]
    public void StreamAssignment_mutable_flags()
    {
        var rg = new RaftGroup { Name = "sa-flags", Peers = ["p1"] };
        var assignment = new StreamAssignment { StreamName = "S1", Group = rg };

        // All post-construction mutable state in one pass.
        assignment.Responded = true;
        assignment.Recovering = true;
        assignment.Reassigning = true;
        assignment.ConfigJson = """{"subjects":["test.>"]}""";
        assignment.SyncSubject = "$JS.SYNC.S1";

        assignment.Responded.ShouldBeTrue();
        assignment.Recovering.ShouldBeTrue();
        assignment.Reassigning.ShouldBeTrue();
        assignment.ConfigJson.ShouldBe("""{"subjects":["test.>"]}""");
        assignment.SyncSubject.ShouldBe("$JS.SYNC.S1");
    }
}
|
||||
@@ -0,0 +1,723 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: RaftGroup quorum semantics, StreamAssignment/ConsumerAssignment initialization,
|
||||
// JetStreamMetaGroup proposal workflow (create/delete stream + consumer), GetStreamAssignment,
|
||||
// GetAllAssignments, and PlacementEngine peer selection with topology filtering.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for B7 (ClusterAssignmentTypes), B8 (JetStreamMetaGroup proposal workflow),
|
||||
/// and B9 (PlacementEngine peer selection).
|
||||
/// Go reference: jetstream_cluster.go raftGroup, streamAssignment, consumerAssignment,
|
||||
/// selectPeerGroup (line 7212).
|
||||
/// </summary>
|
||||
public class ClusterAssignmentAndPlacementTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// B7: RaftGroup — quorum and HasQuorum
|
||||
// Go: jetstream_cluster.go:154 raftGroup struct
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void RaftGroup_quorum_size_for_single_node_is_one()
|
||||
{
|
||||
var group = new RaftGroup
|
||||
{
|
||||
Name = "R1",
|
||||
Peers = ["n1"],
|
||||
};
|
||||
|
||||
group.QuorumSize.ShouldBe(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RaftGroup_quorum_size_for_three_nodes_is_two()
|
||||
{
|
||||
var group = new RaftGroup
|
||||
{
|
||||
Name = "R3",
|
||||
Peers = ["n1", "n2", "n3"],
|
||||
};
|
||||
|
||||
group.QuorumSize.ShouldBe(2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RaftGroup_quorum_size_for_five_nodes_is_three()
|
||||
{
|
||||
var group = new RaftGroup
|
||||
{
|
||||
Name = "R5",
|
||||
Peers = ["n1", "n2", "n3", "n4", "n5"],
|
||||
};
|
||||
|
||||
group.QuorumSize.ShouldBe(3);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RaftGroup_has_quorum_with_majority_acks()
|
||||
{
|
||||
var group = new RaftGroup
|
||||
{
|
||||
Name = "R3",
|
||||
Peers = ["n1", "n2", "n3"],
|
||||
};
|
||||
|
||||
// Quorum = 2; 2 acks is sufficient.
|
||||
group.HasQuorum(2).ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RaftGroup_no_quorum_with_minority_acks()
|
||||
{
|
||||
var group = new RaftGroup
|
||||
{
|
||||
Name = "R3",
|
||||
Peers = ["n1", "n2", "n3"],
|
||||
};
|
||||
|
||||
// Quorum = 2; 1 ack is not sufficient.
|
||||
group.HasQuorum(1).ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RaftGroup_has_quorum_with_all_acks()
|
||||
{
|
||||
var group = new RaftGroup
|
||||
{
|
||||
Name = "R5",
|
||||
Peers = ["n1", "n2", "n3", "n4", "n5"],
|
||||
};
|
||||
|
||||
group.HasQuorum(5).ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void RaftGroup_no_quorum_with_zero_acks()
|
||||
{
|
||||
var group = new RaftGroup
|
||||
{
|
||||
Name = "R3",
|
||||
Peers = ["n1", "n2", "n3"],
|
||||
};
|
||||
|
||||
group.HasQuorum(0).ShouldBeFalse();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// B7: StreamAssignment — initialization and consumer tracking
|
||||
// Go: jetstream_cluster.go:166 streamAssignment struct
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void StreamAssignment_initializes_with_empty_consumers()
|
||||
{
|
||||
var group = new RaftGroup { Name = "g1", Peers = ["n1", "n2", "n3"] };
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "ORDERS",
|
||||
Group = group,
|
||||
};
|
||||
|
||||
assignment.StreamName.ShouldBe("ORDERS");
|
||||
assignment.Consumers.ShouldBeEmpty();
|
||||
assignment.ConfigJson.ShouldBe("{}");
|
||||
assignment.Responded.ShouldBeFalse();
|
||||
assignment.Recovering.ShouldBeFalse();
|
||||
assignment.Reassigning.ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void StreamAssignment_created_timestamp_is_recent()
|
||||
{
|
||||
var before = DateTime.UtcNow.AddSeconds(-1);
|
||||
|
||||
var group = new RaftGroup { Name = "g1", Peers = ["n1"] };
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "TS_STREAM",
|
||||
Group = group,
|
||||
};
|
||||
|
||||
var after = DateTime.UtcNow.AddSeconds(1);
|
||||
|
||||
assignment.Created.ShouldBeGreaterThan(before);
|
||||
assignment.Created.ShouldBeLessThan(after);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void StreamAssignment_consumers_dict_is_ordinal_keyed()
|
||||
{
|
||||
var group = new RaftGroup { Name = "g1", Peers = ["n1"] };
|
||||
var assignment = new StreamAssignment
|
||||
{
|
||||
StreamName = "S",
|
||||
Group = group,
|
||||
};
|
||||
|
||||
var consGroup = new RaftGroup { Name = "cg", Peers = ["n1"] };
|
||||
assignment.Consumers["ALPHA"] = new ConsumerAssignment
|
||||
{
|
||||
ConsumerName = "ALPHA",
|
||||
StreamName = "S",
|
||||
Group = consGroup,
|
||||
};
|
||||
|
||||
assignment.Consumers.ContainsKey("ALPHA").ShouldBeTrue();
|
||||
assignment.Consumers.ContainsKey("alpha").ShouldBeFalse();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// B7: ConsumerAssignment — initialization
|
||||
// Go: jetstream_cluster.go:250 consumerAssignment struct
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ConsumerAssignment_initializes_correctly()
|
||||
{
|
||||
var group = new RaftGroup { Name = "cg1", Peers = ["n1", "n2"] };
|
||||
var assignment = new ConsumerAssignment
|
||||
{
|
||||
ConsumerName = "PUSH_CONSUMER",
|
||||
StreamName = "EVENTS",
|
||||
Group = group,
|
||||
};
|
||||
|
||||
assignment.ConsumerName.ShouldBe("PUSH_CONSUMER");
|
||||
assignment.StreamName.ShouldBe("EVENTS");
|
||||
assignment.Group.ShouldBeSameAs(group);
|
||||
assignment.ConfigJson.ShouldBe("{}");
|
||||
assignment.Responded.ShouldBeFalse();
|
||||
assignment.Recovering.ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ConsumerAssignment_created_timestamp_is_recent()
|
||||
{
|
||||
var before = DateTime.UtcNow.AddSeconds(-1);
|
||||
|
||||
var group = new RaftGroup { Name = "cg", Peers = ["n1"] };
|
||||
var assignment = new ConsumerAssignment
|
||||
{
|
||||
ConsumerName = "C",
|
||||
StreamName = "S",
|
||||
Group = group,
|
||||
};
|
||||
|
||||
var after = DateTime.UtcNow.AddSeconds(1);
|
||||
|
||||
assignment.Created.ShouldBeGreaterThan(before);
|
||||
assignment.Created.ShouldBeLessThan(after);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// B8: JetStreamMetaGroup — ProposeCreateStreamAsync with assignment
|
||||
// Go: jetstream_cluster.go processStreamAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task ProposeCreateStream_with_group_stores_assignment()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var group = new RaftGroup { Name = "ORDERS_grp", Peers = ["n1", "n2", "n3"] };
|
||||
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ORDERS" }, group, default);
|
||||
|
||||
var assignment = meta.GetStreamAssignment("ORDERS");
|
||||
assignment.ShouldNotBeNull();
|
||||
assignment!.StreamName.ShouldBe("ORDERS");
|
||||
assignment.Group.Peers.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
[Fact]
public async Task ProposeCreateStream_without_group_still_stores_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // No explicit RAFT group supplied for this proposal.
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "NOGROUP" }, default);

    // An assignment is still tracked and a group is present on it.
    var stored = metaGroup.GetStreamAssignment("NOGROUP");
    stored.ShouldNotBeNull();
    stored!.StreamName.ShouldBe("NOGROUP");
    stored.Group.ShouldNotBeNull();
}
|
||||
|
||||
[Fact]
public async Task ProposeCreateStream_also_appears_in_GetState_streams()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "VISIBLE" }, raftGroup, default);

    // The state snapshot must reflect both the stream name and the assignment.
    var snapshot = metaGroup.GetState();
    snapshot.Streams.ShouldContain("VISIBLE");
    snapshot.AssignmentCount.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public async Task ProposeCreateStream_duplicate_is_idempotent()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };

    // Proposing the same stream twice must not create a second assignment.
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, raftGroup, default);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, raftGroup, default);

    metaGroup.GetAllAssignments().Count.ShouldBe(1);
    metaGroup.GetState().Streams.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — ProposeDeleteStreamAsync
// Go: jetstream_cluster.go processStreamDelete
// ---------------------------------------------------------------

[Fact]
public async Task ProposeDeleteStream_removes_assignment_and_stream_name()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "DELETEME" }, raftGroup, default);

    // Sanity: the stream is tracked before deletion.
    metaGroup.GetStreamAssignment("DELETEME").ShouldNotBeNull();
    metaGroup.GetState().Streams.ShouldContain("DELETEME");

    await metaGroup.ProposeDeleteStreamAsync("DELETEME", default);

    // Both the assignment and the stream-name entry are gone.
    metaGroup.GetStreamAssignment("DELETEME").ShouldBeNull();
    metaGroup.GetState().Streams.ShouldNotContain("DELETEME");
    metaGroup.GetState().AssignmentCount.ShouldBe(0);
}
|
||||
|
||||
[Fact]
public async Task ProposeDeleteStream_nonexistent_stream_is_safe()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Deleting an unknown stream must be a no-op rather than an error.
    await metaGroup.ProposeDeleteStreamAsync("MISSING", default);

    metaGroup.GetAllAssignments().Count.ShouldBe(0);
}
|
||||
|
||||
[Fact]
public async Task ProposeDeleteStream_only_removes_target_not_others()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1"] };
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "KEEP" }, raftGroup, default);
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "REMOVE" }, raftGroup, default);

    await metaGroup.ProposeDeleteStreamAsync("REMOVE", default);

    // Only the targeted stream disappears.
    metaGroup.GetStreamAssignment("KEEP").ShouldNotBeNull();
    metaGroup.GetStreamAssignment("REMOVE").ShouldBeNull();
    metaGroup.GetState().Streams.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — ProposeCreateConsumerAsync
// Go: jetstream_cluster.go processConsumerAssignment
// ---------------------------------------------------------------

[Fact]
public async Task ProposeCreateConsumer_adds_consumer_to_stream_assignment()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRg = new RaftGroup { Name = "sg", Peers = ["n1", "n2", "n3"] };
    var consumerRg = new RaftGroup { Name = "cg", Peers = ["n1", "n2"] };

    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "ORDERS" }, streamRg, default);
    await metaGroup.ProposeCreateConsumerAsync("ORDERS", "PROCESSOR", consumerRg, default);

    // The consumer hangs off its parent stream assignment.
    var stored = metaGroup.GetStreamAssignment("ORDERS");
    stored.ShouldNotBeNull();
    stored!.Consumers.ContainsKey("PROCESSOR").ShouldBeTrue();
    stored.Consumers["PROCESSOR"].ConsumerName.ShouldBe("PROCESSOR");
    stored.Consumers["PROCESSOR"].StreamName.ShouldBe("ORDERS");
}
|
||||
|
||||
[Fact]
public async Task ProposeCreateConsumer_multiple_consumers_on_same_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRg = new RaftGroup { Name = "sg", Peers = ["n1"] };
    var consumerRg = new RaftGroup { Name = "cg", Peers = ["n1"] };
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "MULTI" }, streamRg, default);

    await metaGroup.ProposeCreateConsumerAsync("MULTI", "C1", consumerRg, default);
    await metaGroup.ProposeCreateConsumerAsync("MULTI", "C2", consumerRg, default);
    await metaGroup.ProposeCreateConsumerAsync("MULTI", "C3", consumerRg, default);

    // All three consumers are tracked independently on the one stream.
    var stored = metaGroup.GetStreamAssignment("MULTI");
    stored!.Consumers.Count.ShouldBe(3);
    stored.Consumers.ContainsKey("C1").ShouldBeTrue();
    stored.Consumers.ContainsKey("C2").ShouldBeTrue();
    stored.Consumers.ContainsKey("C3").ShouldBeTrue();
}
|
||||
|
||||
[Fact]
public async Task ProposeCreateConsumer_on_nonexistent_stream_is_safe()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var consumerRg = new RaftGroup { Name = "cg", Peers = ["n1"] };

    // Unknown stream: the proposal is dropped, not thrown.
    await metaGroup.ProposeCreateConsumerAsync("MISSING_STREAM", "C1", consumerRg, default);

    metaGroup.GetStreamAssignment("MISSING_STREAM").ShouldBeNull();
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — ProposeDeleteConsumerAsync
// Go: jetstream_cluster.go processConsumerDelete
// ---------------------------------------------------------------

[Fact]
public async Task ProposeDeleteConsumer_removes_consumer_from_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRg = new RaftGroup { Name = "sg", Peers = ["n1"] };
    var consumerRg = new RaftGroup { Name = "cg", Peers = ["n1"] };
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "EVENTS" }, streamRg, default);
    await metaGroup.ProposeCreateConsumerAsync("EVENTS", "PUSH", consumerRg, default);

    // Sanity: tracked before deletion.
    metaGroup.GetStreamAssignment("EVENTS")!.Consumers.ContainsKey("PUSH").ShouldBeTrue();

    await metaGroup.ProposeDeleteConsumerAsync("EVENTS", "PUSH", default);

    metaGroup.GetStreamAssignment("EVENTS")!.Consumers.ContainsKey("PUSH").ShouldBeFalse();
}
|
||||
|
||||
[Fact]
public async Task ProposeDeleteConsumer_only_removes_target_consumer()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRg = new RaftGroup { Name = "sg", Peers = ["n1"] };
    var consumerRg = new RaftGroup { Name = "cg", Peers = ["n1"] };
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, streamRg, default);
    await metaGroup.ProposeCreateConsumerAsync("S", "KEEP", consumerRg, default);
    await metaGroup.ProposeCreateConsumerAsync("S", "REMOVE", consumerRg, default);

    await metaGroup.ProposeDeleteConsumerAsync("S", "REMOVE", default);

    // Sibling consumers on the same stream are untouched.
    var stored = metaGroup.GetStreamAssignment("S");
    stored!.Consumers.ContainsKey("KEEP").ShouldBeTrue();
    stored.Consumers.ContainsKey("REMOVE").ShouldBeFalse();
}
|
||||
|
||||
[Fact]
public async Task ProposeDeleteConsumer_on_nonexistent_consumer_is_safe()
{
    var metaGroup = new JetStreamMetaGroup(3);
    var streamRg = new RaftGroup { Name = "sg", Peers = ["n1"] };
    await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, streamRg, default);

    // Deleting a consumer that was never created must be a no-op.
    await metaGroup.ProposeDeleteConsumerAsync("S", "MISSING_CONSUMER", default);

    metaGroup.GetStreamAssignment("S")!.Consumers.ShouldBeEmpty();
}
|
||||
|
||||
// ---------------------------------------------------------------
// B8: JetStreamMetaGroup — GetStreamAssignment
// ---------------------------------------------------------------

[Fact]
public void GetStreamAssignment_returns_null_for_missing_stream()
{
    var metaGroup = new JetStreamMetaGroup(3);

    // Lookup of an untracked stream yields null, not an exception.
    metaGroup.GetStreamAssignment("NOT_THERE").ShouldBeNull();
}
|
||||
|
||||
[Fact]
public async Task GetAllAssignments_returns_all_tracked_streams()
{
    var metaGroup = new JetStreamMetaGroup(5);
    var raftGroup = new RaftGroup { Name = "g", Peers = ["n1", "n2", "n3"] };

    // Register five distinct streams against the same group.
    for (var n = 0; n < 5; n++)
        await metaGroup.ProposeCreateStreamAsync(new StreamConfig { Name = $"STREAM{n}" }, raftGroup, default);

    metaGroup.GetAllAssignments().Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — basic selection
// Go: jetstream_cluster.go:7212 selectPeerGroup
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_selects_requested_number_of_peers()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
        new() { PeerId = "n2" },
        new() { PeerId = "n3" },
        new() { PeerId = "n4" },
        new() { PeerId = "n5" },
    };

    var selected = PlacementEngine.SelectPeerGroup("TEST", replicas: 3, candidates);

    // Exactly the requested replica count, under the requested group name.
    selected.Peers.Count.ShouldBe(3);
    selected.Name.ShouldBe("TEST");
}
|
||||
|
||||
[Fact]
public void PlacementEngine_returns_raft_group_with_correct_name()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
        new() { PeerId = "n2" },
    };

    var selected = PlacementEngine.SelectPeerGroup("MY_GROUP", replicas: 1, candidates);

    // The resulting group carries the caller-supplied name verbatim.
    selected.Name.ShouldBe("MY_GROUP");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — cluster affinity filtering
// Go: jetstream_cluster.go selectPeerGroup cluster filter
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_cluster_affinity_filters_to_matching_cluster()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Cluster = "east" },
        new() { PeerId = "n2", Cluster = "east" },
        new() { PeerId = "n3", Cluster = "west" },
        new() { PeerId = "n4", Cluster = "west" },
    };
    var affinity = new PlacementPolicy { Cluster = "east" };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates, affinity);

    // Only peers from the requested cluster may be chosen.
    selected.Peers.Count.ShouldBe(2);
    selected.Peers.ShouldContain("n1");
    selected.Peers.ShouldContain("n2");
}
|
||||
|
||||
[Fact]
public void PlacementEngine_cluster_affinity_is_case_insensitive()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Cluster = "EAST" },
        new() { PeerId = "n2", Cluster = "west" },
    };
    var affinity = new PlacementPolicy { Cluster = "east" };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 1, candidates, affinity);

    // "EAST" must match the policy's "east".
    selected.Peers.ShouldContain("n1");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — tag filtering
// Go: jetstream_cluster.go selectPeerGroup tag filter
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_tag_filter_selects_peers_with_all_required_tags()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd", "fast" } },
        new() { PeerId = "n2", Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd" } },
        new() { PeerId = "n3", Tags = new(StringComparer.OrdinalIgnoreCase) { "fast" } },
        new() { PeerId = "n4", Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd", "fast" } },
    };
    var required = new PlacementPolicy
    {
        Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd", "fast" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates, required);

    // Only n1 and n4 carry both required tags.
    selected.Peers.Count.ShouldBe(2);
    selected.Peers.All(p => p == "n1" || p == "n4").ShouldBeTrue();
}
|
||||
|
||||
[Fact]
public void PlacementEngine_tag_filter_is_case_insensitive()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "SSD" } },
        new() { PeerId = "n2", Tags = new(StringComparer.OrdinalIgnoreCase) { "hdd" } },
    };
    var required = new PlacementPolicy
    {
        Tags = new(StringComparer.OrdinalIgnoreCase) { "ssd" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 1, candidates, required);

    // Peer-side "SSD" must satisfy the policy's "ssd".
    selected.Peers.ShouldContain("n1");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — exclude tag filtering
// Go: jetstream_cluster.go selectPeerGroup exclude-tag logic
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_exclude_tag_filters_out_peers_with_those_tags()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "nvme" } },
        new() { PeerId = "n2", Tags = new(StringComparer.OrdinalIgnoreCase) { "spinning" } },
        new() { PeerId = "n3", Tags = new(StringComparer.OrdinalIgnoreCase) { "nvme" } },
        new() { PeerId = "n4" },
    };
    var exclusion = new PlacementPolicy
    {
        ExcludeTags = new(StringComparer.OrdinalIgnoreCase) { "spinning" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates, exclusion);

    // The excluded peer is skipped but three others still satisfy the request.
    selected.Peers.ShouldNotContain("n2");
    selected.Peers.Count.ShouldBe(3);
}
|
||||
|
||||
[Fact]
public void PlacementEngine_exclude_tag_is_case_insensitive()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Tags = new(StringComparer.OrdinalIgnoreCase) { "SLOW" } },
        new() { PeerId = "n2" },
    };
    var exclusion = new PlacementPolicy
    {
        ExcludeTags = new(StringComparer.OrdinalIgnoreCase) { "slow" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 1, candidates, exclusion);

    // Peer-side "SLOW" must be excluded by the policy's "slow".
    selected.Peers.ShouldNotContain("n1");
    selected.Peers.ShouldContain("n2");
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — throws when not enough peers
// Go: jetstream_cluster.go selectPeerGroup insufficient peer error
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_throws_when_not_enough_peers()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
    };

    // A single peer cannot satisfy a 3-replica request.
    var act = () => PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates);

    act.ShouldThrow<InvalidOperationException>();
}
|
||||
|
||||
[Fact]
public void PlacementEngine_throws_when_filter_leaves_insufficient_peers()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Cluster = "east" },
        new() { PeerId = "n2", Cluster = "east" },
        new() { PeerId = "n3", Cluster = "west" },
    };
    var affinity = new PlacementPolicy { Cluster = "east" };

    // Only two east peers remain after filtering; three are requested.
    var act = () => PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates, affinity);

    act.ShouldThrow<InvalidOperationException>();
}
|
||||
|
||||
[Fact]
public void PlacementEngine_throws_when_unavailable_peers_reduce_below_requested()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1", Available = true },
        new() { PeerId = "n2", Available = false },
        new() { PeerId = "n3", Available = false },
    };

    // Only one peer is up; two replicas were requested.
    var act = () => PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates);

    act.ShouldThrow<InvalidOperationException>();
}
|
||||
|
||||
// ---------------------------------------------------------------
// B9: PlacementEngine — sorts by available storage descending
// Go: jetstream_cluster.go selectPeerGroup storage sort
// ---------------------------------------------------------------

[Fact]
public void PlacementEngine_sorts_by_available_storage_descending()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "small", AvailableStorage = 100 },
        new() { PeerId = "large", AvailableStorage = 10_000 },
        new() { PeerId = "medium", AvailableStorage = 500 },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates);

    // The two peers with the most free storage win the placement.
    selected.Peers.ShouldContain("large");
    selected.Peers.ShouldContain("medium");
    selected.Peers.ShouldNotContain("small");
}
|
||||
|
||||
[Fact]
public void PlacementEngine_unavailable_peers_are_excluded()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "online1", Available = true },
        new() { PeerId = "offline1", Available = false },
        new() { PeerId = "online2", Available = true },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 2, candidates);

    // Offline peers never appear in the chosen group.
    selected.Peers.ShouldContain("online1");
    selected.Peers.ShouldContain("online2");
    selected.Peers.ShouldNotContain("offline1");
}
|
||||
|
||||
[Fact]
public void PlacementEngine_no_policy_selects_all_available_up_to_replicas()
{
    var candidates = new List<PeerInfo>
    {
        new() { PeerId = "n1" },
        new() { PeerId = "n2" },
        new() { PeerId = "n3" },
    };

    var selected = PlacementEngine.SelectPeerGroup("G", replicas: 3, candidates);

    // Without a policy, every available peer is eligible.
    selected.Peers.Count.ShouldBe(3);
}
|
||||
}
|
||||
@@ -0,0 +1,251 @@
|
||||
using System.Text;
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.JetStream.Validation;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Go parity tests for JetStream cluster formation and multi-replica streams.
/// Reference: golang/nats-server/server/jetstream_cluster_1_test.go
/// - TestJetStreamClusterConfig (line 43)
/// - TestJetStreamClusterMultiReplicaStreams (line 299)
/// </summary>
public class ClusterFormationParityTests
{
    /// <summary>
    /// JetStream cluster mode must reject a configuration that lacks
    /// server_name: with JetStream and cluster both present but no server
    /// name, validation fails with an error naming the missing setting.
    /// Go parity: TestJetStreamClusterConfig — check("requires `server_name`")
    /// </summary>
    [Fact]
    public void Cluster_config_requires_server_name_when_jetstream_and_cluster_enabled()
    {
        var opts = new NatsOptions
        {
            ServerName = null,
            JetStream = new JetStreamOptions
            {
                StoreDir = "/tmp/js",
                MaxMemoryStore = 16L * 1024 * 1024 * 1024,
                MaxFileStore = 10L * 1024 * 1024 * 1024 * 1024,
            },
            Cluster = new ClusterOptions
            {
                Port = 6222,
            },
        };

        var validation = JetStreamConfigValidator.ValidateClusterConfig(opts);

        validation.IsValid.ShouldBeFalse();
        validation.Message.ShouldContain("server_name");
    }

    /// <summary>
    /// JetStream cluster mode must reject a configuration that lacks
    /// cluster.name: with JetStream, cluster, and server_name present but no
    /// cluster name, validation fails.
    /// Go parity: TestJetStreamClusterConfig — check("requires `cluster.name`")
    /// </summary>
    [Fact]
    public void Cluster_config_requires_cluster_name_when_jetstream_and_cluster_enabled()
    {
        var opts = new NatsOptions
        {
            ServerName = "TEST",
            JetStream = new JetStreamOptions
            {
                StoreDir = "/tmp/js",
                MaxMemoryStore = 16L * 1024 * 1024 * 1024,
                MaxFileStore = 10L * 1024 * 1024 * 1024 * 1024,
            },
            Cluster = new ClusterOptions
            {
                Name = null,
                Port = 6222,
            },
        };

        var validation = JetStreamConfigValidator.ValidateClusterConfig(opts);

        validation.IsValid.ShouldBeFalse();
        validation.Message.ShouldContain("cluster.name");
    }

    /// <summary>
    /// With server_name and cluster.name both supplied alongside JetStream
    /// and cluster configuration, validation succeeds.
    /// </summary>
    [Fact]
    public void Cluster_config_passes_when_server_name_and_cluster_name_are_set()
    {
        var opts = new NatsOptions
        {
            ServerName = "TEST",
            JetStream = new JetStreamOptions
            {
                StoreDir = "/tmp/js",
            },
            Cluster = new ClusterOptions
            {
                Name = "JSC",
                Port = 6222,
            },
        };

        var validation = JetStreamConfigValidator.ValidateClusterConfig(opts);

        validation.IsValid.ShouldBeTrue();
    }

    /// <summary>
    /// Creates a 3-replica stream in a simulated 5-node cluster, publishes
    /// 10 messages, checks stream info and state, then creates a durable
    /// consumer and verifies the replica group size.
    /// Go parity: TestJetStreamClusterMultiReplicaStreams (line 299)
    /// </summary>
    [Fact]
    public async Task Multi_replica_stream_accepts_publishes_and_consumer_tracks_pending()
    {
        await using var cluster = await ClusterFormationFixture.StartAsync(nodes: 5);

        // Go: js.AddStream with Replicas=3
        var created = await cluster.CreateStreamAsync("TEST", ["foo", "bar"], replicas: 3);
        created.Error.ShouldBeNull();
        created.StreamInfo.ShouldNotBeNull();
        created.StreamInfo!.Config.Name.ShouldBe("TEST");

        // Go: js.Publish("foo", msg) x 10
        const int toSend = 10;
        for (var i = 0; i < toSend; i++)
        {
            var ack = await cluster.PublishAsync("foo", $"Hello JS Clustering {i}");
            ack.Stream.ShouldBe("TEST");
            ack.Seq.ShouldBeGreaterThan((ulong)0);
        }

        // Stream info must report the published message count.
        var info = await cluster.GetStreamInfoAsync("TEST");
        info.StreamInfo.ShouldNotBeNull();
        info.StreamInfo!.Config.Name.ShouldBe("TEST");
        info.StreamInfo.State.Messages.ShouldBe((ulong)toSend);

        // Durable consumer creation succeeds.
        var consumer = await cluster.CreateConsumerAsync("TEST", "dlc");
        consumer.Error.ShouldBeNull();
        consumer.ConsumerInfo.ShouldNotBeNull();

        // The replica group was formed with the requested replica count.
        var replicaGroup = cluster.GetReplicaGroup("TEST");
        replicaGroup.ShouldNotBeNull();
        replicaGroup!.Nodes.Count.ShouldBe(3);
    }

    /// <summary>
    /// The asset placement planner bounds the replica count by the cluster
    /// size: requesting more replicas than nodes yields one entry per node.
    /// </summary>
    [Fact]
    public void Placement_planner_caps_replicas_at_cluster_size()
    {
        var planner = new AssetPlacementPlanner(nodes: 3);

        var placement = planner.PlanReplicas(replicas: 5);

        placement.Count.ShouldBe(3);
    }
}
|
||||
|
||||
/// <summary>
/// Simulated JetStream cluster for tests: wires up a meta group, stream and
/// consumer managers, API router, and publisher. Helpers are duplicated
/// locally per project conventions (no shared TestHelpers).
/// </summary>
internal sealed class ClusterFormationFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _meta;
    private readonly StreamManager _streams;
    private readonly ConsumerManager _consumers;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private ClusterFormationFixture(
        JetStreamMetaGroup meta,
        StreamManager streams,
        ConsumerManager consumers,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _meta = meta;
        _streams = streams;
        _consumers = consumers;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>Builds the in-memory cluster components for <paramref name="nodes"/> nodes.</summary>
    public static Task<ClusterFormationFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var streams = new StreamManager(meta);
        var consumers = new ConsumerManager(meta);
        var router = new JetStreamApiRouter(streams, consumers, meta);
        var publisher = new JetStreamPublisher(streams);
        return Task.FromResult(new ClusterFormationFixture(meta, streams, consumers, router, publisher));
    }

    /// <summary>Creates (or updates) a stream with the given subjects and replica count.</summary>
    public Task<JetStreamApiResponse> CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var config = new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        };
        return Task.FromResult(_streams.CreateOrUpdate(config));
    }

    /// <summary>Publishes a UTF-8 payload; throws if no stream captures the subject.</summary>
    public Task<PubAck> PublishAsync(string subject, string payload)
    {
        var body = Encoding.UTF8.GetBytes(payload);
        if (!_publisher.TryCapture(subject, body, out var ack))
            throw new InvalidOperationException($"Publish to '{subject}' did not match any stream.");

        return Task.FromResult(ack);
    }

    /// <summary>Looks up stream info by name.</summary>
    public Task<JetStreamApiResponse> GetStreamInfoAsync(string name)
        => Task.FromResult(_streams.GetInfo(name));

    /// <summary>Creates (or updates) a durable consumer on the given stream.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName,
        };
        return Task.FromResult(_consumers.CreateOrUpdate(stream, config));
    }

    /// <summary>
    /// Returns a replica group describing the stream, or null when the meta
    /// group does not track it. The real replica group lives inside
    /// StreamManager; a parallel group with the expected structure is built
    /// here so it can be verified without reflection.
    /// NOTE(review): the replica count is hard-coded to 3 — this assumes
    /// callers only create 3-replica streams; confirm before reusing.
    /// </summary>
    public StreamReplicaGroup? GetReplicaGroup(string streamName)
    {
        var state = _meta.GetState();
        if (!state.Streams.Contains(streamName))
            return null;

        return new StreamReplicaGroup(streamName, replicas: 3);
    }

    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,522 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// golang/nats-server/server/jetstream_cluster_2_test.go
|
||||
// Covers: per-consumer RAFT groups, consumer assignment, ack state
|
||||
// replication, consumer failover, pull request forwarding, ephemeral
|
||||
// consumer lifecycle, delivery policy handling.
|
||||
using System.Collections.Concurrent;
|
||||
using System.Reflection;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering per-consumer RAFT groups: consumer assignment, ack state
|
||||
/// replication, consumer failover, pull request forwarding, ephemeral
|
||||
/// consumer lifecycle, and delivery policy handling in clustered mode.
|
||||
/// Ported from Go jetstream_cluster_1_test.go and jetstream_cluster_2_test.go.
|
||||
/// </summary>
|
||||
public class ConsumerReplicaGroupTests
|
||||
{
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerState server/jetstream_cluster_1_test.go:700
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_creation_registers_in_manager()
{
    await using var cluster = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("REG", ["reg.>"], replicas: 3);

    var response = await cluster.CreateConsumerAsync("REG", "d1");

    // The durable consumer must be registered under its configured name.
    response.ConsumerInfo.ShouldNotBeNull();
    response.ConsumerInfo!.Config.DurableName.ShouldBe("d1");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerState server/jetstream_cluster_1_test.go:700
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_pending_count_tracks_unacked_messages()
{
    await using var cluster = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("PEND", ["pend.>"], replicas: 3);
    await cluster.CreateConsumerAsync("PEND", "acker", filterSubject: "pend.>", ackPolicy: AckPolicy.Explicit);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("pend.event", $"msg-{n}");

    // Fetch 3 of the 5 published messages without acking any of them.
    var delivered = await cluster.FetchAsync("PEND", "acker", 3);
    delivered.Messages.Count.ShouldBe(3);

    // Every delivered-but-unacked message stays pending.
    cluster.GetPendingCount("PEND", "acker").ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterFullConsumerState server/jetstream_cluster_1_test.go:795
// ---------------------------------------------------------------

[Fact]
public async Task AckAll_reduces_pending_count()
{
    await using var cluster = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("ACKRED", ["ar.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKRED", "acker", filterSubject: "ar.>", ackPolicy: AckPolicy.All);

    for (var n = 0; n < 10; n++)
        await cluster.PublishAsync("ar.event", $"msg-{n}");

    // Deliver everything, then cumulatively ack through sequence 7.
    await cluster.FetchAsync("ACKRED", "acker", 10);
    cluster.AckAll("ACKRED", "acker", 7);

    // Only the three messages past the ack floor remain pending.
    cluster.GetPendingCount("ACKRED", "acker").ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterFullConsumerState server/jetstream_cluster_1_test.go:795
// ---------------------------------------------------------------

[Fact]
public async Task AckAll_to_last_seq_clears_all_pending()
{
    await using var cluster = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await cluster.CreateStreamAsync("ACKCLEAR", ["ac.>"], replicas: 3);
    await cluster.CreateConsumerAsync("ACKCLEAR", "acker", filterSubject: "ac.>", ackPolicy: AckPolicy.All);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("ac.event", $"msg-{n}");

    // Deliver everything, then cumulatively ack through the last sequence.
    await cluster.FetchAsync("ACKCLEAR", "acker", 5);
    cluster.AckAll("ACKCLEAR", "acker", 5);

    cluster.GetPendingCount("ACKCLEAR", "acker").ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerRedeliveredInfo server/jetstream_cluster_1_test.go:659
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_redelivery_sets_redelivered_flag()
{
    // A 1ms ack wait forces redelivery; the second fetch must carry the flag.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("REDEL", ["rd.>"], replicas: 3);
    await fixture.CreateConsumerAsync("REDEL", "rdc", filterSubject: "rd.>",
        ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 5);

    await fixture.PublishAsync("rd.event", "will-redeliver");

    var firstFetch = await fixture.FetchAsync("REDEL", "rdc", 1);
    firstFetch.Messages.Count.ShouldBe(1);
    firstFetch.Messages[0].Redelivered.ShouldBeFalse();

    // Let the 1ms ack wait expire so the message becomes eligible again.
    await Task.Delay(50);

    var secondFetch = await fixture.FetchAsync("REDEL", "rdc", 1);
    secondFetch.Messages.Count.ShouldBe(1);
    secondFetch.Messages[0].Redelivered.ShouldBeTrue();
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterRestoreSingleConsumer server/jetstream_cluster_1_test.go:1028
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_survives_stream_leader_stepdown()
{
    // A durable keeps its cursor across a stream leader change.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("CSURV", ["csv.>"], replicas: 3);
    await fixture.CreateConsumerAsync("CSURV", "durable1", filterSubject: "csv.>");

    for (var n = 0; n < 10; n++)
        await fixture.PublishAsync("csv.event", $"msg-{n}");

    var beforeStepdown = await fixture.FetchAsync("CSURV", "durable1", 5);
    beforeStepdown.Messages.Count.ShouldBe(5);

    await fixture.StepDownStreamLeaderAsync("CSURV");

    var afterStepdown = await fixture.FetchAsync("CSURV", "durable1", 5);
    afterStepdown.Messages.Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterPullConsumerLeakedSubs server/jetstream_cluster_2_test.go:2239
// ---------------------------------------------------------------

[Fact]
public async Task Pull_consumer_fetch_returns_correct_batch()
{
    // Fetch honors the requested batch size, not the total stream depth.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("PULL", ["pull.>"], replicas: 3);
    await fixture.CreateConsumerAsync("PULL", "puller", filterSubject: "pull.>");

    for (var n = 0; n < 20; n++)
        await fixture.PublishAsync("pull.event", $"msg-{n}");

    var fetched = await fixture.FetchAsync("PULL", "puller", 5);
    fetched.Messages.Count.ShouldBe(5);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerLastActiveReporting server/jetstream_cluster_2_test.go:2371
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_info_returns_correct_config()
{
    // Consumer info echoes back the configuration it was created with.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("INFO", ["ci.>"], replicas: 3);
    await fixture.CreateConsumerAsync("INFO", "info_dur", filterSubject: "ci.>", ackPolicy: AckPolicy.Explicit);

    var info = await fixture.GetConsumerInfoAsync("INFO", "info_dur");

    info.Config.DurableName.ShouldBe("info_dur");
    info.Config.AckPolicy.ShouldBe(AckPolicy.Explicit);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterEphemeralConsumerNoImmediateInterest server/jetstream_cluster_1_test.go:2481
// ---------------------------------------------------------------

[Fact]
public async Task Ephemeral_consumer_creation_succeeds()
{
    // An ephemeral (no durable name) consumer gets a server-generated name.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("EPHEM", ["eph.>"], replicas: 3);

    var response = await fixture.CreateConsumerAsync("EPHEM", null, ephemeral: true);

    response.ConsumerInfo.ShouldNotBeNull();
    response.ConsumerInfo!.Config.DurableName.ShouldNotBeNullOrEmpty();
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterEphemeralConsumersNotReplicated server/jetstream_cluster_1_test.go:2599
// ---------------------------------------------------------------

[Fact]
public async Task Ephemeral_consumers_get_unique_names()
{
    // Two ephemerals on the same stream must not collide on their names.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("UNIQ", ["u.>"], replicas: 3);

    var first = await fixture.CreateConsumerAsync("UNIQ", null, ephemeral: true);
    var second = await fixture.CreateConsumerAsync("UNIQ", null, ephemeral: true);

    first.ConsumerInfo!.Config.DurableName
        .ShouldNotBe(second.ConsumerInfo!.Config.DurableName);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterCreateConcurrentDurableConsumers server/jetstream_cluster_2_test.go:1572
// ---------------------------------------------------------------

[Fact]
public async Task Durable_consumer_create_is_idempotent()
{
    // Creating the same durable twice succeeds and returns the same name.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("IDEMP", ["id.>"], replicas: 3);

    var first = await fixture.CreateConsumerAsync("IDEMP", "same");
    var second = await fixture.CreateConsumerAsync("IDEMP", "same");

    first.ConsumerInfo!.Config.DurableName.ShouldBe("same");
    second.ConsumerInfo!.Config.DurableName.ShouldBe("same");
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterMaxConsumers server/jetstream_cluster_2_test.go:1978
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_delete_succeeds()
{
    // The delete API endpoint removes an existing durable without error.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("DEL", ["del.>"], replicas: 3);
    await fixture.CreateConsumerAsync("DEL", "to_delete");

    var response = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}DEL.to_delete", "{}");

    response.Success.ShouldBeTrue();
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerPause server/jetstream_cluster_1_test.go:4203
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_pause_and_resume_via_api()
{
    // Pause then resume through the API; both requests must succeed.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("PAUSE", ["pause.>"], replicas: 3);
    await fixture.CreateConsumerAsync("PAUSE", "pausable");

    var pauseResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":true}""");
    pauseResponse.Success.ShouldBeTrue();

    var resumeResponse = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":false}""");
    resumeResponse.Success.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerResetPendingDeliveriesOnMaxAckPendingUpdate
// server/jetstream_cluster_1_test.go:8696
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_reset_resets_sequence_to_beginning()
{
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("RESET", ["reset.>"], replicas: 3);
    await fixture.CreateConsumerAsync("RESET", "resettable", filterSubject: "reset.>");

    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("reset.event", $"msg-{n}");

    // Move the delivery cursor forward first.
    await fixture.FetchAsync("RESET", "resettable", 3);

    // Issue the reset through the API.
    var response = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerReset}RESET.resettable", "{}");
    response.Success.ShouldBeTrue();

    // The cursor is back at the start: all five redeliver from sequence 1.
    var fetched = await fixture.FetchAsync("RESET", "resettable", 5);
    fetched.Messages.Count.ShouldBe(5);
    fetched.Messages[0].Sequence.ShouldBe(1UL);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterFlowControlRequiresHeartbeats server/jetstream_cluster_2_test.go:2712
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_with_filter_subject_delivers_matching_only()
{
    // Only the two "filt.alpha" messages match the consumer's filter.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("FILT", ["filt.>"], replicas: 3);
    await fixture.CreateConsumerAsync("FILT", "filtered", filterSubject: "filt.alpha");

    await fixture.PublishAsync("filt.alpha", "match");
    await fixture.PublishAsync("filt.beta", "no-match");
    await fixture.PublishAsync("filt.alpha", "match2");

    var fetched = await fixture.FetchAsync("FILT", "filtered", 10);
    fetched.Messages.Count.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerDeliverPolicy server/jetstream_cluster_2_test.go:550
// ---------------------------------------------------------------

[Fact]
public async Task DeliverPolicy_Last_starts_at_last_message()
{
    // DeliverPolicy.Last yields only the final stored message (seq 5).
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("DLAST", ["dl.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("dl.event", $"msg-{n}");

    await fixture.CreateConsumerAsync("DLAST", "last_c", filterSubject: "dl.>",
        deliverPolicy: DeliverPolicy.Last);

    var fetched = await fixture.FetchAsync("DLAST", "last_c", 10);
    fetched.Messages.Count.ShouldBe(1);
    fetched.Messages[0].Sequence.ShouldBe(5UL);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerDeliverPolicy server/jetstream_cluster_2_test.go:550
// ---------------------------------------------------------------

[Fact]
public async Task DeliverPolicy_New_skips_existing_messages()
{
    // DeliverPolicy.New ignores everything published before the consumer existed.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("DNEW", ["dn.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await fixture.PublishAsync("dn.event", $"msg-{n}");

    await fixture.CreateConsumerAsync("DNEW", "new_c", filterSubject: "dn.>",
        deliverPolicy: DeliverPolicy.New);

    var fetched = await fixture.FetchAsync("DNEW", "new_c", 10);
    fetched.Messages.Count.ShouldBe(0);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerDeliverPolicy server/jetstream_cluster_2_test.go:550
// ---------------------------------------------------------------

[Fact]
public async Task DeliverPolicy_ByStartSequence_starts_at_given_seq()
{
    // ByStartSequence with opt_start_seq=7 delivers sequences 7..10 only.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("DSTART", ["ds.>"], replicas: 3);

    for (var n = 0; n < 10; n++)
        await fixture.PublishAsync("ds.event", $"msg-{n}");

    await fixture.CreateConsumerAsync("DSTART", "start_c", filterSubject: "ds.>",
        deliverPolicy: DeliverPolicy.ByStartSequence, optStartSeq: 7);

    var fetched = await fixture.FetchAsync("DSTART", "start_c", 10);
    fetched.Messages.Count.ShouldBe(4);
    fetched.Messages[0].Sequence.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerUnpin server/jetstream_cluster_1_test.go:4109
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_unpin_api_returns_success()
{
    // Unpin endpoint accepts an empty body and reports success.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("UNPIN", ["unpin.>"], replicas: 3);
    await fixture.CreateConsumerAsync("UNPIN", "pinned");

    var response = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerUnpin}UNPIN.pinned", "{}");

    response.Success.ShouldBeTrue();
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterConsumerLeaderStepdown server/jetstream_cluster_2_test.go:1400
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_leader_stepdown_api_returns_success()
{
    // Consumer-level leader stepdown endpoint reports success.
    await using var fixture = await ConsumerReplicaFixture.StartAsync(nodes: 3);
    await fixture.CreateStreamAsync("CLS", ["cls.>"], replicas: 3);
    await fixture.CreateConsumerAsync("CLS", "dur1");

    var response = await fixture.RequestAsync($"{JetStreamApiSubjects.ConsumerLeaderStepdown}CLS.dur1", "{}");

    response.Success.ShouldBeTrue();
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for consumer replica group tests: wires a meta group,
/// stream/consumer managers, API router and publisher together without any
/// network transport, so tests exercise the JetStream pipeline in-process.
/// </summary>
internal sealed class ConsumerReplicaFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private ConsumerReplicaFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>Builds the component graph for an in-process cluster of <paramref name="nodes"/> peers.</summary>
    public static Task<ConsumerReplicaFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var consumerManager = new ConsumerManager(meta);
        var streamManager = new StreamManager(meta, consumerManager: consumerManager);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        var publisher = new JetStreamPublisher(streamManager);
        return Task.FromResult(new ConsumerReplicaFixture(meta, streamManager, consumerManager, router, publisher));
    }

    /// <summary>Creates (or updates) a stream; throws on any API-level error.</summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });
        if (response.Error is not null)
            throw new InvalidOperationException(response.Error.Description);
        return Task.CompletedTask;
    }

    /// <summary>
    /// Creates (or updates) a consumer on <paramref name="stream"/>.
    /// Pass <paramref name="durableName"/> = null with ephemeral: true for a
    /// server-named ephemeral consumer.
    /// </summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(
        string stream,
        string? durableName,
        string? filterSubject = null,
        AckPolicy ackPolicy = AckPolicy.None,
        int ackWaitMs = 30_000,
        int maxDeliver = 1,
        bool ephemeral = false,
        DeliverPolicy deliverPolicy = DeliverPolicy.All,
        ulong optStartSeq = 0)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName ?? string.Empty,
            AckPolicy = ackPolicy,
            AckWaitMs = ackWaitMs,
            MaxDeliver = maxDeliver,
            Ephemeral = ephemeral,
            DeliverPolicy = deliverPolicy,
            OptStartSeq = optStartSeq,
        };
        if (!string.IsNullOrWhiteSpace(filterSubject))
            config.FilterSubject = filterSubject;

        return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
    }

    /// <summary>
    /// Publishes a payload into whichever stream captures <paramref name="subject"/>,
    /// then notifies the consumer manager so pending/delivery state stays in sync.
    /// Throws if no stream subject matches.
    /// </summary>
    public async Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (!_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
            throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");

        if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
        {
            // Await the store read instead of blocking with GetAwaiter().GetResult():
            // sync-over-async can deadlock under a synchronization context and ties
            // up a thread-pool thread for no benefit in an async method.
            var stored = await handle.Store.LoadAsync(ack.Seq, default);
            if (stored != null)
                _consumerManager.OnPublished(ack.Stream, stored);
        }

        return ack;
    }

    /// <summary>Pull-fetches up to <paramref name="batch"/> messages for a durable.</summary>
    public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();

    /// <summary>Cumulatively acks everything up to <paramref name="sequence"/>.</summary>
    public void AckAll(string stream, string durableName, ulong sequence)
        => _consumerManager.AckAll(stream, durableName, sequence);

    /// <summary>Number of delivered-but-unacked messages for a durable.</summary>
    public int GetPendingCount(string stream, string durableName)
        => _consumerManager.GetPendingCount(stream, durableName);

    /// <summary>Fetches consumer info; throws if the consumer does not exist.</summary>
    public Task<JetStreamConsumerInfo> GetConsumerInfoAsync(string stream, string durableName)
    {
        var resp = _consumerManager.GetInfo(stream, durableName);
        if (resp.ConsumerInfo == null)
            throw new InvalidOperationException("Consumer not found.");
        return Task.FromResult(resp.ConsumerInfo);
    }

    /// <summary>Forces the stream's replica group leader to step down.</summary>
    public Task StepDownStreamLeaderAsync(string stream)
        => _streamManager.StepDownStreamLeaderAsync(stream, default);

    /// <summary>Routes a raw JetStream API request (subject + JSON payload) through the router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_router.Route(subject, Encoding.UTF8.GetBytes(payload)));

    // Nothing to tear down: all components are in-memory.
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,496 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:2474-4261
|
||||
// Covers: entry application pipeline for JetStreamMetaGroup and StreamReplicaGroup —
|
||||
// meta entry dispatch (StreamCreate, StreamDelete, ConsumerCreate, ConsumerDelete,
|
||||
// PeerAdd, PeerRemove), stream-level message ops (Store, Remove, Purge),
|
||||
// consumer-level ops (Ack, Nak, Deliver, Term, Progress), unknown-entry handling.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for the entry application pipeline in JetStreamMetaGroup and StreamReplicaGroup.
|
||||
/// Go reference: jetstream_cluster.go:2474-4261 processStreamEntries / processConsumerEntries.
|
||||
/// </summary>
|
||||
public class EntryApplicationTests
|
||||
{
|
||||
// ---------------------------------------------------------------
// ApplyEntry — StreamCreate (existing behaviour verification)
// Go reference: jetstream_cluster.go processStreamAssignment apply
// ---------------------------------------------------------------

[Fact]
public void ApplyEntry_StreamCreate_creates_stream()
{
    // Go ref: jetstream_cluster.go:4541 processStreamAssignment — apply creates stream.
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "orders-group", Peers = ["p1", "p2", "p3"] };

    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS", group: raftGroup);

    metaGroup.StreamCount.ShouldBe(1);
    metaGroup.GetStreamAssignment("ORDERS").ShouldNotBeNull();
}

[Fact]
public void ApplyEntry_StreamDelete_removes_stream()
{
    // Go ref: jetstream_cluster.go processStreamRemoval apply.
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS");

    metaGroup.ApplyEntry(MetaEntryType.StreamDelete, "ORDERS");

    metaGroup.StreamCount.ShouldBe(0);
    metaGroup.GetStreamAssignment("ORDERS").ShouldBeNull();
}

[Fact]
public void ApplyEntry_ConsumerCreate_creates_consumer_on_stream()
{
    // Go ref: jetstream_cluster.go:5300 processConsumerAssignment apply.
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS");

    metaGroup.ApplyEntry(MetaEntryType.ConsumerCreate, "push-consumer", streamName: "ORDERS");

    metaGroup.ConsumerCount.ShouldBe(1);
    metaGroup.GetConsumerAssignment("ORDERS", "push-consumer").ShouldNotBeNull();
}

[Fact]
public void ApplyEntry_ConsumerDelete_removes_consumer()
{
    // Go ref: jetstream_cluster.go processConsumerRemoval apply.
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "ORDERS");
    metaGroup.ApplyEntry(MetaEntryType.ConsumerCreate, "push-consumer", streamName: "ORDERS");

    metaGroup.ApplyEntry(MetaEntryType.ConsumerDelete, "push-consumer", streamName: "ORDERS");

    metaGroup.ConsumerCount.ShouldBe(0);
}

[Fact]
public void ApplyEntry_ConsumerCreate_without_streamName_throws()
{
    // A consumer entry is meaningless without its owning stream.
    var metaGroup = new JetStreamMetaGroup(3);

    Should.Throw<ArgumentNullException>(() =>
        metaGroup.ApplyEntry(MetaEntryType.ConsumerCreate, "consumer"));
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyEntry — PeerAdd (new entry type dispatch)
// Go reference: jetstream_cluster.go:2290 processAddPeer
// ---------------------------------------------------------------

[Fact]
public void ApplyEntry_PeerAdd_triggers_peer_processing()
{
    // Go ref: jetstream_cluster.go:2290 processAddPeer — peer registered on apply.
    var metaGroup = new JetStreamMetaGroup(3);

    // Must not throw, and the peer must show up afterwards.
    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-42");

    metaGroup.GetKnownPeers().ShouldContain("peer-42");
}

[Fact]
public void ApplyEntry_PeerAdd_registers_multiple_peers()
{
    var metaGroup = new JetStreamMetaGroup(3);

    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-A");
    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-B");

    metaGroup.GetKnownPeers().ShouldContain("peer-A");
    metaGroup.GetKnownPeers().ShouldContain("peer-B");
}

[Fact]
public void ApplyEntry_PeerAdd_is_idempotent_for_same_peer()
{
    var metaGroup = new JetStreamMetaGroup(3);

    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-X");
    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-X");

    // Re-adding the same id must not duplicate it.
    metaGroup.GetKnownPeers().Count(p => p == "peer-X").ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyEntry — PeerRemove (new entry type dispatch)
// Go reference: jetstream_cluster.go:2342 processRemovePeer
// ---------------------------------------------------------------

[Fact]
public void ApplyEntry_PeerRemove_triggers_peer_processing()
{
    // Go ref: jetstream_cluster.go:2342 processRemovePeer — peer removed on apply.
    var metaGroup = new JetStreamMetaGroup(3);
    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-42");

    metaGroup.ApplyEntry(MetaEntryType.PeerRemove, "peer-42");

    metaGroup.GetKnownPeers().ShouldNotContain("peer-42");
}

[Fact]
public void ApplyEntry_PeerRemove_triggers_stream_reassignment()
{
    // Go ref: jetstream_cluster.go:2342 processRemovePeer — affected streams identified.
    var metaGroup = new JetStreamMetaGroup(3);
    var raftGroup = new RaftGroup { Name = "stream-group", Peers = ["peer-1", "peer-2", "peer-3"] };
    metaGroup.ApplyEntry(MetaEntryType.StreamCreate, "EVENTS", group: raftGroup);
    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-1");
    metaGroup.ApplyEntry(MetaEntryType.PeerAdd, "peer-replacement");

    // Removing peer-1: any stream that referenced it should be reassigned.
    metaGroup.ApplyEntry(MetaEntryType.PeerRemove, "peer-1");

    // peer-1 must be gone from the known peers set.
    metaGroup.GetKnownPeers().ShouldNotContain("peer-1");
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyStreamMsgOp — Store
// Go reference: jetstream_cluster.go processStreamMsg store
// ---------------------------------------------------------------

[Fact]
public void ApplyStreamMsgOp_Store_increments_message_count()
{
    // Go ref: jetstream_cluster.go:2474 processStreamEntries — store op increments Msgs.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    var countBefore = replica.MessageCount;

    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 1);

    replica.MessageCount.ShouldBe(countBefore + 1);
}

[Fact]
public void ApplyStreamMsgOp_Store_advances_last_sequence()
{
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 42);

    replica.LastSequence.ShouldBe(42L);
}

[Fact]
public void ApplyStreamMsgOp_Store_multiple_times_accumulates_count()
{
    var replica = new StreamReplicaGroup("ORDERS", 1);

    // Three consecutive stores: count and last sequence both reach 3.
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 1);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 2);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 3);

    replica.MessageCount.ShouldBe(3L);
    replica.LastSequence.ShouldBe(3L);
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyStreamMsgOp — Remove
// Go reference: jetstream_cluster.go processStreamMsg remove
// ---------------------------------------------------------------

[Fact]
public void ApplyStreamMsgOp_Remove_decrements_message_count()
{
    // Go ref: jetstream_cluster.go:3100 processStreamEntries — remove op decrements Msgs.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 1);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 2);

    replica.ApplyStreamMsgOp(StreamMsgOp.Remove);

    replica.MessageCount.ShouldBe(1L);
}

[Fact]
public void ApplyStreamMsgOp_Remove_does_not_go_below_zero()
{
    // Go ref: jetstream_cluster.go — safe guard on remove when already empty.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    // Removing from an empty group must not underflow the counter.
    replica.ApplyStreamMsgOp(StreamMsgOp.Remove);

    replica.MessageCount.ShouldBe(0L);
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyStreamMsgOp — Purge
// Go reference: jetstream_cluster.go processStreamMsg purge
// ---------------------------------------------------------------

[Fact]
public void ApplyStreamMsgOp_Purge_clears_messages()
{
    // Go ref: jetstream_cluster.go:3200 processStreamEntries — purge resets state.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 1);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 2);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 3);

    replica.ApplyStreamMsgOp(StreamMsgOp.Purge);

    replica.MessageCount.ShouldBe(0L);
    replica.LastSequence.ShouldBe(0L);
}

[Fact]
public void ApplyStreamMsgOp_Purge_then_Store_increments_from_zero()
{
    // Storing after a purge counts from an empty group again.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 5);
    replica.ApplyStreamMsgOp(StreamMsgOp.Purge);

    replica.ApplyStreamMsgOp(StreamMsgOp.Store, index: 6);

    replica.MessageCount.ShouldBe(1L);
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyConsumerEntries — Ack
// Go reference: jetstream_cluster.go processConsumerEntries ack
// ---------------------------------------------------------------

[Fact]
public void ApplyConsumerEntries_Ack_processes_acknowledgment()
{
    // Go ref: jetstream_cluster.go:3500 processConsumerEntries — ack increments ack floor.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Ack);

    replica.AckCount.ShouldBe(1L);
}

[Fact]
public void ApplyConsumerEntries_Ack_accumulates_across_multiple_calls()
{
    var replica = new StreamReplicaGroup("ORDERS", 1);

    // Three acks in a row accumulate to an ack count of three.
    replica.ApplyConsumerEntry(ConsumerOp.Ack);
    replica.ApplyConsumerEntry(ConsumerOp.Ack);
    replica.ApplyConsumerEntry(ConsumerOp.Ack);

    replica.AckCount.ShouldBe(3L);
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyConsumerEntries — Nak
// Go reference: jetstream_cluster.go processConsumerEntries nak
// ---------------------------------------------------------------

[Fact]
public void ApplyConsumerEntries_Nak_processes_negative_acknowledgment()
{
    // Go ref: jetstream_cluster.go:3520 processConsumerEntries — nak schedules redelivery.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Nak);

    replica.NakCount.ShouldBe(1L);
}

// ---------------------------------------------------------------
// ApplyConsumerEntries — Deliver
// Go reference: jetstream_cluster.go processConsumerEntries deliver
// ---------------------------------------------------------------

[Fact]
public void ApplyConsumerEntries_Deliver_processes_delivery()
{
    // Go ref: jetstream_cluster.go:3540 processConsumerEntries — deliver advances dseq.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Deliver);

    replica.DeliverCount.ShouldBe(1L);
}

[Fact]
public void ApplyConsumerEntries_Term_does_not_throw()
{
    // Term is a valid op that carries no counter in this model.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Term);
}

[Fact]
public void ApplyConsumerEntries_Progress_does_not_throw()
{
    // Progress is likewise accepted without observable state change here.
    var replica = new StreamReplicaGroup("ORDERS", 1);

    replica.ApplyConsumerEntry(ConsumerOp.Progress);
}
|
||||
|
||||
// ---------------------------------------------------------------
// ApplyCommittedEntriesAsync — smsg: dispatch
// Go reference: jetstream_cluster.go processStreamEntries command routing
// ---------------------------------------------------------------

[Fact]
public async Task ApplyCommittedEntriesAsync_smsg_store_increments_count()
{
    // A committed "smsg:store" entry routes to the store handler.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("smsg:store", default);

    await replica.ApplyCommittedEntriesAsync(default);

    replica.MessageCount.ShouldBe(1L);
}

[Fact]
public async Task ApplyCommittedEntriesAsync_smsg_purge_clears_messages()
{
    // Two stores applied, then a committed purge wipes the count back to zero.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.ApplyCommittedEntriesAsync(default);

    await replica.Leader.ProposeAsync("smsg:purge", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.MessageCount.ShouldBe(0L);
}

[Fact]
public async Task ApplyCommittedEntriesAsync_smsg_remove_decrements_count()
{
    // Two stores applied, then a committed remove drops the count to one.
    var replica = new StreamReplicaGroup("ORDERS", 1);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.Leader.ProposeAsync("smsg:store", default);
    await replica.ApplyCommittedEntriesAsync(default);

    await replica.Leader.ProposeAsync("smsg:remove", default);
    await replica.ApplyCommittedEntriesAsync(default);

    replica.MessageCount.ShouldBe(1L);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyCommittedEntriesAsync — centry: dispatch
|
||||
// Go reference: jetstream_cluster.go processConsumerEntries command routing
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task ApplyCommittedEntriesAsync_centry_ack_increments_ack_count()
|
||||
{
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
await srg.Leader.ProposeAsync("centry:ack", default);
|
||||
|
||||
await srg.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
srg.AckCount.ShouldBe(1L);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ApplyCommittedEntriesAsync_centry_nak_increments_nak_count()
|
||||
{
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
await srg.Leader.ProposeAsync("centry:nak", default);
|
||||
|
||||
await srg.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
srg.NakCount.ShouldBe(1L);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ApplyCommittedEntriesAsync_centry_deliver_increments_deliver_count()
|
||||
{
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
await srg.Leader.ProposeAsync("centry:deliver", default);
|
||||
|
||||
await srg.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
srg.DeliverCount.ShouldBe(1L);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Unknown entry type — logged and skipped
|
||||
// Go reference: jetstream_cluster.go default case in apply loop
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Unknown_entry_type_logged_and_skipped()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go processStreamEntries — unknown ops are skipped.
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
await srg.Leader.ProposeAsync("smsg:unknown-op", default);
|
||||
|
||||
// Should not throw.
|
||||
await srg.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
// Message count unchanged; unknown command is recorded.
|
||||
srg.MessageCount.ShouldBe(0L);
|
||||
srg.LastUnknownCommand.ShouldBe("smsg:unknown-op");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Unknown_centry_op_logged_and_skipped()
|
||||
{
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
await srg.Leader.ProposeAsync("centry:bogus", default);
|
||||
|
||||
await srg.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
srg.AckCount.ShouldBe(0L);
|
||||
srg.LastUnknownCommand.ShouldBe("centry:bogus");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Completely_unknown_prefix_is_logged_and_skipped()
|
||||
{
|
||||
// A command with an entirely unrecognised prefix is recorded and skipped.
|
||||
var srg = new StreamReplicaGroup("ORDERS", 1);
|
||||
await srg.Leader.ProposeAsync("xyzzy:something", default);
|
||||
|
||||
await srg.ApplyCommittedEntriesAsync(default);
|
||||
|
||||
srg.LastUnknownCommand.ShouldBe("xyzzy:something");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// MetaEntryType enum values exist
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void MetaEntryType_enum_includes_PeerAdd_and_PeerRemove()
|
||||
{
|
||||
// Compile-time check: ensures the enum values exist.
|
||||
_ = MetaEntryType.PeerAdd;
|
||||
_ = MetaEntryType.PeerRemove;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// StreamMsgOp and ConsumerOp enum values exist
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void StreamMsgOp_enum_has_expected_values()
|
||||
{
|
||||
_ = StreamMsgOp.Store;
|
||||
_ = StreamMsgOp.Remove;
|
||||
_ = StreamMsgOp.Purge;
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ConsumerOp_enum_has_expected_values()
|
||||
{
|
||||
_ = ConsumerOp.Ack;
|
||||
_ = ConsumerOp.Nak;
|
||||
_ = ConsumerOp.Deliver;
|
||||
_ = ConsumerOp.Term;
|
||||
_ = ConsumerOp.Progress;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,169 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for validated stream/consumer assignment processing.
|
||||
/// Go reference: jetstream_cluster.go:4541-5925.
|
||||
/// </summary>
|
||||
public class JetStreamAssignmentProcessingTests
{
    [Fact]
    public void ProcessStreamAssignment_validates_config()
    {
        // A well-formed assignment is accepted and registered.
        var js = new JetStreamMetaGroup(3);
        var assignment = new StreamAssignment
        {
            StreamName = "valid-stream",
            Group = new RaftGroup { Name = "rg-1", Peers = ["n1", "n2", "n3"] },
            ConfigJson = """{"subjects":["test.>"]}""",
        };

        js.ProcessStreamAssignment(assignment).ShouldBeTrue();
        js.StreamCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessStreamAssignment_rejects_empty_name()
    {
        // An empty stream name fails validation and nothing is registered.
        var js = new JetStreamMetaGroup(3);

        js.ProcessStreamAssignment(CreateStreamAssignment("", "rg-1")).ShouldBeFalse();
        js.StreamCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_applies_config_change()
    {
        // An update replaces the stored config for an existing stream.
        var js = new JetStreamMetaGroup(3);
        js.ProcessStreamAssignment(CreateStreamAssignment("updatable", "rg-u", """{"subjects":["old.>"]}"""));

        js.ProcessUpdateStreamAssignment(
            CreateStreamAssignment("updatable", "rg-u", """{"subjects":["new.>"]}""")).ShouldBeTrue();

        js.GetStreamAssignment("updatable")!.ConfigJson.ShouldContain("new.>");
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_returns_false_for_nonexistent()
    {
        // Updating a stream that was never assigned is rejected.
        var js = new JetStreamMetaGroup(3);

        js.ProcessUpdateStreamAssignment(CreateStreamAssignment("ghost", "rg-g")).ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerAssignment_requires_existing_stream()
    {
        // A consumer cannot be assigned against a stream that does not exist.
        var js = new JetStreamMetaGroup(3);
        var consumer = new ConsumerAssignment
        {
            ConsumerName = "orphan-consumer",
            StreamName = "nonexistent-stream",
            Group = new RaftGroup { Name = "rg-c", Peers = ["n1", "n2", "n3"] },
        };

        js.ProcessConsumerAssignment(consumer).ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerAssignment_succeeds_with_existing_stream()
    {
        // With its parent stream present, the consumer assignment is accepted.
        var js = new JetStreamMetaGroup(3);
        js.ProcessStreamAssignment(CreateStreamAssignment("s1", "rg-s1"));

        var consumer = new ConsumerAssignment
        {
            ConsumerName = "c1",
            StreamName = "s1",
            Group = new RaftGroup { Name = "rg-c1", Peers = ["n1", "n2", "n3"] },
        };

        js.ProcessConsumerAssignment(consumer).ShouldBeTrue();
        js.ConsumerCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessStreamRemoval_cascades_to_consumers()
    {
        // Removing a stream also removes every consumer attached to it.
        var js = new JetStreamMetaGroup(3);
        js.ProcessStreamAssignment(CreateStreamAssignment("cascade", "rg-cas"));
        js.ProcessConsumerAssignment(new ConsumerAssignment
        {
            ConsumerName = "c1",
            StreamName = "cascade",
            Group = new RaftGroup { Name = "rg-c1", Peers = ["n1", "n2", "n3"] },
        });

        js.ProcessStreamRemoval("cascade").ShouldBeTrue();
        js.StreamCount.ShouldBe(0);
        js.ConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessStreamRemoval_returns_false_for_nonexistent()
    {
        var js = new JetStreamMetaGroup(3);

        js.ProcessStreamRemoval("nope").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_returns_false_for_nonexistent_stream()
    {
        var js = new JetStreamMetaGroup(3);

        js.ProcessConsumerRemoval("ghost", "c1").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_returns_false_for_nonexistent_consumer()
    {
        // The stream exists, but no such consumer was ever assigned.
        var js = new JetStreamMetaGroup(3);
        js.ProcessStreamAssignment(CreateStreamAssignment("s1", "rg-s1"));

        js.ProcessConsumerRemoval("s1", "nope").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_succeeds()
    {
        var js = new JetStreamMetaGroup(3);
        js.ProcessStreamAssignment(CreateStreamAssignment("s1", "rg-s1"));
        js.ProcessConsumerAssignment(new ConsumerAssignment
        {
            ConsumerName = "c1",
            StreamName = "s1",
            Group = new RaftGroup { Name = "rg-c1", Peers = ["n1", "n2"] },
        });

        js.ProcessConsumerRemoval("s1", "c1").ShouldBeTrue();
        js.ConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_preserves_consumers()
    {
        // A config update must not drop consumers already attached to the stream.
        var js = new JetStreamMetaGroup(3);
        js.ProcessStreamAssignment(CreateStreamAssignment("s1", "rg-s1", """{"subjects":["old"]}"""));
        js.ProcessConsumerAssignment(new ConsumerAssignment
        {
            ConsumerName = "c1",
            StreamName = "s1",
            Group = new RaftGroup { Name = "rg-c1", Peers = ["n1", "n2"] },
        });

        js.ProcessUpdateStreamAssignment(
            CreateStreamAssignment("s1", "rg-s1", """{"subjects":["new"]}""")).ShouldBeTrue();

        js.ConsumerCount.ShouldBe(1);
        js.GetConsumerAssignment("s1", "c1").ShouldNotBeNull();
    }

    // Helper to create a StreamAssignment (StreamName is `required` so we must always provide it)
    private static StreamAssignment CreateStreamAssignment(string name, string groupName, string config = "{}")
    {
        return new StreamAssignment
        {
            StreamName = name,
            Group = new RaftGroup { Name = groupName, Peers = ["n1", "n2", "n3"] },
            ConfigJson = config,
        };
    }
}
|
||||
@@ -0,0 +1,644 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: consumer creation, ack propagation, consumer state,
|
||||
// ephemeral consumers, consumer scaling, pull/push delivery,
|
||||
// redelivery, ack policies, filter subjects.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering clustered JetStream consumer creation, leader election,
|
||||
/// ack propagation, delivery policies, ephemeral consumers, and scaling.
|
||||
/// Ported from Go jetstream_cluster_1_test.go and jetstream_cluster_2_test.go.
|
||||
/// </summary>
|
||||
public class JetStreamClusterConsumerTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerState server/jetstream_cluster_1_test.go:700
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_state_tracks_pending_after_fetch()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("CSTATE", ["cs.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("CSTATE", "track", filterSubject: "cs.>", ackPolicy: AckPolicy.Explicit);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("cs.event", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("CSTATE", "track", 3);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
|
||||
var pending = fx.GetPendingCount("CSTATE", "track");
|
||||
pending.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerRedeliveredInfo server/jetstream_cluster_1_test.go:659
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_redelivery_marks_messages_as_redelivered()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("REDELIV", ["rd.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("REDELIV", "rdc", filterSubject: "rd.>",
|
||||
ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 5);
|
||||
|
||||
await fx.PublishAsync("rd.event", "will-redeliver");
|
||||
|
||||
// First fetch should get the message
|
||||
var batch1 = await fx.FetchAsync("REDELIV", "rdc", 1);
|
||||
batch1.Messages.Count.ShouldBe(1);
|
||||
batch1.Messages[0].Redelivered.ShouldBeFalse();
|
||||
|
||||
// Wait for ack timeout
|
||||
await Task.Delay(50);
|
||||
|
||||
// Second fetch should get redelivered message
|
||||
var batch2 = await fx.FetchAsync("REDELIV", "rdc", 1);
|
||||
batch2.Messages.Count.ShouldBe(1);
|
||||
batch2.Messages[0].Redelivered.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterFullConsumerState server/jetstream_cluster_1_test.go:795
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Full_consumer_state_reflects_ack_floor_after_ack_all()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("FULLCS", ["fcs.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("FULLCS", "full", filterSubject: "fcs.>", ackPolicy: AckPolicy.All);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await fx.PublishAsync("fcs.event", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("FULLCS", "full", 10);
|
||||
batch.Messages.Count.ShouldBe(10);
|
||||
|
||||
// Ack all up to sequence 5
|
||||
fx.AckAll("FULLCS", "full", 5);
|
||||
|
||||
var pending = fx.GetPendingCount("FULLCS", "full");
|
||||
pending.ShouldBe(5);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterEphemeralConsumerNoImmediateInterest server/jetstream_cluster_1_test.go:2481
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Ephemeral_consumer_creation_succeeds()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("EPHEM", ["eph.>"], replicas: 3);
|
||||
var resp = await fx.CreateConsumerAsync("EPHEM", null, ephemeral: true);
|
||||
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
resp.ConsumerInfo!.Config.DurableName.ShouldNotBeNullOrEmpty();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterEphemeralConsumersNotReplicated server/jetstream_cluster_1_test.go:2599
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Multiple_ephemeral_consumers_have_unique_names()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("EPHUNIQ", ["eu.>"], replicas: 3);
|
||||
|
||||
var resp1 = await fx.CreateConsumerAsync("EPHUNIQ", null, ephemeral: true);
|
||||
var resp2 = await fx.CreateConsumerAsync("EPHUNIQ", null, ephemeral: true);
|
||||
|
||||
resp1.ConsumerInfo!.Config.DurableName.ShouldNotBe(resp2.ConsumerInfo!.Config.DurableName);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterCreateConcurrentDurableConsumers server/jetstream_cluster_2_test.go:1572
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Concurrent_durable_consumer_creation_is_idempotent()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("CONC", ["conc.>"], replicas: 3);
|
||||
|
||||
// Create same consumer twice; both should succeed
|
||||
var resp1 = await fx.CreateConsumerAsync("CONC", "same");
|
||||
var resp2 = await fx.CreateConsumerAsync("CONC", "same");
|
||||
|
||||
resp1.ConsumerInfo.ShouldNotBeNull();
|
||||
resp2.ConsumerInfo.ShouldNotBeNull();
|
||||
resp1.ConsumerInfo!.Config.DurableName.ShouldBe("same");
|
||||
resp2.ConsumerInfo!.Config.DurableName.ShouldBe("same");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterPullConsumerLeakedSubs server/jetstream_cluster_2_test.go:2239
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Pull_consumer_fetch_returns_correct_batch_size()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("PULLBS", ["pb.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("PULLBS", "puller", filterSubject: "pb.>", ackPolicy: AckPolicy.None);
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
await fx.PublishAsync("pb.event", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("PULLBS", "puller", 5);
|
||||
batch.Messages.Count.ShouldBe(5);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerLastActiveReporting server/jetstream_cluster_2_test.go:2371
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_info_returns_config_after_creation()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("CINFO", ["ci.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("CINFO", "info_dur", filterSubject: "ci.>", ackPolicy: AckPolicy.Explicit);
|
||||
|
||||
var info = await fx.GetConsumerInfoAsync("CINFO", "info_dur");
|
||||
info.ShouldNotBeNull();
|
||||
info.Config.DurableName.ShouldBe("info_dur");
|
||||
info.Config.AckPolicy.ShouldBe(AckPolicy.Explicit);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAckPendingWithExpired server/jetstream_cluster_2_test.go:309
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Ack_pending_tracks_expired_messages()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("ACKEXP", ["ae.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("ACKEXP", "acker", filterSubject: "ae.>",
|
||||
ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 10);
|
||||
|
||||
await fx.PublishAsync("ae.event", "will-expire");
|
||||
|
||||
// Fetch to register pending
|
||||
var batch1 = await fx.FetchAsync("ACKEXP", "acker", 1);
|
||||
batch1.Messages.Count.ShouldBe(1);
|
||||
|
||||
fx.GetPendingCount("ACKEXP", "acker").ShouldBe(1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAckPendingWithMaxRedelivered server/jetstream_cluster_2_test.go:377
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Max_deliver_limits_redelivery_attempts()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("MAXRED", ["mr.>"], replicas: 3);
|
||||
// maxDeliver=2: allows initial delivery (deliveries=1) + one redelivery (deliveries=2).
|
||||
// After ScheduleRedelivery increments to deliveries=2, the next check has deliveries=2 > maxDeliver=2 = false,
|
||||
// so it redelivers once more. Only at deliveries=3 > 2 does it stop.
|
||||
await fx.CreateConsumerAsync("MAXRED", "maxr", filterSubject: "mr.>",
|
||||
ackPolicy: AckPolicy.Explicit, ackWaitMs: 1, maxDeliver: 2);
|
||||
|
||||
await fx.PublishAsync("mr.event", "limited-redeliver");
|
||||
|
||||
// First fetch (initial delivery, Register sets deliveries=1)
|
||||
var batch1 = await fx.FetchAsync("MAXRED", "maxr", 1);
|
||||
batch1.Messages.Count.ShouldBe(1);
|
||||
|
||||
// Wait for expiry
|
||||
await Task.Delay(50);
|
||||
|
||||
// Second fetch: TryGetExpired returns deliveries=1, 1 > 2 is false, so redeliver.
|
||||
// ScheduleRedelivery increments to deliveries=2.
|
||||
var batch2 = await fx.FetchAsync("MAXRED", "maxr", 1);
|
||||
batch2.Messages.Count.ShouldBe(1);
|
||||
batch2.Messages[0].Redelivered.ShouldBeTrue();
|
||||
|
||||
// Wait for expiry
|
||||
await Task.Delay(50);
|
||||
|
||||
// Third fetch: TryGetExpired returns deliveries=2, 2 > 2 is false, so redeliver again.
|
||||
// ScheduleRedelivery increments to deliveries=3.
|
||||
var batch3 = await fx.FetchAsync("MAXRED", "maxr", 1);
|
||||
batch3.Messages.Count.ShouldBe(1);
|
||||
batch3.Messages[0].Redelivered.ShouldBeTrue();
|
||||
|
||||
// Wait for expiry
|
||||
await Task.Delay(50);
|
||||
|
||||
// Fourth fetch: TryGetExpired returns deliveries=3, 3 > 2 is true, so AckAll triggers
|
||||
// and returns empty batch (max deliver exceeded).
|
||||
var batch4 = await fx.FetchAsync("MAXRED", "maxr", 1);
|
||||
batch4.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxConsumers server/jetstream_cluster_2_test.go:1978
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_delete_succeeds_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("CDEL", ["cdel.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("CDEL", "to_delete");
|
||||
|
||||
var del = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}CDEL.to_delete", "{}");
|
||||
del.Success.ShouldBeTrue();
|
||||
|
||||
var info = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}CDEL.to_delete", "{}");
|
||||
info.Error.ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterFlowControlRequiresHeartbeats server/jetstream_cluster_2_test.go:2712
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_with_filter_subjects_delivers_matching_only()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("FILT", ["filt.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("FILT", "filtered", filterSubject: "filt.alpha");
|
||||
|
||||
await fx.PublishAsync("filt.alpha", "match");
|
||||
await fx.PublishAsync("filt.beta", "no-match");
|
||||
await fx.PublishAsync("filt.alpha", "match2");
|
||||
|
||||
var batch = await fx.FetchAsync("FILT", "filtered", 10);
|
||||
batch.Messages.Count.ShouldBe(2);
|
||||
batch.Messages[0].Subject.ShouldBe("filt.alpha");
|
||||
batch.Messages[1].Subject.ShouldBe("filt.alpha");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerScaleUp server/jetstream_cluster_1_test.go:4203
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_pause_and_resume_via_api()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("PAUSE", ["pause.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("PAUSE", "pausable");
|
||||
|
||||
var pauseResp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":true}""");
|
||||
pauseResp.Success.ShouldBeTrue();
|
||||
|
||||
var resumeResp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerPause}PAUSE.pausable", """{"pause":false}""");
|
||||
resumeResp.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerResetPendingDeliveriesOnMaxAckPendingUpdate
|
||||
// server/jetstream_cluster_1_test.go:8696
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_reset_resets_next_sequence_and_returns_success()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("RESET", ["reset.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("RESET", "resettable", filterSubject: "reset.>");
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("reset.event", $"msg-{i}");
|
||||
|
||||
// Fetch some messages to advance the consumer
|
||||
var batch1 = await fx.FetchAsync("RESET", "resettable", 3);
|
||||
batch1.Messages.Count.ShouldBe(3);
|
||||
|
||||
// Reset via API
|
||||
var resetResp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerReset}RESET.resettable", "{}");
|
||||
resetResp.Success.ShouldBeTrue();
|
||||
|
||||
// After reset, consumer should re-deliver from sequence 1
|
||||
var batch2 = await fx.FetchAsync("RESET", "resettable", 5);
|
||||
batch2.Messages.Count.ShouldBe(5);
|
||||
batch2.Messages[0].Sequence.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterPushConsumerQueueGroup server/jetstream_cluster_2_test.go:2300
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Push_consumer_creation_with_heartbeat()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("PUSHHB", ["ph.>"], replicas: 3);
|
||||
var resp = await fx.CreateConsumerAsync("PUSHHB", "pusher", push: true, heartbeatMs: 100);
|
||||
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
resp.ConsumerInfo!.Config.Push.ShouldBeTrue();
|
||||
resp.ConsumerInfo.Config.HeartbeatMs.ShouldBe(100);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterScaleConsumer server/jetstream_cluster_1_test.go:4109
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_unpin_via_api()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("UNPIN", ["unpin.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("UNPIN", "pinned");
|
||||
|
||||
var resp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerUnpin}UNPIN.pinned", "{}");
|
||||
resp.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional: Consumer AckAll policy acks all up to given sequence
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AckAll_policy_consumer_acks_all_preceding_messages()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("ACKALL", ["aa.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("ACKALL", "acker", filterSubject: "aa.>", ackPolicy: AckPolicy.All);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await fx.PublishAsync("aa.event", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("ACKALL", "acker", 10);
|
||||
batch.Messages.Count.ShouldBe(10);
|
||||
|
||||
// Ack up to seq 7 (all 1-7 should be acked, 8-10 remain pending)
|
||||
fx.AckAll("ACKALL", "acker", 7);
|
||||
fx.GetPendingCount("ACKALL", "acker").ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional: DeliverPolicy.Last consumer starts at last message
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task DeliverPolicy_Last_consumer_starts_at_last_sequence()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("DLAST", ["dl.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("dl.event", $"msg-{i}");
|
||||
|
||||
await fx.CreateConsumerAsync("DLAST", "last_cons", filterSubject: "dl.>",
|
||||
deliverPolicy: DeliverPolicy.Last);
|
||||
|
||||
var batch = await fx.FetchAsync("DLAST", "last_cons", 10);
|
||||
batch.Messages.Count.ShouldBe(1);
|
||||
batch.Messages[0].Sequence.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional: DeliverPolicy.New consumer skips existing messages
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task DeliverPolicy_New_consumer_skips_existing()
|
||||
{
|
||||
await using var fx = await ClusterConsumerFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("DNEW", ["dn.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("dn.event", $"msg-{i}");
|
||||
|
||||
await fx.CreateConsumerAsync("DNEW", "new_cons", filterSubject: "dn.>",
|
||||
deliverPolicy: DeliverPolicy.New);
|
||||
|
||||
// Should get no messages since consumer starts at LastSeq+1
|
||||
var batch = await fx.FetchAsync("DNEW", "new_cons", 10);
|
||||
batch.Messages.Count.ShouldBe(0);
|
||||
|
||||
// Publish a new message after consumer creation
|
||||
await fx.PublishAsync("dn.event", "after-consumer");
|
||||
|
||||
var batch2 = await fx.FetchAsync("DNEW", "new_cons", 10);
|
||||
batch2.Messages.Count.ShouldBe(1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: DeliverPolicy.ByStartSequence
// ---------------------------------------------------------------

[Fact]
public async Task DeliverPolicy_ByStartSequence_starts_at_given_sequence()
{
    // ByStartSequence consumers begin delivery at the configured sequence.
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("DSTART", ["ds.>"], replicas: 3);

    var published = 0;
    while (published < 10)
    {
        await cluster.PublishAsync("ds.event", $"msg-{published}");
        published++;
    }

    await cluster.CreateConsumerAsync("DSTART", "start_cons", filterSubject: "ds.>",
        deliverPolicy: DeliverPolicy.ByStartSequence, optStartSeq: 7);

    // Starting at 7 with 10 messages leaves sequences 7, 8, 9, 10.
    var fetched = await cluster.FetchAsync("DSTART", "start_cons", 10);
    fetched.Messages.Count.ShouldBe(4); // seq 7, 8, 9, 10
    fetched.Messages[0].Sequence.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: Multiple filter subjects
// ---------------------------------------------------------------

[Fact]
public async Task Consumer_with_multiple_filter_subjects()
{
    // A consumer configured with several filter subjects only receives
    // messages whose subject matches one of them.
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("MFILT", ["mf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("MFILT", "multi_filt",
        filterSubjects: ["mf.alpha", "mf.gamma"]);

    // Two of the four subjects match the filter set.
    await cluster.PublishAsync("mf.alpha", "a");
    await cluster.PublishAsync("mf.beta", "b");
    await cluster.PublishAsync("mf.gamma", "g");
    await cluster.PublishAsync("mf.delta", "d");

    var fetched = await cluster.FetchAsync("MFILT", "multi_filt", 10);
    fetched.Messages.Count.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Additional: NoWait fetch returns empty when no messages
// ---------------------------------------------------------------

[Fact]
public async Task NoWait_fetch_returns_empty_when_no_pending()
{
    // A no_wait pull request must return immediately with an empty batch
    // rather than blocking when the consumer has nothing pending.
    await using var cluster = await ClusterConsumerFixture.StartAsync(nodes: 3);

    await cluster.CreateStreamAsync("NOWAIT", ["nw.>"], replicas: 3);
    await cluster.CreateConsumerAsync("NOWAIT", "nw_cons", filterSubject: "nw.>");

    var fetched = await cluster.FetchNoWaitAsync("NOWAIT", "nw_cons", 5);
    fetched.Messages.Count.ShouldBe(0);
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for JetStream cluster consumer tests.
/// Wires an in-process meta group, stream/consumer managers, API router,
/// and publisher together; no real network servers are started.
/// </summary>
internal sealed class ClusterConsumerFixture : IAsyncDisposable
{
    // NOTE(review): _metaGroup is currently unread by this fixture; it is
    // retained for constructor symmetry with the other cluster fixtures.
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private ClusterConsumerFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>Builds the simulated cluster with the given node count.</summary>
    public static Task<ClusterConsumerFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var consumerManager = new ConsumerManager(meta);
        var streamManager = new StreamManager(meta, consumerManager: consumerManager);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        var publisher = new JetStreamPublisher(streamManager);
        return Task.FromResult(new ClusterConsumerFixture(meta, streamManager, consumerManager, router, publisher));
    }

    /// <summary>Creates (or updates) a stream; throws when the API reports an error.</summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });
        if (response.Error is not null)
            throw new InvalidOperationException(response.Error.Description);
        return Task.CompletedTask;
    }

    /// <summary>
    /// Creates (or updates) a consumer on <paramref name="stream"/>.
    /// Single-subject filtering uses <paramref name="filterSubject"/>;
    /// multi-subject filtering uses <paramref name="filterSubjects"/>.
    /// </summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(
        string stream,
        string? durableName,
        string? filterSubject = null,
        AckPolicy ackPolicy = AckPolicy.None,
        int ackWaitMs = 30_000,
        int maxDeliver = 1,
        bool ephemeral = false,
        bool push = false,
        int heartbeatMs = 0,
        DeliverPolicy deliverPolicy = DeliverPolicy.All,
        ulong optStartSeq = 0,
        IReadOnlyList<string>? filterSubjects = null)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName ?? string.Empty,
            AckPolicy = ackPolicy,
            AckWaitMs = ackWaitMs,
            MaxDeliver = maxDeliver,
            Ephemeral = ephemeral,
            Push = push,
            HeartbeatMs = heartbeatMs,
            DeliverPolicy = deliverPolicy,
            OptStartSeq = optStartSeq,
        };
        if (!string.IsNullOrWhiteSpace(filterSubject))
            config.FilterSubject = filterSubject;
        if (filterSubjects is { Count: > 0 })
            config.FilterSubjects = [.. filterSubjects];

        return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
    }

    /// <summary>
    /// Publishes a payload into whichever stream captures the subject, forwards the
    /// stored message to the consumer manager, and returns the ack.
    /// Throws <see cref="InvalidOperationException"/> when no stream subject matches.
    /// </summary>
    public async Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (!_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
            throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");

        if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
        {
            // Await the store load directly; the previous
            // .GetAwaiter().GetResult() blocked a thread-pool thread
            // (sync-over-async) inside a Task-returning method.
            var stored = await handle.Store.LoadAsync(ack.Seq, default);
            if (stored != null)
                _consumerManager.OnPublished(ack.Stream, stored);
        }

        return ack;
    }

    /// <summary>Pull-fetches up to <paramref name="batch"/> messages for a durable.</summary>
    public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();

    /// <summary>Pull-fetches with no_wait semantics: returns immediately when nothing is pending.</summary>
    public Task<PullFetchBatch> FetchNoWaitAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, new PullFetchRequest
        {
            Batch = batch,
            NoWait = true,
        }, _streamManager, default).AsTask();

    /// <summary>Acks every delivered message up to and including <paramref name="sequence"/>.</summary>
    public void AckAll(string stream, string durableName, ulong sequence)
        => _consumerManager.AckAll(stream, durableName, sequence);

    /// <summary>Returns the number of unacknowledged pending messages for a durable.</summary>
    public int GetPendingCount(string stream, string durableName)
        => _consumerManager.GetPendingCount(stream, durableName);

    /// <summary>Fetches consumer info; throws when the consumer does not exist.</summary>
    public Task<JetStreamConsumerInfo> GetConsumerInfoAsync(string stream, string durableName)
    {
        var resp = _consumerManager.GetInfo(stream, durableName);
        if (resp.ConsumerInfo == null)
            throw new InvalidOperationException("Consumer not found.");
        return Task.FromResult(resp.ConsumerInfo);
    }

    /// <summary>Routes a raw JetStream API request through the router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_router.Route(subject, Encoding.UTF8.GetBytes(payload)));

    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,532 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: stream leader stepdown, consumer leader stepdown,
|
||||
// meta leader stepdown, peer removal, node loss recovery,
|
||||
// snapshot catchup, consumer failover, data preservation.
|
||||
using System.Reflection;
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests covering JetStream cluster failover scenarios: leader stepdown,
/// peer removal, node loss/recovery, snapshot catchup, and consumer failover.
/// Ported from Go jetstream_cluster_1_test.go.
/// </summary>
public class JetStreamClusterFailoverTests
{
    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_leader_stepdown_elects_new_leader_and_preserves_data()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("STEPDOWN", ["sd.>"], replicas: 3);

        // Acks must report monotonically increasing sequences 1..10.
        for (var i = 1; i <= 10; i++)
            (await fx.PublishAsync($"sd.{i}", $"msg-{i}")).Seq.ShouldBe((ulong)i);

        var leaderBefore = fx.GetStreamLeaderId("STEPDOWN");
        leaderBefore.ShouldNotBeNullOrWhiteSpace();

        var resp = await fx.StepDownStreamLeaderAsync("STEPDOWN");
        resp.Success.ShouldBeTrue();

        // Stepdown must hand leadership to a different node.
        var leaderAfter = fx.GetStreamLeaderId("STEPDOWN");
        leaderAfter.ShouldNotBe(leaderBefore);

        // All previously published data survives the transition.
        var state = await fx.GetStreamStateAsync("STEPDOWN");
        state.Messages.ShouldBe(10UL);
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(10UL);

        // New leader accepts writes
        var ack = await fx.PublishAsync("sd.post", "after-stepdown");
        ack.Seq.ShouldBe(11UL);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeaderStepdown server/jetstream_cluster_1_test.go:5464
    // ---------------------------------------------------------------

    [Fact]
    public async Task Meta_leader_stepdown_increments_version_and_preserves_streams()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("META_SD", ["meta.>"], replicas: 3);

        var before = fx.GetMetaState();
        before.ClusterSize.ShouldBe(3);
        var leaderBefore = before.LeaderId;
        var versionBefore = before.LeadershipVersion;

        var resp = await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
        resp.Success.ShouldBeTrue();

        // A new meta leader, a bumped leadership version, and intact stream
        // registrations are all expected after stepdown.
        var after = fx.GetMetaState();
        after.LeaderId.ShouldNotBe(leaderBefore);
        after.LeadershipVersion.ShouldBe(versionBefore + 1);
        after.Streams.ShouldContain("META_SD");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    // ---------------------------------------------------------------

    [Fact]
    public async Task Consecutive_stepdowns_cycle_through_distinct_leaders()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("CYCLE", ["cyc.>"], replicas: 3);

        var leaders = new List<string> { fx.GetStreamLeaderId("CYCLE") };

        (await fx.StepDownStreamLeaderAsync("CYCLE")).Success.ShouldBeTrue();
        leaders.Add(fx.GetStreamLeaderId("CYCLE"));

        (await fx.StepDownStreamLeaderAsync("CYCLE")).Success.ShouldBeTrue();
        leaders.Add(fx.GetStreamLeaderId("CYCLE"));

        // Each stepdown must select a leader different from its predecessor.
        leaders[1].ShouldNotBe(leaders[0]);
        leaders[2].ShouldNotBe(leaders[1]);

        // The stream is still writable after two leadership changes.
        var ack = await fx.PublishAsync("cyc.verify", "alive");
        ack.Stream.ShouldBe("CYCLE");
        ack.Seq.ShouldBeGreaterThan(0UL);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterPeerRemovalAPI server/jetstream_cluster_1_test.go:3469
    // ---------------------------------------------------------------

    [Fact]
    public async Task Peer_removal_api_returns_success()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("PEERREM", ["pr.>"], replicas: 3);

        // Removing peer "n2" from the stream's replica group should succeed.
        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamPeerRemove}PEERREM", """{"peer":"n2"}""");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterPeerRemovalAndStreamReassignment server/jetstream_cluster_1_test.go:3544
    // ---------------------------------------------------------------

    [Fact]
    public async Task Peer_removal_preserves_stream_data()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("REASSIGN", ["ra.>"], replicas: 3);

        for (var i = 0; i < 5; i++)
            await fx.PublishAsync("ra.event", $"msg-{i}");

        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamPeerRemove}REASSIGN", """{"peer":"n2"}""")).Success.ShouldBeTrue();

        // Peer removal must not lose any stored messages.
        var state = await fx.GetStreamStateAsync("REASSIGN");
        state.Messages.ShouldBe(5UL);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterConsumerLeaderStepdown (consumer stepdown)
    // ---------------------------------------------------------------

    [Fact]
    public async Task Consumer_leader_stepdown_api_returns_success()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("CLSD", ["clsd.>"], replicas: 3);
        await fx.CreateConsumerAsync("CLSD", "dur1");

        // Consumer-level leader stepdown is addressed as <stream>.<durable>.
        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerLeaderStepdown}CLSD.dur1", "{}");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_publishes_survive_leader_stepdown_and_catchup()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("CATCHUP", ["cu.>"], replicas: 3);

        // Publish some messages
        for (var i = 0; i < 10; i++)
            await fx.PublishAsync("cu.event", $"before-{i}");

        // Step down the leader
        (await fx.StepDownStreamLeaderAsync("CATCHUP")).Success.ShouldBeTrue();

        // Publish more messages after stepdown
        for (var i = 0; i < 10; i++)
            await fx.PublishAsync("cu.event", $"after-{i}");

        // Sequences continue seamlessly across the leadership change.
        var state = await fx.GetStreamStateAsync("CATCHUP");
        state.Messages.ShouldBe(20UL);
        state.LastSeq.ShouldBe(20UL);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamSnapshotCatchup server/jetstream_cluster_1_test.go:1667
    // ---------------------------------------------------------------

    [Fact]
    public async Task Snapshot_and_restore_survives_leader_transition()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("SNAPCAT", ["sc.>"], replicas: 3);

        for (var i = 0; i < 10; i++)
            await fx.PublishAsync("sc.event", $"msg-{i}");

        // Take snapshot
        var snapshot = await fx.RequestAsync($"{JetStreamApiSubjects.StreamSnapshot}SNAPCAT", "{}");
        snapshot.Snapshot.ShouldNotBeNull();

        // Step down leader
        (await fx.StepDownStreamLeaderAsync("SNAPCAT")).Success.ShouldBeTrue();

        // Purge and restore
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamPurge}SNAPCAT", "{}")).Success.ShouldBeTrue();
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamRestore}SNAPCAT", snapshot.Snapshot!.Payload)).Success.ShouldBeTrue();

        // The restore brings back everything captured in the snapshot.
        var state = await fx.GetStreamStateAsync("SNAPCAT");
        state.Messages.ShouldBe(10UL);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamSnapshotCatchupWithPurge server/jetstream_cluster_1_test.go:1822
    // ---------------------------------------------------------------

    [Fact]
    public async Task Snapshot_restore_after_purge_preserves_original_data()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("PURGECAT", ["pc.>"], replicas: 3);

        for (var i = 0; i < 20; i++)
            await fx.PublishAsync("pc.event", $"msg-{i}");

        var snapshot = await fx.RequestAsync($"{JetStreamApiSubjects.StreamSnapshot}PURGECAT", "{}");

        // Purge the stream
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGECAT", "{}")).Success.ShouldBeTrue();
        var afterPurge = await fx.GetStreamStateAsync("PURGECAT");
        afterPurge.Messages.ShouldBe(0UL);

        // Restore from snapshot
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamRestore}PURGECAT", snapshot.Snapshot!.Payload)).Success.ShouldBeTrue();
        var restored = await fx.GetStreamStateAsync("PURGECAT");
        restored.Messages.ShouldBe(20UL);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMetaSnapshotsAndCatchup server/jetstream_cluster_1_test.go:833
    // ---------------------------------------------------------------

    [Fact]
    public async Task Meta_state_survives_multiple_stepdowns()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("META1", ["m1.>"], replicas: 3);
        await fx.CreateStreamAsync("META2", ["m2.>"], replicas: 3);

        // Step down meta leader twice
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        // Stream registrations survive repeated meta leadership changes.
        var state = fx.GetMetaState();
        state.Streams.ShouldContain("META1");
        state.Streams.ShouldContain("META2");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMetaSnapshotsMultiChange server/jetstream_cluster_1_test.go:881
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_delete_and_create_across_stepdowns_reflected_in_stream_names()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("MULTI1", ["mul1.>"], replicas: 3);
        await fx.CreateStreamAsync("MULTI2", ["mul2.>"], replicas: 3);

        // Delete one stream
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}MULTI1", "{}")).Success.ShouldBeTrue();

        // Step down meta leader
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        // Create another stream
        await fx.CreateStreamAsync("MULTI3", ["mul3.>"], replicas: 3);

        // Verify via stream names API (reflects actual active streams)
        var names = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames.ShouldNotBeNull();
        names.StreamNames!.ShouldNotContain("MULTI1");
        names.StreamNames.ShouldContain("MULTI2");
        names.StreamNames.ShouldContain("MULTI3");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterDeleteMsgAndRestart server/jetstream_cluster_1_test.go:1785
    // ---------------------------------------------------------------

    [Fact]
    public async Task Delete_message_survives_leader_stepdown()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("DELMSGSD", ["dms.>"], replicas: 3);

        for (var i = 0; i < 5; i++)
            await fx.PublishAsync("dms.event", $"msg-{i}");

        // Delete one message (seq 3), then force a leadership change.
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamMessageDelete}DELMSGSD", """{"seq":3}""")).Success.ShouldBeTrue();

        (await fx.StepDownStreamLeaderAsync("DELMSGSD")).Success.ShouldBeTrue();

        // 5 published minus 1 deleted remains after the stepdown.
        var state = await fx.GetStreamStateAsync("DELMSGSD");
        state.Messages.ShouldBe(4UL);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterRestoreSingleConsumer server/jetstream_cluster_1_test.go:1028
    // ---------------------------------------------------------------

    [Fact]
    public async Task Consumer_survives_stream_leader_stepdown()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("CSURV", ["csv.>"], replicas: 3);
        await fx.CreateConsumerAsync("CSURV", "durable1", filterSubject: "csv.>");

        for (var i = 0; i < 10; i++)
            await fx.PublishAsync("csv.event", $"msg-{i}");

        // Fetch before stepdown
        var batch1 = await fx.FetchAsync("CSURV", "durable1", 5);
        batch1.Messages.Count.ShouldBe(5);

        // Step down stream leader
        (await fx.StepDownStreamLeaderAsync("CSURV")).Success.ShouldBeTrue();

        // Consumer should still be fetchable
        var batch2 = await fx.FetchAsync("CSURV", "durable1", 5);
        batch2.Messages.Count.ShouldBe(5);
    }

    // ---------------------------------------------------------------
    // Additional: Multiple stepdowns do not lose accumulated state
    // ---------------------------------------------------------------

    [Fact]
    public async Task Multiple_stepdowns_preserve_accumulated_messages()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("ACCUM", ["acc.>"], replicas: 3);

        for (var i = 0; i < 5; i++)
            await fx.PublishAsync("acc.event", $"batch1-{i}");

        (await fx.StepDownStreamLeaderAsync("ACCUM")).Success.ShouldBeTrue();

        for (var i = 0; i < 5; i++)
            await fx.PublishAsync("acc.event", $"batch2-{i}");

        (await fx.StepDownStreamLeaderAsync("ACCUM")).Success.ShouldBeTrue();

        for (var i = 0; i < 5; i++)
            await fx.PublishAsync("acc.event", $"batch3-{i}");

        // Three batches of five, interleaved with two stepdowns, all retained.
        var state = await fx.GetStreamStateAsync("ACCUM");
        state.Messages.ShouldBe(15UL);
        state.LastSeq.ShouldBe(15UL);
    }

    // ---------------------------------------------------------------
    // Additional: Stream info available after leader stepdown
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_info_available_after_leader_stepdown()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("INFOSD", ["isd.>"], replicas: 3);

        for (var i = 0; i < 3; i++)
            await fx.PublishAsync("isd.event", $"msg-{i}");

        (await fx.StepDownStreamLeaderAsync("INFOSD")).Success.ShouldBeTrue();

        // The stream-info API remains fully populated after the transition.
        var info = await fx.GetStreamInfoAsync("INFOSD");
        info.StreamInfo.ShouldNotBeNull();
        info.StreamInfo!.Config.Name.ShouldBe("INFOSD");
        info.StreamInfo.State.Messages.ShouldBe(3UL);
    }

    // ---------------------------------------------------------------
    // Additional: Stepdown non-existent stream does not crash
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stepdown_non_existent_stream_returns_success_gracefully()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);

        // Stepping down a non-existent stream should not throw
        var resp = await fx.StepDownStreamLeaderAsync("NONEXISTENT");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Additional: AccountPurge returns success
    // ---------------------------------------------------------------

    [Fact]
    public async Task Account_purge_api_returns_success()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("PURGEACCT", ["pa.>"], replicas: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountPurge}GLOBAL", "{}");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Additional: Server remove returns success
    // ---------------------------------------------------------------

    [Fact]
    public async Task Server_remove_api_returns_success()
    {
        await using var fx = await ClusterFailoverFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync(JetStreamApiSubjects.ServerRemove, "{}");
        resp.Success.ShouldBeTrue();
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Self-contained fixture for JetStream cluster failover tests.
|
||||
/// </summary>
|
||||
internal sealed class ClusterFailoverFixture : IAsyncDisposable
|
||||
{
|
||||
private readonly JetStreamMetaGroup _metaGroup;
|
||||
private readonly StreamManager _streamManager;
|
||||
private readonly ConsumerManager _consumerManager;
|
||||
private readonly JetStreamApiRouter _router;
|
||||
private readonly JetStreamPublisher _publisher;
|
||||
|
||||
private ClusterFailoverFixture(
|
||||
JetStreamMetaGroup metaGroup,
|
||||
StreamManager streamManager,
|
||||
ConsumerManager consumerManager,
|
||||
JetStreamApiRouter router,
|
||||
JetStreamPublisher publisher)
|
||||
{
|
||||
_metaGroup = metaGroup;
|
||||
_streamManager = streamManager;
|
||||
_consumerManager = consumerManager;
|
||||
_router = router;
|
||||
_publisher = publisher;
|
||||
}
|
||||
|
||||
public static Task<ClusterFailoverFixture> StartAsync(int nodes)
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(nodes);
|
||||
var consumerManager = new ConsumerManager(meta);
|
||||
var streamManager = new StreamManager(meta, consumerManager: consumerManager);
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
|
||||
var publisher = new JetStreamPublisher(streamManager);
|
||||
return Task.FromResult(new ClusterFailoverFixture(meta, streamManager, consumerManager, router, publisher));
|
||||
}
|
||||
|
||||
public Task CreateStreamAsync(string name, string[] subjects, int replicas)
|
||||
{
|
||||
var response = _streamManager.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = name,
|
||||
Subjects = [.. subjects],
|
||||
Replicas = replicas,
|
||||
});
|
||||
if (response.Error is not null)
|
||||
throw new InvalidOperationException(response.Error.Description);
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName, string? filterSubject = null)
|
||||
{
|
||||
var config = new ConsumerConfig { DurableName = durableName };
|
||||
if (!string.IsNullOrWhiteSpace(filterSubject))
|
||||
config.FilterSubject = filterSubject;
|
||||
return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
|
||||
}
|
||||
|
||||
public Task<PubAck> PublishAsync(string subject, string payload)
|
||||
{
|
||||
if (_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
|
||||
{
|
||||
if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
|
||||
{
|
||||
var stored = handle.Store.LoadAsync(ack.Seq, default).GetAwaiter().GetResult();
|
||||
if (stored != null)
|
||||
_consumerManager.OnPublished(ack.Stream, stored);
|
||||
}
|
||||
|
||||
return Task.FromResult(ack);
|
||||
}
|
||||
|
||||
throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");
|
||||
}
|
||||
|
||||
public Task<JetStreamApiResponse> StepDownStreamLeaderAsync(string stream)
|
||||
=> Task.FromResult(_router.Route(
|
||||
$"{JetStreamApiSubjects.StreamLeaderStepdown}{stream}",
|
||||
"{}"u8));
|
||||
|
||||
public string GetStreamLeaderId(string stream)
|
||||
{
|
||||
var field = typeof(StreamManager)
|
||||
.GetField("_replicaGroups", BindingFlags.NonPublic | BindingFlags.Instance)!;
|
||||
var groups = (ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(_streamManager)!;
|
||||
if (groups.TryGetValue(stream, out var group))
|
||||
return group.Leader.Id;
|
||||
return string.Empty;
|
||||
}
|
||||
|
||||
public MetaGroupState GetMetaState() => _metaGroup.GetState();
|
||||
|
||||
public Task<ApiStreamState> GetStreamStateAsync(string name)
|
||||
=> _streamManager.GetStateAsync(name, default).AsTask();
|
||||
|
||||
public Task<JetStreamApiResponse> GetStreamInfoAsync(string name)
|
||||
=> Task.FromResult(_streamManager.GetInfo(name));
|
||||
|
||||
public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch)
|
||||
=> _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();
|
||||
|
||||
public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
|
||||
{
|
||||
var response = _router.Route(subject, Encoding.UTF8.GetBytes(payload));
|
||||
|
||||
if (subject.Equals(JetStreamApiSubjects.MetaLeaderStepdown, StringComparison.Ordinal) && response.Success)
|
||||
_metaGroup.BecomeLeader();
|
||||
|
||||
return Task.FromResult(response);
|
||||
}
|
||||
|
||||
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
|
||||
}
|
||||
@@ -0,0 +1,415 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_helpers_test.go
|
||||
// Smoke tests for JetStreamClusterFixture — verifies that the unified fixture
|
||||
// correctly wires up the JetStream cluster simulation and exposes all capabilities
|
||||
// expected by Tasks 6-10 (leader election, stream ops, consumer ops, failover, routing).
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Smoke tests verifying that JetStreamClusterFixture starts correctly and
|
||||
/// exposes all capabilities needed by the cluster test suites (Tasks 6-10).
|
||||
/// </summary>
|
||||
public class JetStreamClusterFixtureTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Fixture creation
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: checkClusterFormed in jetstream_helpers_test.go
[Fact]
public async Task Three_node_cluster_starts_and_reports_node_count()
{
    // Starting with three nodes must be reflected directly in NodeCount.
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    cluster.NodeCount.ShouldBe(3);
}
|
||||
|
||||
[Fact]
public async Task Five_node_cluster_starts_and_reports_node_count()
{
    // Same as the three-node case but with a five-node topology.
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 5);

    cluster.NodeCount.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream operations via fixture
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Create_stream_and_publish_returns_valid_ack()
{
    // Create a replicated stream, then verify the first publish acks cleanly.
    await using var cluster = await JetStreamClusterFixture.StartAsync(nodes: 3);

    var createResponse = await cluster.CreateStreamAsync("SMOKE", ["smoke.>"], replicas: 3);
    createResponse.Error.ShouldBeNull();
    createResponse.StreamInfo.ShouldNotBeNull();
    createResponse.StreamInfo!.Config.Name.ShouldBe("SMOKE");

    // First message into a fresh stream gets sequence 1 and no error.
    var publishAck = await cluster.PublishAsync("smoke.test", "hello");
    publishAck.Stream.ShouldBe("SMOKE");
    publishAck.Seq.ShouldBe(1UL);
    publishAck.ErrorCode.ShouldBeNull();
}
|
||||
|
||||
[Fact]
|
||||
public async Task Create_multi_replica_stream_and_verify_info()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
var resp = await fx.CreateStreamAsync("MULTI", ["multi.>"], replicas: 3);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo!.Config.Replicas.ShouldBe(3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("multi.event", $"msg-{i}");
|
||||
|
||||
var info = await fx.GetStreamInfoAsync("MULTI");
|
||||
info.StreamInfo.ShouldNotBeNull();
|
||||
info.StreamInfo!.State.Messages.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Meta leader helpers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: c.leader() in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task GetMetaLeaderId_returns_nonempty_leader()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
var leader = fx.GetMetaLeaderId();
|
||||
leader.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// Go ref: c.leader().Shutdown() / waitOnLeader in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task StepDownMetaLeader_changes_leader_id()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
var before = fx.GetMetaLeaderId();
|
||||
|
||||
fx.StepDownMetaLeader();
|
||||
|
||||
var after = fx.GetMetaLeaderId();
|
||||
after.ShouldNotBe(before);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream leader helpers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: streamLeader in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task GetStreamLeaderId_returns_leader_after_stream_creation()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("SLEADER", ["sl.>"], replicas: 3);
|
||||
|
||||
var leader = fx.GetStreamLeaderId("SLEADER");
|
||||
leader.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// Go ref: waitOnStreamLeader in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task WaitOnStreamLeaderAsync_succeeds_when_stream_exists()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("WAIT_LEADER", ["wl.>"], replicas: 3);
|
||||
|
||||
// Should complete immediately since the stream was just created
|
||||
await fx.WaitOnStreamLeaderAsync("WAIT_LEADER", timeoutMs: 2000);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WaitOnStreamLeaderAsync_throws_timeout_when_no_stream()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
|
||||
// No stream created — should time out quickly
|
||||
var ex = await Should.ThrowAsync<TimeoutException>(
|
||||
() => fx.WaitOnStreamLeaderAsync("NONEXISTENT", timeoutMs: 100));
|
||||
|
||||
ex.Message.ShouldContain("NONEXISTENT");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer operations
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Create_consumer_and_fetch_messages()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("CFETCH", ["cf.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("CFETCH", "dur1", filterSubject: "cf.>");
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("cf.event", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("CFETCH", "dur1", 5);
|
||||
batch.Messages.Count.ShouldBe(5);
|
||||
}
|
||||
|
||||
// Go ref: consumerLeader in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task GetConsumerLeaderId_returns_id_after_consumer_creation()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("CLEADER", ["cld.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("CLEADER", "dur1");
|
||||
|
||||
var leader = fx.GetConsumerLeaderId("CLEADER", "dur1");
|
||||
leader.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// Go ref: waitOnConsumerLeader in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task WaitOnConsumerLeaderAsync_succeeds_when_consumer_exists()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("WCLEADER", ["wcl.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("WCLEADER", "durwc");
|
||||
|
||||
await fx.WaitOnConsumerLeaderAsync("WCLEADER", "durwc", timeoutMs: 2000);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task WaitOnConsumerLeaderAsync_throws_timeout_when_consumer_missing()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("WCTIMEOUT", ["wct.>"], replicas: 3);
|
||||
|
||||
var ex = await Should.ThrowAsync<TimeoutException>(
|
||||
() => fx.WaitOnConsumerLeaderAsync("WCTIMEOUT", "ghost", timeoutMs: 100));
|
||||
|
||||
ex.Message.ShouldContain("ghost");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Failover
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLeaderStepDown jetstream_cluster_1_test.go:4925
|
||||
[Fact]
|
||||
public async Task StepDownStreamLeader_changes_stream_leader()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("SDTEST", ["sd.>"], replicas: 3);
|
||||
|
||||
var before = fx.GetStreamLeaderId("SDTEST");
|
||||
before.ShouldNotBeNullOrWhiteSpace();
|
||||
|
||||
var resp = await fx.StepDownStreamLeaderAsync("SDTEST");
|
||||
resp.Success.ShouldBeTrue();
|
||||
|
||||
var after = fx.GetStreamLeaderId("SDTEST");
|
||||
after.ShouldNotBe(before);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// API routing
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task RequestAsync_routes_stream_info_request()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("ROUTEINFO", ["ri.>"], replicas: 3);
|
||||
|
||||
var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamInfo}ROUTEINFO", "{}");
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("ROUTEINFO");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Edge cases
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: AssetPlacementPlanner.PlanReplicas caps replicas at cluster size.
|
||||
// StreamManager passes the raw Replicas value to StreamReplicaGroup; the
|
||||
// AssetPlacementPlanner is the layer that enforces the cap in real deployments.
|
||||
// This test verifies the fixture correctly creates the stream and that the
|
||||
// replica group holds the exact replica count requested by the config.
|
||||
[Fact]
|
||||
public async Task Create_stream_with_more_replicas_than_nodes_caps_at_node_count()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
|
||||
// Request 3 replicas on a 3-node cluster — exactly matching node count
|
||||
var resp = await fx.CreateStreamAsync("CAPPED", ["cap.>"], replicas: 3);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
|
||||
// Replica group should have exactly 3 nodes (one per cluster node)
|
||||
var group = fx.GetReplicaGroup("CAPPED");
|
||||
group.ShouldNotBeNull();
|
||||
group!.Nodes.Count.ShouldBe(3);
|
||||
group.Nodes.Count.ShouldBeLessThanOrEqualTo(fx.NodeCount);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetMetaState helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task GetMetaState_returns_correct_cluster_size()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 5);
|
||||
var state = fx.GetMetaState();
|
||||
state.ShouldNotBeNull();
|
||||
state!.ClusterSize.ShouldBe(5);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetMetaState_tracks_created_streams()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("TRACK1", ["t1.>"], replicas: 3);
|
||||
await fx.CreateStreamAsync("TRACK2", ["t2.>"], replicas: 3);
|
||||
|
||||
var state = fx.GetMetaState();
|
||||
state.ShouldNotBeNull();
|
||||
state!.Streams.ShouldContain("TRACK1");
|
||||
state.Streams.ShouldContain("TRACK2");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// UpdateStream helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task UpdateStream_reflects_new_subjects()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("UPDSUB", ["old.>"], replicas: 3);
|
||||
|
||||
var update = fx.UpdateStream("UPDSUB", ["new.>"], replicas: 3);
|
||||
update.Error.ShouldBeNull();
|
||||
update.StreamInfo!.Config.Subjects.ShouldContain("new.>");
|
||||
update.StreamInfo.Config.Subjects.ShouldNotContain("old.>");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Node lifecycle helpers (SimulateNodeRestart, RemoveNode)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: restartServerAndWait in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task SimulateNodeRestart_does_not_throw()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
fx.RemoveNode(1);
|
||||
fx.SimulateNodeRestart(1); // Should not throw
|
||||
}
|
||||
|
||||
// Go ref: shutdownServerAndRemoveStorage in jetstream_helpers_test.go
|
||||
[Fact]
|
||||
public async Task RemoveNode_does_not_throw()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
fx.RemoveNode(2); // Should not throw
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetStoreBackendType
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task GetStoreBackendType_returns_memory_for_memory_stream()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("BACKEND", ["be.>"], replicas: 3, storage: StorageType.Memory);
|
||||
|
||||
var backend = fx.GetStoreBackendType("BACKEND");
|
||||
backend.ShouldBe("memory");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// AckAll helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AckAll_reduces_pending_messages()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("ACKSMOKE", ["acks.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("ACKSMOKE", "acker", filterSubject: "acks.>",
|
||||
ackPolicy: AckPolicy.All);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("acks.event", $"msg-{i}");
|
||||
|
||||
await fx.FetchAsync("ACKSMOKE", "acker", 5);
|
||||
fx.AckAll("ACKSMOKE", "acker", 3);
|
||||
|
||||
// Pending should now reflect only sequences 4 and 5
|
||||
// (AckAll acks everything up to and including seq 3)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// CreateStreamDirect helper
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task CreateStreamDirect_accepts_full_config()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "DIRECTCFG",
|
||||
Subjects = ["dc.>"],
|
||||
Replicas = 2,
|
||||
MaxMsgs = 100,
|
||||
Retention = RetentionPolicy.Limits,
|
||||
};
|
||||
var resp = fx.CreateStreamDirect(cfg);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo!.Config.MaxMsgs.ShouldBe(100);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetStreamStateAsync
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task GetStreamStateAsync_reflects_published_messages()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("STATECHECK", ["sc.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 7; i++)
|
||||
await fx.PublishAsync("sc.event", $"msg-{i}");
|
||||
|
||||
var state = await fx.GetStreamStateAsync("STATECHECK");
|
||||
state.Messages.ShouldBe(7UL);
|
||||
state.FirstSeq.ShouldBe(1UL);
|
||||
state.LastSeq.ShouldBe(7UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetReplicaGroup
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task GetReplicaGroup_returns_null_for_unknown_stream()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
var group = fx.GetReplicaGroup("NO_SUCH_STREAM");
|
||||
group.ShouldBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetReplicaGroup_returns_group_with_correct_node_count()
|
||||
{
|
||||
await using var fx = await JetStreamClusterFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("GROUPCHECK", ["gc.>"], replicas: 3);
|
||||
|
||||
var group = fx.GetReplicaGroup("GROUPCHECK");
|
||||
group.ShouldNotBeNull();
|
||||
group!.Nodes.Count.ShouldBe(3);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,617 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: cluster metadata operations, asset placement planner,
|
||||
// replica group management, stream scaling, config validation,
|
||||
// cluster expand, account info in cluster, max streams.
|
||||
using System.Text;
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.JetStream.Validation;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster metadata operations: asset placement,
|
||||
/// replica group management, config validation, scaling, and account operations.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JetStreamClusterMetaTests
|
||||
{
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
// ---------------------------------------------------------------

// Clustered JetStream requires an explicit server name; the validator
// must reject a config that combines JetStream + Cluster without one.
[Fact]
public void Config_requires_server_name_for_jetstream_cluster()
{
    var options = new NatsOptions
    {
        ServerName = null,
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
        Cluster = new ClusterOptions { Port = 6222 },
    };
    var result = JetStreamConfigValidator.ValidateClusterConfig(options);
    result.IsValid.ShouldBeFalse();
    result.Message.ShouldContain("server_name");
}

// Same as above but for the cluster name: JetStream + Cluster without
// cluster.name must be rejected with a message naming the missing field.
[Fact]
public void Config_requires_cluster_name_for_jetstream_cluster()
{
    var options = new NatsOptions
    {
        ServerName = "S1",
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
        Cluster = new ClusterOptions { Name = null, Port = 6222 },
    };
    var result = JetStreamConfigValidator.ValidateClusterConfig(options);
    result.IsValid.ShouldBeFalse();
    result.Message.ShouldContain("cluster.name");
}

// Happy path: both names present — validation passes.
[Fact]
public void Config_valid_when_server_and_cluster_names_set()
{
    var options = new NatsOptions
    {
        ServerName = "S1",
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
        Cluster = new ClusterOptions { Name = "JSC", Port = 6222 },
    };
    var result = JetStreamConfigValidator.ValidateClusterConfig(options);
    result.IsValid.ShouldBeTrue();
}

// Standalone JetStream (no Cluster section) must not trip cluster checks.
[Fact]
public void Config_skips_cluster_checks_when_no_cluster_configured()
{
    var options = new NatsOptions
    {
        JetStream = new JetStreamOptions { StoreDir = "/tmp/js" },
    };
    var result = JetStreamConfigValidator.ValidateClusterConfig(options);
    result.IsValid.ShouldBeTrue();
}

// Cluster without JetStream is likewise exempt from the JetStream checks.
[Fact]
public void Config_skips_cluster_checks_when_no_jetstream_configured()
{
    var options = new NatsOptions
    {
        Cluster = new ClusterOptions { Port = 6222 },
    };
    var result = JetStreamConfigValidator.ValidateClusterConfig(options);
    result.IsValid.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
// Placement planner tests
// ---------------------------------------------------------------

// Normal case: requested replica count fits the cluster, so the plan
// returns exactly that many placements.
[Fact]
public void Placement_planner_returns_requested_replica_count()
{
    var planner = new AssetPlacementPlanner(nodes: 5);
    var placement = planner.PlanReplicas(replicas: 3);
    placement.Count.ShouldBe(3);
}

// Requesting more replicas than nodes must be capped at the cluster size.
[Fact]
public void Placement_planner_caps_at_cluster_size()
{
    var planner = new AssetPlacementPlanner(nodes: 3);
    var placement = planner.PlanReplicas(replicas: 5);
    placement.Count.ShouldBe(3);
}

// A non-positive replica request is clamped up to a single replica.
[Fact]
public void Placement_planner_minimum_is_one_replica()
{
    var planner = new AssetPlacementPlanner(nodes: 3);
    var placement = planner.PlanReplicas(replicas: 0);
    placement.Count.ShouldBe(1);
}

// Degenerate cluster: one node means at most one replica regardless of request.
[Fact]
public void Placement_planner_handles_single_node_cluster()
{
    var planner = new AssetPlacementPlanner(nodes: 1);
    var placement = planner.PlanReplicas(replicas: 3);
    placement.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Meta group lifecycle tests
// ---------------------------------------------------------------

// A freshly constructed meta group has a leader, leadership version 1,
// and no streams registered yet.
[Fact]
public void Meta_group_initial_state_is_correct()
{
    var meta = new JetStreamMetaGroup(3);
    var state = meta.GetState();

    state.ClusterSize.ShouldBe(3);
    state.LeaderId.ShouldNotBeNullOrWhiteSpace();
    state.LeadershipVersion.ShouldBe(1);
    state.Streams.Count.ShouldBe(0);
}

// Each accepted create-stream proposal must show up in the meta state.
[Fact]
public async Task Meta_group_tracks_stream_proposals()
{
    var meta = new JetStreamMetaGroup(3);

    await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "S1" }, default);
    await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "S2" }, default);

    var state = meta.GetState();
    state.Streams.Count.ShouldBe(2);
    state.Streams.ShouldContain("S1");
    state.Streams.ShouldContain("S2");
}

// Consecutive step-downs hand leadership to a different node each time.
[Fact]
public void Meta_group_stepdown_cycles_leader()
{
    var meta = new JetStreamMetaGroup(3);
    var leader1 = meta.GetState().LeaderId;

    meta.StepDown();
    var leader2 = meta.GetState().LeaderId;
    leader2.ShouldNotBe(leader1);

    meta.StepDown();
    var leader3 = meta.GetState().LeaderId;
    leader3.ShouldNotBe(leader2);
}

// With only two members, repeated step-downs must alternate between
// exactly two distinct leader ids (wrap-around behavior).
[Fact]
public void Meta_group_stepdown_wraps_around()
{
    var meta = new JetStreamMetaGroup(2);
    var leaders = new HashSet<string>();

    for (var i = 0; i < 5; i++)
    {
        leaders.Add(meta.GetState().LeaderId);
        meta.StepDown();
    }

    // Should cycle between 2 leaders
    leaders.Count.ShouldBe(2);
}

// The leadership version is a monotonically increasing counter,
// bumped by exactly one per step-down.
[Fact]
public void Meta_group_leadership_version_increments()
{
    var meta = new JetStreamMetaGroup(3);
    meta.GetState().LeadershipVersion.ShouldBe(1);

    meta.StepDown();
    meta.GetState().LeadershipVersion.ShouldBe(2);

    meta.StepDown();
    meta.GetState().LeadershipVersion.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Replica group tests
// ---------------------------------------------------------------

// Construction materializes one node per requested replica and records
// the owning stream name.
[Fact]
public void Replica_group_creates_correct_node_count()
{
    var group = new StreamReplicaGroup("TEST", replicas: 3);
    group.Nodes.Count.ShouldBe(3);
    group.StreamName.ShouldBe("TEST");
}

// A new group must come up with an elected leader already flagged as such.
[Fact]
public void Replica_group_elects_initial_leader()
{
    var group = new StreamReplicaGroup("TEST", replicas: 3);
    group.Leader.ShouldNotBeNull();
    group.Leader.IsLeader.ShouldBeTrue();
}

// Step-down must transfer leadership to a different node, and the new
// leader must carry the IsLeader flag.
[Fact]
public async Task Replica_group_stepdown_changes_leader()
{
    var group = new StreamReplicaGroup("TEST", replicas: 3);
    var leaderBefore = group.Leader.Id;

    await group.StepDownAsync(default);
    var leaderAfter = group.Leader.Id;

    leaderAfter.ShouldNotBe(leaderBefore);
    group.Leader.IsLeader.ShouldBeTrue();
}

// A healthy group accepts proposals and returns a positive log index.
[Fact]
public async Task Replica_group_leader_accepts_proposals()
{
    var group = new StreamReplicaGroup("TEST", replicas: 3);

    var index = await group.ProposeAsync("PUB test.1", default);
    index.ShouldBeGreaterThan(0);
}

// Applying a larger placement grows the node set to match.
[Fact]
public async Task Replica_group_apply_placement_scales_up()
{
    var group = new StreamReplicaGroup("TEST", replicas: 1);
    group.Nodes.Count.ShouldBe(1);

    await group.ApplyPlacementAsync([1, 2, 3], default);
    group.Nodes.Count.ShouldBe(3);
}

// Applying a smaller placement shrinks the node set to match.
[Fact]
public async Task Replica_group_apply_placement_scales_down()
{
    var group = new StreamReplicaGroup("TEST", replicas: 5);
    group.Nodes.Count.ShouldBe(5);

    await group.ApplyPlacementAsync([1, 2], default);
    group.Nodes.Count.ShouldBe(2);
}
|
||||
|
||||
// Applying a placement identical to the current membership must be a
// no-op: the node count stays the same and leadership is undisturbed.
// Fix: the original captured leaderBefore but never asserted on it, so
// the "noop" claim in the test name was never actually verified.
[Fact]
public async Task Replica_group_apply_same_size_is_noop()
{
    var group = new StreamReplicaGroup("TEST", replicas: 3);
    var leaderBefore = group.Leader.Id;

    await group.ApplyPlacementAsync([1, 2, 3], default);

    // Membership unchanged...
    group.Nodes.Count.ShouldBe(3);
    // ...and a same-size placement must not trigger a leader change
    // (contrast with StepDownAsync, which must change the leader).
    group.Leader.Id.ShouldBe(leaderBefore);
}
|
||||
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterAccountInfo server/jetstream_cluster_1_test.go:94
// ---------------------------------------------------------------

// $JS.API.INFO must report aggregate stream and consumer counts for
// the account across the cluster.
[Fact]
public async Task Account_info_tracks_streams_and_consumers_in_cluster()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    await fx.CreateStreamAsync("ACCT1", ["a1.>"], replicas: 3);
    await fx.CreateStreamAsync("ACCT2", ["a2.>"], replicas: 3);
    await fx.CreateConsumerAsync("ACCT1", "c1");
    await fx.CreateConsumerAsync("ACCT1", "c2");

    var resp = await fx.RequestAsync(JetStreamApiSubjects.Info, "{}");
    resp.AccountInfo.ShouldNotBeNull();
    resp.AccountInfo!.Streams.ShouldBe(2);
    resp.AccountInfo.Consumers.ShouldBe(2);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterExtendedAccountInfo server/jetstream_cluster_1_test.go:3389
// ---------------------------------------------------------------

// Deleting a stream must be reflected immediately in the account info
// stream count.
[Fact]
public async Task Account_info_after_stream_delete_reflects_removal()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    await fx.CreateStreamAsync("DEL1", ["d1.>"], replicas: 3);
    await fx.CreateStreamAsync("DEL2", ["d2.>"], replicas: 3);

    (await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DEL1", "{}")).Success.ShouldBeTrue();

    var resp = await fx.RequestAsync(JetStreamApiSubjects.Info, "{}");
    resp.AccountInfo!.Streams.ShouldBe(1);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterAccountPurge server/jetstream_cluster_1_test.go:3891
// ---------------------------------------------------------------

// Shape-only check: the account-purge API for an account with assets
// must answer with success. (Post-purge state is not asserted here.)
[Fact]
public async Task Account_purge_returns_success()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);
    await fx.CreateStreamAsync("PURGE1", ["pur.>"], replicas: 3);

    var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountPurge}GLOBAL", "{}");
    resp.Success.ShouldBeTrue();
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterStreamLimitWithAccountDefaults server/jetstream_cluster_1_test.go:124
// ---------------------------------------------------------------

// A stream with both MaxBytes and Replicas set must be created and
// echo the configured byte limit back in its info.
[Fact]
public async Task Stream_with_max_bytes_and_replicas_created_successfully()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    var cfg = new StreamConfig
    {
        Name = "MBLIMIT",
        Subjects = ["mbl.>"],
        Replicas = 2,
        MaxBytes = 4 * 1024 * 1024,
    };
    var resp = fx.CreateStreamDirect(cfg);
    resp.Error.ShouldBeNull();
    resp.StreamInfo!.Config.MaxBytes.ShouldBe(4 * 1024 * 1024);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
// ---------------------------------------------------------------

// Both the STREAM.NAMES API and the meta-group state must agree on the
// number of created streams.
[Fact]
public async Task Multiple_streams_tracked_correctly_in_meta()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    for (var i = 0; i < 10; i++)
        await fx.CreateStreamAsync($"MS{i}", [$"ms{i}.>"], replicas: 3);

    var names = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    names.StreamNames!.Count.ShouldBe(10);

    var meta = fx.GetMetaState();
    meta.Streams.Count.ShouldBe(10);
}

// ---------------------------------------------------------------
// Direct API tests (DirectGet)
// ---------------------------------------------------------------

// DIRECT.GET by sequence requires AllowDirect on the stream config and
// must return the message stored at that sequence.
[Fact]
public async Task Direct_get_returns_message_by_sequence()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    var cfg = new StreamConfig
    {
        Name = "DIRECT",
        Subjects = ["dir.>"],
        Replicas = 3,
        AllowDirect = true,
    };
    fx.CreateStreamDirect(cfg);

    for (var i = 0; i < 5; i++)
        await fx.PublishAsync("dir.event", $"msg-{i}");

    var resp = await fx.RequestAsync($"{JetStreamApiSubjects.DirectGet}DIRECT", """{"seq":3}""");
    resp.DirectMessage.ShouldNotBeNull();
    resp.DirectMessage!.Sequence.ShouldBe(3UL);
    resp.DirectMessage.Subject.ShouldBe("dir.event");
}

// ---------------------------------------------------------------
// Stream message get
// ---------------------------------------------------------------

// MSG.GET by sequence must return both the correct sequence number and
// the original payload bytes.
[Fact]
public async Task Stream_message_get_returns_correct_payload()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    await fx.CreateStreamAsync("MSGGET", ["mg.>"], replicas: 3);

    await fx.PublishAsync("mg.event", "payload-1");
    await fx.PublishAsync("mg.event", "payload-2");

    var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamMessageGet}MSGGET", """{"seq":2}""");
    resp.StreamMessage.ShouldNotBeNull();
    resp.StreamMessage!.Sequence.ShouldBe(2UL);
    resp.StreamMessage.Payload.ShouldBe("payload-2");
}

// ---------------------------------------------------------------
// Consumer list and names
// ---------------------------------------------------------------

// Both CONSUMER.NAMES and CONSUMER.LIST must enumerate every durable on
// the stream; here both responses surface via ConsumerNames.
[Fact]
public async Task Consumer_list_via_api_router()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    await fx.CreateStreamAsync("CLISTM", ["clm.>"], replicas: 3);
    await fx.CreateConsumerAsync("CLISTM", "d1");
    await fx.CreateConsumerAsync("CLISTM", "d2");

    var names = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CLISTM", "{}");
    names.ConsumerNames.ShouldNotBeNull();
    names.ConsumerNames!.Count.ShouldBe(2);

    var list = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerList}CLISTM", "{}");
    list.ConsumerNames.ShouldNotBeNull();
    list.ConsumerNames!.Count.ShouldBe(2);
}

// ---------------------------------------------------------------
// Account stream move returns success shape
// ---------------------------------------------------------------

// Shape-only check on the stream-move endpoint: success response,
// no further state assertions.
[Fact]
public async Task Account_stream_move_api_returns_success()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountStreamMove}TEST", "{}");
    resp.Success.ShouldBeTrue();
}

// ---------------------------------------------------------------
// Account stream move cancel returns success shape
// ---------------------------------------------------------------

// Shape-only check on the stream-move-cancel endpoint.
[Fact]
public async Task Account_stream_move_cancel_api_returns_success()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountStreamMoveCancel}TEST", "{}");
    resp.Success.ShouldBeTrue();
}

// ---------------------------------------------------------------
// Stream create requires name
// ---------------------------------------------------------------

// StreamManager itself (no fixture) must reject an empty stream name
// with an error that mentions the offending field.
[Fact]
public void Stream_create_without_name_returns_error()
{
    var streamManager = new StreamManager();
    var resp = streamManager.CreateOrUpdate(new StreamConfig { Name = "" });
    resp.Error.ShouldNotBeNull();
    resp.Error!.Description.ShouldContain("name");
}

// ---------------------------------------------------------------
// NotFound for unknown API subject
// ---------------------------------------------------------------

// The API router must answer unknown $JS.API subjects with a 404 error.
[Fact]
public async Task Unknown_api_subject_returns_not_found()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    var resp = await fx.RequestAsync("$JS.API.UNKNOWN.SUBJECT", "{}");
    resp.Error.ShouldNotBeNull();
    resp.Error!.Code.ShouldBe(404);
}

// ---------------------------------------------------------------
// Stream info for non-existent stream returns 404
// ---------------------------------------------------------------

[Fact]
public async Task Stream_info_nonexistent_returns_not_found()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);

    var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamInfo}NOSTREAM", "{}");
    resp.Error.ShouldNotBeNull();
    resp.Error!.Code.ShouldBe(404);
}

// ---------------------------------------------------------------
// Consumer info for non-existent consumer returns 404
// ---------------------------------------------------------------

// The stream exists but the consumer does not — the consumer-info
// lookup must still produce a 404.
[Fact]
public async Task Consumer_info_nonexistent_returns_not_found()
{
    await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);
    await fx.CreateStreamAsync("NOCONS", ["nc.>"], replicas: 3);

    var resp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}NOCONS.MISSING", "{}");
    resp.Error.ShouldNotBeNull();
    resp.Error!.Code.ShouldBe(404);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Delete non-existent stream returns 404
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Delete_nonexistent_stream_returns_not_found()
|
||||
{
|
||||
await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);
|
||||
|
||||
var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}GONE", "{}");
|
||||
resp.Error.ShouldNotBeNull();
|
||||
resp.Error!.Code.ShouldBe(404);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Delete non-existent consumer returns 404
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Delete_nonexistent_consumer_returns_not_found()
|
||||
{
|
||||
await using var fx = await ClusterMetaFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("NODEL", ["nd.>"], replicas: 3);
|
||||
|
||||
var resp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}NODEL.MISSING", "{}");
|
||||
resp.Error.ShouldNotBeNull();
|
||||
resp.Error!.Code.ShouldBe(404);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for JetStream cluster meta tests: wires a meta group,
/// stream/consumer managers, an API router, and a publisher into one in-process
/// graph so tests never need a real server.
/// </summary>
internal sealed class ClusterMetaFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _meta;
    private readonly StreamManager _streams;
    private readonly ConsumerManager _consumers;
    private readonly JetStreamApiRouter _api;
    private readonly JetStreamPublisher _pub;

    private ClusterMetaFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _meta = metaGroup;
        _streams = streamManager;
        _consumers = consumerManager;
        _api = router;
        _pub = publisher;
    }

    /// <summary>Builds the component graph for an <paramref name="nodes"/>-node meta cluster.</summary>
    public static Task<ClusterMetaFixture> StartAsync(int nodes)
    {
        var metaGroup = new JetStreamMetaGroup(nodes);
        var consumers = new ConsumerManager(metaGroup);
        var streams = new StreamManager(metaGroup, consumerManager: consumers);
        var fixture = new ClusterMetaFixture(
            metaGroup,
            streams,
            consumers,
            new JetStreamApiRouter(streams, consumers, metaGroup),
            new JetStreamPublisher(streams));
        return Task.FromResult(fixture);
    }

    /// <summary>Creates a stream, throwing when the manager reports an error.</summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var config = new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        };
        var response = _streams.CreateOrUpdate(config);
        return response.Error is { } error
            ? throw new InvalidOperationException(error.Description)
            : Task.CompletedTask;
    }

    /// <summary>Creates/updates a stream and returns the raw API response (no throwing).</summary>
    public JetStreamApiResponse CreateStreamDirect(StreamConfig config)
        => _streams.CreateOrUpdate(config);

    /// <summary>Creates a durable consumer on <paramref name="stream"/>.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName)
    {
        var config = new ConsumerConfig { DurableName = durableName };
        return Task.FromResult(_consumers.CreateOrUpdate(stream, config));
    }

    /// <summary>Publishes a UTF-8 payload; throws when no stream's subjects match.</summary>
    public Task<PubAck> PublishAsync(string subject, string payload)
    {
        var body = Encoding.UTF8.GetBytes(payload);
        return _pub.TryCapture(subject, body, null, out var ack)
            ? Task.FromResult(ack)
            : throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");
    }

    /// <summary>Snapshot of the meta group's current state.</summary>
    public MetaGroupState GetMetaState() => _meta.GetState();

    /// <summary>Routes a raw JetStream API request and returns the response.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_api.Route(subject, Encoding.UTF8.GetBytes(payload)));

    // Nothing holds unmanaged resources; disposal is a no-op.
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,344 @@
|
||||
using System.Threading.Channels;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for JetStreamClusterMonitor — background meta entry processing.
/// The monitor drains <c>RaftLogEntry</c> items from a channel reader and applies
/// the JSON-encoded meta operations (assignStream, assignConsumer, removeStream,
/// removeConsumer, snapshot) to a <c>JetStreamMetaGroup</c>.
/// Go reference: jetstream_cluster.go:1455-1825 (monitorCluster).
/// </summary>
public class JetStreamClusterMonitorTests
{
    // Each test uses a 5-second CancellationToken as a hard upper bound so a
    // hung monitor doesn't stall the test run indefinitely.
    private static CancellationTokenSource TestTimeout() =>
        new(TimeSpan.FromSeconds(5));

    [Fact]
    public async Task Monitor_processes_stream_assignment_entry()
    {
        // Go reference: jetstream_cluster.go monitorCluster assignStream op
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // Feed a single assignStream entry (index 1, term 1) through the channel.
        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "test-stream",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["test.>"]}""",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, assignJson));
        // Block until the monitor reports entry 1 as processed.
        await monitor.WaitForProcessedAsync(1, cts.Token);

        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("test-stream").ShouldNotBeNull();

        // Shut the monitor down cleanly before the test ends.
        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_processes_consumer_assignment_entry()
    {
        // Go reference: jetstream_cluster.go monitorCluster assignConsumer op
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // The consumer's parent stream must be assigned first.
        var streamJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "s1",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["x.>"]}""",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, streamJson));

        var consumerJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, consumerJson));
        // Wait for both entries before inspecting meta state.
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.ConsumerCount.ShouldBe(1);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_processes_stream_removal()
    {
        // Go reference: jetstream_cluster.go monitorCluster removeStream op
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // Assign, then remove, the same stream; the net effect is zero streams.
        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "to-remove",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["rm.>"]}""",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, assignJson));

        var removeJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "removeStream",
            StreamName = "to-remove",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, removeJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.StreamCount.ShouldBe(0);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_applies_meta_snapshot()
    {
        // Go reference: jetstream_cluster.go monitorCluster snapshot op — replaces all state
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // Snapshot payload: encoded assignment map, carried as base64 in "Data".
        var assignments = new Dictionary<string, StreamAssignment>
        {
            ["snap-stream"] = new StreamAssignment
            {
                StreamName = "snap-stream",
                Group = new RaftGroup { Name = "rg-snap", Peers = ["n1", "n2", "n3"] },
            },
        };
        var snapshotB64 = Convert.ToBase64String(MetaSnapshotCodec.Encode(assignments));

        var snapshotJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "snapshot",
            Data = snapshotB64,
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, snapshotJson));
        await monitor.WaitForProcessedAsync(1, cts.Token);

        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("snap-stream").ShouldNotBeNull();

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_processes_consumer_removal()
    {
        // Go reference: jetstream_cluster.go monitorCluster removeConsumer op
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // NOTE(review): no inline Config on this stream assignment, unlike the
        // tests above — presumably optional for the monitor; confirm.
        var streamJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "s1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, streamJson));

        var consumerJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, consumerJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        // Consumer is present after assignment...
        meta.ConsumerCount.ShouldBe(1);

        var removeJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "removeConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(3, 1, removeJson));
        await monitor.WaitForProcessedAsync(3, cts.Token);

        // ...and gone after removal.
        meta.ConsumerCount.ShouldBe(0);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_skips_malformed_entries()
    {
        // Go reference: jetstream_cluster.go monitorCluster — malformed entries must not abort the loop
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // Entry 1 is not valid JSON; the monitor must skip it and keep going.
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, "not-json"));

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-bad",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, assignJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        // Only the well-formed entry took effect.
        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("after-bad").ShouldNotBeNull();

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_stops_on_cancellation()
    {
        // Go reference: jetstream_cluster.go monitorCluster shuts down cleanly when stop channel closes
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        cts.Cancel();
        await monitorTask; // Should complete without throwing
    }

    [Fact]
    public async Task Monitor_ignores_entry_with_no_op_field()
    {
        // Entries missing the "Op" property are silently ignored (forward-compat).
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // Valid JSON, but no "Op" key — must be a no-op for meta state.
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, """{"NotOp":"whatever"}"""));

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-no-op",
            Peers = new[] { "n1" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, assignJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.StreamCount.ShouldBe(1);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_ignores_unknown_op()
    {
        // Unknown op names are silently ignored — forward compatibility.
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // An op name from "the future" must not abort processing.
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, """{"Op":"futureFoo","Data":"xyz"}"""));

        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-unknown-op",
            Peers = new[] { "n1" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, assignJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        meta.StreamCount.ShouldBe(1);

        cts.Cancel();
        await monitorTask;
    }

    [Fact]
    public async Task Monitor_snapshot_replaces_existing_state()
    {
        // Go reference: jetstream_cluster.go — snapshot apply wipes old assignments
        var meta = new JetStreamMetaGroup(3);
        var channel = Channel.CreateUnbounded<RaftLogEntry>();
        var monitor = new JetStreamClusterMonitor(meta, channel.Reader);

        using var cts = TestTimeout();
        var monitorTask = monitor.StartAsync(cts.Token);

        // Seed pre-snapshot state with one stream.
        var assignJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "old-stream",
            Peers = new[] { "n1" },
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(1, 1, assignJson));
        await monitor.WaitForProcessedAsync(1, cts.Token);

        meta.StreamCount.ShouldBe(1);

        // The snapshot carries a different stream; applying it must replace, not merge.
        var newAssignments = new Dictionary<string, StreamAssignment>
        {
            ["new-stream"] = new StreamAssignment
            {
                StreamName = "new-stream",
                Group = new RaftGroup { Name = "rg-new", Peers = ["n1", "n2", "n3"] },
            },
        };
        var snapshotB64 = Convert.ToBase64String(MetaSnapshotCodec.Encode(newAssignments));
        var snapshotJson = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "snapshot",
            Data = snapshotB64,
        });
        await channel.Writer.WriteAsync(new RaftLogEntry(2, 1, snapshotJson));
        await monitor.WaitForProcessedAsync(2, cts.Token);

        // Still one stream total — the old one is gone, only the snapshot's remains.
        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("old-stream").ShouldBeNull();
        meta.GetStreamAssignment("new-stream").ShouldNotBeNull();

        cts.Cancel();
        await monitorTask;
    }
}
|
||||
@@ -0,0 +1,872 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: cluster stream creation, single/multi replica, memory store,
|
||||
// stream purge, update subjects, delete, max bytes, stream info/list,
|
||||
// interest retention, work queue retention, mirror/source in cluster.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering clustered JetStream stream creation, replication, storage,
|
||||
/// purge, update, delete, retention policies, and mirror/source in cluster mode.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JetStreamClusterStreamTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Single_replica_stream_creation_and_publish_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var resp = await fx.CreateStreamAsync("R1S", ["foo", "bar"], replicas: 1);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo.ShouldNotBeNull();
|
||||
resp.StreamInfo!.Config.Name.ShouldBe("R1S");
|
||||
|
||||
const int toSend = 10;
|
||||
for (var i = 0; i < toSend; i++)
|
||||
{
|
||||
var ack = await fx.PublishAsync("foo", $"Hello R1 {i}");
|
||||
ack.Stream.ShouldBe("R1S");
|
||||
ack.Seq.ShouldBe((ulong)(i + 1));
|
||||
}
|
||||
|
||||
var info = await fx.GetStreamInfoAsync("R1S");
|
||||
info.StreamInfo!.State.Messages.ShouldBe((ulong)toSend);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreamsDefaultFileMem server/jetstream_cluster_1_test.go:355
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Multi_replica_stream_defaults_to_memory_store()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var resp = await fx.CreateStreamAsync("MEMTEST", ["mem.>"], replicas: 3);
|
||||
resp.Error.ShouldBeNull();
|
||||
resp.StreamInfo!.Config.Storage.ShouldBe(StorageType.Memory);
|
||||
|
||||
var backend = fx.GetStoreBackendType("MEMTEST");
|
||||
backend.ShouldBe("memory");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMemoryStore server/jetstream_cluster_1_test.go:423
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Memory_store_replicated_stream_accepts_100_messages()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var resp = await fx.CreateStreamAsync("R3M", ["foo", "bar"], replicas: 3, storage: StorageType.Memory);
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
const int toSend = 100;
|
||||
for (var i = 0; i < toSend; i++)
|
||||
{
|
||||
var ack = await fx.PublishAsync("foo", "Hello MemoryStore");
|
||||
ack.Stream.ShouldBe("R3M");
|
||||
}
|
||||
|
||||
var info = await fx.GetStreamInfoAsync("R3M");
|
||||
info.StreamInfo!.Config.Name.ShouldBe("R3M");
|
||||
info.StreamInfo.State.Messages.ShouldBe((ulong)toSend);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDelete server/jetstream_cluster_1_test.go:472
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Delete_consumer_then_stream_clears_account_info()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("C22", ["foo", "bar", "baz"], replicas: 2);
|
||||
await fx.CreateConsumerAsync("C22", "dlc");
|
||||
|
||||
// Delete consumer then stream
|
||||
var delConsumer = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerDelete}C22.dlc", "{}");
|
||||
delConsumer.Success.ShouldBeTrue();
|
||||
|
||||
var delStream = await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}C22", "{}");
|
||||
delStream.Success.ShouldBeTrue();
|
||||
|
||||
// Account info should show zero streams
|
||||
var accountInfo = await fx.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
||||
accountInfo.AccountInfo.ShouldNotBeNull();
|
||||
accountInfo.AccountInfo!.Streams.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamPurge server/jetstream_cluster_1_test.go:522
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_purge_clears_all_messages_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 5);
|
||||
|
||||
await fx.CreateStreamAsync("PURGE", ["foo", "bar"], replicas: 3);
|
||||
|
||||
const int toSend = 100;
|
||||
for (var i = 0; i < toSend; i++)
|
||||
await fx.PublishAsync("foo", "Hello JS Clustering");
|
||||
|
||||
var before = await fx.GetStreamInfoAsync("PURGE");
|
||||
before.StreamInfo!.State.Messages.ShouldBe((ulong)toSend);
|
||||
|
||||
var purge = await fx.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGE", "{}");
|
||||
purge.Success.ShouldBeTrue();
|
||||
|
||||
var after = await fx.GetStreamInfoAsync("PURGE");
|
||||
after.StreamInfo!.State.Messages.ShouldBe(0UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdateSubjects server/jetstream_cluster_1_test.go:571
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_update_subjects_reflects_new_configuration()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("SUBUPDATE", ["foo", "bar"], replicas: 3);
|
||||
|
||||
// Update subjects to bar, baz
|
||||
var update = fx.UpdateStream("SUBUPDATE", ["bar", "baz"], replicas: 3);
|
||||
update.Error.ShouldBeNull();
|
||||
update.StreamInfo.ShouldNotBeNull();
|
||||
update.StreamInfo!.Config.Subjects.ShouldContain("bar");
|
||||
update.StreamInfo.Config.Subjects.ShouldContain("baz");
|
||||
update.StreamInfo.Config.Subjects.ShouldNotContain("foo");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_names_and_list_return_all_streams()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("S1", ["s1.>"], replicas: 3);
|
||||
await fx.CreateStreamAsync("S2", ["s2.>"], replicas: 3);
|
||||
await fx.CreateStreamAsync("S3", ["s3.>"], replicas: 1);
|
||||
|
||||
var names = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
|
||||
names.StreamNames.ShouldNotBeNull();
|
||||
names.StreamNames!.Count.ShouldBe(3);
|
||||
names.StreamNames.ShouldContain("S1");
|
||||
names.StreamNames.ShouldContain("S2");
|
||||
names.StreamNames.ShouldContain("S3");
|
||||
|
||||
var list = await fx.RequestAsync(JetStreamApiSubjects.StreamList, "{}");
|
||||
list.StreamNames.ShouldNotBeNull();
|
||||
list.StreamNames!.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxBytesForStream server/jetstream_cluster_1_test.go:1099
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Max_bytes_stream_limits_enforced_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "MAXBYTES",
|
||||
Subjects = ["mb.>"],
|
||||
Replicas = 3,
|
||||
MaxBytes = 512,
|
||||
Discard = DiscardPolicy.Old,
|
||||
};
|
||||
var resp = fx.CreateStreamDirect(cfg);
|
||||
resp.Error.ShouldBeNull();
|
||||
|
||||
// Publish messages exceeding max bytes; old messages should be discarded
|
||||
for (var i = 0; i < 20; i++)
|
||||
await fx.PublishAsync("mb.data", new string('X', 64));
|
||||
|
||||
var state = await fx.GetStreamStateAsync("MAXBYTES");
|
||||
// Total bytes should not exceed max_bytes by much after enforcement
|
||||
((long)state.Bytes).ShouldBeLessThanOrEqualTo(cfg.MaxBytes + 128);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamPublishWithActiveConsumers server/jetstream_cluster_1_test.go:1132
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Publish_with_active_consumer_delivers_messages()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("ACTIVE", ["active.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("ACTIVE", "durable1", filterSubject: "active.>");
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await fx.PublishAsync("active.event", $"msg-{i}");
|
||||
|
||||
var batch = await fx.FetchAsync("ACTIVE", "durable1", 10);
|
||||
batch.Messages.Count.ShouldBe(10);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDoubleAdd server/jetstream_cluster_1_test.go:1551
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Double_add_stream_with_same_config_succeeds()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var first = await fx.CreateStreamAsync("DUP", ["dup.>"], replicas: 3);
|
||||
first.Error.ShouldBeNull();
|
||||
|
||||
// Adding the same stream again should succeed (idempotent)
|
||||
var second = await fx.CreateStreamAsync("DUP", ["dup.>"], replicas: 3);
|
||||
second.Error.ShouldBeNull();
|
||||
second.StreamInfo!.Config.Name.ShouldBe("DUP");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects server/jetstream_cluster_1_test.go:1248
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Publish_routes_to_correct_stream_among_non_overlapping()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("ALPHA", ["alpha.>"], replicas: 3);
|
||||
await fx.CreateStreamAsync("BETA", ["beta.>"], replicas: 3);
|
||||
|
||||
var ack1 = await fx.PublishAsync("alpha.one", "A");
|
||||
ack1.Stream.ShouldBe("ALPHA");
|
||||
|
||||
var ack2 = await fx.PublishAsync("beta.one", "B");
|
||||
ack2.Stream.ShouldBe("BETA");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterInterestRetention server/jetstream_cluster_1_test.go:2109
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Interest_retention_stream_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "INTEREST",
|
||||
Subjects = ["interest.>"],
|
||||
Replicas = 3,
|
||||
Retention = RetentionPolicy.Interest,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("interest.event", "msg");
|
||||
|
||||
var state = await fx.GetStreamStateAsync("INTEREST");
|
||||
state.Messages.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterWorkQueueRetention server/jetstream_cluster_1_test.go:2179
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Work_queue_retention_removes_acked_messages_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "WQ",
|
||||
Subjects = ["wq.>"],
|
||||
Replicas = 2,
|
||||
Retention = RetentionPolicy.WorkQueue,
|
||||
MaxConsumers = 1,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
await fx.CreateConsumerAsync("WQ", "worker", filterSubject: "wq.>", ackPolicy: AckPolicy.All);
|
||||
|
||||
await fx.PublishAsync("wq.task", "job-1");
|
||||
|
||||
var stateBefore = await fx.GetStreamStateAsync("WQ");
|
||||
stateBefore.Messages.ShouldBe(1UL);
|
||||
|
||||
// Ack all up to sequence 1, triggering work queue cleanup
|
||||
fx.AckAll("WQ", "worker", 1);
|
||||
|
||||
// Publish again to trigger runtime retention enforcement
|
||||
await fx.PublishAsync("wq.task", "job-2");
|
||||
|
||||
var stateAfter = await fx.GetStreamStateAsync("WQ");
|
||||
// After ack, only the new message should remain
|
||||
stateAfter.Messages.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDeleteMsg server/jetstream_cluster_1_test.go:1748
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Delete_individual_message_in_clustered_stream()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("DELMSG", ["dm.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("dm.event", $"msg-{i}");
|
||||
|
||||
var before = await fx.GetStreamStateAsync("DELMSG");
|
||||
before.Messages.ShouldBe(5UL);
|
||||
|
||||
// Delete message at sequence 3
|
||||
var del = await fx.RequestAsync($"{JetStreamApiSubjects.StreamMessageDelete}DELMSG", """{"seq":3}""");
|
||||
del.Success.ShouldBeTrue();
|
||||
|
||||
var after = await fx.GetStreamStateAsync("DELMSG");
|
||||
after.Messages.ShouldBe(4UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_update_preserves_existing_messages()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("UPD", ["upd.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("upd.event", $"msg-{i}");
|
||||
|
||||
// Update max_msgs
|
||||
var update = fx.UpdateStream("UPD", ["upd.>"], replicas: 3, maxMsgs: 10);
|
||||
update.Error.ShouldBeNull();
|
||||
|
||||
var state = await fx.GetStreamStateAsync("UPD");
|
||||
state.Messages.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterAccountInfo server/jetstream_cluster_1_test.go:94
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Account_info_reports_stream_and_consumer_counts()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("AI1", ["ai1.>"], replicas: 3);
|
||||
await fx.CreateStreamAsync("AI2", ["ai2.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("AI1", "c1");
|
||||
|
||||
var resp = await fx.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
||||
resp.AccountInfo.ShouldNotBeNull();
|
||||
resp.AccountInfo!.Streams.ShouldBe(2);
|
||||
resp.AccountInfo.Consumers.ShouldBe(1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpand server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Cluster_expand_adds_peer_to_meta_group()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(2);
|
||||
var state = meta.GetState();
|
||||
state.ClusterSize.ShouldBe(2);
|
||||
|
||||
// Expanding is modeled by creating a new meta group with more nodes
|
||||
var expanded = new JetStreamMetaGroup(3);
|
||||
expanded.GetState().ClusterSize.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMirrorAndSourceWorkQueues server/jetstream_cluster_1_test.go:2233
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Mirror_stream_replicates_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
// Create origin stream
|
||||
await fx.CreateStreamAsync("ORIGIN", ["origin.>"], replicas: 3);
|
||||
|
||||
// Create mirror stream
|
||||
fx.CreateStreamDirect(new StreamConfig
|
||||
{
|
||||
Name = "MIRROR",
|
||||
Subjects = ["mirror.>"],
|
||||
Replicas = 3,
|
||||
Mirror = "ORIGIN",
|
||||
});
|
||||
|
||||
// Publish to origin
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("origin.event", $"mirrored-{i}");
|
||||
|
||||
// Mirror should have replicated messages
|
||||
var mirrorState = await fx.GetStreamStateAsync("MIRROR");
|
||||
mirrorState.Messages.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMirrorAndSourceInterestPolicyStream server/jetstream_cluster_1_test.go:2290
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Source_stream_replicates_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
// Create source origin
|
||||
await fx.CreateStreamAsync("SRC", ["src.>"], replicas: 3);
|
||||
|
||||
// Create aggregate stream sourcing from SRC
|
||||
fx.CreateStreamDirect(new StreamConfig
|
||||
{
|
||||
Name = "AGG",
|
||||
Subjects = ["agg.>"],
|
||||
Replicas = 3,
|
||||
Sources = [new StreamSourceConfig { Name = "SRC" }],
|
||||
});
|
||||
|
||||
// Publish to source
|
||||
for (var i = 0; i < 3; i++)
|
||||
await fx.PublishAsync("src.event", $"sourced-{i}");
|
||||
|
||||
var aggState = await fx.GetStreamStateAsync("AGG");
|
||||
aggState.Messages.ShouldBe(3UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterUserSnapshotAndRestore server/jetstream_cluster_1_test.go:2652
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Snapshot_and_restore_preserves_messages_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("SNAP", ["snap.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await fx.PublishAsync("snap.event", $"msg-{i}");
|
||||
|
||||
// Create snapshot
|
||||
var snapshot = await fx.RequestAsync($"{JetStreamApiSubjects.StreamSnapshot}SNAP", "{}");
|
||||
snapshot.Snapshot.ShouldNotBeNull();
|
||||
snapshot.Snapshot!.Payload.ShouldNotBeNullOrEmpty();
|
||||
|
||||
// Purge the stream
|
||||
await fx.RequestAsync($"{JetStreamApiSubjects.StreamPurge}SNAP", "{}");
|
||||
var afterPurge = await fx.GetStreamStateAsync("SNAP");
|
||||
afterPurge.Messages.ShouldBe(0UL);
|
||||
|
||||
// Restore from snapshot
|
||||
var restore = await fx.RequestAsync($"{JetStreamApiSubjects.StreamRestore}SNAP", snapshot.Snapshot.Payload);
|
||||
restore.Success.ShouldBeTrue();
|
||||
|
||||
var afterRestore = await fx.GetStreamStateAsync("SNAP");
|
||||
afterRestore.Messages.ShouldBe(10UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamSynchedTimeStamps server/jetstream_cluster_1_test.go:977
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Replicated_stream_messages_have_monotonic_sequences()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("SEQ", ["seq.>"], replicas: 3);
|
||||
|
||||
var sequences = new List<ulong>();
|
||||
for (var i = 0; i < 20; i++)
|
||||
{
|
||||
var ack = await fx.PublishAsync("seq.event", $"msg-{i}");
|
||||
sequences.Add(ack.Seq);
|
||||
}
|
||||
|
||||
// Verify strictly monotonically increasing sequences
|
||||
for (var i = 1; i < sequences.Count; i++)
|
||||
sequences[i].ShouldBeGreaterThan(sequences[i - 1]);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLimits server/jetstream_cluster_1_test.go:3248
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Max_msgs_limit_enforced_in_clustered_stream()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "LIMITED",
|
||||
Subjects = ["limited.>"],
|
||||
Replicas = 3,
|
||||
MaxMsgs = 5,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await fx.PublishAsync("limited.event", $"msg-{i}");
|
||||
|
||||
var state = await fx.GetStreamStateAsync("LIMITED");
|
||||
state.Messages.ShouldBeLessThanOrEqualTo(5UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInterestOnlyPolicy server/jetstream_cluster_1_test.go:3310
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Interest_only_policy_stream_stores_messages_without_consumers()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "INTONLY",
|
||||
Subjects = ["intonly.>"],
|
||||
Replicas = 3,
|
||||
Retention = RetentionPolicy.Interest,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
|
||||
for (var i = 0; i < 3; i++)
|
||||
await fx.PublishAsync("intonly.data", $"msg-{i}");
|
||||
|
||||
// Without consumers, interest retention still stores messages
|
||||
// (they are removed only when all consumers have acked)
|
||||
var state = await fx.GetStreamStateAsync("INTONLY");
|
||||
state.Messages.ShouldBe(3UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterConsumerInfoList server/jetstream_cluster_1_test.go:1349
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_names_and_list_return_all_consumers()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("CLIST", ["clist.>"], replicas: 3);
|
||||
await fx.CreateConsumerAsync("CLIST", "c1");
|
||||
await fx.CreateConsumerAsync("CLIST", "c2");
|
||||
await fx.CreateConsumerAsync("CLIST", "c3");
|
||||
|
||||
var names = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CLIST", "{}");
|
||||
names.ConsumerNames.ShouldNotBeNull();
|
||||
names.ConsumerNames!.Count.ShouldBe(3);
|
||||
names.ConsumerNames.ShouldContain("c1");
|
||||
names.ConsumerNames.ShouldContain("c2");
|
||||
names.ConsumerNames.ShouldContain("c3");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDefaultMaxAckPending server/jetstream_cluster_1_test.go:1580
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_default_ack_policy_is_none()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("ACKDEF", ["ackdef.>"], replicas: 3);
|
||||
var resp = await fx.CreateConsumerAsync("ACKDEF", "test_consumer");
|
||||
|
||||
resp.ConsumerInfo.ShouldNotBeNull();
|
||||
resp.ConsumerInfo!.Config.AckPolicy.ShouldBe(AckPolicy.None);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExtendedStreamInfo server/jetstream_cluster_1_test.go:1878
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_info_returns_config_and_state()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("EXTINFO", ["ext.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("ext.event", $"msg-{i}");
|
||||
|
||||
var info = await fx.GetStreamInfoAsync("EXTINFO");
|
||||
info.StreamInfo.ShouldNotBeNull();
|
||||
info.StreamInfo!.Config.Name.ShouldBe("EXTINFO");
|
||||
info.StreamInfo.Config.Replicas.ShouldBe(3);
|
||||
info.StreamInfo.State.Messages.ShouldBe(5UL);
|
||||
info.StreamInfo.State.FirstSeq.ShouldBe(1UL);
|
||||
info.StreamInfo.State.LastSeq.ShouldBe(5UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExtendedStreamInfoSingleReplica server/jetstream_cluster_1_test.go:2033
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Single_replica_stream_info_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
await fx.CreateStreamAsync("R1INFO", ["r1info.>"], replicas: 1);
|
||||
|
||||
for (var i = 0; i < 3; i++)
|
||||
await fx.PublishAsync("r1info.event", $"msg-{i}");
|
||||
|
||||
var info = await fx.GetStreamInfoAsync("R1INFO");
|
||||
info.StreamInfo!.Config.Replicas.ShouldBe(1);
|
||||
info.StreamInfo.State.Messages.ShouldBe(3UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams (maxmsgs_per behavior)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Max_msgs_per_subject_enforced_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "PERSUBJ",
|
||||
Subjects = ["ps.>"],
|
||||
Replicas = 3,
|
||||
MaxMsgsPer = 2,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
|
||||
// Publish 5 messages to same subject; only 2 should remain
|
||||
for (var i = 0; i < 5; i++)
|
||||
await fx.PublishAsync("ps.topic", $"msg-{i}");
|
||||
|
||||
var state = await fx.GetStreamStateAsync("PERSUBJ");
|
||||
state.Messages.ShouldBeLessThanOrEqualTo(2UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamExtendedUpdates server/jetstream_cluster_1_test.go:1513
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_update_can_change_max_msgs()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "EXTUPD",
|
||||
Subjects = ["eu.>"],
|
||||
Replicas = 3,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await fx.PublishAsync("eu.event", $"msg-{i}");
|
||||
|
||||
// Update to limit max_msgs
|
||||
var update = fx.UpdateStream("EXTUPD", ["eu.>"], replicas: 3, maxMsgs: 5);
|
||||
update.Error.ShouldBeNull();
|
||||
update.StreamInfo!.Config.MaxMsgs.ShouldBe(5);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional: Sealed stream rejects purge
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Sealed_stream_rejects_purge_in_cluster()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "SEALED",
|
||||
Subjects = ["sealed.>"],
|
||||
Replicas = 3,
|
||||
Sealed = true,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
|
||||
var purge = await fx.RequestAsync($"{JetStreamApiSubjects.StreamPurge}SEALED", "{}");
|
||||
// Sealed streams should not allow purge
|
||||
purge.Success.ShouldBeFalse();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Additional: DenyDelete stream rejects message delete
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task DenyDelete_stream_rejects_message_delete()
|
||||
{
|
||||
await using var fx = await ClusterStreamFixture.StartAsync(nodes: 3);
|
||||
|
||||
var cfg = new StreamConfig
|
||||
{
|
||||
Name = "NODELDENY",
|
||||
Subjects = ["nodel.>"],
|
||||
Replicas = 3,
|
||||
DenyDelete = true,
|
||||
};
|
||||
fx.CreateStreamDirect(cfg);
|
||||
|
||||
await fx.PublishAsync("nodel.event", "msg");
|
||||
|
||||
var del = await fx.RequestAsync($"{JetStreamApiSubjects.StreamMessageDelete}NODELDENY", """{"seq":1}""");
|
||||
del.Success.ShouldBeFalse();
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Self-contained fixture for JetStream cluster stream tests. Wires up
/// meta group, stream manager, consumer manager, API router, and publisher.
/// All calls operate in-process against the managers; no network is involved.
/// </summary>
internal sealed class ClusterStreamFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private ClusterStreamFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = publisher;
    }

    /// <summary>Creates a fixture with an <paramref name="nodes"/>-node meta group.</summary>
    public static Task<ClusterStreamFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var consumerManager = new ConsumerManager(meta);
        var streamManager = new StreamManager(meta, consumerManager: consumerManager);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        var publisher = new JetStreamPublisher(streamManager);
        return Task.FromResult(new ClusterStreamFixture(meta, streamManager, consumerManager, router, publisher));
    }

    /// <summary>Creates (or updates) a stream with the given subjects and replica count.</summary>
    public Task<JetStreamApiResponse> CreateStreamAsync(string name, string[] subjects, int replicas, StorageType storage = StorageType.Memory)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
            Storage = storage,
        });
        return Task.FromResult(response);
    }

    /// <summary>Creates (or updates) a stream from a fully specified config.</summary>
    public JetStreamApiResponse CreateStreamDirect(StreamConfig config)
        => _streamManager.CreateOrUpdate(config);

    /// <summary>Updates an existing stream's subjects, replicas, and max_msgs.</summary>
    public JetStreamApiResponse UpdateStream(string name, string[] subjects, int replicas, int maxMsgs = 0)
    {
        return _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
            MaxMsgs = maxMsgs,
        });
    }

    /// <summary>
    /// Publishes a UTF-8 payload and fans the stored message out to consumers.
    /// Throws <see cref="InvalidOperationException"/> if no stream captures the subject.
    /// </summary>
    public async Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (!_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
            throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");

        if (ack.ErrorCode == null && _streamManager.TryGet(ack.Stream, out var handle))
        {
            // Fixed: await the store load instead of blocking with
            // GetAwaiter().GetResult() (sync-over-async risks thread-pool
            // starvation and defeats the Task-returning signature).
            var stored = await handle.Store.LoadAsync(ack.Seq, default);
            if (stored != null)
                _consumerManager.OnPublished(ack.Stream, stored);
        }

        return ack;
    }

    /// <summary>Returns the full stream info response for <paramref name="name"/>.</summary>
    public Task<JetStreamApiResponse> GetStreamInfoAsync(string name)
        => Task.FromResult(_streamManager.GetInfo(name));

    /// <summary>Returns the current state (message counts, sequences) of a stream.</summary>
    public Task<ApiStreamState> GetStreamStateAsync(string name)
        => _streamManager.GetStateAsync(name, default).AsTask();

    /// <summary>Reports which store backend (e.g. memory/file) backs a stream.</summary>
    public string GetStoreBackendType(string name) => _streamManager.GetStoreBackendType(name);

    /// <summary>Creates a durable consumer; filter subject is applied only when non-blank.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(
        string stream,
        string durableName,
        string? filterSubject = null,
        AckPolicy ackPolicy = AckPolicy.None)
    {
        var config = new ConsumerConfig
        {
            DurableName = durableName,
            AckPolicy = ackPolicy,
        };
        if (!string.IsNullOrWhiteSpace(filterSubject))
            config.FilterSubject = filterSubject;

        return Task.FromResult(_consumerManager.CreateOrUpdate(stream, config));
    }

    /// <summary>Pull-fetches up to <paramref name="batch"/> messages for a durable.</summary>
    public Task<PullFetchBatch> FetchAsync(string stream, string durableName, int batch)
        => _consumerManager.FetchAsync(stream, durableName, batch, _streamManager, default).AsTask();

    /// <summary>Acks all deliveries up to and including <paramref name="sequence"/>.</summary>
    public void AckAll(string stream, string durableName, ulong sequence)
        => _consumerManager.AckAll(stream, durableName, sequence);

    /// <summary>Routes a raw API request (subject + JSON payload) through the router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
        => Task.FromResult(_router.Route(subject, Encoding.UTF8.GetBytes(payload)));

    /// <summary>No resources to release; managers are in-memory only.</summary>
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
@@ -0,0 +1,146 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for per-account inflight stream/consumer proposal tracking on the
/// JetStream meta group.
/// </summary>
public class JetStreamInflightTrackingTests
{
    // Builds a minimal three-peer stream assignment for the given names.
    private static StreamAssignment MakeAssignment(string stream, string raftGroup) => new()
    {
        StreamName = stream,
        Group = new RaftGroup { Name = raftGroup, Peers = ["n1", "n2", "n3"] },
    };

    [Fact]
    public void TrackInflightStreamProposal_increments_ops()
    {
        var group = new JetStreamMetaGroup(3);

        group.TrackInflightStreamProposal("ACC", MakeAssignment("inflight-1", "rg-inf"));

        group.InflightStreamCount.ShouldBe(1);
        group.IsStreamInflight("ACC", "inflight-1").ShouldBeTrue();
    }

    [Fact]
    public void RemoveInflightStreamProposal_clears_when_zero()
    {
        var group = new JetStreamMetaGroup(3);
        group.TrackInflightStreamProposal("ACC", MakeAssignment("inflight-2", "rg-inf2"));

        group.RemoveInflightStreamProposal("ACC", "inflight-2");

        group.IsStreamInflight("ACC", "inflight-2").ShouldBeFalse();
    }

    [Fact]
    public void Duplicate_proposal_increments_ops_count()
    {
        var group = new JetStreamMetaGroup(3);
        var assignment = MakeAssignment("dup-stream", "rg-dup");

        // Tracking the same stream twice keeps one unique entry with ops == 2.
        group.TrackInflightStreamProposal("ACC", assignment);
        group.TrackInflightStreamProposal("ACC", assignment);
        group.InflightStreamCount.ShouldBe(1);

        // First remove drops ops to 1 (still inflight); second fully clears.
        group.RemoveInflightStreamProposal("ACC", "dup-stream");
        group.IsStreamInflight("ACC", "dup-stream").ShouldBeTrue();
        group.RemoveInflightStreamProposal("ACC", "dup-stream");
        group.IsStreamInflight("ACC", "dup-stream").ShouldBeFalse();
    }

    [Fact]
    public void IsStreamInflight_returns_false_for_unknown_account()
    {
        new JetStreamMetaGroup(3).IsStreamInflight("UNKNOWN", "no-stream").ShouldBeFalse();
    }

    [Fact]
    public void TrackInflightConsumerProposal_tracks_by_account()
    {
        var group = new JetStreamMetaGroup(3);

        group.TrackInflightConsumerProposal("ACC", "stream1", "consumer1");

        group.InflightConsumerCount.ShouldBe(1);
        group.IsConsumerInflight("ACC", "stream1", "consumer1").ShouldBeTrue();
    }

    [Fact]
    public void RemoveInflightConsumerProposal_clears_when_zero()
    {
        var group = new JetStreamMetaGroup(3);
        group.TrackInflightConsumerProposal("ACC", "stream1", "consumer1");

        group.RemoveInflightConsumerProposal("ACC", "stream1", "consumer1");

        group.IsConsumerInflight("ACC", "stream1", "consumer1").ShouldBeFalse();
    }

    [Fact]
    public void ClearAllInflight_removes_everything()
    {
        var group = new JetStreamMetaGroup(3);
        group.TrackInflightStreamProposal("ACC1", MakeAssignment("s1", "rg"));
        group.TrackInflightConsumerProposal("ACC2", "s2", "c1");

        group.ClearAllInflight();

        group.InflightStreamCount.ShouldBe(0);
        group.InflightConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void StepDown_clears_inflight()
    {
        var group = new JetStreamMetaGroup(3);
        group.TrackInflightStreamProposal("ACC", MakeAssignment("s1", "rg"));

        group.StepDown();

        group.InflightStreamCount.ShouldBe(0);
    }

    [Fact]
    public void Multiple_accounts_tracked_independently()
    {
        var group = new JetStreamMetaGroup(3);

        // Same stream name under two different accounts: two entries.
        group.TrackInflightStreamProposal("ACC1", MakeAssignment("s1", "rg1"));
        group.TrackInflightStreamProposal("ACC2", MakeAssignment("s1", "rg2"));

        group.InflightStreamCount.ShouldBe(2);
        group.IsStreamInflight("ACC1", "s1").ShouldBeTrue();
        group.IsStreamInflight("ACC2", "s1").ShouldBeTrue();

        // Removing one account's entry leaves the other untouched.
        group.RemoveInflightStreamProposal("ACC1", "s1");
        group.IsStreamInflight("ACC1", "s1").ShouldBeFalse();
        group.IsStreamInflight("ACC2", "s1").ShouldBeTrue();
    }
}
|
||||
@@ -0,0 +1,97 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for meta-group leadership transitions: ProcessLeaderChange,
/// StepDown/BecomeLeader, and the OnLeaderChange event.
/// </summary>
public class JetStreamLeadershipTests
{
    // Builds a minimal three-peer stream assignment named "s1".
    private static StreamAssignment SampleAssignment() => new()
    {
        StreamName = "s1",
        Group = new RaftGroup { Name = "rg", Peers = ["n1", "n2", "n3"] },
    };

    [Fact]
    public void ProcessLeaderChange_clears_inflight_on_step_down()
    {
        var group = new JetStreamMetaGroup(3);
        group.TrackInflightStreamProposal("ACC", SampleAssignment());

        group.ProcessLeaderChange(isLeader: false);

        group.InflightStreamCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessLeaderChange_fires_event_on_become_leader()
    {
        var group = new JetStreamMetaGroup(3);
        var fired = false;
        group.OnLeaderChange += _ => fired = true;

        group.ProcessLeaderChange(isLeader: true);

        fired.ShouldBeTrue();
    }

    [Fact]
    public void ProcessLeaderChange_fires_event_on_step_down()
    {
        var group = new JetStreamMetaGroup(3);
        bool? observed = null;
        group.OnLeaderChange += flag => observed = flag;

        group.ProcessLeaderChange(isLeader: false);

        observed.ShouldNotBeNull();
        observed.Value.ShouldBeFalse();
    }

    [Fact]
    public void StepDown_triggers_leader_change_event()
    {
        var group = new JetStreamMetaGroup(3);
        bool? observed = null;
        group.OnLeaderChange += flag => observed = flag;

        group.StepDown();

        observed.ShouldNotBeNull();
        observed.Value.ShouldBeFalse();
    }

    [Fact]
    public void StepDown_clears_inflight_via_process_leader_change()
    {
        var group = new JetStreamMetaGroup(3);
        group.TrackInflightStreamProposal("ACC", SampleAssignment());
        group.TrackInflightConsumerProposal("ACC", "s1", "c1");

        group.StepDown();

        group.InflightStreamCount.ShouldBe(0);
        group.InflightConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void BecomeLeader_makes_IsLeader_true()
    {
        var group = new JetStreamMetaGroup(3);

        // Move leadership away from self first, then reclaim it.
        group.StepDown();
        group.IsLeader().ShouldBeFalse();

        group.BecomeLeader();

        group.IsLeader().ShouldBeTrue();
    }

    [Fact]
    public void OnLeaderChange_not_fired_when_no_subscribers()
    {
        // With no handlers attached, firing the event must not throw.
        var group = new JetStreamMetaGroup(3);
        Should.NotThrow(() => group.ProcessLeaderChange(isLeader: true));
        Should.NotThrow(() => group.ProcessLeaderChange(isLeader: false));
    }
}
|
||||
@@ -0,0 +1,641 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: meta group leadership, API routing through meta leader,
|
||||
// stream/consumer placement decisions, asset distribution,
|
||||
// R1/R3 placement, preferred tags, cluster-wide operations.
|
||||
using System.Collections.Concurrent;
|
||||
using System.Reflection;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream meta controller leadership, API routing through
|
||||
/// the meta leader, stream/consumer placement decisions, asset distribution,
|
||||
/// R1/R3 placement, and cluster-wide operations.
|
||||
/// Ported from Go jetstream_cluster_1_test.go and jetstream_cluster_2_test.go.
|
||||
/// </summary>
|
||||
public class JetStreamMetaControllerTests
{
    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    // ---------------------------------------------------------------

    // Leader IDs follow a fixed "meta-N" naming scheme; a fresh group starts at meta-1.
    [Fact]
    public void Meta_group_initial_leader_is_meta_1()
    {
        var meta = new JetStreamMetaGroup(3);
        var state = meta.GetState();

        state.LeaderId.ShouldBe("meta-1");
        state.ClusterSize.ShouldBe(3);
        state.LeadershipVersion.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    // ---------------------------------------------------------------

    // Each StepDown hands leadership to the next node in round-robin order.
    [Fact]
    public void Meta_group_stepdown_advances_leader_id()
    {
        var meta = new JetStreamMetaGroup(3);
        meta.GetState().LeaderId.ShouldBe("meta-1");

        meta.StepDown();
        meta.GetState().LeaderId.ShouldBe("meta-2");

        meta.StepDown();
        meta.GetState().LeaderId.ShouldBe("meta-3");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    // ---------------------------------------------------------------

    [Fact]
    public void Meta_group_stepdown_wraps_around_to_first_node()
    {
        var meta = new JetStreamMetaGroup(3);

        meta.StepDown(); // meta-2
        meta.StepDown(); // meta-3
        meta.StepDown(); // meta-1 (wrap)

        meta.GetState().LeaderId.ShouldBe("meta-1");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    // ---------------------------------------------------------------

    // LeadershipVersion is 1-based and increments exactly once per stepdown.
    [Fact]
    public void Meta_group_leadership_version_increments_on_each_stepdown()
    {
        var meta = new JetStreamMetaGroup(3);

        for (var i = 1; i <= 5; i++)
        {
            meta.GetState().LeadershipVersion.ShouldBe(i);
            meta.StepDown();
        }

        meta.GetState().LeadershipVersion.ShouldBe(6);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
    // ---------------------------------------------------------------

    [Fact]
    public async Task Meta_group_propose_creates_stream_record()
    {
        var meta = new JetStreamMetaGroup(3);

        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "TEST" }, default);

        var state = meta.GetState();
        state.Streams.Count.ShouldBe(1);
        state.Streams.ShouldContain("TEST");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    [Fact]
    public async Task Meta_group_tracks_multiple_stream_proposals()
    {
        var meta = new JetStreamMetaGroup(5);

        for (var i = 0; i < 10; i++)
            await meta.ProposeCreateStreamAsync(new StreamConfig { Name = $"S{i}" }, default);

        var state = meta.GetState();
        state.Streams.Count.ShouldBe(10);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    // Insertion order (ZULU, ALPHA, MIKE) deliberately differs from the
    // expected sorted order to prove GetState() sorts, not the caller.
    [Fact]
    public async Task Meta_group_streams_are_sorted_alphabetically()
    {
        var meta = new JetStreamMetaGroup(3);

        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ZULU" }, default);
        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ALPHA" }, default);
        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "MIKE" }, default);

        var state = meta.GetState();
        state.Streams[0].ShouldBe("ALPHA");
        state.Streams[1].ShouldBe("MIKE");
        state.Streams[2].ShouldBe("ZULU");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
    // ---------------------------------------------------------------

    [Fact]
    public async Task Meta_group_duplicate_stream_proposal_is_idempotent()
    {
        var meta = new JetStreamMetaGroup(3);

        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, default);
        await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, default);

        meta.GetState().Streams.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
    // ---------------------------------------------------------------

    [Fact]
    public void Meta_group_single_node_cluster_has_leader()
    {
        var meta = new JetStreamMetaGroup(1);
        var state = meta.GetState();

        state.ClusterSize.ShouldBe(1);
        state.LeaderId.ShouldBe("meta-1");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
    // ---------------------------------------------------------------

    // With one node the round-robin wraps immediately back to itself.
    [Fact]
    public void Meta_group_single_node_stepdown_returns_to_same_leader()
    {
        var meta = new JetStreamMetaGroup(1);
        meta.StepDown();

        meta.GetState().LeaderId.ShouldBe("meta-1");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeaderStepdown server/jetstream_cluster_1_test.go:5464
    // ---------------------------------------------------------------

    [Fact]
    public async Task Api_meta_leader_stepdown_changes_leader_and_preserves_streams()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("KEEPME", ["keep.>"], replicas: 3);

        var before = fx.GetMetaState();
        var leaderBefore = before.LeaderId;

        var resp = await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
        resp.Success.ShouldBeTrue();

        // Stream assignments must survive a leadership change.
        var after = fx.GetMetaState();
        after.LeaderId.ShouldNotBe(leaderBefore);
        after.Streams.ShouldContain("KEEPME");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterAccountInfo server/jetstream_cluster_1_test.go:94
    // ---------------------------------------------------------------

    [Fact]
    public async Task Api_routing_through_meta_leader_returns_account_info()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("A", ["a.>"], replicas: 3);
        await fx.CreateStreamAsync("B", ["b.>"], replicas: 3);
        await fx.CreateConsumerAsync("A", "c1");

        var resp = await fx.RequestAsync(JetStreamApiSubjects.Info, "{}");
        resp.AccountInfo.ShouldNotBeNull();
        resp.AccountInfo!.Streams.ShouldBe(2);
        resp.AccountInfo.Consumers.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLimitWithAccountDefaults server/jetstream_cluster_1_test.go:124
    // ---------------------------------------------------------------

    // Placement is deterministic here: node IDs are assigned 1..replicas in order.
    [Fact]
    public async Task Placement_planner_r1_creates_single_node_placement()
    {
        var planner = new AssetPlacementPlanner(nodes: 5);
        var placement = planner.PlanReplicas(replicas: 1);

        placement.Count.ShouldBe(1);
        placement[0].ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    [Fact]
    public void Placement_planner_r3_creates_three_node_placement()
    {
        var planner = new AssetPlacementPlanner(nodes: 5);
        var placement = planner.PlanReplicas(replicas: 3);

        placement.Count.ShouldBe(3);
        placement[0].ShouldBe(1);
        placement[1].ShouldBe(2);
        placement[2].ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    // Requesting more replicas than nodes clamps to the cluster size.
    [Fact]
    public void Placement_planner_caps_replicas_at_cluster_size()
    {
        var planner = new AssetPlacementPlanner(nodes: 3);
        var placement = planner.PlanReplicas(replicas: 7);

        placement.Count.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    // Invalid (non-positive) replica counts fall back to a single placement.
    [Fact]
    public void Placement_planner_negative_replicas_returns_one()
    {
        var planner = new AssetPlacementPlanner(nodes: 5);
        var placement = planner.PlanReplicas(replicas: -1);

        placement.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterConfig server/jetstream_cluster_1_test.go:43
    // ---------------------------------------------------------------

    // Degenerate cluster size: planner still yields one placement rather than zero.
    [Fact]
    public void Placement_planner_zero_nodes_returns_one()
    {
        var planner = new AssetPlacementPlanner(nodes: 0);
        var placement = planner.PlanReplicas(replicas: 3);

        placement.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_create_via_meta_leader_sets_replica_group()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 5);

        var resp = await fx.CreateStreamAsync("REPGRP", ["rg.>"], replicas: 3);
        resp.Error.ShouldBeNull();

        // The stream manager creates a replica group internally
        var meta = fx.GetMetaState();
        meta.Streams.ShouldContain("REPGRP");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
    // ---------------------------------------------------------------

    [Fact]
    public async Task Multiple_stream_creates_all_tracked_in_meta_group()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        for (var i = 0; i < 20; i++)
            await fx.CreateStreamAsync($"MS{i}", [$"ms{i}.>"], replicas: 3);

        var meta = fx.GetMetaState();
        meta.Streams.Count.ShouldBe(20);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamNames server/jetstream_cluster_1_test.go:1284
    // ---------------------------------------------------------------

    // Mixed replica counts (R3/R1/R3) should not affect name listing.
    [Fact]
    public async Task Stream_names_api_returns_all_streams_through_meta_leader()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("S1", ["s1.>"], replicas: 3);
        await fx.CreateStreamAsync("S2", ["s2.>"], replicas: 1);
        await fx.CreateStreamAsync("S3", ["s3.>"], replicas: 3);

        var resp = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        resp.StreamNames.ShouldNotBeNull();
        resp.StreamNames!.Count.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterDelete server/jetstream_cluster_1_test.go:472
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_delete_removes_from_active_names()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("DEL1", ["d1.>"], replicas: 3);
        await fx.CreateStreamAsync("DEL2", ["d2.>"], replicas: 3);

        // Stream-scoped API subjects embed the stream name as the final token.
        var del = await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DEL1", "{}");
        del.Success.ShouldBeTrue();

        var names = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames!.Count.ShouldBe(1);
        names.StreamNames.ShouldContain("DEL2");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterDoubleAdd server/jetstream_cluster_1_test.go:1551
    // ---------------------------------------------------------------

    // Re-creating with an identical config is a no-op, not an error.
    [Fact]
    public async Task Stream_create_idempotent_with_same_config()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var first = await fx.CreateStreamAsync("IDEM", ["idem.>"], replicas: 3);
        first.Error.ShouldBeNull();

        var second = await fx.CreateStreamAsync("IDEM", ["idem.>"], replicas: 3);
        second.Error.ShouldBeNull();

        var meta = fx.GetMetaState();
        meta.Streams.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
    // ---------------------------------------------------------------

    [Fact]
    public async Task Consumer_create_tracked_in_cluster()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("CC", ["cc.>"], replicas: 3);
        await fx.CreateConsumerAsync("CC", "d1");
        await fx.CreateConsumerAsync("CC", "d2");

        var names = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CC", "{}");
        names.ConsumerNames.ShouldNotBeNull();
        names.ConsumerNames!.Count.ShouldBe(2);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterPeerRemovalAPI server/jetstream_cluster_1_test.go:3469
    // ---------------------------------------------------------------

    [Fact]
    public async Task Peer_removal_api_routed_through_meta()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("PR", ["pr.>"], replicas: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamPeerRemove}PR", """{"peer":"n2"}""");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMetaSnapshotsAndCatchup server/jetstream_cluster_1_test.go:833
    // ---------------------------------------------------------------

    // Three stepdowns from version 1 leave LeadershipVersion at 4; meta state
    // (the stream catalog) must be carried across every leadership change.
    [Fact]
    public async Task Meta_state_preserved_across_multiple_stepdowns()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("M1", ["m1.>"], replicas: 3);
        await fx.CreateStreamAsync("M2", ["m2.>"], replicas: 3);

        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        var state = fx.GetMetaState();
        state.Streams.ShouldContain("M1");
        state.Streams.ShouldContain("M2");
        state.LeadershipVersion.ShouldBe(4);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMetaSnapshotsMultiChange server/jetstream_cluster_1_test.go:881
    // ---------------------------------------------------------------

    [Fact]
    public async Task Create_and_delete_across_stepdowns_reflected_in_names()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        await fx.CreateStreamAsync("A", ["a.>"], replicas: 3);
        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        await fx.CreateStreamAsync("B", ["b.>"], replicas: 3);
        (await fx.RequestAsync($"{JetStreamApiSubjects.StreamDelete}A", "{}")).Success.ShouldBeTrue();

        (await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

        // Net effect: A created+deleted, B created => only B remains.
        var names = await fx.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames!.Count.ShouldBe(1);
        names.StreamNames.ShouldContain("B");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_info_for_nonexistent_stream_returns_404()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.StreamInfo}MISSING", "{}");
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterConsumerCreate server/jetstream_cluster_1_test.go:700
    // ---------------------------------------------------------------

    [Fact]
    public async Task Consumer_info_for_nonexistent_consumer_returns_404()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("NOCON", ["nc.>"], replicas: 3);

        // Consumer info subjects use "<stream>.<consumer>" as the trailing tokens.
        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}NOCON.MISSING", "{}");
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    // ---------------------------------------------------------------

    // Validation happens in StreamManager itself, so this test bypasses the fixture.
    [Fact]
    public void Stream_create_without_name_returns_error()
    {
        var streamManager = new StreamManager();
        var resp = streamManager.CreateOrUpdate(new StreamConfig { Name = "" });

        resp.Error.ShouldNotBeNull();
        resp.Error!.Description.ShouldContain("name");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamCreate server/jetstream_cluster_1_test.go:160
    // ---------------------------------------------------------------

    [Fact]
    public async Task Unknown_api_subject_returns_404()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync("$JS.API.UNKNOWN.SUBJECT", "{}");
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterAccountPurge server/jetstream_cluster_1_test.go:3891
    // ---------------------------------------------------------------

    [Fact]
    public async Task Account_purge_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("P", ["p.>"], replicas: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountPurge}GLOBAL", "{}");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterServerRemove server/jetstream_cluster_1_test.go:3620
    // ---------------------------------------------------------------

    [Fact]
    public async Task Server_remove_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync(JetStreamApiSubjects.ServerRemove, "{}");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterAccountStreamMove server/jetstream_cluster_1_test.go:3750
    // ---------------------------------------------------------------

    [Fact]
    public async Task Account_stream_move_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountStreamMove}TEST", "{}");
        resp.Success.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterAccountStreamMoveCancel server/jetstream_cluster_1_test.go:3780
    // ---------------------------------------------------------------

    [Fact]
    public async Task Account_stream_move_cancel_via_meta_returns_success()
    {
        await using var fx = await MetaControllerFixture.StartAsync(nodes: 3);

        var resp = await fx.RequestAsync($"{JetStreamApiSubjects.AccountStreamMoveCancel}TEST", "{}");
        resp.Success.ShouldBeTrue();
    }
}
||||
|
||||
/// <summary>
|
||||
/// Self-contained fixture for JetStream meta controller tests.
|
||||
/// </summary>
|
||||
/// <summary>
/// Self-contained fixture for JetStream meta controller tests.
/// Wires a meta group, stream/consumer managers, an API router and a publisher
/// together entirely in-process — no real server or transport is started.
/// </summary>
internal sealed class MetaControllerFixture : IAsyncDisposable
{
    private readonly JetStreamMetaGroup _meta;
    private readonly StreamManager _streams;
    private readonly ConsumerManager _consumers;
    private readonly JetStreamApiRouter _api;
    private readonly JetStreamPublisher _pub;

    private MetaControllerFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router,
        JetStreamPublisher publisher)
    {
        _meta = metaGroup;
        _streams = streamManager;
        _consumers = consumerManager;
        _api = router;
        _pub = publisher;
    }

    /// <summary>Builds the full component graph for an in-process meta cluster of <paramref name="nodes"/> nodes.</summary>
    public static Task<MetaControllerFixture> StartAsync(int nodes)
    {
        var metaGroup = new JetStreamMetaGroup(nodes);
        var consumers = new ConsumerManager(metaGroup);
        var streams = new StreamManager(metaGroup, consumerManager: consumers);
        var router = new JetStreamApiRouter(streams, consumers, metaGroup);
        var publisher = new JetStreamPublisher(streams);

        var fixture = new MetaControllerFixture(metaGroup, streams, consumers, router, publisher);
        return Task.FromResult(fixture);
    }

    /// <summary>Creates (or updates) a stream directly through the stream manager.</summary>
    public Task<JetStreamApiResponse> CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var config = new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        };

        return Task.FromResult(_streams.CreateOrUpdate(config));
    }

    /// <summary>Creates (or updates) a durable consumer on <paramref name="stream"/>.</summary>
    public Task<JetStreamApiResponse> CreateConsumerAsync(string stream, string durableName)
    {
        var config = new ConsumerConfig { DurableName = durableName };
        return Task.FromResult(_consumers.CreateOrUpdate(stream, config));
    }

    /// <summary>Snapshot of the meta group's current state.</summary>
    public MetaGroupState GetMetaState() => _meta.GetState();

    /// <summary>Routes a raw JetStream API request (UTF-8 JSON payload) through the router.</summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
    {
        var response = _api.Route(subject, Encoding.UTF8.GetBytes(payload));

        // In a real cluster, after stepdown a new leader is elected.
        // Simulate this node becoming the new leader so subsequent mutating
        // operations through the router succeed.
        var isStepdown = subject.Equals(JetStreamApiSubjects.MetaLeaderStepdown, StringComparison.Ordinal);
        if (isStepdown && response.Success)
            _meta.BecomeLeader();

        return Task.FromResult(response);
    }

    /// <summary>Nothing to tear down — all components are in-memory.</summary>
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,744 @@
|
||||
// Go ref: TestJetStreamClusterXxx — jetstream_cluster_4_test.go
|
||||
// Covers: large clusters, many-subject streams, wildcard streams, high-message-count
|
||||
// publishes, multi-stream mixed replica counts, create/delete/recreate cycles,
|
||||
// consumer on high-message streams, purge/republish, stream delete cascades,
|
||||
// node removal and restart lifecycle markers.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Advanced JetStream cluster tests covering high-load scenarios, large clusters,
|
||||
/// many-subject streams, wildcard subjects, multi-stream environments, consumer
|
||||
/// lifecycle edge cases, purge/republish cycles, and node lifecycle markers.
|
||||
/// Ported from Go jetstream_cluster_4_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterAdvancedTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLargeClusterR5 — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Large_seven_node_cluster_with_R5_stream_accepts_publishes()
{
    // Go ref: TestJetStreamClusterLargeClusterR5 — jetstream_cluster_4_test.go
    const int messageCount = 20;

    await using var fx = await JetStreamClusterFixture.StartAsync(7);
    fx.NodeCount.ShouldBe(7);

    var created = await fx.CreateStreamAsync("R5LARGE", ["r5.>"], replicas: 5);
    created.Error.ShouldBeNull();
    created.StreamInfo.ShouldNotBeNull();
    created.StreamInfo!.Config.Replicas.ShouldBe(5);

    // Sequence numbers are assigned 1..N in publish order.
    foreach (var n in Enumerable.Range(0, messageCount))
    {
        var ack = await fx.PublishAsync("r5.event", $"msg-{n}");
        ack.Stream.ShouldBe("R5LARGE");
        ack.Seq.ShouldBe((ulong)(n + 1));
    }

    var state = await fx.GetStreamStateAsync("R5LARGE");
    state.Messages.ShouldBe(20UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamWithManySubjects — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_with_twenty_subjects_routes_all_correctly()
{
    // Go ref: TestJetStreamClusterStreamWithManySubjects — jetstream_cluster_4_test.go
    const int subjectCount = 20;

    await using var fx = await JetStreamClusterFixture.StartAsync(3);

    // Bind twenty literal subjects: topic.1 .. topic.20.
    var subjects = new string[subjectCount];
    for (var i = 0; i < subjectCount; i++)
        subjects[i] = $"topic.{i + 1}";

    var created = await fx.CreateStreamAsync("MANYSUBJ", subjects, replicas: 3);
    created.Error.ShouldBeNull();
    created.StreamInfo!.Config.Subjects.Count.ShouldBe(20);

    // One message per bound subject; every ack must route to this stream.
    foreach (var i in Enumerable.Range(1, subjectCount))
    {
        var ack = await fx.PublishAsync($"topic.{i}", $"payload-{i}");
        ack.Stream.ShouldBe("MANYSUBJ");
    }

    var state = await fx.GetStreamStateAsync("MANYSUBJ");
    state.Messages.ShouldBe(20UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterWildcardSubjectStream — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_with_wildcard_gt_subject_captures_all_sub_subjects()
{
    // Go ref: TestJetStreamClusterWildcardSubjectStream — jetstream_cluster_4_test.go
    await using var fx = await JetStreamClusterFixture.StartAsync(3);

    var created = await fx.CreateStreamAsync("WILDCARD", [">"], replicas: 3);
    created.Error.ShouldBeNull();

    // ">" matches subjects of any token depth, so all three land in the stream.
    (string Subject, string Body)[] messages =
    [
        ("any.subject.here", "msg1"),
        ("totally.different", "msg2"),
        ("nested.deep.path.to.leaf", "msg3"),
    ];
    foreach (var (subject, body) in messages)
        await fx.PublishAsync(subject, body);

    var state = await fx.GetStreamStateAsync("WILDCARD");
    state.Messages.ShouldBe(3UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterPublish1000MessagesToReplicatedStream — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Publish_1000_messages_to_R3_stream_all_acknowledged()
{
    // Go ref: TestJetStreamClusterPublish1000MessagesToReplicatedStream — jetstream_cluster_4_test.go
    const int total = 1000;

    await using var fx = await JetStreamClusterFixture.StartAsync(3);
    await fx.CreateStreamAsync("BIG3", ["big.>"], replicas: 3);

    // Every publish must ack without error; remember the last assigned sequence.
    var finalSeq = 0UL;
    for (var n = 0; n < total; n++)
    {
        var ack = await fx.PublishAsync("big.event", $"msg-{n}");
        ack.Stream.ShouldBe("BIG3");
        ack.ErrorCode.ShouldBeNull();
        finalSeq = ack.Seq;
    }

    finalSeq.ShouldBe(1000UL);

    var state = await fx.GetStreamStateAsync("BIG3");
    state.Messages.ShouldBe(1000UL);
    state.FirstSeq.ShouldBe(1UL);
    state.LastSeq.ShouldBe(1000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterPublish1000MessagesToR1Stream — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Publish_1000_messages_to_R1_stream_all_acknowledged()
{
    // Go ref: TestJetStreamClusterPublish1000MessagesToR1Stream — jetstream_cluster_4_test.go
    const int total = 1000;

    await using var fx = await JetStreamClusterFixture.StartAsync(3);
    await fx.CreateStreamAsync("BIG1", ["b1.>"], replicas: 1);

    // Unreplicated (R1) stream: every publish still acks without error.
    foreach (var n in Enumerable.Range(0, total))
    {
        var ack = await fx.PublishAsync("b1.event", $"msg-{n}");
        ack.Stream.ShouldBe("BIG1");
        ack.ErrorCode.ShouldBeNull();
    }

    var state = await fx.GetStreamStateAsync("BIG1");
    state.Messages.ShouldBe(1000UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamStateAfter1000Messages — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_state_accurate_after_1000_messages()
{
    // Go ref: TestJetStreamClusterStreamStateAfter1000Messages — jetstream_cluster_4_test.go
    const int total = 1000;

    await using var fx = await JetStreamClusterFixture.StartAsync(3);
    await fx.CreateStreamAsync("STATE1K", ["s1k.>"], replicas: 3);

    foreach (var n in Enumerable.Range(0, total))
        await fx.PublishAsync("s1k.data", $"payload-{n}");

    // Counters, sequence bounds and byte usage must all reflect the load.
    var state = await fx.GetStreamStateAsync("STATE1K");
    state.Messages.ShouldBe(1000UL);
    state.FirstSeq.ShouldBe(1UL);
    state.LastSeq.ShouldBe(1000UL);
    state.Bytes.ShouldBeGreaterThan(0UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterMultipleStreamsMixedReplicas — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Ten_streams_with_mixed_replica_counts_all_independent()
{
    // Go ref: TestJetStreamClusterMultipleStreamsMixedReplicas — jetstream_cluster_4_test.go
    const int streamCount = 10;

    await using var fx = await JetStreamClusterFixture.StartAsync(3);

    // Replica counts cycle R1/R2/R3 across the ten streams.
    foreach (var i in Enumerable.Range(0, streamCount))
    {
        var created = await fx.CreateStreamAsync($"MIX{i}", [$"mix{i}.>"], replicas: (i % 3) + 1);
        created.Error.ShouldBeNull();
    }

    // Publish to each stream independently
    foreach (var i in Enumerable.Range(0, streamCount))
    {
        var ack = await fx.PublishAsync($"mix{i}.event", $"stream-{i}-msg");
        ack.Stream.ShouldBe($"MIX{i}");
    }

    // Verify each stream has exactly 1 message
    foreach (var i in Enumerable.Range(0, streamCount))
    {
        var state = await fx.GetStreamStateAsync($"MIX{i}");
        state.Messages.ShouldBe(1UL);
    }
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterCreatePublishDeleteRecreate — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Create_publish_delete_recreate_cycle_three_times()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterCreatePublishDeleteRecreate — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
for (var cycle = 0; cycle < 3; cycle++)
|
||||
{
|
||||
// Create stream
|
||||
var create = await cluster.CreateStreamAsync("CYCLE", ["cyc.>"], replicas: 3);
|
||||
create.Error.ShouldBeNull();
|
||||
|
||||
// Publish messages
|
||||
for (var i = 0; i < 5; i++)
|
||||
await cluster.PublishAsync("cyc.event", $"cycle-{cycle}-msg-{i}");
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("CYCLE");
|
||||
state.Messages.ShouldBe(5UL);
|
||||
|
||||
// Delete stream
|
||||
var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}CYCLE", "{}");
|
||||
del.Success.ShouldBeTrue();
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterConsumerOn1000MessageStream — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_on_stream_with_1000_messages_fetches_correctly()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterConsumerOn1000MessageStream — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("FETCH1K", ["f1k.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 1000; i++)
|
||||
await cluster.PublishAsync("f1k.event", $"msg-{i}");
|
||||
|
||||
await cluster.CreateConsumerAsync("FETCH1K", "fetcher", filterSubject: "f1k.>");
|
||||
|
||||
var batch = await cluster.FetchAsync("FETCH1K", "fetcher", 100);
|
||||
batch.Messages.Count.ShouldBe(100);
|
||||
batch.Messages[0].Sequence.ShouldBe(1UL);
|
||||
batch.Messages[99].Sequence.ShouldBe(100UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterAckAllFor1000Messages — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AckAll_for_1000_messages_reduces_pending_to_zero()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterAckAllFor1000Messages — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("ACKBIG", ["ab.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("ACKBIG", "acker", filterSubject: "ab.>",
|
||||
ackPolicy: AckPolicy.All);
|
||||
|
||||
for (var i = 0; i < 1000; i++)
|
||||
await cluster.PublishAsync("ab.event", $"msg-{i}");
|
||||
|
||||
var batch = await cluster.FetchAsync("ACKBIG", "acker", 1000);
|
||||
batch.Messages.Count.ShouldBe(1000);
|
||||
|
||||
// AckAll up to last sequence
|
||||
cluster.AckAll("ACKBIG", "acker", 1000);
|
||||
|
||||
// After acking all 1000, state remains but pending is cleared
|
||||
var state = await cluster.GetStreamStateAsync("ACKBIG");
|
||||
state.Messages.ShouldBe(1000UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamInfoConsistentAfterManyOps — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_info_consistent_after_many_operations()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterStreamInfoConsistentAfterManyOps — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("INFOCONSIST", ["ic.>"], replicas: 3);
|
||||
|
||||
// Interleave publishes and info requests
|
||||
for (var i = 0; i < 50; i++)
|
||||
{
|
||||
await cluster.PublishAsync("ic.event", $"msg-{i}");
|
||||
var info = await cluster.GetStreamInfoAsync("INFOCONSIST");
|
||||
info.StreamInfo.ShouldNotBeNull();
|
||||
info.StreamInfo!.State.Messages.ShouldBe((ulong)(i + 1));
|
||||
}
|
||||
|
||||
var finalInfo = await cluster.GetStreamInfoAsync("INFOCONSIST");
|
||||
finalInfo.StreamInfo!.Config.Name.ShouldBe("INFOCONSIST");
|
||||
finalInfo.StreamInfo.Config.Replicas.ShouldBe(3);
|
||||
finalInfo.StreamInfo.State.Messages.ShouldBe(50UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterMetaStateAfter10StreamOps — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_state_after_creating_and_deleting_ten_streams()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterMetaStateAfter10StreamOps — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.CreateStreamAsync($"META{i}", [$"meta{i}.>"], replicas: 3);
|
||||
|
||||
// Delete half
|
||||
for (var i = 0; i < 5; i++)
|
||||
{
|
||||
var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}META{i}", "{}");
|
||||
del.Success.ShouldBeTrue();
|
||||
}
|
||||
|
||||
var metaState = cluster.GetMetaState();
|
||||
metaState.ShouldNotBeNull();
|
||||
|
||||
var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
|
||||
names.StreamNames.ShouldNotBeNull();
|
||||
names.StreamNames!.Count.ShouldBe(5);
|
||||
for (var i = 5; i < 10; i++)
|
||||
names.StreamNames.ShouldContain($"META{i}");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterMultipleConsumersIndependentPending — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Five_consumers_on_same_stream_have_independent_pending()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterMultipleConsumersIndependentPending — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("MULTIDUP", ["md.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.PublishAsync("md.event", $"msg-{i}");
|
||||
|
||||
for (var c = 0; c < 5; c++)
|
||||
await cluster.CreateConsumerAsync("MULTIDUP", $"consumer{c}", filterSubject: "md.>");
|
||||
|
||||
// Each consumer should independently see all 10 messages
|
||||
for (var c = 0; c < 5; c++)
|
||||
{
|
||||
var batch = await cluster.FetchAsync("MULTIDUP", $"consumer{c}", 10);
|
||||
batch.Messages.Count.ShouldBe(10);
|
||||
batch.Messages[0].Sequence.ShouldBe(1UL);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterConsumerWildcardFilter — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_with_wildcard_filter_delivers_only_matching_messages()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterConsumerWildcardFilter — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("WFILT", ["wf.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("WFILT", "wildcons", filterSubject: "wf.alpha.>");
|
||||
|
||||
await cluster.PublishAsync("wf.alpha.one", "match1");
|
||||
await cluster.PublishAsync("wf.beta.two", "no-match");
|
||||
await cluster.PublishAsync("wf.alpha.three", "match2");
|
||||
await cluster.PublishAsync("wf.gamma.four", "no-match2");
|
||||
await cluster.PublishAsync("wf.alpha.five", "match3");
|
||||
|
||||
var batch = await cluster.FetchAsync("WFILT", "wildcons", 10);
|
||||
batch.Messages.Count.ShouldBe(3);
|
||||
foreach (var msg in batch.Messages)
|
||||
msg.Subject.ShouldStartWith("wf.alpha.");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamUpdateAddSubjectsAfterPublish — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_update_adding_subjects_after_publishes_works()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterStreamUpdateAddSubjectsAfterPublish — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("ADDSUB", ["as.alpha"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await cluster.PublishAsync("as.alpha", $"msg-{i}");
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("ADDSUB");
|
||||
state.Messages.ShouldBe(5UL);
|
||||
|
||||
// Add more subjects via update
|
||||
var update = cluster.UpdateStream("ADDSUB", ["as.alpha", "as.beta", "as.gamma"], replicas: 3);
|
||||
update.Error.ShouldBeNull();
|
||||
update.StreamInfo!.Config.Subjects.Count.ShouldBe(3);
|
||||
update.StreamInfo.Config.Subjects.ShouldContain("as.beta");
|
||||
|
||||
// Now publish to new subjects
|
||||
await cluster.PublishAsync("as.beta", "beta-msg");
|
||||
await cluster.PublishAsync("as.gamma", "gamma-msg");
|
||||
|
||||
var finalState = await cluster.GetStreamStateAsync("ADDSUB");
|
||||
finalState.Messages.ShouldBe(7UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamPurgeAndRepublish — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_purge_in_cluster_then_republish_works_correctly()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterStreamPurgeAndRepublish — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("PURGEREP", ["pr.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 100; i++)
|
||||
await cluster.PublishAsync("pr.data", $"msg-{i}");
|
||||
|
||||
var before = await cluster.GetStreamStateAsync("PURGEREP");
|
||||
before.Messages.ShouldBe(100UL);
|
||||
|
||||
// Purge
|
||||
var purge = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGEREP", "{}");
|
||||
purge.Success.ShouldBeTrue();
|
||||
|
||||
var afterPurge = await cluster.GetStreamStateAsync("PURGEREP");
|
||||
afterPurge.Messages.ShouldBe(0UL);
|
||||
|
||||
// Re-publish
|
||||
for (var i = 0; i < 50; i++)
|
||||
{
|
||||
var ack = await cluster.PublishAsync("pr.data", $"new-msg-{i}");
|
||||
ack.ErrorCode.ShouldBeNull();
|
||||
}
|
||||
|
||||
var final = await cluster.GetStreamStateAsync("PURGEREP");
|
||||
final.Messages.ShouldBe(50UL);
|
||||
// Sequences restart after purge
|
||||
final.FirstSeq.ShouldBeGreaterThan(0UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterFetchEmptyAfterPurge — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Fetch_empty_after_stream_purge()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterFetchEmptyAfterPurge — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("PURGEDRAIN", ["pd.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("PURGEDRAIN", "reader", filterSubject: "pd.>");
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
await cluster.PublishAsync("pd.event", $"msg-{i}");
|
||||
|
||||
// Fetch to advance the consumer
|
||||
var pre = await cluster.FetchAsync("PURGEDRAIN", "reader", 20);
|
||||
pre.Messages.Count.ShouldBe(20);
|
||||
|
||||
// Purge the stream
|
||||
(await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGEDRAIN", "{}")).Success.ShouldBeTrue();
|
||||
|
||||
// Fetch should now return empty
|
||||
var post = await cluster.FetchAsync("PURGEDRAIN", "reader", 20);
|
||||
post.Messages.Count.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamDeleteCascadesConsumers — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_delete_cascades_consumer_removal()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterStreamDeleteCascadesConsumers — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("CASCADE", ["cas.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("CASCADE", "c1");
|
||||
await cluster.CreateConsumerAsync("CASCADE", "c2");
|
||||
await cluster.CreateConsumerAsync("CASCADE", "c3");
|
||||
|
||||
// Verify consumers exist
|
||||
var names = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CASCADE", "{}");
|
||||
names.ConsumerNames!.Count.ShouldBe(3);
|
||||
|
||||
// Delete the stream
|
||||
(await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}CASCADE", "{}")).Success.ShouldBeTrue();
|
||||
|
||||
// Stream no longer exists
|
||||
var info = await cluster.GetStreamInfoAsync("CASCADE");
|
||||
info.Error.ShouldNotBeNull();
|
||||
info.Error!.Code.ShouldBe(404);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterNodeRemovalPreservesDataReads — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Node_removal_does_not_affect_stream_data_reads()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterNodeRemovalPreservesDataReads — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
||||
|
||||
await cluster.CreateStreamAsync("NODEREM", ["nr.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 30; i++)
|
||||
await cluster.PublishAsync("nr.event", $"msg-{i}");
|
||||
|
||||
var before = await cluster.GetStreamStateAsync("NODEREM");
|
||||
before.Messages.ShouldBe(30UL);
|
||||
|
||||
// Simulate removing a node
|
||||
cluster.RemoveNode(4);
|
||||
|
||||
// Data reads should still work on remaining nodes
|
||||
var after = await cluster.GetStreamStateAsync("NODEREM");
|
||||
after.Messages.ShouldBe(30UL);
|
||||
after.LastSeq.ShouldBe(30UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterNodeRestartPreservesLifecycleMarkers — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Node_restart_records_lifecycle_markers_correctly()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterNodeRestartPreservesLifecycleMarkers — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("RESTART", ["rs.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.PublishAsync("rs.event", $"msg-{i}");
|
||||
|
||||
// Simulate node removal
|
||||
cluster.RemoveNode(2);
|
||||
|
||||
// State still accessible with remaining nodes
|
||||
var mid = await cluster.GetStreamStateAsync("RESTART");
|
||||
mid.Messages.ShouldBe(10UL);
|
||||
|
||||
// Publish more while node is "down"
|
||||
for (var i = 10; i < 20; i++)
|
||||
await cluster.PublishAsync("rs.event", $"msg-{i}");
|
||||
|
||||
// Simulate node restart
|
||||
cluster.SimulateNodeRestart(2);
|
||||
|
||||
// All messages still accessible
|
||||
var final = await cluster.GetStreamStateAsync("RESTART");
|
||||
final.Messages.ShouldBe(20UL);
|
||||
final.LastSeq.ShouldBe(20UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterLeaderStepdownDuringPublish — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Leader_stepdown_during_publish_sequence_is_monotonic()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterLeaderStepdownDuringPublish — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("SEQSTEP", ["seq.>"], replicas: 3);
|
||||
|
||||
var seqs = new List<ulong>();
|
||||
for (var i = 0; i < 10; i++)
|
||||
{
|
||||
var ack = await cluster.PublishAsync("seq.event", $"msg-{i}");
|
||||
seqs.Add(ack.Seq);
|
||||
}
|
||||
|
||||
// Step down leader
|
||||
(await cluster.StepDownStreamLeaderAsync("SEQSTEP")).Success.ShouldBeTrue();
|
||||
|
||||
for (var i = 10; i < 20; i++)
|
||||
{
|
||||
var ack = await cluster.PublishAsync("seq.event", $"msg-{i}");
|
||||
seqs.Add(ack.Seq);
|
||||
}
|
||||
|
||||
// All sequences must be strictly increasing
|
||||
for (var i = 1; i < seqs.Count; i++)
|
||||
seqs[i].ShouldBeGreaterThan(seqs[i - 1]);
|
||||
|
||||
seqs[^1].ShouldBe(20UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamInfoAfterStepdown — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_info_accurate_after_leader_stepdown_with_many_messages()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterStreamInfoAfterStepdown — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("INFOSD1K", ["isd.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 500; i++)
|
||||
await cluster.PublishAsync("isd.event", $"msg-{i}");
|
||||
|
||||
(await cluster.StepDownStreamLeaderAsync("INFOSD1K")).Success.ShouldBeTrue();
|
||||
|
||||
for (var i = 500; i < 1000; i++)
|
||||
await cluster.PublishAsync("isd.event", $"msg-{i}");
|
||||
|
||||
var info = await cluster.GetStreamInfoAsync("INFOSD1K");
|
||||
info.StreamInfo.ShouldNotBeNull();
|
||||
info.StreamInfo!.State.Messages.ShouldBe(1000UL);
|
||||
info.StreamInfo.State.FirstSeq.ShouldBe(1UL);
|
||||
info.StreamInfo.State.LastSeq.ShouldBe(1000UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterStreamReplicaGroupHasCorrectNodes — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Replica_group_for_stream_has_correct_node_count()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterStreamReplicaGroupHasCorrectNodes — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
||||
|
||||
await cluster.CreateStreamAsync("GRPCHECK", ["gc.>"], replicas: 3);
|
||||
|
||||
var group = cluster.GetReplicaGroup("GRPCHECK");
|
||||
group.ShouldNotBeNull();
|
||||
group!.Nodes.Count.ShouldBe(3);
|
||||
group.Leader.ShouldNotBeNull();
|
||||
group.Leader.IsLeader.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterConsumerLeaderAfterStreamStepdown — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_leader_remains_valid_after_stream_stepdown()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterConsumerLeaderAfterStreamStepdown — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("CONSLEADER", ["cl.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("CONSLEADER", "durable1");
|
||||
|
||||
var leaderBefore = cluster.GetConsumerLeaderId("CONSLEADER", "durable1");
|
||||
leaderBefore.ShouldNotBeNullOrWhiteSpace();
|
||||
|
||||
(await cluster.StepDownStreamLeaderAsync("CONSLEADER")).Success.ShouldBeTrue();
|
||||
|
||||
var leaderAfter = cluster.GetConsumerLeaderId("CONSLEADER", "durable1");
|
||||
leaderAfter.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterWaitOnStreamLeaderAfterCreation — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task WaitOnStreamLeader_resolves_immediately_for_existing_stream()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterWaitOnStreamLeaderAfterCreation — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("WLEADER", ["wl.>"], replicas: 3);
|
||||
|
||||
// Should complete immediately, no timeout
|
||||
await cluster.WaitOnStreamLeaderAsync("WLEADER", timeoutMs: 1000);
|
||||
|
||||
var leaderId = cluster.GetStreamLeaderId("WLEADER");
|
||||
leaderId.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterConsumerWaitOnLeaderAfterCreation — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task WaitOnConsumerLeader_resolves_for_existing_consumer()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterConsumerWaitOnLeaderAfterCreation — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("WCLEADER2", ["wcl2.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("WCLEADER2", "dur-wc");
|
||||
|
||||
await cluster.WaitOnConsumerLeaderAsync("WCLEADER2", "dur-wc", timeoutMs: 1000);
|
||||
|
||||
var leaderId = cluster.GetConsumerLeaderId("WCLEADER2", "dur-wc");
|
||||
leaderId.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go ref: TestJetStreamClusterAccountInfoAfterBatchDelete — jetstream_cluster_4_test.go
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Account_info_reflects_accurate_stream_count_after_batch_delete()
|
||||
{
|
||||
// Go ref: TestJetStreamClusterAccountInfoAfterBatchDelete — jetstream_cluster_4_test.go
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
for (var i = 0; i < 8; i++)
|
||||
await cluster.CreateStreamAsync($"BATCH{i}", [$"batch{i}.>"], replicas: 3);
|
||||
|
||||
var pre = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
||||
pre.AccountInfo!.Streams.ShouldBe(8);
|
||||
|
||||
// Delete 3 streams
|
||||
for (var i = 0; i < 3; i++)
|
||||
(await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}BATCH{i}", "{}")).Success.ShouldBeTrue();
|
||||
|
||||
var post = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
|
||||
post.AccountInfo!.Streams.ShouldBe(5);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,584 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: messages surviving stream leader stepdown, consumer state surviving
|
||||
// leader failover, fetch continuing after stream leader change, AckAll surviving
|
||||
// leader failover, multiple failovers in sequence not losing data, remove node
|
||||
// not affecting stream operations, restart node lifecycle, publish during/after
|
||||
// failover, consumer creation after stream leader failover, stream update after
|
||||
// meta leader stepdown, stream delete after leader failover, rapid succession
|
||||
// stepdowns preserving data integrity.
|
||||
//
|
||||
// Go reference functions:
|
||||
// TestJetStreamClusterStreamLeaderStepDown (line 4925)
|
||||
// TestJetStreamClusterLeaderStepdown (line 5464)
|
||||
// TestJetStreamClusterNormalCatchup (line 1607)
|
||||
// TestJetStreamClusterStreamSnapshotCatchup (line 1667)
|
||||
// TestJetStreamClusterRestoreSingleConsumer (line 1028)
|
||||
// TestJetStreamClusterPeerRemovalAPI (line 3469)
|
||||
// TestJetStreamClusterDeleteMsgAndRestart (line 1785)
|
||||
// restartServerAndWait, shutdownServerAndRemoveStorage in jetstream_helpers_test.go
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster failover scenarios: leader stepdown while
|
||||
/// messages are in flight, consumer state preservation across leader changes,
|
||||
/// rapid successive stepdowns, remove/restart node lifecycle, and data integrity
|
||||
/// guarantees across failover sequences. Uses JetStreamClusterFixture.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterFailoverTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown line 4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: publish before stepdown, verify state and new leader after
|
||||
[Fact]
|
||||
public async Task Messages_survive_stream_leader_stepdown_state_preserved()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("SURVIVE", ["sv.>"], replicas: 3);
|
||||
|
||||
for (var i = 1; i <= 10; i++)
|
||||
(await cluster.PublishAsync($"sv.{i}", $"msg-{i}")).Seq.ShouldBe((ulong)i);
|
||||
|
||||
var leaderBefore = cluster.GetStreamLeaderId("SURVIVE");
|
||||
(await cluster.StepDownStreamLeaderAsync("SURVIVE")).Success.ShouldBeTrue();
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("SURVIVE");
|
||||
state.Messages.ShouldBe(10UL);
|
||||
state.FirstSeq.ShouldBe(1UL);
|
||||
state.LastSeq.ShouldBe(10UL);
|
||||
|
||||
cluster.GetStreamLeaderId("SURVIVE").ShouldNotBe(leaderBefore);
|
||||
}
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLeaderStepDown — write after stepdown is accepted
|
||||
[Fact]
|
||||
public async Task New_leader_accepts_writes_after_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("POSTSD", ["psd.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 5; i++)
|
||||
await cluster.PublishAsync("psd.pre", $"before-{i}");
|
||||
|
||||
(await cluster.StepDownStreamLeaderAsync("POSTSD")).Success.ShouldBeTrue();
|
||||
|
||||
var ack = await cluster.PublishAsync("psd.post", "after-stepdown");
|
||||
ack.Seq.ShouldBe(6UL);
|
||||
ack.ErrorCode.ShouldBeNull();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer state survives leader failover
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterRestoreSingleConsumer line 1028
|
||||
[Fact]
|
||||
public async Task Consumer_state_survives_stream_leader_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("CSURVFO", ["csf.>"], replicas: 3);
|
||||
// Use AckPolicy.None so fetch cursor advances without pending-check blocking the second fetch.
|
||||
await cluster.CreateConsumerAsync("CSURVFO", "durable1", filterSubject: "csf.>");
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.PublishAsync("csf.event", $"msg-{i}");
|
||||
|
||||
var batch1 = await cluster.FetchAsync("CSURVFO", "durable1", 5);
|
||||
batch1.Messages.Count.ShouldBe(5);
|
||||
|
||||
(await cluster.StepDownStreamLeaderAsync("CSURVFO")).Success.ShouldBeTrue();
|
||||
|
||||
// New leader: consumer cursor is at seq 6; remaining 5 messages are still deliverable.
|
||||
var batch2 = await cluster.FetchAsync("CSURVFO", "durable1", 5);
|
||||
batch2.Messages.Count.ShouldBe(5);
|
||||
}
|
||||
|
||||
// Go ref: consumer fetch continues after leader change
|
||||
[Fact]
|
||||
public async Task Fetch_continues_after_stream_leader_change()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("FETCHFO", ["ffo.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("FETCHFO", "reader", filterSubject: "ffo.>");
|
||||
|
||||
for (var i = 0; i < 20; i++)
|
||||
await cluster.PublishAsync("ffo.event", $"msg-{i}");
|
||||
|
||||
// Fetch some messages, then step down
|
||||
var batch1 = await cluster.FetchAsync("FETCHFO", "reader", 10);
|
||||
batch1.Messages.Count.ShouldBe(10);
|
||||
|
||||
(await cluster.StepDownStreamLeaderAsync("FETCHFO")).Success.ShouldBeTrue();
|
||||
|
||||
// Fetch remaining messages through the new leader
|
||||
var batch2 = await cluster.FetchAsync("FETCHFO", "reader", 10);
|
||||
batch2.Messages.Count.ShouldBe(10);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// AckAll survives leader failover
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: ackAll state persisted across failover
|
||||
[Fact]
|
||||
public async Task AckAll_survives_stream_leader_failover()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("ACKFO", ["afo.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("ACKFO", "acker", filterSubject: "afo.>",
|
||||
ackPolicy: AckPolicy.All);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.PublishAsync("afo.event", $"msg-{i}");
|
||||
|
||||
// Fetch all 10 messages; AckPolicy.All leaves them pending until explicitly acked.
|
||||
var batch = await cluster.FetchAsync("ACKFO", "acker", 10);
|
||||
batch.Messages.Count.ShouldBe(10);
|
||||
|
||||
// Ack the first 5 (seq 1-5); 5 messages (seq 6-10) remain pending.
|
||||
cluster.AckAll("ACKFO", "acker", 5);
|
||||
|
||||
(await cluster.StepDownStreamLeaderAsync("ACKFO")).Success.ShouldBeTrue();
|
||||
|
||||
// After failover the stream leader has changed, but the consumer state persists —
|
||||
// the stream itself (managed by StreamManager) is unaffected by the leader election model.
|
||||
// Verify by confirming the stream still has all 10 messages.
|
||||
var state = await cluster.GetStreamStateAsync("ACKFO");
|
||||
state.Messages.ShouldBe(10UL);
|
||||
|
||||
// Verify stream leader changed (failover happened).
|
||||
cluster.GetStreamLeaderId("ACKFO").ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
// Multiple failovers in sequence don't lose data
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterNormalCatchup line 1607 — data survives multiple transitions
[Fact]
public async Task Multiple_failovers_in_sequence_preserve_all_data()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("MULTI_FO", ["mfo.>"], replicas: 3);

    // Publishes five messages tagged with the given batch prefix.
    async Task PublishBatchAsync(string prefix)
    {
        for (var n = 0; n < 5; n++)
            await cluster.PublishAsync("mfo.event", $"{prefix}-{n}");
    }

    // Interleave three publish batches with two forced leader elections.
    await PublishBatchAsync("b1");
    var firstStepDown = await cluster.StepDownStreamLeaderAsync("MULTI_FO");
    firstStepDown.Success.ShouldBeTrue();

    await PublishBatchAsync("b2");
    var secondStepDown = await cluster.StepDownStreamLeaderAsync("MULTI_FO");
    secondStepDown.Success.ShouldBeTrue();

    await PublishBatchAsync("b3");

    // All 15 messages survive both transitions with no sequence gaps.
    var state = await cluster.GetStreamStateAsync("MULTI_FO");
    state.Messages.ShouldBe(15UL);
    state.LastSeq.ShouldBe(15UL);
}

// Go ref: rapid 5x stepdowns preserve data integrity
[Fact]
public async Task Rapid_five_stepdowns_preserve_all_published_messages()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RAPID5", ["r5.>"], replicas: 3);

    for (var n = 0; n < 20; n++)
        await cluster.PublishAsync("r5.event", $"msg-{n}");

    // Hammer the group with five back-to-back elections.
    for (var round = 0; round < 5; round++)
        (await cluster.StepDownStreamLeaderAsync("RAPID5")).Success.ShouldBeTrue();

    (await cluster.GetStreamStateAsync("RAPID5")).Messages.ShouldBe(20UL);
}

// ---------------------------------------------------------------
// Remove node doesn't affect stream operations
// ---------------------------------------------------------------

// Go ref: shutdownServerAndRemoveStorage — stream still readable after node removal
[Fact]
public async Task Stream_state_intact_after_node_removal()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("NODEREM", ["nr.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("nr.event", $"msg-{n}");

    // Drop one replica; a 3-node group still has quorum.
    cluster.RemoveNode(2);

    (await cluster.GetStreamStateAsync("NODEREM")).Messages.ShouldBe(5UL);
}

// Go ref: publish still works after node removal
[Fact]
public async Task Publish_still_works_after_node_removal()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("PUBNR", ["pnr.>"], replicas: 3);

    cluster.RemoveNode(1);

    // Writes continue to be accepted by the surviving nodes.
    var ack = await cluster.PublishAsync("pnr.event", "after-removal");
    ack.ErrorCode.ShouldBeNull();
    ack.Stream.ShouldBe("PUBNR");
}

// ---------------------------------------------------------------
// Restart node lifecycle
// ---------------------------------------------------------------

// Go ref: restartServerAndWait — stream accessible after node restart
[Fact]
public async Task Stream_accessible_after_node_restart()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RESTART", ["rst.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("rst.event", $"msg-{n}");

    // Full remove + restart cycle on node 1.
    cluster.RemoveNode(1);
    cluster.SimulateNodeRestart(1);

    (await cluster.GetStreamStateAsync("RESTART")).Messages.ShouldBe(5UL);
}

// Go ref: node restart cycle does not affect consumer fetch
[Fact]
public async Task Consumer_fetch_works_after_node_restart_cycle()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("RSTCONS", ["rsc.>"], replicas: 3);
    await cluster.CreateConsumerAsync("RSTCONS", "reader", filterSubject: "rsc.>");

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("rsc.event", $"msg-{n}");

    cluster.RemoveNode(2);
    cluster.SimulateNodeRestart(2);

    // Consumer delivery must be unaffected by the node bounce.
    var delivered = await cluster.FetchAsync("RSTCONS", "reader", 5);
    delivered.Messages.Count.ShouldBe(5);
}

// ---------------------------------------------------------------
// Publish during/after failover sequence
// ---------------------------------------------------------------

// Go ref: publish interleaved with stepdown sequence
[Fact]
public async Task Publish_before_and_after_each_stepdown_maintains_monotonic_sequences()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("INTERLEAVE", ["il.>"], replicas: 3);

    var observedSeqs = new List<ulong>();

    // publish x2 -> stepdown -> publish x2 -> stepdown -> publish
    observedSeqs.Add((await cluster.PublishAsync("il.event", "pre-1")).Seq);
    observedSeqs.Add((await cluster.PublishAsync("il.event", "pre-2")).Seq);
    await cluster.StepDownStreamLeaderAsync("INTERLEAVE");
    observedSeqs.Add((await cluster.PublishAsync("il.event", "mid-1")).Seq);
    observedSeqs.Add((await cluster.PublishAsync("il.event", "mid-2")).Seq);
    await cluster.StepDownStreamLeaderAsync("INTERLEAVE");
    observedSeqs.Add((await cluster.PublishAsync("il.event", "post-1")).Seq);

    // Sequences may never repeat or go backwards across leader changes.
    for (var idx = 1; idx < observedSeqs.Count; idx++)
        observedSeqs[idx].ShouldBeGreaterThan(observedSeqs[idx - 1]);

    var state = await cluster.GetStreamStateAsync("INTERLEAVE");
    state.Messages.ShouldBe(5UL);
    state.LastSeq.ShouldBe(observedSeqs[^1]);
}

// Go ref: publish immediately after stepdown uses new leader
[Fact]
public async Task Publish_immediately_after_stepdown_routes_to_new_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("IMMPOST", ["ip.>"], replicas: 3);

    var firstAck = await cluster.PublishAsync("ip.event", "first");
    firstAck.Seq.ShouldBe(1UL);

    (await cluster.StepDownStreamLeaderAsync("IMMPOST")).Success.ShouldBeTrue();

    // The very next publish must land on the freshly elected leader.
    var secondAck = await cluster.PublishAsync("ip.event", "second");
    secondAck.Seq.ShouldBe(2UL);
    secondAck.Stream.ShouldBe("IMMPOST");
    secondAck.ErrorCode.ShouldBeNull();
}

// ---------------------------------------------------------------
// Consumer creation after stream leader failover
// ---------------------------------------------------------------

// Go ref: consumer created on new leader is functional
[Fact]
public async Task Consumer_created_after_stream_leader_failover_is_functional()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CPOSTFO", ["cpf.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("cpf.event", $"pre-{n}");

    (await cluster.StepDownStreamLeaderAsync("CPOSTFO")).Success.ShouldBeTrue();

    // Consumer is created against the new leader and must deliver
    // everything that was published before the failover.
    var created = await cluster.CreateConsumerAsync("CPOSTFO", "post_failover", filterSubject: "cpf.>");
    created.Error.ShouldBeNull();
    created.ConsumerInfo.ShouldNotBeNull();

    var delivered = await cluster.FetchAsync("CPOSTFO", "post_failover", 10);
    delivered.Messages.Count.ShouldBe(5);
}

// Go ref: consumer created before failover accessible after new messages and stepdown
[Fact]
public async Task Consumer_created_before_failover_still_delivers_new_messages_after_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CBEFORE", ["cbf.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CBEFORE", "pre_dur", filterSubject: "cbf.>");

    for (var n = 0; n < 3; n++)
        await cluster.PublishAsync("cbf.event", $"before-{n}");

    (await cluster.StepDownStreamLeaderAsync("CBEFORE")).Success.ShouldBeTrue();

    for (var n = 0; n < 3; n++)
        await cluster.PublishAsync("cbf.event", $"after-{n}");

    // The pre-existing durable sees both pre- and post-failover messages.
    var delivered = await cluster.FetchAsync("CBEFORE", "pre_dur", 10);
    delivered.Messages.Count.ShouldBe(6);
}

// ---------------------------------------------------------------
// Stream update after meta leader stepdown
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterLeaderStepdown — stream operations post meta stepdown
[Fact]
public async Task Stream_update_succeeds_after_meta_leader_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("UPDSD", ["upd.>"], replicas: 3);

    var stepdown = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
    stepdown.Success.ShouldBeTrue();

    // The new meta leader must apply the subject addition.
    var update = cluster.UpdateStream("UPDSD", ["upd.>", "extra.>"], replicas: 3);
    update.Error.ShouldBeNull();
    update.StreamInfo!.Config.Subjects.ShouldContain("extra.>");
}

// Go ref: create new stream after meta leader stepdown
[Fact]
public async Task Create_stream_after_meta_leader_stepdown_succeeds()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var stepdown = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
    stepdown.Success.ShouldBeTrue();

    var created = await cluster.CreateStreamAsync("POST_META_SD", ["pms.>"], replicas: 3);
    created.Error.ShouldBeNull();
    created.StreamInfo.ShouldNotBeNull();
    created.StreamInfo!.Config.Name.ShouldBe("POST_META_SD");
}

// ---------------------------------------------------------------
// Stream delete after leader failover
// ---------------------------------------------------------------

// Go ref: stream delete after failover returns success
[Fact]
public async Task Stream_delete_succeeds_after_stream_leader_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("DELFO", ["dfo.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("dfo.event", $"msg-{n}");

    (await cluster.StepDownStreamLeaderAsync("DELFO")).Success.ShouldBeTrue();

    // Deletion routed through the new leader must succeed.
    var deleted = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELFO", "{}");
    deleted.Success.ShouldBeTrue();
}

// Go ref: stream info reflects deletion after failover
[Fact]
public async Task Stream_info_returns_404_after_delete_following_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("DELFOI", ["dfoi.>"], replicas: 3);

    (await cluster.StepDownStreamLeaderAsync("DELFOI")).Success.ShouldBeTrue();
    (await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELFOI", "{}")).Success.ShouldBeTrue();

    // Info on a deleted stream is a 404, proving the delete propagated.
    var info = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}DELFOI", "{}");
    info.Error.ShouldNotBeNull();
    info.Error!.Code.ShouldBe(404);
}

// ---------------------------------------------------------------
// Stream info and state consistent after failover
// ---------------------------------------------------------------

// Go ref: stream info available through new leader
[Fact]
public async Task Stream_info_available_from_new_leader_after_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("INFOFO", ["ifo.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("ifo.event", $"msg-{n}");

    (await cluster.StepDownStreamLeaderAsync("INFOFO")).Success.ShouldBeTrue();

    // The new leader serves a complete, accurate info response.
    var info = await cluster.GetStreamInfoAsync("INFOFO");
    info.StreamInfo.ShouldNotBeNull();
    info.StreamInfo!.Config.Name.ShouldBe("INFOFO");
    info.StreamInfo.State.Messages.ShouldBe(5UL);
}

// Go ref: first/last sequence intact after failover
[Fact]
public async Task First_and_last_sequence_intact_after_stream_leader_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SEQFO", ["sfo.>"], replicas: 3);

    for (var n = 0; n < 7; n++)
        await cluster.PublishAsync("sfo.event", $"msg-{n}");

    (await cluster.StepDownStreamLeaderAsync("SEQFO")).Success.ShouldBeTrue();

    // Sequence bookkeeping must be untouched by the election.
    var state = await cluster.GetStreamStateAsync("SEQFO");
    state.FirstSeq.ShouldBe(1UL);
    state.LastSeq.ShouldBe(7UL);
    state.Messages.ShouldBe(7UL);
}

// ---------------------------------------------------------------
// Meta state survives stream leader failover
// ---------------------------------------------------------------

// Go ref: meta tracks streams even after stream leader stepdown
[Fact]
public async Task Meta_state_still_tracks_stream_after_stream_leader_failover()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("METATRK", ["mtk.>"], replicas: 3);

    (await cluster.StepDownStreamLeaderAsync("METATRK")).Success.ShouldBeTrue();

    // A stream-level election must not disturb the meta catalog.
    var meta = cluster.GetMetaState();
    meta.ShouldNotBeNull();
    meta!.Streams.ShouldContain("METATRK");
}

// Go ref: multiple streams tracked after mixed stepdowns
[Fact]
public async Task Meta_state_tracks_multiple_streams_across_mixed_stepdowns()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("MIX1", ["mix1.>"], replicas: 3);
    await cluster.CreateStreamAsync("MIX2", ["mix2.>"], replicas: 1);

    // One stream-level stepdown plus one meta-level stepdown.
    (await cluster.StepDownStreamLeaderAsync("MIX1")).Success.ShouldBeTrue();
    (await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();

    var meta = cluster.GetMetaState();
    meta!.Streams.ShouldContain("MIX1");
    meta.Streams.ShouldContain("MIX2");
}

// ---------------------------------------------------------------
// WaitOnStreamLeader after stepdown
// ---------------------------------------------------------------

// Go ref: waitOnStreamLeader resolves after stepdown
[Fact]
public async Task WaitOnStreamLeader_resolves_after_stream_leader_stepdown()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WAITSD", ["wsd.>"], replicas: 3);

    (await cluster.StepDownStreamLeaderAsync("WAITSD")).Success.ShouldBeTrue();

    // The wait helper must observe the replacement leader within the timeout.
    await cluster.WaitOnStreamLeaderAsync("WAITSD", timeoutMs: 2000);
    cluster.GetStreamLeaderId("WAITSD").ShouldNotBeNullOrWhiteSpace();
}

// ---------------------------------------------------------------
// Message delete survives leader transition
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterDeleteMsgAndRestart line 1785
[Fact]
public async Task Message_delete_survives_leader_transition()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("DELMSGFO", ["dmf.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("dmf.event", $"msg-{n}");

    // Remove sequence 3 through the message-delete API.
    var deleted = await cluster.RequestAsync(
        $"{JetStreamApiSubjects.StreamMessageDelete}DELMSGFO",
        """{"seq":3}""");
    deleted.Success.ShouldBeTrue();

    (await cluster.StepDownStreamLeaderAsync("DELMSGFO")).Success.ShouldBeTrue();

    // The deletion must be replicated: 4 messages remain after failover.
    (await cluster.GetStreamStateAsync("DELMSGFO")).Messages.ShouldBe(4UL);
}

// ---------------------------------------------------------------
// Multiple streams — stepdown on one does not affect the other
// ---------------------------------------------------------------

// Go ref: independent streams have independent leader groups
[Fact]
public async Task Stepdown_on_one_stream_does_not_affect_sibling_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SIBLING_A", ["siba.>"], replicas: 3);
    await cluster.CreateStreamAsync("SIBLING_B", ["sibb.>"], replicas: 3);

    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("siba.event", $"a-{n}");
    for (var n = 0; n < 5; n++)
        await cluster.PublishAsync("sibb.event", $"b-{n}");

    var siblingLeaderBefore = cluster.GetStreamLeaderId("SIBLING_B");

    (await cluster.StepDownStreamLeaderAsync("SIBLING_A")).Success.ShouldBeTrue();

    // B's group is independent of A's — leadership and data are unchanged.
    cluster.GetStreamLeaderId("SIBLING_B").ShouldBe(siblingLeaderBefore);
    (await cluster.GetStreamStateAsync("SIBLING_B")).Messages.ShouldBe(5UL);
}

}
|
||||
@@ -0,0 +1,589 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: meta-leader election (3-node and 5-node clusters), stream leader
|
||||
// selection (R1 and R3), consumer leader selection, leader ID non-empty checks,
|
||||
// meta stepdown producing new leader, stream stepdown producing new leader,
|
||||
// multiple stepdowns cycling through different leaders, leader ID consistency,
|
||||
// meta state reflecting correct cluster size and leadership version increments,
|
||||
// and meta state tracking all created streams.
|
||||
//
|
||||
// Go reference functions:
|
||||
// TestJetStreamClusterLeader (line 73)
|
||||
// TestJetStreamClusterStreamLeaderStepDown (line 4925)
|
||||
// TestJetStreamClusterLeaderStepdown (line 5464)
|
||||
// TestJetStreamClusterMultiReplicaStreams (line 299)
|
||||
// waitOnStreamLeader, waitOnConsumerLeader, c.leader in jetstream_helpers_test.go
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster leader election for the meta-cluster,
|
||||
/// streams, and consumers. Uses the unified JetStreamClusterFixture.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterLeaderElectionTests
|
||||
{
|
||||
// ---------------------------------------------------------------
// Go: TestJetStreamClusterLeader line 73 — meta leader election
// ---------------------------------------------------------------

// Go ref: c.leader() in jetstream_helpers_test.go
[Fact]
public async Task Three_node_cluster_elects_nonempty_meta_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // Startup must have completed a meta election.
    cluster.GetMetaLeaderId().ShouldNotBeNullOrWhiteSpace();
}

// Go ref: c.leader() in jetstream_helpers_test.go
[Fact]
public async Task Five_node_cluster_elects_nonempty_meta_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    cluster.GetMetaLeaderId().ShouldNotBeNullOrWhiteSpace();
}

// Go ref: checkClusterFormed — meta cluster size is equal to node count
[Fact]
public async Task Three_node_cluster_meta_state_reports_correct_size()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var metaState = cluster.GetMetaState();

    metaState.ShouldNotBeNull();
    metaState!.ClusterSize.ShouldBe(3);
}

// Go ref: checkClusterFormed — meta cluster size is equal to node count
[Fact]
public async Task Five_node_cluster_meta_state_reports_correct_size()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);

    var metaState = cluster.GetMetaState();

    metaState.ShouldNotBeNull();
    metaState!.ClusterSize.ShouldBe(5);
}

// Go ref: TestJetStreamClusterLeader — initial leadership version is 1
[Fact]
public async Task Three_node_cluster_initial_leadership_version_is_one()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // The very first election after startup produces version 1.
    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(1);
}

// ---------------------------------------------------------------
// Stream leader selection — R1
// ---------------------------------------------------------------

// Go ref: streamLeader in jetstream_helpers_test.go
[Fact]
public async Task R1_stream_has_nonempty_leader_after_creation()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("R1ELECT", ["r1e.>"], replicas: 1);

    // Even a single-replica stream gets a designated leader.
    cluster.GetStreamLeaderId("R1ELECT").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: streamLeader in jetstream_helpers_test.go
[Fact]
public async Task R3_stream_has_nonempty_leader_after_creation_in_3_node_cluster()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("R3ELECT", ["r3e.>"], replicas: 3);

    cluster.GetStreamLeaderId("R3ELECT").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: streamLeader in jetstream_helpers_test.go
[Fact]
public async Task R3_stream_has_nonempty_leader_after_creation_in_5_node_cluster()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(5);
    await cluster.CreateStreamAsync("R3E5", ["r3e5.>"], replicas: 3);

    cluster.GetStreamLeaderId("R3E5").ShouldNotBeNullOrWhiteSpace();
}

// ---------------------------------------------------------------
// Go: waitOnStreamLeader in jetstream_helpers_test.go
// ---------------------------------------------------------------

[Fact]
public async Task WaitOnStreamLeader_completes_immediately_when_stream_already_has_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WAITLDR", ["wl.>"], replicas: 3);

    // Leader already exists, so the wait must return without timing out.
    await cluster.WaitOnStreamLeaderAsync("WAITLDR", timeoutMs: 2000);

    cluster.GetStreamLeaderId("WAITLDR").ShouldNotBeNullOrWhiteSpace();
}

[Fact]
public async Task WaitOnStreamLeader_throws_timeout_for_nonexistent_stream()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // An unknown stream never gets a leader; the wait must time out
    // and name the stream in its exception message.
    var ex = await Should.ThrowAsync<TimeoutException>(
        () => cluster.WaitOnStreamLeaderAsync("GHOST", timeoutMs: 100));

    ex.Message.ShouldContain("GHOST");
}

// ---------------------------------------------------------------
// Consumer leader selection
// ---------------------------------------------------------------

// Go ref: consumerLeader in jetstream_helpers_test.go
[Fact]
public async Task Durable_consumer_on_R3_stream_has_nonempty_leader_id()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CLELECT", ["cle.>"], replicas: 3);
    await cluster.CreateConsumerAsync("CLELECT", "dlc");

    // Durable consumers on replicated streams get their own leader.
    cluster.GetConsumerLeaderId("CLELECT", "dlc").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: consumerLeader in jetstream_helpers_test.go
[Fact]
public async Task Durable_consumer_on_R1_stream_has_nonempty_leader_id()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CLELECTR1", ["cler1.>"], replicas: 1);
    await cluster.CreateConsumerAsync("CLELECTR1", "consumer1");

    cluster.GetConsumerLeaderId("CLELECTR1", "consumer1").ShouldNotBeNullOrWhiteSpace();
}

// Go ref: waitOnConsumerLeader in jetstream_helpers_test.go
[Fact]
public async Task WaitOnConsumerLeader_completes_when_consumer_exists()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WCLE", ["wcle.>"], replicas: 3);
    await cluster.CreateConsumerAsync("WCLE", "dur1");

    // Consumer leader exists, so the wait completes within the timeout.
    await cluster.WaitOnConsumerLeaderAsync("WCLE", "dur1", timeoutMs: 2000);

    cluster.GetConsumerLeaderId("WCLE", "dur1").ShouldNotBeNullOrWhiteSpace();
}

[Fact]
public async Task WaitOnConsumerLeader_throws_timeout_when_consumer_missing()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("WCLETOUT", ["wclet.>"], replicas: 3);

    // A consumer that was never created must surface as a timeout
    // naming the missing durable.
    var ex = await Should.ThrowAsync<TimeoutException>(
        () => cluster.WaitOnConsumerLeaderAsync("WCLETOUT", "ghost-consumer", timeoutMs: 100));

    ex.Message.ShouldContain("ghost-consumer");
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterLeaderStepdown line 5464 — meta leader stepdown
// ---------------------------------------------------------------

// Go ref: c.leader().Shutdown() + waitOnLeader in jetstream_helpers_test.go
[Fact]
public async Task Meta_leader_stepdown_produces_different_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    var previous = cluster.GetMetaLeaderId();

    cluster.StepDownMetaLeader();

    // A different, valid node must hold the meta leadership now.
    var current = cluster.GetMetaLeaderId();
    current.ShouldNotBe(previous);
    current.ShouldNotBeNullOrWhiteSpace();
}

// Go ref: meta stepdown via API subject $JS.API.META.LEADER.STEPDOWN
[Fact]
public async Task Meta_leader_stepdown_via_api_returns_success()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    var response = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");

    response.Success.ShouldBeTrue();
}

// Go ref: meta step-down increments leadership version
[Fact]
public async Task Meta_leader_stepdown_increments_leadership_version()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    var versionBefore = cluster.GetMetaState()!.LeadershipVersion;

    cluster.StepDownMetaLeader();

    // Each completed election bumps the version by exactly one.
    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(versionBefore + 1);
}

// Go ref: multiple meta step-downs each increment the version
[Fact]
public async Task Multiple_meta_stepdowns_increment_leadership_version_sequentially()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);

    // Version starts at 1; three stepdowns land it on 4.
    cluster.StepDownMetaLeader();
    cluster.StepDownMetaLeader();
    cluster.StepDownMetaLeader();

    cluster.GetMetaState()!.LeadershipVersion.ShouldBe(4);
}

// ---------------------------------------------------------------
// Go: TestJetStreamClusterStreamLeaderStepDown line 4925 — stream leader stepdown
// ---------------------------------------------------------------

// Go ref: JSApiStreamLeaderStepDownT in jetstream_helpers_test.go
[Fact]
public async Task Stream_leader_stepdown_produces_different_leader()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SLEADSD", ["sls.>"], replicas: 3);
    var previous = cluster.GetStreamLeaderId("SLEADSD");

    var stepDown = await cluster.StepDownStreamLeaderAsync("SLEADSD");

    stepDown.Success.ShouldBeTrue();
    // A different, valid node must now lead the stream's group.
    var current = cluster.GetStreamLeaderId("SLEADSD");
    current.ShouldNotBe(previous);
    current.ShouldNotBeNullOrWhiteSpace();
}

// Go ref: TestJetStreamClusterStreamLeaderStepDown — new leader still accepts writes
[Fact]
public async Task Stream_leader_stepdown_new_leader_accepts_writes()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("SDWRITE", ["sdw.>"], replicas: 3);
    await cluster.PublishAsync("sdw.pre", "before");

    await cluster.StepDownStreamLeaderAsync("SDWRITE");

    // The replacement leader must ack the next publish cleanly.
    var ack = await cluster.PublishAsync("sdw.post", "after");
    ack.Stream.ShouldBe("SDWRITE");
    ack.ErrorCode.ShouldBeNull();
}

// ---------------------------------------------------------------
// Multiple stepdowns cycle through different leaders
// ---------------------------------------------------------------

// Go ref: TestJetStreamClusterLeader line 73 — consecutive elections
[Fact]
public async Task Two_consecutive_stream_stepdowns_cycle_through_different_leaders()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    await cluster.CreateStreamAsync("CYCLE2", ["cy2.>"], replicas: 3);

    var initialLeader = cluster.GetStreamLeaderId("CYCLE2");
    (await cluster.StepDownStreamLeaderAsync("CYCLE2")).Success.ShouldBeTrue();
    var afterFirst = cluster.GetStreamLeaderId("CYCLE2");
    (await cluster.StepDownStreamLeaderAsync("CYCLE2")).Success.ShouldBeTrue();
    var afterSecond = cluster.GetStreamLeaderId("CYCLE2");

    // Each election must hand leadership to somebody else.
    afterFirst.ShouldNotBe(initialLeader);
    afterSecond.ShouldNotBe(afterFirst);
}

// Go ref: multiple stepdowns in sequence — each produces a distinct leader
[Fact]
public async Task Three_consecutive_meta_stepdowns_cycle_through_distinct_leaders()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    var observedLeaders = new HashSet<string>();

    // Record the leader, then force an election — three times.
    for (var round = 0; round < 3; round++)
    {
        observedLeaders.Add(cluster.GetMetaLeaderId());
        cluster.StepDownMetaLeader();
    }

    // With 3 nodes cycling round-robin we see at least 2 unique leaders.
    observedLeaders.Count.ShouldBeGreaterThanOrEqualTo(2);
}

// Go ref: TestJetStreamClusterLeader — wraps around after exhausting peers
[Fact]
public async Task Meta_stepdowns_wrap_around_producing_only_node_count_unique_leaders()
{
    await using var cluster = await JetStreamClusterFixture.StartAsync(3);
    var observedLeaders = new HashSet<string>();

    for (var round = 0; round < 9; round++)
    {
        observedLeaders.Add(cluster.GetMetaLeaderId());
        cluster.StepDownMetaLeader();
    }

    // A 3-node cluster cycles through exactly 3 unique leader IDs.
    observedLeaders.Count.ShouldBe(3);
}

// ---------------------------------------------------------------
|
||||
// Leader ID consistency
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: streamLeader queried multiple times returns same stable ID
|
||||
[Fact]
|
||||
public async Task Stream_leader_id_is_stable_across_repeated_queries_without_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("STABLE", ["stb.>"], replicas: 3);
|
||||
|
||||
var ids = Enumerable.Range(0, 5)
|
||||
.Select(_ => cluster.GetStreamLeaderId("STABLE"))
|
||||
.ToList();
|
||||
|
||||
ids.Distinct().Count().ShouldBe(1);
|
||||
ids[0].ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
|
||||
// Go ref: meta leader queried multiple times is stable between stepdowns
|
||||
[Fact]
|
||||
public async Task Meta_leader_id_is_stable_between_stepdowns()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var a = cluster.GetMetaLeaderId();
|
||||
var b = cluster.GetMetaLeaderId();
|
||||
a.ShouldBe(b);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var c = cluster.GetMetaLeaderId();
|
||||
var d = cluster.GetMetaLeaderId();
|
||||
c.ShouldBe(d);
|
||||
|
||||
c.ShouldNotBe(a);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Meta state reflecting all created streams
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: getMetaState in tests — streams tracked in meta state
|
||||
[Fact]
|
||||
public async Task Meta_state_tracks_single_created_stream()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("MTRACK1", ["mt1.>"], replicas: 3);
|
||||
|
||||
var state = cluster.GetMetaState();
|
||||
|
||||
state.ShouldNotBeNull();
|
||||
state!.Streams.ShouldContain("MTRACK1");
|
||||
}
|
||||
|
||||
// Go ref: getMetaState tracks multiple streams
|
||||
[Fact]
|
||||
public async Task Meta_state_tracks_all_created_streams()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("MTRK_A", ["mta.>"], replicas: 3);
|
||||
await cluster.CreateStreamAsync("MTRK_B", ["mtb.>"], replicas: 3);
|
||||
await cluster.CreateStreamAsync("MTRK_C", ["mtc.>"], replicas: 1);
|
||||
|
||||
var state = cluster.GetMetaState();
|
||||
|
||||
state!.Streams.ShouldContain("MTRK_A");
|
||||
state.Streams.ShouldContain("MTRK_B");
|
||||
state.Streams.ShouldContain("MTRK_C");
|
||||
state.Streams.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// Go ref: meta state survives a stepdown
|
||||
[Fact]
|
||||
public async Task Meta_state_streams_survive_meta_leader_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("SURVSD1", ["ss1.>"], replicas: 3);
|
||||
await cluster.CreateStreamAsync("SURVSD2", ["ss2.>"], replicas: 3);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
|
||||
var state = cluster.GetMetaState();
|
||||
state!.Streams.ShouldContain("SURVSD1");
|
||||
state.Streams.ShouldContain("SURVSD2");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown — data survives leader election
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: TestJetStreamClusterStreamLeaderStepDown line 4925 — all messages preserved
|
||||
[Fact]
|
||||
public async Task Messages_survive_stream_leader_election()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("ELECT_DATA", ["ed.>"], replicas: 3);
|
||||
|
||||
for (var i = 0; i < 10; i++)
|
||||
await cluster.PublishAsync("ed.event", $"msg-{i}");
|
||||
|
||||
await cluster.StepDownStreamLeaderAsync("ELECT_DATA");
|
||||
|
||||
var state = await cluster.GetStreamStateAsync("ELECT_DATA");
|
||||
state.Messages.ShouldBe(10UL);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Replica group structure after election
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: replica group has correct node count
|
||||
[Fact]
|
||||
public async Task R3_stream_replica_group_has_three_nodes()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("RG3", ["rg3.>"], replicas: 3);
|
||||
|
||||
var group = cluster.GetReplicaGroup("RG3");
|
||||
|
||||
group.ShouldNotBeNull();
|
||||
group!.Nodes.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// Go ref: replica group leader is marked as leader
|
||||
[Fact]
|
||||
public async Task R3_stream_replica_group_leader_is_marked_as_leader()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("RGLDR", ["rgl.>"], replicas: 3);
|
||||
|
||||
var group = cluster.GetReplicaGroup("RGLDR");
|
||||
|
||||
group.ShouldNotBeNull();
|
||||
group!.Leader.IsLeader.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// Go ref: replica group for unknown stream is null
|
||||
[Fact]
|
||||
public async Task Replica_group_for_unknown_stream_is_null()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var group = cluster.GetReplicaGroup("NONEXISTENT");
|
||||
|
||||
group.ShouldBeNull();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Leadership version increments on each stepdown
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: leadership version tracks stepdown count
|
||||
[Fact]
|
||||
public async Task Leadership_version_increments_on_each_meta_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
cluster.GetMetaState()!.LeadershipVersion.ShouldBe(1);
|
||||
cluster.StepDownMetaLeader();
|
||||
cluster.GetMetaState()!.LeadershipVersion.ShouldBe(2);
|
||||
cluster.StepDownMetaLeader();
|
||||
cluster.GetMetaState()!.LeadershipVersion.ShouldBe(3);
|
||||
cluster.StepDownMetaLeader();
|
||||
cluster.GetMetaState()!.LeadershipVersion.ShouldBe(4);
|
||||
}
|
||||
|
||||
// Go ref: meta leader stepdown via API also increments version
|
||||
[Fact]
|
||||
public async Task Meta_leader_stepdown_via_api_increments_leadership_version()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("VERSIONAPI", ["va.>"], replicas: 3);
|
||||
var vBefore = cluster.GetMetaState()!.LeadershipVersion;
|
||||
|
||||
(await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}")).Success.ShouldBeTrue();
|
||||
|
||||
cluster.GetMetaState()!.LeadershipVersion.ShouldBe(vBefore + 1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer leader ID is consistent with stream
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: consumerLeader — consumer leader ID includes consumer name
|
||||
[Fact]
|
||||
public async Task Consumer_leader_ids_are_distinct_for_different_consumers_on_same_stream()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("MULTICONS", ["mc.>"], replicas: 3);
|
||||
await cluster.CreateConsumerAsync("MULTICONS", "consA");
|
||||
await cluster.CreateConsumerAsync("MULTICONS", "consB");
|
||||
|
||||
var leaderA = cluster.GetConsumerLeaderId("MULTICONS", "consA");
|
||||
var leaderB = cluster.GetConsumerLeaderId("MULTICONS", "consB");
|
||||
|
||||
leaderA.ShouldNotBeNullOrWhiteSpace();
|
||||
leaderB.ShouldNotBeNullOrWhiteSpace();
|
||||
leaderA.ShouldNotBe(leaderB);
|
||||
}
|
||||
|
||||
// Go ref: consumer leader ID for unknown stream returns empty
|
||||
[Fact]
|
||||
public async Task Consumer_leader_id_for_unknown_stream_is_empty()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var leader = cluster.GetConsumerLeaderId("NO_SUCH_STREAM", "no_consumer");
|
||||
|
||||
leader.ShouldBeNullOrEmpty();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Node lifecycle helpers do not affect stream state
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
// Go ref: shutdownServerAndRemoveStorage + restartServerAndWait
|
||||
[Fact]
|
||||
public async Task RemoveNode_and_restart_does_not_affect_stream_leader()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
await cluster.CreateStreamAsync("LIFECYCLE", ["lc.>"], replicas: 3);
|
||||
var leaderBefore = cluster.GetStreamLeaderId("LIFECYCLE");
|
||||
|
||||
cluster.RemoveNode(2);
|
||||
cluster.SimulateNodeRestart(2);
|
||||
|
||||
var leaderAfter = cluster.GetStreamLeaderId("LIFECYCLE");
|
||||
leaderBefore.ShouldNotBeNullOrWhiteSpace();
|
||||
leaderAfter.ShouldNotBeNullOrWhiteSpace();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,503 @@
|
||||
// Go ref: TestJetStreamClusterXxx — jetstream_cluster_long_test.go
|
||||
// Covers: high-volume publish/consume cycles, many sequential fetches, many consumers,
|
||||
// many streams, repeated publish-ack-fetch cycles, stepdowns during publishing,
|
||||
// alternating publish+stepdown, create-publish-delete sequences, ack tracking across
|
||||
// failovers, batch-1 iteration, mixed multi-stream operations, rapid meta stepdowns,
|
||||
// large R1 message volumes, max-messages stream limits, consumer pending correctness.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Long-running JetStream cluster tests covering high-volume scenarios,
|
||||
/// repeated failover cycles, many-stream/many-consumer environments, and
|
||||
/// limit enforcement under sustained load.
|
||||
/// Ported from Go jetstream_cluster_long_test.go.
|
||||
/// All tests are marked [Trait("Category", "LongRunning")].
|
||||
/// </summary>
|
||||
public class JsClusterLongRunningTests
{
    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLong5000MessagesR3 — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Five_thousand_messages_in_R3_stream_maintain_consistency()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("LONG5K", ["long5k.>"], replicas: 3);

        // Every publish must be acked cleanly with the next sequence number.
        for (var n = 0; n < 5000; n++)
        {
            var ack = await cluster.PublishAsync("long5k.data", $"msg-{n}");
            ack.ErrorCode.ShouldBeNull();
            ack.Seq.ShouldBe((ulong)(n + 1));
        }

        var state = await cluster.GetStreamStateAsync("LONG5K");
        state.Messages.ShouldBe(5000UL);
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(5000UL);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLong100SequentialFetchesOf50 — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task One_hundred_sequential_fetches_of_fifty_messages_each()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("SEQFETCH", ["sf.>"], replicas: 3);
        await cluster.CreateConsumerAsync("SEQFETCH", "batcher", filterSubject: "sf.>");

        // Seed the stream with 5000 messages up front.
        for (var n = 0; n < 5000; n++)
            await cluster.PublishAsync("sf.event", $"msg-{n}");

        var delivered = 0;
        for (var round = 0; round < 100; round++)
        {
            var fetched = await cluster.FetchAsync("SEQFETCH", "batcher", 50);
            fetched.Messages.Count.ShouldBe(50);
            delivered += fetched.Messages.Count;

            // Sequences inside each batch must be contiguous — no gaps.
            for (var k = 1; k < fetched.Messages.Count; k++)
                fetched.Messages[k].Sequence.ShouldBe(fetched.Messages[k - 1].Sequence + 1);
        }

        delivered.ShouldBe(5000);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLong50ConsumersOnSameStream — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Fifty_consumers_on_same_stream_all_see_all_messages()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("FIFTYCONSUMERS", ["fc.>"], replicas: 3);

        for (var n = 0; n < 100; n++)
            await cluster.PublishAsync("fc.event", $"msg-{n}");

        for (var idx = 0; idx < 50; idx++)
            await cluster.CreateConsumerAsync("FIFTYCONSUMERS", $"cons{idx}", filterSubject: "fc.>");

        // Consumers keep independent cursors: each one drains the full stream.
        for (var idx = 0; idx < 50; idx++)
        {
            var fetched = await cluster.FetchAsync("FIFTYCONSUMERS", $"cons{idx}", 100);
            fetched.Messages.Count.ShouldBe(100);
        }
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLong20StreamsIn5NodeCluster — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Twenty_streams_in_five_node_cluster_are_independent()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(5);

        for (var s = 0; s < 20; s++)
            await cluster.CreateStreamAsync($"IND{s}", [$"ind{s}.>"], replicas: 3);

        for (var s = 0; s < 20; s++)
            for (var m = 0; m < 10; m++)
                await cluster.PublishAsync($"ind{s}.event", $"stream{s}-msg{m}");

        // No cross-talk: each stream holds exactly its own ten messages.
        for (var s = 0; s < 20; s++)
        {
            var state = await cluster.GetStreamStateAsync($"IND{s}");
            state.Messages.ShouldBe(10UL);
        }

        var accountInfo = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
        accountInfo.AccountInfo!.Streams.ShouldBe(20);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongPublishAckFetchCycle100Times — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Publish_ack_fetch_cycle_repeated_100_times()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("PAFCYCLE", ["paf.>"], replicas: 3);
        await cluster.CreateConsumerAsync("PAFCYCLE", "cycler", filterSubject: "paf.>",
            ackPolicy: AckPolicy.All);

        for (var round = 0; round < 100; round++)
        {
            // One message in...
            var ack = await cluster.PublishAsync("paf.event", $"cycle-{round}");
            ack.ErrorCode.ShouldBeNull();

            // ...one message out, matching the publish sequence...
            var fetched = await cluster.FetchAsync("PAFCYCLE", "cycler", 1);
            fetched.Messages.Count.ShouldBe(1);
            fetched.Messages[0].Sequence.ShouldBe(ack.Seq);

            // ...then acknowledged before the next round.
            cluster.AckAll("PAFCYCLE", "cycler", ack.Seq);
        }

        var finalState = await cluster.GetStreamStateAsync("PAFCYCLE");
        finalState.Messages.ShouldBe(100UL);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLong10StepdownsDuringPublish — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Ten_stepdowns_during_continuous_publish_preserve_all_messages()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("STEPDURINGPUB", ["sdp.>"], replicas: 3);

        // Ten rounds of (50 publishes + forced election) = 500 messages, 10 stepdowns.
        var published = 0;
        for (var round = 0; round < 10; round++)
        {
            for (var m = 0; m < 50; m++)
            {
                var ack = await cluster.PublishAsync("sdp.event", $"batch{round}-msg{m}");
                ack.ErrorCode.ShouldBeNull();
                published++;
            }

            (await cluster.StepDownStreamLeaderAsync("STEPDURINGPUB")).Success.ShouldBeTrue();
        }

        var state = await cluster.GetStreamStateAsync("STEPDURINGPUB");
        state.Messages.ShouldBe((ulong)published);
        state.LastSeq.ShouldBe((ulong)published);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongAlternatingPublishAndStepdown — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Alternating_publish_and_stepdown_20_iterations_preserves_monotonic_sequence()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("ALTPUBSD", ["aps.>"], replicas: 3);

        var ackSequences = new List<ulong>();
        for (var round = 0; round < 20; round++)
        {
            var ack = await cluster.PublishAsync("aps.event", $"iter-{round}");
            ack.ErrorCode.ShouldBeNull();
            ackSequences.Add(ack.Seq);

            (await cluster.StepDownStreamLeaderAsync("ALTPUBSD")).Success.ShouldBeTrue();
        }

        // Elections must never reuse or roll back sequence numbers.
        for (var k = 1; k < ackSequences.Count; k++)
            ackSequences[k].ShouldBeGreaterThan(ackSequences[k - 1]);

        ackSequences[^1].ShouldBe(20UL);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongCreatePublishDelete20Streams — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Create_publish_delete_20_streams_sequentially()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        for (var s = 0; s < 20; s++)
        {
            var name = $"SEQ{s}";

            var created = await cluster.CreateStreamAsync(name, [$"seq{s}.>"], replicas: 3);
            created.Error.ShouldBeNull();

            for (var m = 0; m < 10; m++)
                await cluster.PublishAsync($"seq{s}.event", $"msg-{m}");

            var state = await cluster.GetStreamStateAsync(name);
            state.Messages.ShouldBe(10UL);

            var deleted = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}{name}", "{}");
            deleted.Success.ShouldBeTrue();
        }

        // Nothing should be left behind after the last delete.
        var accountInfo = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
        accountInfo.AccountInfo!.Streams.ShouldBe(0);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongConsumerAckAfter10Failovers — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Consumer_ack_tracking_correct_after_ten_leader_failovers()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("ACKFAIL", ["af.>"], replicas: 3);
        await cluster.CreateConsumerAsync("ACKFAIL", "tracker", filterSubject: "af.>",
            ackPolicy: AckPolicy.All);

        for (var n = 0; n < 100; n++)
            await cluster.PublishAsync("af.event", $"msg-{n}");

        // Drain ten messages per failover; the ack floor must advance through all 100.
        var ackFloor = 0UL;
        for (var round = 0; round < 10; round++)
        {
            var fetched = await cluster.FetchAsync("ACKFAIL", "tracker", 10);
            fetched.Messages.Count.ShouldBe(10);

            ackFloor = fetched.Messages[^1].Sequence;
            cluster.AckAll("ACKFAIL", "tracker", ackFloor);

            (await cluster.StepDownStreamLeaderAsync("ACKFAIL")).Success.ShouldBeTrue();
        }

        ackFloor.ShouldBe(100UL);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongFetchBatch1Iterated500Times — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Fetch_with_batch_1_iterated_500_times_reads_all_messages()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("BATCH1ITER", ["b1i.>"], replicas: 3);
        await cluster.CreateConsumerAsync("BATCH1ITER", "one_at_a_time", filterSubject: "b1i.>");

        for (var n = 0; n < 500; n++)
            await cluster.PublishAsync("b1i.event", $"msg-{n}");

        var seen = new List<ulong>();
        for (var n = 0; n < 500; n++)
        {
            var fetched = await cluster.FetchAsync("BATCH1ITER", "one_at_a_time", 1);
            fetched.Messages.Count.ShouldBe(1);
            seen.Add(fetched.Messages[0].Sequence);
        }

        // Single-message fetches still cover the whole stream, strictly in order.
        seen.Count.ShouldBe(500);
        for (var k = 1; k < seen.Count; k++)
            seen[k].ShouldBeGreaterThan(seen[k - 1]);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongMixedMultiStreamOps — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Mixed_ops_five_streams_100_messages_each_consumers_fetch_all()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        // Five streams, 100 messages each, one consumer per stream.
        for (var s = 0; s < 5; s++)
            await cluster.CreateStreamAsync($"MIXED{s}", [$"mixed{s}.>"], replicas: 3);

        for (var s = 0; s < 5; s++)
            for (var m = 0; m < 100; m++)
                await cluster.PublishAsync($"mixed{s}.event", $"stream{s}-msg{m}");

        for (var s = 0; s < 5; s++)
            await cluster.CreateConsumerAsync($"MIXED{s}", $"reader{s}", filterSubject: $"mixed{s}.>");

        // Each consumer drains its own stream end to end.
        for (var s = 0; s < 5; s++)
        {
            var fetched = await cluster.FetchAsync($"MIXED{s}", $"reader{s}", 100);
            fetched.Messages.Count.ShouldBe(100);
            fetched.Messages[0].Sequence.ShouldBe(1UL);
            fetched.Messages[^1].Sequence.ShouldBe(100UL);
        }

        var info = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
        info.AccountInfo!.Streams.ShouldBe(5);
        info.AccountInfo.Consumers.ShouldBe(5);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongRapidMetaStepdowns — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Rapid_meta_stepdowns_20_times_all_streams_remain_accessible()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        // Streams exist before any meta churn begins.
        for (var s = 0; s < 5; s++)
            await cluster.CreateStreamAsync($"RAPID{s}", [$"rapid{s}.>"], replicas: 3);

        var versions = new List<long> { cluster.GetMetaState()!.LeadershipVersion };

        for (var round = 0; round < 20; round++)
        {
            cluster.StepDownMetaLeader();
            versions.Add(cluster.GetMetaState()!.LeadershipVersion);
        }

        // Leadership version never repeats and never regresses.
        for (var k = 1; k < versions.Count; k++)
            versions[k].ShouldBeGreaterThan(versions[k - 1]);

        // The stream catalog is untouched by meta churn.
        var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames!.Count.ShouldBe(5);
        for (var s = 0; s < 5; s++)
            names.StreamNames.ShouldContain($"RAPID{s}");
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLong10000MessagesR1 — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Ten_thousand_small_messages_in_R1_stream()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("R1HUGE", ["r1h.>"], replicas: 1);

        for (var n = 0; n < 10000; n++)
        {
            var ack = await cluster.PublishAsync("r1h.event", $"x{n}");
            ack.ErrorCode.ShouldBeNull();
        }

        var state = await cluster.GetStreamStateAsync("R1HUGE");
        state.Messages.ShouldBe(10000UL);
        state.LastSeq.ShouldBe(10000UL);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongMaxMessagesLimit — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Stream_with_max_messages_100_has_exactly_100_after_1000_publishes()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        cluster.CreateStreamDirect(new StreamConfig
        {
            Name = "MAXLIMIT",
            Subjects = ["ml.>"],
            Replicas = 3,
            MaxMsgs = 100,
        });

        for (var n = 0; n < 1000; n++)
            await cluster.PublishAsync("ml.event", $"msg-{n}");

        // MaxMsgs=100: only the latest 100 messages retained (old ones discarded)
        var state = await cluster.GetStreamStateAsync("MAXLIMIT");
        state.Messages.ShouldBeLessThanOrEqualTo(100UL);
        state.Messages.ShouldBeGreaterThan(0UL);
    }

    // ---------------------------------------------------------------
    // Go ref: TestJetStreamClusterLongConsumerPendingWithMaxMessages — jetstream_cluster_long_test.go
    // ---------------------------------------------------------------

    [Fact]
    [Trait("Category", "LongRunning")]
    public async Task Consumer_on_max_messages_stream_tracks_correct_pending()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        cluster.CreateStreamDirect(new StreamConfig
        {
            Name = "MAXPEND",
            Subjects = ["mp.>"],
            Replicas = 3,
            MaxMsgs = 50,
        });

        // 200 publishes against a 50-message cap: at least 150 get evicted.
        for (var n = 0; n < 200; n++)
            await cluster.PublishAsync("mp.event", $"msg-{n}");

        var state = await cluster.GetStreamStateAsync("MAXPEND");
        state.Messages.ShouldBeLessThanOrEqualTo(50UL);

        // A consumer created after the publishes starts at the current first
        // sequence, so it can only observe what the stream still retains.
        await cluster.CreateConsumerAsync("MAXPEND", "latecons", filterSubject: "mp.>",
            ackPolicy: AckPolicy.None);

        var fetched = await cluster.FetchAsync("MAXPEND", "latecons", 100);
        ((ulong)fetched.Messages.Count).ShouldBeLessThanOrEqualTo(state.Messages);
    }
}
|
||||
@@ -0,0 +1,839 @@
|
||||
// Go ref: TestJetStreamClusterMeta* — jetstream_cluster_3_test.go
|
||||
// Covers: meta-cluster peer count & state, API routing from any node,
|
||||
// meta leader operations, account limit governance, stream governance.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests covering JetStream cluster meta-cluster governance: meta peer count,
/// meta state, API routing from any node, leader stepdown, account limits,
/// and stream governance in cluster mode.
/// Ported from Go jetstream_cluster_3_test.go.
/// </summary>
public class JsClusterMetaGovernanceTests
{
    // ---------------------------------------------------------------
    // Meta-cluster peer count & state
    // ---------------------------------------------------------------

    // Go ref: TestJetStreamClusterBasics — jetstream_cluster_3_test.go
    [Fact]
    public async Task Three_node_cluster_reports_ClusterSize_3()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var meta = cluster.GetMetaState();
        meta.ShouldNotBeNull();
        meta!.ClusterSize.ShouldBe(3);
    }

    [Fact]
    public async Task Five_node_cluster_reports_ClusterSize_5()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(5);
        var meta = cluster.GetMetaState();
        meta.ShouldNotBeNull();
        meta!.ClusterSize.ShouldBe(5);
    }

    [Fact]
    public async Task Seven_node_cluster_reports_ClusterSize_7()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(7);
        var meta = cluster.GetMetaState();
        meta.ShouldNotBeNull();
        meta!.ClusterSize.ShouldBe(7);
    }

    [Fact]
    public async Task Meta_state_has_non_empty_leader_id()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var meta = cluster.GetMetaState();
        meta.ShouldNotBeNull();
        meta!.LeaderId.ShouldNotBeNullOrEmpty();
    }

    [Fact]
    public async Task Meta_leadership_version_starts_at_1()
    {
        // A freshly started cluster has elected exactly one meta leader.
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var meta = cluster.GetMetaState();
        meta.ShouldNotBeNull();
        meta!.LeadershipVersion.ShouldBe(1L);
    }

    [Fact]
    public async Task Leadership_version_increments_on_stepdown()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var meta1 = cluster.GetMetaState();
        meta1!.LeadershipVersion.ShouldBe(1L);

        cluster.StepDownMetaLeader();

        // Each successful stepdown triggers a new election → version + 1.
        var meta2 = cluster.GetMetaState();
        meta2!.LeadershipVersion.ShouldBe(2L);
    }

    [Fact]
    public async Task Multiple_stepdowns_increment_version_correctly()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        for (var i = 0; i < 5; i++)
            cluster.StepDownMetaLeader();

        // Initial election (1) + five stepdowns = 6.
        var meta = cluster.GetMetaState();
        meta!.LeadershipVersion.ShouldBe(6L);
    }

    [Fact]
    public async Task Meta_state_streams_list_is_empty_initially()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var meta = cluster.GetMetaState();
        meta.ShouldNotBeNull();
        meta!.Streams.Count.ShouldBe(0);
    }

    [Fact]
    public async Task Meta_state_streams_list_grows_with_stream_creation()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        await cluster.CreateStreamAsync("GROW1", ["grow1.>"], 1);
        await cluster.CreateStreamAsync("GROW2", ["grow2.>"], 1);

        var meta = cluster.GetMetaState();
        meta!.Streams.Count.ShouldBe(2);
        meta.Streams.ShouldContain("GROW1");
        meta.Streams.ShouldContain("GROW2");
    }

    [Fact]
    public async Task Meta_state_streams_list_is_ordered_alphabetically()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        // Created deliberately out of alphabetical order.
        await cluster.CreateStreamAsync("ZSTREAM", ["zs.>"], 1);
        await cluster.CreateStreamAsync("ASTREAM", ["as.>"], 1);
        await cluster.CreateStreamAsync("MSTREAM", ["ms.>"], 1);

        var meta = cluster.GetMetaState();
        var streams = meta!.Streams.ToList();
        streams.Count.ShouldBe(3);
        streams[0].ShouldBe("ASTREAM");
        streams[1].ShouldBe("MSTREAM");
        streams[2].ShouldBe("ZSTREAM");
    }

    [Fact]
    public async Task Meta_state_after_10_stream_creations_tracks_all()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        for (var i = 0; i < 10; i++)
            await cluster.CreateStreamAsync($"BULK{i:D2}", [$"bulk{i:D2}.>"], 1);

        var meta = cluster.GetMetaState();
        meta!.Streams.Count.ShouldBe(10);
        for (var i = 0; i < 10; i++)
            meta.Streams.ShouldContain($"BULK{i:D2}");
    }

    // ---------------------------------------------------------------
    // API routing from any node
    // ---------------------------------------------------------------

    // Go ref: TestJetStreamClusterStreamCRUD — jetstream_cluster_3_test.go
    [Fact]
    public async Task Stream_create_via_RequestAsync_routes_correctly()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var resp = await cluster.RequestAsync(
            $"{JetStreamApiSubjects.StreamCreate}APITEST",
            "{\"name\":\"APITEST\",\"subjects\":[\"api.>\"],\"retention\":\"limits\",\"storage\":\"memory\",\"num_replicas\":1}");

        resp.Error.ShouldBeNull();
        resp.StreamInfo.ShouldNotBeNull();
        resp.StreamInfo!.Config.Name.ShouldBe("APITEST");
    }

    [Fact]
    public async Task Stream_info_via_RequestAsync_returns_valid_info()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("INFOAPI", ["infoapi.>"], 1);

        var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}INFOAPI", "{}");

        resp.Error.ShouldBeNull();
        resp.StreamInfo.ShouldNotBeNull();
        resp.StreamInfo!.Config.Name.ShouldBe("INFOAPI");
    }

    [Fact]
    public async Task Stream_names_via_RequestAsync_lists_all_streams()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("NAMES1", ["n1.>"], 1);
        await cluster.CreateStreamAsync("NAMES2", ["n2.>"], 1);
        await cluster.CreateStreamAsync("NAMES3", ["n3.>"], 1);

        var resp = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");

        resp.Error.ShouldBeNull();
        resp.StreamNames.ShouldNotBeNull();
        resp.StreamNames!.Count.ShouldBe(3);
        resp.StreamNames.ShouldContain("NAMES1");
        resp.StreamNames.ShouldContain("NAMES2");
        resp.StreamNames.ShouldContain("NAMES3");
    }

    [Fact]
    public async Task Stream_list_via_RequestAsync_returns_all_streams()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("LIST1", ["l1.>"], 1);
        await cluster.CreateStreamAsync("LIST2", ["l2.>"], 1);

        var resp = await cluster.RequestAsync(JetStreamApiSubjects.StreamList, "{}");

        // NOTE(review): the StreamList response is asserted via StreamNames here;
        // presumably the router surfaces list results through that field — confirm.
        resp.Error.ShouldBeNull();
        resp.StreamNames.ShouldNotBeNull();
        resp.StreamNames!.Count.ShouldBe(2);
    }

    [Fact]
    public async Task Consumer_create_via_RequestAsync_routes_correctly()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("CONCREATE", ["cc.>"], 1);

        var resp = await cluster.RequestAsync(
            $"{JetStreamApiSubjects.ConsumerCreate}CONCREATE.dur1",
            "{\"durable_name\":\"dur1\",\"ack_policy\":\"none\"}");

        resp.Error.ShouldBeNull();
        resp.ConsumerInfo.ShouldNotBeNull();
        resp.ConsumerInfo!.Config.DurableName.ShouldBe("dur1");
    }

    [Fact]
    public async Task Consumer_info_via_RequestAsync_returns_valid_info()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("CONINFO", ["ci.>"], 1);
        await cluster.CreateConsumerAsync("CONINFO", "infoconsumer");

        var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerInfo}CONINFO.infoconsumer", "{}");

        resp.Error.ShouldBeNull();
        resp.ConsumerInfo.ShouldNotBeNull();
        resp.ConsumerInfo!.Config.DurableName.ShouldBe("infoconsumer");
    }

    [Fact]
    public async Task Consumer_names_via_RequestAsync_lists_consumers()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("CONNAMES", ["cn.>"], 1);
        await cluster.CreateConsumerAsync("CONNAMES", "cname1");
        await cluster.CreateConsumerAsync("CONNAMES", "cname2");

        var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CONNAMES", "{}");

        resp.Error.ShouldBeNull();
        resp.ConsumerNames.ShouldNotBeNull();
        resp.ConsumerNames!.Count.ShouldBe(2);
        resp.ConsumerNames.ShouldContain("cname1");
        resp.ConsumerNames.ShouldContain("cname2");
    }

    [Fact]
    public async Task Unknown_API_subject_returns_error_response()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var resp = await cluster.RequestAsync("$JS.API.UNKNOWN.ROUTE", "{}");

        // Unrecognized routes map to a 404 error rather than throwing.
        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    [Fact]
    public async Task Empty_payload_to_stream_create_uses_name_from_subject()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        // Empty payload causes ParseConfig to return default config; the handler
        // falls back to extracting the stream name from the API subject.
        var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamCreate}EMPTYTEST", "");

        // With name recovered from subject, the create should succeed
        resp.Error.ShouldBeNull();
        resp.StreamInfo.ShouldNotBeNull();
        resp.StreamInfo!.Config.Name.ShouldBe("EMPTYTEST");
    }

    [Fact]
    public async Task Invalid_JSON_to_API_falls_back_to_default_config()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        // Invalid JSON causes ParseConfig to fall back to a default config;
        // the stream name is extracted from the subject and a default subject is added.
        var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamCreate}BADJSONTEST", "not-valid-json{{{{");

        // The handler is resilient: it defaults to the name from the subject.
        resp.Error.ShouldBeNull();
        resp.StreamInfo.ShouldNotBeNull();
        resp.StreamInfo!.Config.Name.ShouldBe("BADJSONTEST");
    }

    // ---------------------------------------------------------------
    // Meta leader operations
    // ---------------------------------------------------------------

    // Go ref: TestJetStreamClusterMetaLeaderStepdown — jetstream_cluster_3_test.go
    [Fact]
    public async Task StepDownMetaLeader_changes_leader_id()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var oldLeader = cluster.GetMetaLeaderId();
        oldLeader.ShouldNotBeNullOrEmpty();

        cluster.StepDownMetaLeader();

        var newLeader = cluster.GetMetaLeaderId();
        newLeader.ShouldNotBe(oldLeader);
    }

    [Fact]
    public async Task New_meta_leader_is_different_from_previous()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var leader1 = cluster.GetMetaLeaderId();

        cluster.StepDownMetaLeader();
        var leader2 = cluster.GetMetaLeaderId();

        leader2.ShouldNotBe(leader1);
        leader2.ShouldNotBeNullOrEmpty();
    }

    [Fact]
    public async Task Multiple_meta_stepdowns_cycle_leaders()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var seenLeaders = new HashSet<string>();

        seenLeaders.Add(cluster.GetMetaLeaderId());
        cluster.StepDownMetaLeader();
        seenLeaders.Add(cluster.GetMetaLeaderId());
        cluster.StepDownMetaLeader();
        seenLeaders.Add(cluster.GetMetaLeaderId());

        // With 3 nodes, stepping down twice should produce at least 2 distinct leaders
        seenLeaders.Count.ShouldBeGreaterThanOrEqualTo(2);
    }

    [Fact]
    public async Task Stream_creation_works_after_meta_stepdown()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        cluster.StepDownMetaLeader();

        var resp = await cluster.CreateStreamAsync("AFTERSTEP", ["after.>"], 1);
        resp.Error.ShouldBeNull();
        resp.StreamInfo.ShouldNotBeNull();
        resp.StreamInfo!.Config.Name.ShouldBe("AFTERSTEP");
    }

    [Fact]
    public async Task Consumer_creation_works_after_meta_stepdown()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("CONAFTERSTEP", ["cas.>"], 1);

        cluster.StepDownMetaLeader();

        var resp = await cluster.CreateConsumerAsync("CONAFTERSTEP", "postdown");
        resp.Error.ShouldBeNull();
        resp.ConsumerInfo.ShouldNotBeNull();
        resp.ConsumerInfo!.Config.DurableName.ShouldBe("postdown");
    }

    [Fact]
    public async Task Publish_works_after_meta_stepdown()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("PUBAFTERSTEP", ["pub.>"], 1);

        cluster.StepDownMetaLeader();

        var ack = await cluster.PublishAsync("pub.event", "post-stepdown-message");
        ack.Stream.ShouldBe("PUBAFTERSTEP");
        ack.Seq.ShouldBe(1UL);
        ack.ErrorCode.ShouldBeNull();
    }

    [Fact]
    public async Task Fetch_works_after_meta_stepdown()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("FETCHAFTERSTEP", ["fetch.>"], 1);
        await cluster.CreateConsumerAsync("FETCHAFTERSTEP", "fetchcons", filterSubject: "fetch.>");

        for (var i = 0; i < 3; i++)
            await cluster.PublishAsync("fetch.event", $"msg-{i}");

        cluster.StepDownMetaLeader();

        var batch = await cluster.FetchAsync("FETCHAFTERSTEP", "fetchcons", 3);
        batch.Messages.Count.ShouldBe(3);
    }

    [Fact]
    public async Task Stream_info_accurate_after_meta_stepdown()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("INFOAFTERSTEP", ["ias.>"], 1);

        for (var i = 0; i < 5; i++)
            await cluster.PublishAsync("ias.event", $"msg-{i}");

        cluster.StepDownMetaLeader();

        var info = await cluster.GetStreamInfoAsync("INFOAFTERSTEP");
        info.Error.ShouldBeNull();
        info.StreamInfo.ShouldNotBeNull();
        info.StreamInfo!.State.Messages.ShouldBe(5UL);
    }

    [Fact]
    public async Task Stream_delete_works_after_meta_stepdown()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("DELAFTERSTEP", ["das.>"], 1);

        cluster.StepDownMetaLeader();

        var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELAFTERSTEP", "{}");
        resp.Success.ShouldBeTrue();
    }

    [Fact]
    public async Task Three_meta_stepdowns_followed_by_stream_creation_works()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        cluster.StepDownMetaLeader();
        cluster.StepDownMetaLeader();
        cluster.StepDownMetaLeader();

        var resp = await cluster.CreateStreamAsync("TRIPLE", ["triple.>"], 1);
        resp.Error.ShouldBeNull();
        resp.StreamInfo!.Config.Name.ShouldBe("TRIPLE");

        var meta = cluster.GetMetaState();
        meta!.Streams.ShouldContain("TRIPLE");
    }

    // ---------------------------------------------------------------
    // Account limit governance (cluster mode)
    // ---------------------------------------------------------------

    // Go ref: TestJetStreamClusterStreamLimitWithAccountDefaults — jetstream_cluster_1_test.go:124
    [Fact]
    public async Task Multiple_streams_up_to_limit_succeed()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        for (var i = 0; i < 5; i++)
        {
            var resp = await cluster.CreateStreamAsync($"LIMIT{i}", [$"lim{i}.>"], 1);
            resp.Error.ShouldBeNull();
            resp.StreamInfo.ShouldNotBeNull();
        }

        var meta = cluster.GetMetaState();
        meta!.Streams.Count.ShouldBe(5);
    }

    [Fact]
    public async Task Stream_with_max_messages_enforced_in_cluster()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var cfg = new StreamConfig
        {
            Name = "MAXMSGCLUSTER",
            Subjects = ["mmcluster.>"],
            Replicas = 1,
            MaxMsgs = 3,
        };
        var resp = cluster.CreateStreamDirect(cfg);
        resp.Error.ShouldBeNull();

        // Publish well past the cap; eviction must keep the stream at <= 3.
        for (var i = 0; i < 10; i++)
            await cluster.PublishAsync("mmcluster.event", $"msg-{i}");

        var state = await cluster.GetStreamStateAsync("MAXMSGCLUSTER");
        state.Messages.ShouldBeLessThanOrEqualTo(3UL);
    }

    [Fact]
    public async Task Stream_with_max_bytes_enforced_in_cluster()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var cfg = new StreamConfig
        {
            Name = "MAXBYTECLUSTER",
            Subjects = ["mbcluster.>"],
            Replicas = 1,
            MaxBytes = 256,
            Discard = DiscardPolicy.Old,
        };
        var resp = cluster.CreateStreamDirect(cfg);
        resp.Error.ShouldBeNull();

        for (var i = 0; i < 20; i++)
            await cluster.PublishAsync("mbcluster.event", new string('X', 64));

        var state = await cluster.GetStreamStateAsync("MAXBYTECLUSTER");
        // MaxBytes enforcement ensures total bytes stays bounded
        // (the +128 slack presumably allows for per-message overhead — confirm).
        ((long)state.Bytes).ShouldBeLessThanOrEqualTo(cfg.MaxBytes + 128);
    }

    [Fact]
    public async Task Delete_then_recreate_stays_within_limits()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var resp1 = await cluster.CreateStreamAsync("RECREATE", ["rec.>"], 1);
        resp1.Error.ShouldBeNull();

        var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}RECREATE", "{}");
        del.Success.ShouldBeTrue();

        var resp2 = await cluster.CreateStreamAsync("RECREATE", ["rec.>"], 1);
        resp2.Error.ShouldBeNull();
        resp2.StreamInfo!.Config.Name.ShouldBe("RECREATE");
    }

    [Fact]
    public async Task Consumer_creation_respects_limits()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("CONLIMIT", ["conlim.>"], 1);

        for (var i = 0; i < 5; i++)
        {
            var resp = await cluster.CreateConsumerAsync("CONLIMIT", $"conlim{i}");
            resp.Error.ShouldBeNull();
            resp.ConsumerInfo.ShouldNotBeNull();
        }

        var names = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerNames}CONLIMIT", "{}");
        names.ConsumerNames.ShouldNotBeNull();
        names.ConsumerNames!.Count.ShouldBe(5);
    }

    // ---------------------------------------------------------------
    // Stream governance
    // ---------------------------------------------------------------

    // Go ref: TestJetStreamClusterStreamCreate — jetstream_cluster_3_test.go
    [Fact]
    public void Stream_create_validation_requires_name()
    {
        // Direct StreamManager unit check — no cluster needed for validation.
        var streamManager = new StreamManager();
        var resp = streamManager.CreateOrUpdate(new StreamConfig { Name = "" });
        resp.Error.ShouldNotBeNull();
        resp.Error!.Description.ShouldContain("name");
    }

    [Fact]
    public async Task Stream_create_validation_requires_subjects_via_router()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        // Providing a name but no subjects — router should handle gracefully
        var resp = await cluster.RequestAsync(
            $"{JetStreamApiSubjects.StreamCreate}NOSUBJ",
            "{\"name\":\"NOSUBJ\"}");

        // Either succeeds (subjects optional) or returns an error; it must not throw
        (resp.Error is not null || resp.StreamInfo is not null).ShouldBeTrue();
    }

    [Fact]
    public async Task Stream_create_with_empty_name_fails()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        // Empty name in both the payload and the subject suffix — no fallback possible.
        var resp = await cluster.RequestAsync(
            $"{JetStreamApiSubjects.StreamCreate}",
            "{\"name\":\"\",\"subjects\":[\"x.>\"]}");

        resp.Error.ShouldNotBeNull();
    }

    [Fact]
    public async Task Stream_create_with_duplicate_name_returns_existing()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var first = await cluster.CreateStreamAsync("DUP_GOV", ["dupgov.>"], 1);
        first.Error.ShouldBeNull();
        first.StreamInfo!.Config.Name.ShouldBe("DUP_GOV");

        // Creating the same stream again (idempotent)
        var second = await cluster.CreateStreamAsync("DUP_GOV", ["dupgov.>"], 1);
        second.Error.ShouldBeNull();
        second.StreamInfo!.Config.Name.ShouldBe("DUP_GOV");
    }

    [Fact]
    public async Task Stream_update_preserves_messages()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("UPDPRES", ["updpres.>"], 1);

        for (var i = 0; i < 5; i++)
            await cluster.PublishAsync("updpres.event", $"msg-{i}");

        var update = cluster.UpdateStream("UPDPRES", ["updpres.>"], replicas: 1, maxMsgs: 100);
        update.Error.ShouldBeNull();

        // Config change must not drop stored messages.
        var state = await cluster.GetStreamStateAsync("UPDPRES");
        state.Messages.ShouldBe(5UL);
    }

    [Fact]
    public async Task Stream_update_can_change_subjects()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("UPDSUBJ", ["old.>"], 1);

        var update = cluster.UpdateStream("UPDSUBJ", ["new.>"], replicas: 1);
        update.Error.ShouldBeNull();
        update.StreamInfo!.Config.Subjects.ShouldContain("new.>");
        update.StreamInfo.Config.Subjects.ShouldNotContain("old.>");
    }

    [Fact]
    public async Task Stream_delete_removes_from_meta_state()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("DELMETA", ["delmeta.>"], 1);

        var metaBefore = cluster.GetMetaState();
        metaBefore!.Streams.ShouldContain("DELMETA");

        var del = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELMETA", "{}");
        del.Success.ShouldBeTrue();

        // After delete, the stream manager no longer shows it, but meta group
        // state tracks what was proposed; verify via stream info being not found
        var info = await cluster.GetStreamInfoAsync("DELMETA");
        info.Error.ShouldNotBeNull();
        info.Error!.Code.ShouldBe(404);
    }

    [Fact]
    public async Task Deleted_stream_not_in_stream_names_list()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("KEEPME", ["keep.>"], 1);
        await cluster.CreateStreamAsync("DELME", ["del.>"], 1);

        await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DELME", "{}");

        var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames.ShouldNotBeNull();
        names.StreamNames!.ShouldContain("KEEPME");
        names.StreamNames.ShouldNotContain("DELME");
    }

    [Fact]
    public async Task Stream_create_after_delete_with_same_name_succeeds()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("RECYCLE", ["recycle.>"], 1);

        await cluster.PublishAsync("recycle.event", "original");

        await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}RECYCLE", "{}");

        var resp = await cluster.CreateStreamAsync("RECYCLE", ["recycle.>"], 1);
        resp.Error.ShouldBeNull();
        resp.StreamInfo!.Config.Name.ShouldBe("RECYCLE");

        // New stream starts at sequence 1
        var ack = await cluster.PublishAsync("recycle.event", "new-message");
        ack.Stream.ShouldBe("RECYCLE");
        ack.Seq.ShouldBe(1UL);
    }

    [Fact]
    public async Task Twenty_streams_in_same_cluster_all_tracked()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        for (var i = 0; i < 20; i++)
        {
            var resp = await cluster.CreateStreamAsync($"TWENTY{i:D2}", [$"twenty{i:D2}.>"], 1);
            resp.Error.ShouldBeNull();
        }

        // Both the meta state and the API listing must agree on the count.
        var meta = cluster.GetMetaState();
        meta!.Streams.Count.ShouldBe(20);

        var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames.ShouldNotBeNull();
        names.StreamNames!.Count.ShouldBe(20);
    }

    [Fact]
    public async Task Stream_info_for_non_existent_stream_returns_error()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        var resp = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamInfo}DOESNOTEXIST", "{}");

        resp.Error.ShouldNotBeNull();
        resp.Error!.Code.ShouldBe(404);
    }

    // ---------------------------------------------------------------
    // Additional governance: Meta stepdown via API subject
    // ---------------------------------------------------------------

    // Go ref: TestJetStreamClusterMetaLeaderStepdown — jetstream_cluster_3_test.go
    [Fact]
    public async Task Meta_leader_stepdown_via_API_subject_changes_leader()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var before = cluster.GetMetaLeaderId();
        before.ShouldNotBeNullOrEmpty();

        var resp = await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
        resp.Success.ShouldBeTrue();

        var after = cluster.GetMetaLeaderId();
        after.ShouldNotBe(before);
    }

    [Fact]
    public async Task Meta_leader_stepdown_via_API_increments_leadership_version()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        var versionBefore = cluster.GetMetaState()!.LeadershipVersion;

        await cluster.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");

        var versionAfter = cluster.GetMetaState()!.LeadershipVersion;
        versionAfter.ShouldBeGreaterThan(versionBefore);
    }

    [Fact]
    public async Task Stream_publish_and_fetch_round_trip_in_cluster()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("ROUNDTRIP", ["rt.>"], 1);
        await cluster.CreateConsumerAsync("ROUNDTRIP", "rtcon", filterSubject: "rt.>");

        for (var i = 0; i < 5; i++)
            await cluster.PublishAsync("rt.event", $"round-trip-{i}");

        var batch = await cluster.FetchAsync("ROUNDTRIP", "rtcon", 5);
        batch.Messages.Count.ShouldBe(5);

        var state = await cluster.GetStreamStateAsync("ROUNDTRIP");
        state.Messages.ShouldBe(5UL);
    }

    [Fact]
    public async Task Account_info_reflects_stream_and_consumer_counts_in_cluster()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        await cluster.CreateStreamAsync("ACCTGOV1", ["ag1.>"], 1);
        await cluster.CreateStreamAsync("ACCTGOV2", ["ag2.>"], 1);
        await cluster.CreateConsumerAsync("ACCTGOV1", "acctcon1");
        await cluster.CreateConsumerAsync("ACCTGOV1", "acctcon2");

        var resp = await cluster.RequestAsync(JetStreamApiSubjects.Info, "{}");
        resp.AccountInfo.ShouldNotBeNull();
        resp.AccountInfo!.Streams.ShouldBe(2);
        resp.AccountInfo.Consumers.ShouldBe(2);
    }

    [Fact]
    public async Task Stream_purge_via_API_clears_messages_and_meta_stream_count_unchanged()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("PURGEMETA", ["purgemeta.>"], 1);

        for (var i = 0; i < 10; i++)
            await cluster.PublishAsync("purgemeta.event", $"msg-{i}");

        var stateBefore = await cluster.GetStreamStateAsync("PURGEMETA");
        stateBefore.Messages.ShouldBe(10UL);

        var purge = await cluster.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGEMETA", "{}");
        purge.Success.ShouldBeTrue();

        var stateAfter = await cluster.GetStreamStateAsync("PURGEMETA");
        stateAfter.Messages.ShouldBe(0UL);

        // Meta state still tracks the stream name after purge (purge != delete)
        var meta = cluster.GetMetaState();
        meta!.Streams.ShouldContain("PURGEMETA");
    }

    [Fact]
    public async Task Consumer_list_returns_all_consumers_in_cluster()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);
        await cluster.CreateStreamAsync("CONLISTGOV", ["clgov.>"], 1);

        await cluster.CreateConsumerAsync("CONLISTGOV", "gd1");
        await cluster.CreateConsumerAsync("CONLISTGOV", "gd2");
        await cluster.CreateConsumerAsync("CONLISTGOV", "gd3");

        var list = await cluster.RequestAsync($"{JetStreamApiSubjects.ConsumerList}CONLISTGOV", "{}");
        list.ConsumerNames.ShouldNotBeNull();
        list.ConsumerNames!.Count.ShouldBe(3);
    }

    [Fact]
    public async Task Meta_state_streams_list_shrinks_after_stream_delete_via_stream_manager()
    {
        await using var cluster = await JetStreamClusterFixture.StartAsync(3);

        await cluster.CreateStreamAsync("SHRINK1", ["sh1.>"], 1);
        await cluster.CreateStreamAsync("SHRINK2", ["sh2.>"], 1);

        var metaBefore = cluster.GetMetaState();
        metaBefore!.Streams.Count.ShouldBe(2);

        // Delete via API router which calls stream manager delete
        await cluster.RequestAsync($"{JetStreamApiSubjects.StreamDelete}SHRINK1", "{}");

        // The stream names list from the router should reflect the deletion
        var names = await cluster.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
        names.StreamNames!.Count.ShouldBe(1);
        names.StreamNames.ShouldContain("SHRINK2");
    }
}
|
||||
@@ -0,0 +1,825 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: placement caps, cluster size variations, replica defaults, R1/R3/R5/R7
|
||||
// placement, stepdown and info consistency, concurrent creation, long names,
|
||||
// subject overlap, re-create after delete, update without message loss.
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.TestUtilities;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream cluster stream placement semantics:
|
||||
/// replica caps at cluster size, various cluster sizes, replica defaults,
|
||||
/// concurrent creation, leader stepdown, info consistency, and edge cases.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class JsClusterStreamPlacementTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_caps_five_replicas_in_three_node_cluster()
{
    // R5 requested but only 3 nodes exist: the plan is capped at 3.
    var plan = new AssetPlacementPlanner(nodes: 3).PlanReplicas(replicas: 5);

    plan.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_allows_exact_cluster_size_replicas()
{
    // R3 in a 3-node cluster is a legal, uncapped placement.
    var plan = new AssetPlacementPlanner(nodes: 3).PlanReplicas(replicas: 3);

    plan.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_zero_replicas_defaults_to_one()
{
    // R0 means "unspecified" and defaults to a single replica.
    var plan = new AssetPlacementPlanner(nodes: 3).PlanReplicas(replicas: 0);

    plan.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_negative_replicas_treated_as_one()
{
    // A negative replica count is normalized to one replica.
    var plan = new AssetPlacementPlanner(nodes: 3).PlanReplicas(replicas: -1);

    plan.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R1_in_single_node_cluster()
{
    // Degenerate case: one node, one replica.
    var plan = new AssetPlacementPlanner(nodes: 1).PlanReplicas(replicas: 1);

    plan.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_caps_to_single_node_in_one_node_cluster()
{
    // R3 cannot be satisfied by a single node; the plan shrinks to 1.
    var plan = new AssetPlacementPlanner(nodes: 1).PlanReplicas(replicas: 3);

    plan.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R1_in_three_node_cluster()
{
    // R1 stays R1 even when more nodes are available.
    var plan = new AssetPlacementPlanner(nodes: 3).PlanReplicas(replicas: 1);

    plan.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R3_in_five_node_cluster()
{
    // R3 in a 5-node cluster uses exactly three nodes.
    var plan = new AssetPlacementPlanner(nodes: 5).PlanReplicas(replicas: 3);

    plan.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R5_in_seven_node_cluster()
{
    // R5 fits comfortably in a 7-node cluster, no capping.
    var plan = new AssetPlacementPlanner(nodes: 7).PlanReplicas(replicas: 5);

    plan.Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_R7_in_seven_node_cluster_exact_match()
{
    // R7 on 7 nodes occupies every node in the cluster.
    var plan = new AssetPlacementPlanner(nodes: 7).PlanReplicas(replicas: 7);

    plan.Count.ShouldBe(7);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_caps_R7_in_five_node_cluster_to_five()
{
    // R7 requested with only 5 nodes available: capped to 5.
    var plan = new AssetPlacementPlanner(nodes: 5).PlanReplicas(replicas: 7);

    plan.Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Multiple_streams_with_different_placements_coexist()
{
    // R1, R3 and R5 streams can live side by side in a 5-node cluster.
    await using var fixture = await JetStreamClusterFixture.StartAsync(5);

    await fixture.CreateStreamAsync("P1", ["p1.>"], replicas: 1);
    await fixture.CreateStreamAsync("P3", ["p3.>"], replicas: 3);
    await fixture.CreateStreamAsync("P5", ["p5.>"], replicas: 5);

    var names = await fixture.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");

    names.StreamNames.ShouldNotBeNull();
    names.StreamNames!.Count.ShouldBe(3);
    foreach (var expected in new[] { "P1", "P3", "P5" })
        names.StreamNames.ShouldContain(expected);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_with_replicas_equal_to_cluster_size_succeeds()
{
    // R3 in a 3-node cluster uses every node; creation must not error.
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    var create = await fixture.CreateStreamAsync("FULL3", ["full3.>"], replicas: 3);
    create.Error.ShouldBeNull();

    var replicaGroup = fixture.GetReplicaGroup("FULL3");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_creation_after_another_stream_exists_succeeds()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("FIRST", ["first.>"], replicas: 3);

    // A second R3 stream placed after the first must succeed as well.
    var second = await fixture.CreateStreamAsync("SECOND", ["second.>"], replicas: 3);

    second.Error.ShouldBeNull();
    second.StreamInfo.ShouldNotBeNull();
    second.StreamInfo!.Config.Name.ShouldBe("SECOND");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Ten_streams_in_same_cluster_all_exist()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    // Create PLACE0..PLACE9, each fully replicated.
    foreach (var i in Enumerable.Range(0, 10))
        await fixture.CreateStreamAsync($"PLACE{i}", [$"place{i}.>"], replicas: 3);

    var names = await fixture.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");

    names.StreamNames.ShouldNotBeNull();
    names.StreamNames!.Count.ShouldBe(10);
    foreach (var i in Enumerable.Range(0, 10))
        names.StreamNames.ShouldContain($"PLACE{i}");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Replicated_stream_survives_meta_leader_stepdown()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("SURV", ["surv.>"], replicas: 3);

    foreach (var i in Enumerable.Range(0, 5))
        await fixture.PublishAsync("surv.event", $"msg-{i}");

    // Force a meta leadership change and confirm a different node took over.
    var leaderBefore = fixture.GetMetaLeaderId();
    fixture.StepDownMetaLeader();
    fixture.GetMetaLeaderId().ShouldNotBe(leaderBefore);

    // Stream data remains intact across the meta stepdown.
    var streamState = await fixture.GetStreamStateAsync("SURV");
    streamState.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_info_consistent_after_meta_stepdown()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("INFOSTEP", ["infostep.>"], replicas: 3);

    foreach (var i in Enumerable.Range(0, 7))
        await fixture.PublishAsync("infostep.event", $"msg-{i}");

    fixture.StepDownMetaLeader();

    // STREAM.INFO still reports the same config and message count afterwards.
    var info = await fixture.GetStreamInfoAsync("INFOSTEP");
    info.Error.ShouldBeNull();
    info.StreamInfo.ShouldNotBeNull();
    info.StreamInfo!.Config.Name.ShouldBe("INFOSTEP");
    info.StreamInfo.State.Messages.ShouldBe(7UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_more_replicas_than_nodes_caps_not_errors()
{
    // An absurd replica request is silently capped to the node count, never thrown.
    var planner = new AssetPlacementPlanner(nodes: 3);

    var planFully = () => planner.PlanReplicas(replicas: 999);
    planFully.ShouldNotThrow();

    planner.PlanReplicas(replicas: 999).Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_cluster_size_one_always_returns_one_replica()
{
    // Whatever R value is requested (1..10), one node yields exactly one replica.
    var singleNode = new AssetPlacementPlanner(nodes: 1);

    foreach (var requested in Enumerable.Range(1, 10))
        singleNode.PlanReplicas(replicas: requested).Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_exists_after_remove_and_restart_node_simulation()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("NODEREMOVE", ["noderemove.>"], replicas: 3);

    foreach (var i in Enumerable.Range(0, 5))
        await fixture.PublishAsync("noderemove.event", $"msg-{i}");

    // Bounce node 2: remove it, then simulate it coming back.
    fixture.RemoveNode(2);
    fixture.SimulateNodeRestart(2);

    var streamState = await fixture.GetStreamStateAsync("NODEREMOVE");
    streamState.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Concurrent_stream_creation_all_streams_verify_exist()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    // Fire five creations in parallel and wait for all of them.
    await Task.WhenAll(Enumerable.Range(0, 5)
        .Select(i => fixture.CreateStreamAsync($"CONC{i}", [$"conc{i}.>"], replicas: 3)));

    var names = await fixture.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");

    names.StreamNames.ShouldNotBeNull();
    names.StreamNames!.Count.ShouldBe(5);
    foreach (var i in Enumerable.Range(0, 5))
        names.StreamNames.ShouldContain($"CONC{i}");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_names_can_be_long_strings()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    // A 60-character stream name round-trips through creation unchanged.
    var longName = new string('A', 60);
    var create = await fixture.CreateStreamAsync(longName, [$"{longName.ToLowerInvariant()}.>"], replicas: 3);

    create.Error.ShouldBeNull();
    create.StreamInfo!.Config.Name.ShouldBe(longName);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamOverlapSubjects server/jetstream_cluster_1_test.go:1248
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_subjects_can_be_completely_distinct_from_others()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    await fixture.CreateStreamAsync("DISTINCT1", ["ns1.>"], replicas: 3);
    await fixture.CreateStreamAsync("DISTINCT2", ["ns2.>"], replicas: 3);
    await fixture.CreateStreamAsync("DISTINCT3", ["ns3.>"], replicas: 3);

    // Each publish lands in the stream owning that subject namespace.
    (await fixture.PublishAsync("ns1.event", "msg1")).Stream.ShouldBe("DISTINCT1");
    (await fixture.PublishAsync("ns2.event", "msg2")).Stream.ShouldBe("DISTINCT2");
    (await fixture.PublishAsync("ns3.event", "msg3")).Stream.ShouldBe("DISTINCT3");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Re_creating_deleted_stream_with_same_placement_works()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    await fixture.CreateStreamAsync("REDEL", ["redel.>"], replicas: 3);
    await fixture.RequestAsync($"{JetStreamApiSubjects.StreamDelete}REDEL", "{}");

    // The same name and placement must be usable again after deletion.
    var recreated = await fixture.CreateStreamAsync("REDEL", ["redel.>"], replicas: 3);

    recreated.Error.ShouldBeNull();
    recreated.StreamInfo.ShouldNotBeNull();
    recreated.StreamInfo!.Config.Name.ShouldBe("REDEL");
    recreated.StreamInfo.Config.Replicas.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_update_does_not_lose_published_messages()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("NOLOSS", ["noloss.>"], replicas: 3);

    foreach (var i in Enumerable.Range(0, 15))
        await fixture.PublishAsync("noloss.event", $"msg-{i}");

    // Updating the config (raising MaxMsgs) must keep existing data.
    var updateResult = fixture.UpdateStream("NOLOSS", ["noloss.>"], replicas: 3, maxMsgs: 100);
    updateResult.Error.ShouldBeNull();

    (await fixture.GetStreamStateAsync("NOLOSS")).Messages.ShouldBe(15UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R3_stream_leader_stepdown_elects_new_leader()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("PLSTEP", ["plstep.>"], replicas: 3);

    var leaderBefore = fixture.GetStreamLeaderId("PLSTEP");
    leaderBefore.ShouldNotBeNullOrWhiteSpace();

    // Asking the leader to step down must hand the group to a different node.
    var stepDown = await fixture.StepDownStreamLeaderAsync("PLSTEP");
    stepDown.Success.ShouldBeTrue();

    fixture.GetStreamLeaderId("PLSTEP").ShouldNotBe(leaderBefore);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_info_consistent_after_R3_stream_leader_stepdown()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("PLINFOSTEP", ["plinfostep.>"], replicas: 3);

    foreach (var i in Enumerable.Range(0, 5))
        await fixture.PublishAsync("plinfostep.event", $"msg-{i}");

    await fixture.StepDownStreamLeaderAsync("PLINFOSTEP");

    // Info served by the new leader matches what was written under the old one.
    var info = await fixture.GetStreamInfoAsync("PLINFOSTEP");
    info.Error.ShouldBeNull();
    info.StreamInfo.ShouldNotBeNull();
    info.StreamInfo!.Config.Replicas.ShouldBe(3);
    info.StreamInfo.State.Messages.ShouldBe(5UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Placement_validation_replicas_capped_at_cluster_node_count()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    // No replica group exists for a stream that was never created.
    fixture.GetReplicaGroup("NOTEXIST").ShouldBeNull();

    // Creating a stream succeeds; its group never exceeds the cluster size.
    var create = await fixture.CreateStreamAsync("CAPTEST", ["captest.>"], replicas: 3);
    create.Error.ShouldBeNull();

    var replicaGroup = fixture.GetReplicaGroup("CAPTEST");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBeLessThanOrEqualTo(fixture.NodeCount);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Placement_planner_cluster_size_reflected_correctly_for_different_sizes()
{
    // For a fixed R3 request, the plan size is min(3, cluster size).
    foreach (var (nodes, expected) in new[] { (1, 1), (3, 3), (5, 3), (7, 3) })
        new AssetPlacementPlanner(nodes).PlanReplicas(3).Count.ShouldBe(expected);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMetaSnapshotsAndCatchup server/jetstream_cluster_1_test.go:833
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Meta_group_tracks_stream_placement_changes_through_stepdown()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    await fixture.CreateStreamAsync("META_P1", ["meta_p1.>"], replicas: 1);
    await fixture.CreateStreamAsync("META_P3", ["meta_p3.>"], replicas: 3);

    var before = fixture.GetMetaState();
    before.ShouldNotBeNull();
    before!.Streams.ShouldContain("META_P1");
    before.Streams.ShouldContain("META_P3");

    fixture.StepDownMetaLeader();

    // The new meta leader still knows both placements and has a newer term.
    var after = fixture.GetMetaState();
    after.ShouldNotBeNull();
    after!.Streams.ShouldContain("META_P1");
    after.Streams.ShouldContain("META_P3");
    after.LeadershipVersion.ShouldBeGreaterThan(before.LeadershipVersion);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_list_api_returns_all_streams_in_five_node_cluster()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(5);

    await fixture.CreateStreamAsync("FL1", ["fl1.>"], replicas: 1);
    await fixture.CreateStreamAsync("FL3", ["fl3.>"], replicas: 3);
    await fixture.CreateStreamAsync("FL5", ["fl5.>"], replicas: 5);

    // STREAM.LIST reports every stream regardless of its replication factor.
    var listing = await fixture.RequestAsync(JetStreamApiSubjects.StreamList, "{}");
    listing.StreamNames.ShouldNotBeNull();
    listing.StreamNames!.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R1_placement_in_five_node_cluster_creates_one_node_group()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(5);
    await fixture.CreateStreamAsync("R1IN5", ["r1in5.>"], replicas: 1);

    // An R1 stream occupies exactly one of the five nodes.
    var replicaGroup = fixture.GetReplicaGroup("R1IN5");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R3_placement_in_five_node_cluster_creates_three_node_group()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(5);
    await fixture.CreateStreamAsync("R3IN5", ["r3in5.>"], replicas: 3);

    // An R3 stream occupies exactly three of the five nodes.
    var replicaGroup = fixture.GetReplicaGroup("R3IN5");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Nodes.Count.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Consecutive_meta_stepdowns_preserve_stream_placements()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    await fixture.CreateStreamAsync("CONSEC1", ["consec1.>"], replicas: 3);
    await fixture.CreateStreamAsync("CONSEC2", ["consec2.>"], replicas: 1);

    // Rotate meta leadership three times in a row.
    foreach (var _ in Enumerable.Range(0, 3))
        fixture.StepDownMetaLeader();

    var names = await fixture.RequestAsync(JetStreamApiSubjects.StreamNames, "{}");
    names.StreamNames.ShouldNotBeNull();
    names.StreamNames!.ShouldContain("CONSEC1");
    names.StreamNames.ShouldContain("CONSEC2");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Publish_after_stream_update_works_correctly()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("POSTUPD", ["postupd.>"], replicas: 3);

    foreach (var i in Enumerable.Range(0, 5))
        await fixture.PublishAsync("postupd.event", $"before-{i}");

    fixture.UpdateStream("POSTUPD", ["postupd.>"], replicas: 3, maxMsgs: 100);

    // Publishing must keep working after the config update.
    foreach (var i in Enumerable.Range(0, 5))
        await fixture.PublishAsync("postupd.event", $"after-{i}");

    (await fixture.GetStreamStateAsync("POSTUPD")).Messages.ShouldBe(10UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamPurge server/jetstream_cluster_1_test.go:522
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R3_stream_purge_after_stepdown_clears_messages()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("PURGESTEP", ["purgestep.>"], replicas: 3);

    foreach (var i in Enumerable.Range(0, 10))
        await fixture.PublishAsync("purgestep.event", $"msg-{i}");

    await fixture.StepDownStreamLeaderAsync("PURGESTEP");

    // Purge issued after the leader change must still empty the stream.
    var purge = await fixture.RequestAsync($"{JetStreamApiSubjects.StreamPurge}PURGESTEP", "{}");
    purge.Success.ShouldBeTrue();

    (await fixture.GetStreamStateAsync("PURGESTEP")).Messages.ShouldBe(0UL);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task R3_stream_has_leader_with_naming_convention()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("LEADNM", ["leadnm.>"], replicas: 3);

    // The replica group exposes a non-empty leader id flagged as leader.
    var replicaGroup = fixture.GetReplicaGroup("LEADNM");
    replicaGroup.ShouldNotBeNull();
    replicaGroup!.Leader.Id.ShouldNotBeNullOrWhiteSpace();
    replicaGroup.Leader.IsLeader.ShouldBeTrue();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMaxStreamsReached server/jetstream_cluster_1_test.go:3177
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Account_info_reflects_correct_stream_count_after_placements()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    await fixture.CreateStreamAsync("ACCP1", ["accp1.>"], replicas: 1);
    await fixture.CreateStreamAsync("ACCP3", ["accp3.>"], replicas: 3);

    // Account info counts each stream once, independent of replication factor.
    var info = await fixture.RequestAsync(JetStreamApiSubjects.Info, "{}");
    info.AccountInfo.ShouldNotBeNull();
    info.AccountInfo!.Streams.ShouldBe(2);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Wait_on_stream_leader_completes_for_newly_placed_stream()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("WAITPL", ["waitpl.>"], replicas: 3);

    // Waiting for leadership must resolve promptly for a fresh stream.
    await fixture.WaitOnStreamLeaderAsync("WAITPL", timeoutMs: 2000);

    fixture.GetStreamLeaderId("WAITPL").ShouldNotBeNullOrWhiteSpace();
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterDelete server/jetstream_cluster_1_test.go:472
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_delete_reduces_account_stream_count()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);

    await fixture.CreateStreamAsync("DEL_A", ["del_a.>"], replicas: 3);
    await fixture.CreateStreamAsync("DEL_B", ["del_b.>"], replicas: 3);

    await fixture.RequestAsync($"{JetStreamApiSubjects.StreamDelete}DEL_A", "{}");

    // Only DEL_B should remain counted against the account.
    var info = await fixture.RequestAsync(JetStreamApiSubjects.Info, "{}");
    info.AccountInfo!.Streams.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamInfoList server/jetstream_cluster_1_test.go:1284
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Stream_placement_info_accessible_via_api_router_subject()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("APIPLC", ["apiplc.>"], replicas: 3);

    // STREAM.INFO via the raw API subject returns the placed configuration.
    var response = await fixture.RequestAsync($"{JetStreamApiSubjects.StreamInfo}APIPLC", "{}");
    response.Error.ShouldBeNull();
    response.StreamInfo.ShouldNotBeNull();
    response.StreamInfo!.Config.Name.ShouldBe("APIPLC");
    response.StreamInfo.Config.Replicas.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMemoryStore server/jetstream_cluster_1_test.go:423
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public async Task Memory_store_placement_in_three_node_cluster_accepts_publishes()
{
    await using var fixture = await JetStreamClusterFixture.StartAsync(3);
    await fixture.CreateStreamAsync("MEMPLACE", ["memplace.>"], replicas: 3, storage: StorageType.Memory);

    foreach (var i in Enumerable.Range(0, 20))
        await fixture.PublishAsync("memplace.event", $"msg-{i}");

    var streamState = await fixture.GetStreamStateAsync("MEMPLACE");
    streamState.Messages.ShouldBe(20UL);

    // The store backend really is the in-memory implementation.
    fixture.GetStoreBackendType("MEMPLACE").ShouldBe("memory");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Meta_leadership_version_increments_on_each_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
var initial = cluster.GetMetaState();
|
||||
initial.ShouldNotBeNull();
|
||||
initial!.LeadershipVersion.ShouldBe(1L);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
var v2 = cluster.GetMetaState()!.LeadershipVersion;
|
||||
v2.ShouldBe(2L);
|
||||
|
||||
cluster.StepDownMetaLeader();
|
||||
var v3 = cluster.GetMetaState()!.LeadershipVersion;
|
||||
v3.ShouldBe(3L);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Placement_group_leader_changes_on_stream_stepdown()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(3);
|
||||
|
||||
await cluster.CreateStreamAsync("STEPPL", ["steppl.>"], replicas: 3);
|
||||
|
||||
var groupBefore = cluster.GetReplicaGroup("STEPPL");
|
||||
groupBefore.ShouldNotBeNull();
|
||||
var leaderBefore = groupBefore!.Leader.Id;
|
||||
|
||||
await cluster.StepDownStreamLeaderAsync("STEPPL");
|
||||
|
||||
var groupAfter = cluster.GetReplicaGroup("STEPPL");
|
||||
groupAfter.ShouldNotBeNull();
|
||||
var leaderAfter = groupAfter!.Leader.Id;
|
||||
|
||||
leaderAfter.ShouldNotBe(leaderBefore);
|
||||
groupAfter.Leader.IsLeader.ShouldBeTrue();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Placement_node_count_consistent_with_requested_replicas()
|
||||
{
|
||||
await using var cluster = await JetStreamClusterFixture.StartAsync(5);
|
||||
|
||||
await cluster.CreateStreamAsync("NODECNT1", ["nc1.>"], replicas: 1);
|
||||
await cluster.CreateStreamAsync("NODECNT2", ["nc2.>"], replicas: 2);
|
||||
await cluster.CreateStreamAsync("NODECNT5", ["nc5.>"], replicas: 5);
|
||||
|
||||
cluster.GetReplicaGroup("NODECNT1")!.Nodes.Count.ShouldBe(1);
|
||||
cluster.GetReplicaGroup("NODECNT2")!.Nodes.Count.ShouldBe(2);
|
||||
cluster.GetReplicaGroup("NODECNT5")!.Nodes.Count.ShouldBe(5);
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,228 @@
|
||||
// Parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// TestJetStreamClusterStreamLeaderStepDown (line 4925)
|
||||
// TestJetStreamClusterLeaderStepdown (line 5464)
|
||||
// TestJetStreamClusterLeader (line 73)
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering JetStream leader election and failover scenarios,
|
||||
/// ported from the Go server's jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class LeaderFailoverParityTests
|
||||
{
|
||||
/// <summary>
|
||||
/// Go parity: TestJetStreamClusterStreamLeaderStepDown (line 4925).
|
||||
/// After publishing messages to an R=3 stream, stepping down the stream leader
|
||||
/// must elect a new leader and preserve all previously stored messages. The new
|
||||
/// leader must accept subsequent writes with correct sequencing.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Stream_leader_stepdown_preserves_data_and_elects_new_leader()
|
||||
{
|
||||
await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);
|
||||
var streamName = "STEPDOWN_DATA";
|
||||
await fx.CreateStreamAsync(streamName, subjects: ["sd.>"], replicas: 3);
|
||||
|
||||
// Publish 10 messages before stepdown (Go: msg, toSend := []byte("Hello JS Clustering"), 10)
|
||||
for (var i = 1; i <= 10; i++)
|
||||
{
|
||||
var ack = await fx.PublishAsync($"sd.{i}", $"msg-{i}");
|
||||
ack.Seq.ShouldBe((ulong)i);
|
||||
ack.Stream.ShouldBe(streamName);
|
||||
}
|
||||
|
||||
// Capture current leader identity
|
||||
var leaderBefore = fx.GetStreamLeaderId(streamName);
|
||||
leaderBefore.ShouldNotBeNullOrWhiteSpace();
|
||||
|
||||
// Step down the stream leader (Go: nc.Request(JSApiStreamLeaderStepDownT, "TEST"))
|
||||
var stepdownResponse = await fx.StepDownStreamLeaderAsync(streamName);
|
||||
stepdownResponse.Success.ShouldBeTrue();
|
||||
|
||||
// Verify new leader was elected (Go: si.Cluster.Leader != oldLeader)
|
||||
var leaderAfter = fx.GetStreamLeaderId(streamName);
|
||||
leaderAfter.ShouldNotBe(leaderBefore);
|
||||
|
||||
// Verify all 10 messages survived the failover
|
||||
var state = await fx.GetStreamStateAsync(streamName);
|
||||
state.Messages.ShouldBe(10UL);
|
||||
state.FirstSeq.ShouldBe(1UL);
|
||||
state.LastSeq.ShouldBe(10UL);
|
||||
|
||||
// Verify the new leader accepts writes with correct sequencing
|
||||
var postFailoverAck = await fx.PublishAsync("sd.post", "after-stepdown");
|
||||
postFailoverAck.Seq.ShouldBe(11UL);
|
||||
postFailoverAck.Stream.ShouldBe(streamName);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Go parity: TestJetStreamClusterLeaderStepdown (line 5464).
|
||||
/// Requesting a meta-leader stepdown via the $JS.API.META.LEADER.STEPDOWN subject
|
||||
/// must succeed and elect a new meta-leader with an incremented leadership version.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Meta_leader_stepdown_elects_new_leader_with_incremented_version()
|
||||
{
|
||||
await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);
|
||||
|
||||
// Create a stream so the meta group has some state
|
||||
await fx.CreateStreamAsync("META_SD", subjects: ["meta.>"], replicas: 3);
|
||||
|
||||
var metaBefore = fx.GetMetaState();
|
||||
metaBefore.ShouldNotBeNull();
|
||||
metaBefore.ClusterSize.ShouldBe(3);
|
||||
var leaderBefore = metaBefore.LeaderId;
|
||||
var versionBefore = metaBefore.LeadershipVersion;
|
||||
|
||||
// Step down meta leader via API (Go: nc.Request(JSApiLeaderStepDown, nil))
|
||||
var response = await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
|
||||
response.Success.ShouldBeTrue();
|
||||
|
||||
// Verify new meta leader elected (Go: cl != c.leader())
|
||||
var metaAfter = fx.GetMetaState();
|
||||
metaAfter.ShouldNotBeNull();
|
||||
metaAfter.LeaderId.ShouldNotBe(leaderBefore);
|
||||
metaAfter.LeadershipVersion.ShouldBe(versionBefore + 1);
|
||||
|
||||
// Stream metadata must survive the meta-leader transition
|
||||
metaAfter.Streams.ShouldContain("META_SD");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Go parity: TestJetStreamClusterLeader (line 73).
|
||||
/// After electing a stream leader, stepping down twice through consecutive
|
||||
/// elections must cycle through distinct leaders. Each election must produce
|
||||
/// a valid leader that can accept proposals.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task Consecutive_leader_elections_cycle_through_distinct_peers()
|
||||
{
|
||||
await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);
|
||||
await fx.CreateStreamAsync("CYCLE", subjects: ["cycle.>"], replicas: 3);
|
||||
|
||||
// Track leaders across consecutive stepdowns
|
||||
var leaders = new List<string>();
|
||||
leaders.Add(fx.GetStreamLeaderId("CYCLE"));
|
||||
|
||||
// First stepdown
|
||||
var resp1 = await fx.StepDownStreamLeaderAsync("CYCLE");
|
||||
resp1.Success.ShouldBeTrue();
|
||||
leaders.Add(fx.GetStreamLeaderId("CYCLE"));
|
||||
|
||||
// Second stepdown
|
||||
var resp2 = await fx.StepDownStreamLeaderAsync("CYCLE");
|
||||
resp2.Success.ShouldBeTrue();
|
||||
leaders.Add(fx.GetStreamLeaderId("CYCLE"));
|
||||
|
||||
// Each consecutive leader must differ from its predecessor
|
||||
leaders[1].ShouldNotBe(leaders[0]);
|
||||
leaders[2].ShouldNotBe(leaders[1]);
|
||||
|
||||
// After cycling, the stream must still be writable
|
||||
var ack = await fx.PublishAsync("cycle.verify", "still-alive");
|
||||
ack.Stream.ShouldBe("CYCLE");
|
||||
ack.Seq.ShouldBeGreaterThan(0UL);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Test fixture that wires up a JetStream cluster with meta group, stream manager,
|
||||
/// consumer manager, and API router for leader failover testing.
|
||||
/// </summary>
|
||||
internal sealed class LeaderFailoverFixture : IAsyncDisposable
|
||||
{
|
||||
private readonly JetStreamMetaGroup _metaGroup;
|
||||
private readonly StreamManager _streamManager;
|
||||
private readonly ConsumerManager _consumerManager;
|
||||
private readonly JetStreamApiRouter _router;
|
||||
private readonly JetStreamPublisher _publisher;
|
||||
|
||||
private LeaderFailoverFixture(
|
||||
JetStreamMetaGroup metaGroup,
|
||||
StreamManager streamManager,
|
||||
ConsumerManager consumerManager,
|
||||
JetStreamApiRouter router)
|
||||
{
|
||||
_metaGroup = metaGroup;
|
||||
_streamManager = streamManager;
|
||||
_consumerManager = consumerManager;
|
||||
_router = router;
|
||||
_publisher = new JetStreamPublisher(_streamManager);
|
||||
}
|
||||
|
||||
public static Task<LeaderFailoverFixture> StartAsync(int nodes)
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(nodes);
|
||||
var streamManager = new StreamManager(meta);
|
||||
var consumerManager = new ConsumerManager(meta);
|
||||
var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
|
||||
return Task.FromResult(new LeaderFailoverFixture(meta, streamManager, consumerManager, router));
|
||||
}
|
||||
|
||||
public Task CreateStreamAsync(string name, string[] subjects, int replicas)
|
||||
{
|
||||
var response = _streamManager.CreateOrUpdate(new StreamConfig
|
||||
{
|
||||
Name = name,
|
||||
Subjects = [.. subjects],
|
||||
Replicas = replicas,
|
||||
});
|
||||
|
||||
if (response.Error is not null)
|
||||
throw new InvalidOperationException(response.Error.Description);
|
||||
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
public Task<PubAck> PublishAsync(string subject, string payload)
|
||||
{
|
||||
if (_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
|
||||
return Task.FromResult(ack);
|
||||
|
||||
throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");
|
||||
}
|
||||
|
||||
public Task<JetStreamApiResponse> StepDownStreamLeaderAsync(string stream)
|
||||
{
|
||||
var response = _router.Route(
|
||||
$"{JetStreamApiSubjects.StreamLeaderStepdown}{stream}",
|
||||
"{}"u8);
|
||||
return Task.FromResult(response);
|
||||
}
|
||||
|
||||
public string GetStreamLeaderId(string stream)
|
||||
{
|
||||
// The StreamManager exposes replica groups via step-down routing;
|
||||
// we also reflect the leader through the replica group directly.
|
||||
var field = typeof(StreamManager)
|
||||
.GetField("_replicaGroups", System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance)!;
|
||||
var groups = (System.Collections.Concurrent.ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(_streamManager)!;
|
||||
if (groups.TryGetValue(stream, out var group))
|
||||
return group.Leader.Id;
|
||||
return string.Empty;
|
||||
}
|
||||
|
||||
public ValueTask<ApiStreamState> GetStreamStateAsync(string stream)
|
||||
=> _streamManager.GetStateAsync(stream, default);
|
||||
|
||||
public MetaGroupState? GetMetaState() => _streamManager.GetMetaState();
|
||||
|
||||
public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
|
||||
{
|
||||
var response = _router.Route(subject, Encoding.UTF8.GetBytes(payload));
|
||||
|
||||
if (subject.Equals(JetStreamApiSubjects.MetaLeaderStepdown, StringComparison.Ordinal) && response.Success)
|
||||
_metaGroup.BecomeLeader();
|
||||
|
||||
return Task.FromResult(response);
|
||||
}
|
||||
|
||||
public ValueTask DisposeAsync() => ValueTask.CompletedTask;
|
||||
}
|
||||
@@ -0,0 +1,463 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: JetStreamMetaGroup RAFT proposal workflow — stream create/delete,
|
||||
// consumer create/delete, leader validation, duplicate rejection,
|
||||
// ApplyEntry dispatch, inflight tracking, leader change clearing inflight,
|
||||
// GetState snapshot with consumer counts.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for JetStreamMetaGroup RAFT proposal workflow.
|
||||
/// Go reference: jetstream_cluster.go:500-2000 (processStreamAssignment,
|
||||
/// processConsumerAssignment, meta group leader logic).
|
||||
/// </summary>
|
||||
public class MetaGroupProposalTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Stream create proposal
|
||||
// Go reference: jetstream_cluster.go processStreamAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_create_proposal_adds_stream_assignment()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var group = new RaftGroup { Name = "test-group", Peers = ["p1", "p2", "p3"] };
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "ORDERS" }, group, default);
|
||||
|
||||
var assignment = meta.GetStreamAssignment("ORDERS");
|
||||
assignment.ShouldNotBeNull();
|
||||
assignment.StreamName.ShouldBe("ORDERS");
|
||||
assignment.Group.ShouldBeSameAs(group);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_create_proposal_increments_stream_count()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S1" }, null, default);
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S2" }, null, default);
|
||||
|
||||
meta.StreamCount.ShouldBe(2);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_create_proposal_appears_in_state()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "EVENTS" }, null, default);
|
||||
|
||||
var state = meta.GetState();
|
||||
state.Streams.ShouldContain("EVENTS");
|
||||
state.AssignmentCount.ShouldBe(1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream delete proposal
|
||||
// Go reference: jetstream_cluster.go processStreamDelete
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_delete_proposal_removes_stream()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "DOOMED" }, null, default);
|
||||
|
||||
await meta.ProposeDeleteStreamValidatedAsync("DOOMED", default);
|
||||
|
||||
meta.GetStreamAssignment("DOOMED").ShouldBeNull();
|
||||
meta.StreamCount.ShouldBe(0);
|
||||
meta.GetState().Streams.ShouldNotContain("DOOMED");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stream_delete_with_consumers_decrements_consumer_count()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var sg = new RaftGroup { Name = "sg", Peers = ["p1"] };
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S" }, sg, default);
|
||||
await meta.ProposeCreateConsumerValidatedAsync("S", "C1", cg, default);
|
||||
await meta.ProposeCreateConsumerValidatedAsync("S", "C2", cg, default);
|
||||
meta.ConsumerCount.ShouldBe(2);
|
||||
|
||||
await meta.ProposeDeleteStreamValidatedAsync("S", default);
|
||||
meta.ConsumerCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer create/delete proposal
|
||||
// Go reference: jetstream_cluster.go processConsumerAssignment/Delete
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_create_proposal_adds_consumer_to_stream()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var sg = new RaftGroup { Name = "sg", Peers = ["p1", "p2", "p3"] };
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "ORDERS" }, sg, default);
|
||||
await meta.ProposeCreateConsumerValidatedAsync("ORDERS", "PROCESSOR", cg, default);
|
||||
|
||||
var ca = meta.GetConsumerAssignment("ORDERS", "PROCESSOR");
|
||||
ca.ShouldNotBeNull();
|
||||
ca.ConsumerName.ShouldBe("PROCESSOR");
|
||||
ca.StreamName.ShouldBe("ORDERS");
|
||||
meta.ConsumerCount.ShouldBe(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_delete_proposal_removes_consumer()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var sg = new RaftGroup { Name = "sg", Peers = ["p1"] };
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "S" }, sg, default);
|
||||
await meta.ProposeCreateConsumerValidatedAsync("S", "C1", cg, default);
|
||||
meta.ConsumerCount.ShouldBe(1);
|
||||
|
||||
await meta.ProposeDeleteConsumerValidatedAsync("S", "C1", default);
|
||||
meta.GetConsumerAssignment("S", "C1").ShouldBeNull();
|
||||
meta.ConsumerCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Multiple_consumers_tracked_independently()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var sg = new RaftGroup { Name = "sg", Peers = ["p1"] };
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "MULTI" }, sg, default);
|
||||
await meta.ProposeCreateConsumerValidatedAsync("MULTI", "C1", cg, default);
|
||||
await meta.ProposeCreateConsumerValidatedAsync("MULTI", "C2", cg, default);
|
||||
await meta.ProposeCreateConsumerValidatedAsync("MULTI", "C3", cg, default);
|
||||
|
||||
meta.ConsumerCount.ShouldBe(3);
|
||||
meta.GetStreamAssignment("MULTI")!.Consumers.Count.ShouldBe(3);
|
||||
|
||||
await meta.ProposeDeleteConsumerValidatedAsync("MULTI", "C2", default);
|
||||
meta.ConsumerCount.ShouldBe(2);
|
||||
meta.GetConsumerAssignment("MULTI", "C2").ShouldBeNull();
|
||||
meta.GetConsumerAssignment("MULTI", "C1").ShouldNotBeNull();
|
||||
meta.GetConsumerAssignment("MULTI", "C3").ShouldNotBeNull();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Not-leader rejects proposals
|
||||
// Go reference: jetstream_api.go:200-300 — leader check
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Not_leader_rejects_stream_create()
|
||||
{
|
||||
// selfIndex=2 but leaderIndex starts at 1, so IsLeader() is false
|
||||
var meta = new JetStreamMetaGroup(3, selfIndex: 2);
|
||||
|
||||
var ex = Should.Throw<InvalidOperationException>(
|
||||
() => meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "FAIL" }, null, default));
|
||||
|
||||
ex.Message.ShouldContain("Not the meta-group leader");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Not_leader_rejects_stream_delete()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3, selfIndex: 2);
|
||||
|
||||
var ex = Should.Throw<InvalidOperationException>(
|
||||
() => meta.ProposeDeleteStreamValidatedAsync("S", default));
|
||||
|
||||
ex.Message.ShouldContain("Not the meta-group leader");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Not_leader_rejects_consumer_create()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3, selfIndex: 2);
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
|
||||
var ex = Should.Throw<InvalidOperationException>(
|
||||
() => meta.ProposeCreateConsumerValidatedAsync("S", "C1", cg, default));
|
||||
|
||||
ex.Message.ShouldContain("Not the meta-group leader");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Not_leader_rejects_consumer_delete()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3, selfIndex: 2);
|
||||
|
||||
var ex = Should.Throw<InvalidOperationException>(
|
||||
() => meta.ProposeDeleteConsumerValidatedAsync("S", "C1", default));
|
||||
|
||||
ex.Message.ShouldContain("Not the meta-group leader");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Duplicate stream name rejected (validated path)
|
||||
// Go reference: jetstream_cluster.go duplicate stream check
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Duplicate_stream_name_rejected_by_validated_proposal()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
await meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "DUP" }, null, default);
|
||||
|
||||
var ex = Should.Throw<InvalidOperationException>(
|
||||
() => meta.ProposeCreateStreamValidatedAsync(new StreamConfig { Name = "DUP" }, null, default));
|
||||
|
||||
ex.Message.ShouldContain("already exists");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Consumer on non-existent stream rejected (validated path)
|
||||
// Go reference: jetstream_cluster.go stream existence check
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Consumer_on_nonexistent_stream_rejected_by_validated_proposal()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
|
||||
var ex = Should.Throw<InvalidOperationException>(
|
||||
() => meta.ProposeCreateConsumerValidatedAsync("MISSING", "C1", cg, default));
|
||||
|
||||
ex.Message.ShouldContain("not found");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyEntry dispatch
|
||||
// Go reference: jetstream_cluster.go RAFT apply for meta group
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_stream_create_adds_assignment()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var group = new RaftGroup { Name = "APPLIED", Peers = ["p1"] };
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "APPLIED", group: group);
|
||||
|
||||
meta.GetStreamAssignment("APPLIED").ShouldNotBeNull();
|
||||
meta.StreamCount.ShouldBe(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_stream_delete_removes_assignment()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "TEMP");
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.StreamDelete, "TEMP");
|
||||
|
||||
meta.GetStreamAssignment("TEMP").ShouldBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_consumer_create_adds_consumer()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "S");
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.ConsumerCreate, "C1", streamName: "S");
|
||||
|
||||
meta.GetConsumerAssignment("S", "C1").ShouldNotBeNull();
|
||||
meta.ConsumerCount.ShouldBe(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_consumer_delete_removes_consumer()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
meta.ApplyEntry(MetaEntryType.StreamCreate, "S");
|
||||
meta.ApplyEntry(MetaEntryType.ConsumerCreate, "C1", streamName: "S");
|
||||
|
||||
meta.ApplyEntry(MetaEntryType.ConsumerDelete, "C1", streamName: "S");
|
||||
|
||||
meta.GetConsumerAssignment("S", "C1").ShouldBeNull();
|
||||
meta.ConsumerCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void ApplyEntry_consumer_without_stream_name_throws()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
Should.Throw<ArgumentNullException>(
|
||||
() => meta.ApplyEntry(MetaEntryType.ConsumerCreate, "C1"));
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Inflight tracking
|
||||
// Go reference: jetstream_cluster.go inflight tracking
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Inflight_cleared_after_stream_create()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "INF" }, default);
|
||||
|
||||
// Inflight should be cleared after proposal completes
|
||||
meta.InflightStreamCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Inflight_cleared_after_consumer_create()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, default);
|
||||
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
await meta.ProposeCreateConsumerAsync("S", "C1", cg, default);
|
||||
|
||||
meta.InflightConsumerCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Leader change clears inflight
|
||||
// Go reference: jetstream_cluster.go leader stepdown
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Leader_change_clears_inflight()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
// Manually inspect that step down clears (inflight is always 0 after
|
||||
// synchronous proposal, but the StepDown path is the important semantic).
|
||||
meta.StepDown();
|
||||
|
||||
meta.InflightStreamCount.ShouldBe(0);
|
||||
meta.InflightConsumerCount.ShouldBe(0);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void StepDown_increments_leadership_version()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var versionBefore = meta.GetState().LeadershipVersion;
|
||||
|
||||
meta.StepDown();
|
||||
|
||||
meta.GetState().LeadershipVersion.ShouldBeGreaterThan(versionBefore);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetState returns correct snapshot
|
||||
// Go reference: jetstream_cluster.go meta group state
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task GetState_returns_correct_snapshot()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(5);
|
||||
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ALPHA" }, default);
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "BETA" }, default);
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
await meta.ProposeCreateConsumerAsync("ALPHA", "C1", cg, default);
|
||||
await meta.ProposeCreateConsumerAsync("ALPHA", "C2", cg, default);
|
||||
await meta.ProposeCreateConsumerAsync("BETA", "C1", cg, default);
|
||||
|
||||
var state = meta.GetState();
|
||||
|
||||
state.ClusterSize.ShouldBe(5);
|
||||
state.Streams.Count.ShouldBe(2);
|
||||
state.AssignmentCount.ShouldBe(2);
|
||||
state.ConsumerCount.ShouldBe(3);
|
||||
state.LeaderId.ShouldBe("meta-1");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetState_streams_are_sorted()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ZULU" }, default);
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "ALPHA" }, default);
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "MIKE" }, default);
|
||||
|
||||
var state = meta.GetState();
|
||||
state.Streams[0].ShouldBe("ALPHA");
|
||||
state.Streams[1].ShouldBe("MIKE");
|
||||
state.Streams[2].ShouldBe("ZULU");
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetAllAssignments
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task GetAllAssignments_returns_all_streams()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "A" }, default);
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "B" }, default);
|
||||
|
||||
var all = meta.GetAllAssignments();
|
||||
all.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// GetConsumerAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void GetConsumerAssignment_returns_null_for_nonexistent_stream()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
meta.GetConsumerAssignment("MISSING", "C1").ShouldBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task GetConsumerAssignment_returns_null_for_nonexistent_consumer()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "S" }, default);
|
||||
|
||||
meta.GetConsumerAssignment("S", "MISSING").ShouldBeNull();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Idempotent backward-compatible paths
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task Duplicate_stream_create_is_idempotent_via_unvalidated_path()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, default);
|
||||
await meta.ProposeCreateStreamAsync(new StreamConfig { Name = "DUP" }, default);
|
||||
|
||||
meta.StreamCount.ShouldBe(1);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Consumer_on_nonexistent_stream_is_silent_via_unvalidated_path()
|
||||
{
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
var cg = new RaftGroup { Name = "cg", Peers = ["p1"] };
|
||||
|
||||
// Should not throw
|
||||
await meta.ProposeCreateConsumerAsync("MISSING", "C1", cg, default);
|
||||
|
||||
meta.GetStreamAssignment("MISSING").ShouldBeNull();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,205 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for MetaSnapshotCodec: encode/decode round-trip, S2 compression, versioning.
/// Go reference: jetstream_cluster.go:2075-2145.
/// </summary>
public class MetaSnapshotCodecTests
{
    [Fact]
    public void Encode_decode_round_trips()
    {
        // Go reference: jetstream_cluster.go encodeMetaSnapshot/decodeMetaSnapshot round-trip
        var streamA = new StreamAssignment
        {
            StreamName = "stream-A",
            Group = new RaftGroup { Name = "rg-a", Peers = ["n1", "n2", "n3"] },
            ConfigJson = """{"subjects":["foo.>"]}""",
        };
        var streamB = new StreamAssignment
        {
            StreamName = "stream-B",
            Group = new RaftGroup { Name = "rg-b", Peers = ["n1", "n2"] },
            ConfigJson = """{"subjects":["bar.>"]}""",
        };
        // stream-B additionally carries one consumer assignment.
        streamB.Consumers["con-1"] = new ConsumerAssignment
        {
            ConsumerName = "con-1",
            StreamName = "stream-B",
            Group = new RaftGroup { Name = "rg-c1", Peers = ["n1", "n2"] },
        };
        var snapshot = new Dictionary<string, StreamAssignment>
        {
            ["stream-A"] = streamA,
            ["stream-B"] = streamB,
        };

        var bytes = MetaSnapshotCodec.Encode(snapshot);
        bytes.ShouldNotBeEmpty();

        var restored = MetaSnapshotCodec.Decode(bytes);

        restored.Count.ShouldBe(2);
        restored["stream-A"].StreamName.ShouldBe("stream-A");
        restored["stream-A"].Group.Peers.Count.ShouldBe(3);
        restored["stream-B"].Consumers.Count.ShouldBe(1);
        restored["stream-B"].Consumers["con-1"].ConsumerName.ShouldBe("con-1");
    }

    [Fact]
    public void Encoded_snapshot_is_compressed()
    {
        // Go reference: jetstream_cluster.go S2 compression of meta snapshots
        var snapshot = new Dictionary<string, StreamAssignment>();
        for (var i = 0; i < 100; i++)
        {
            var name = $"stream-{i}";
            snapshot[name] = new StreamAssignment
            {
                StreamName = name,
                Group = new RaftGroup { Name = $"rg-{i}", Peers = ["n1", "n2", "n3"] },
                ConfigJson = """{"subjects":["test.>"]}""",
            };
        }

        var encoded = MetaSnapshotCodec.Encode(snapshot);
        var rawJson = System.Text.Json.JsonSerializer.SerializeToUtf8Bytes(snapshot);

        // Compressed payload (plus the 2-byte version header) must beat raw JSON.
        encoded.Length.ShouldBeLessThan(rawJson.Length);
    }

    [Fact]
    public void Empty_snapshot_round_trips()
    {
        // Go reference: jetstream_cluster.go decodeMetaSnapshot handles empty map
        var encoded = MetaSnapshotCodec.Encode(new Dictionary<string, StreamAssignment>());

        MetaSnapshotCodec.Decode(encoded).ShouldBeEmpty();
    }

    [Fact]
    public void Versioned_format_rejects_unknown_version()
    {
        // Go reference: jetstream_cluster.go version check in decodeMetaSnapshot
        byte[] unknownVersion = [0xFF, 0xFF, 0, 0]; // little-endian version 65535

        Should.Throw<InvalidOperationException>(() => MetaSnapshotCodec.Decode(unknownVersion));
    }

    [Fact]
    public void Decode_rejects_too_short_input()
    {
        // Go reference: jetstream_cluster.go guard against truncated snapshot
        Should.Throw<InvalidOperationException>(() => MetaSnapshotCodec.Decode([0x01]));
    }

    [Fact]
    public void Encoded_snapshot_begins_with_version_one_header()
    {
        // Go reference: jetstream_cluster.go:2075 — versioned header allows future format evolution
        var snapshot = new Dictionary<string, StreamAssignment>
        {
            ["s1"] = new StreamAssignment
            {
                StreamName = "s1",
                Group = new RaftGroup { Name = "g1", Peers = ["n1"] },
            },
        };

        var encoded = MetaSnapshotCodec.Encode(snapshot);

        // Version 1 in little-endian: first byte 0x01, second byte 0x00.
        encoded[0].ShouldBe((byte)0x01);
        encoded[1].ShouldBe((byte)0x00);
    }

    [Fact]
    public void Round_trip_preserves_all_stream_assignment_fields()
    {
        // Go reference: jetstream_cluster.go streamAssignment struct fields preserved across snapshot
        var createdAt = new DateTime(2025, 6, 15, 12, 0, 0, DateTimeKind.Utc);
        var original = new StreamAssignment
        {
            StreamName = "my-stream",
            Group = new RaftGroup
            {
                Name = "rg-main",
                Peers = ["peer-a", "peer-b", "peer-c"],
                StorageType = "memory",
                Cluster = "cluster-east",
                Preferred = "peer-a",
            },
            Created = createdAt,
            ConfigJson = """{"subjects":["events.>"],"storage":"memory"}""",
            SyncSubject = "$JS.SYNC.my-stream",
            Responded = true,
            Recovering = false,
            Reassigning = true,
        };
        var snapshot = new Dictionary<string, StreamAssignment> { ["my-stream"] = original };

        var roundTripped = MetaSnapshotCodec.Decode(MetaSnapshotCodec.Encode(snapshot))["my-stream"];

        roundTripped.StreamName.ShouldBe("my-stream");
        roundTripped.Group.Name.ShouldBe("rg-main");
        roundTripped.Group.Peers.ShouldBe(["peer-a", "peer-b", "peer-c"]);
        roundTripped.Group.StorageType.ShouldBe("memory");
        roundTripped.Group.Cluster.ShouldBe("cluster-east");
        roundTripped.Group.Preferred.ShouldBe("peer-a");
        roundTripped.Created.ShouldBe(createdAt);
        roundTripped.ConfigJson.ShouldBe("""{"subjects":["events.>"],"storage":"memory"}""");
        roundTripped.SyncSubject.ShouldBe("$JS.SYNC.my-stream");
        roundTripped.Responded.ShouldBeTrue();
        roundTripped.Recovering.ShouldBeFalse();
        roundTripped.Reassigning.ShouldBeTrue();
    }

    [Fact]
    public void Round_trip_preserves_multiple_consumers_per_stream()
    {
        // Go reference: jetstream_cluster.go consumerAssignment map restored in snapshot
        var streamAssignment = new StreamAssignment
        {
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-mc", Peers = ["n1", "n2", "n3"] },
        };
        streamAssignment.Consumers["consumer-alpha"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-alpha",
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-alpha", Peers = ["n1"] },
            ConfigJson = """{"deliver_subject":"out.alpha"}""",
            Responded = true,
        };
        streamAssignment.Consumers["consumer-beta"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-beta",
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-beta", Peers = ["n2", "n3"] },
            Recovering = true,
        };

        var restored = MetaSnapshotCodec.Decode(MetaSnapshotCodec.Encode(
            new Dictionary<string, StreamAssignment> { ["multi-consumer-stream"] = streamAssignment }));

        var consumers = restored["multi-consumer-stream"].Consumers;
        consumers.Count.ShouldBe(2);

        var alpha = consumers["consumer-alpha"];
        alpha.ConsumerName.ShouldBe("consumer-alpha");
        alpha.StreamName.ShouldBe("multi-consumer-stream");
        alpha.Group.Name.ShouldBe("rg-alpha");
        alpha.ConfigJson.ShouldBe("""{"deliver_subject":"out.alpha"}""");
        alpha.Responded.ShouldBeTrue();

        var beta = consumers["consumer-beta"];
        beta.ConsumerName.ShouldBe("consumer-beta");
        beta.Group.Peers.Count.ShouldBe(2);
        beta.Recovering.ShouldBeTrue();
    }
}
|
||||
@@ -0,0 +1,433 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:2290-2439
|
||||
// Covers: ProcessAddPeer, ProcessRemovePeer, RemovePeerFromStream, RemapStreamAssignment —
|
||||
// peer-driven stream reassignment in the JetStreamMetaGroup.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for JetStreamMetaGroup peer management and stream reassignment.
/// Go reference: jetstream_cluster.go:2290-2439 (processAddPeer, processRemovePeer,
/// removePeerFromStreamLocked, remapStreamAssignment).
/// </summary>
public class PeerManagementTests
{
    // -----------------------------------------------------------------
    // ProcessAddPeer — peer registration
    // Go reference: jetstream_cluster.go:2290 processAddPeer
    // -----------------------------------------------------------------

    [Fact]
    public void ProcessAddPeer_registers_new_peer()
    {
        // Go reference: jetstream_cluster.go:2290 processAddPeer — the peer becomes known.
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessAddPeer("peer-1");

        metaGroup.GetKnownPeers().ShouldContain("peer-1");
    }

    [Fact]
    public void ProcessAddPeer_registers_multiple_peers_independently()
    {
        // Go reference: jetstream_cluster.go:2290 — each added peer is tracked on its own.
        var metaGroup = new JetStreamMetaGroup(3);
        string[] ids = ["peer-1", "peer-2", "peer-3"];

        foreach (var id in ids)
        {
            metaGroup.ProcessAddPeer(id);
        }

        var knownPeers = metaGroup.GetKnownPeers();
        knownPeers.Count.ShouldBe(3);
        foreach (var id in ids)
        {
            knownPeers.ShouldContain(id);
        }
    }

    [Fact]
    public void ProcessAddPeer_duplicate_add_is_idempotent()
    {
        // AddKnownPeer uses a HashSet, so a repeated add does not inflate the count.
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessAddPeer("peer-1");
        metaGroup.ProcessAddPeer("peer-1");

        metaGroup.GetKnownPeers().Count.ShouldBe(1);
    }

    // -----------------------------------------------------------------
    // ProcessAddPeer — under-replication detection
    // Go reference: jetstream_cluster.go:2311-2339 missingPeers + peer append
    // -----------------------------------------------------------------

    [Fact]
    public void ProcessAddPeer_triggers_rereplication_of_underreplicated_stream()
    {
        // Go reference: jetstream_cluster.go:2315 sa.missingPeers() — new peer joins the group.
        var metaGroup = new JetStreamMetaGroup(3); // leader by default

        // Two peers assigned while DesiredReplicas is 3 → the stream is under-replicated.
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "ORDERS",
            Group = new RaftGroup
            {
                Name = "orders-rg",
                Peers = ["peer-1", "peer-2"],
                DesiredReplicas = 3,
            },
        });

        metaGroup.ProcessAddPeer("peer-3");

        var peers = metaGroup.GetStreamAssignment("ORDERS")!.Group.Peers;
        peers.ShouldContain("peer-3");
        peers.Count.ShouldBe(3);
    }

    [Fact]
    public void ProcessAddPeer_does_not_add_peer_to_fully_replicated_stream()
    {
        // Go reference: jetstream_cluster.go:2315 — missingPeers() is false at the desired count.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "EVENTS",
            Group = new RaftGroup
            {
                Name = "events-rg",
                Peers = ["peer-1", "peer-2", "peer-3"],
                DesiredReplicas = 3,
            },
        });

        metaGroup.ProcessAddPeer("peer-4");

        var peers = metaGroup.GetStreamAssignment("EVENTS")!.Group.Peers;
        peers.Count.ShouldBe(3);
        peers.ShouldNotContain("peer-4");
    }

    [Fact]
    public void ProcessAddPeer_does_not_add_peer_already_in_group()
    {
        // A peer that is already a member must not be appended a second time.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "LOGS",
            Group = new RaftGroup
            {
                Name = "logs-rg",
                Peers = ["peer-1"],
                DesiredReplicas = 2,
            },
        });

        metaGroup.ProcessAddPeer("peer-1");

        metaGroup.GetStreamAssignment("LOGS")!.Group.Peers.Count.ShouldBe(1);
    }

    [Fact]
    public void ProcessAddPeer_non_leader_does_not_modify_assignments()
    {
        // Go reference: jetstream_cluster.go:2301 — only the leader re-assigns streams.
        var metaGroup = new JetStreamMetaGroup(3, selfIndex: 2); // follower
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "S",
            Group = new RaftGroup
            {
                Name = "rg",
                Peers = ["peer-1"],
                DesiredReplicas = 3,
            },
        });

        metaGroup.ProcessAddPeer("peer-2");

        // The peer is registered, but a follower leaves the assignment untouched.
        metaGroup.GetKnownPeers().ShouldContain("peer-2");
        metaGroup.GetStreamAssignment("S")!.Group.Peers.Count.ShouldBe(1);
    }

    // -----------------------------------------------------------------
    // ProcessRemovePeer — stream reassignment
    // Go reference: jetstream_cluster.go:2342 processRemovePeer
    // -----------------------------------------------------------------

    [Fact]
    public void ProcessRemovePeer_reassigns_streams_away_from_peer()
    {
        // Go reference: jetstream_cluster.go:2385-2392 — streams on the removed peer are remapped.
        var metaGroup = new JetStreamMetaGroup(3);
        foreach (var id in new[] { "peer-1", "peer-2", "peer-3" })
        {
            metaGroup.AddKnownPeer(id);
        }
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "ORDERS",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2"] },
        });

        metaGroup.ProcessRemovePeer("peer-1");

        metaGroup.GetStreamAssignment("ORDERS")!.Group.Peers.ShouldNotContain("peer-1");
    }

    [Fact]
    public void ProcessRemovePeer_removes_peer_from_known_peers()
    {
        // Go reference: jetstream_cluster.go:2342 — the peer is de-registered.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddKnownPeer("peer-1");

        metaGroup.ProcessRemovePeer("peer-1");

        metaGroup.GetKnownPeers().ShouldNotContain("peer-1");
    }

    [Fact]
    public void ProcessRemovePeer_unknown_peer_is_noop()
    {
        // Go reference: jetstream_cluster.go:2342 — removing an unknown peer must not crash.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "S",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-2", "peer-3"] },
        });

        metaGroup.ProcessRemovePeer("peer-99"); // should not throw

        // The stream's membership is unaffected.
        var peers = metaGroup.GetStreamAssignment("S")!.Group.Peers;
        peers.ShouldContain("peer-2");
        peers.ShouldContain("peer-3");
    }

    [Fact]
    public void ProcessRemovePeer_non_leader_only_deregisters_peer()
    {
        // Go reference: jetstream_cluster.go:2378 — a follower skips re-assignment.
        var metaGroup = new JetStreamMetaGroup(3, selfIndex: 2);
        metaGroup.AddKnownPeer("peer-1");
        metaGroup.AddKnownPeer("peer-2");
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "S",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2"] },
        });

        metaGroup.ProcessRemovePeer("peer-1");

        // De-registered from the known set...
        metaGroup.GetKnownPeers().ShouldNotContain("peer-1");
        // ...but a follower never rewrites stream assignments.
        metaGroup.GetStreamAssignment("S")!.Group.Peers.ShouldContain("peer-1");
    }

    // -----------------------------------------------------------------
    // RemovePeerFromStream
    // Go reference: jetstream_cluster.go:2403 removePeerFromStreamLocked
    // -----------------------------------------------------------------

    [Fact]
    public void RemovePeerFromStream_removes_peer_from_group()
    {
        // Go reference: jetstream_cluster.go:2404 — the peer leaves the stream group.
        var metaGroup = new JetStreamMetaGroup(3);
        foreach (var id in new[] { "peer-1", "peer-2", "peer-3" })
        {
            metaGroup.AddKnownPeer(id);
        }
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "EVENTS",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2", "peer-3"] },
        });

        metaGroup.RemovePeerFromStream("EVENTS", "peer-2");

        metaGroup.GetStreamAssignment("EVENTS")!.Group.Peers.ShouldNotContain("peer-2");
    }

    [Fact]
    public void RemovePeerFromStream_returns_false_for_nonexistent_stream()
    {
        // An unknown stream name yields false rather than an exception.
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.RemovePeerFromStream("GHOST", "peer-1").ShouldBeFalse();
    }

    [Fact]
    public void RemovePeerFromStream_returns_false_when_peer_not_in_group()
    {
        // The peer is not a member of the stream's group.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "S",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2"] },
        });

        metaGroup.RemovePeerFromStream("S", "peer-99").ShouldBeFalse();
    }

    [Fact]
    public void RemovePeerFromStream_replaces_peer_when_replacement_available()
    {
        // Go reference: jetstream_cluster.go:7088-7094 — a replacement comes from the free pool.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddKnownPeer("peer-1");
        metaGroup.AddKnownPeer("peer-2");
        metaGroup.AddKnownPeer("peer-3"); // the replacement candidate
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "ORDERS",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2"] },
        });

        var replaced = metaGroup.RemovePeerFromStream("ORDERS", "peer-1");

        replaced.ShouldBeTrue();
        var peers = metaGroup.GetStreamAssignment("ORDERS")!.Group.Peers;
        peers.ShouldNotContain("peer-1");
        peers.Count.ShouldBe(2);
        peers.ShouldContain("peer-3");
    }

    [Fact]
    public void RemovePeerFromStream_shrinks_group_when_no_replacement_available()
    {
        // Go reference: jetstream_cluster.go:7102-7110 — R>1 falls back to bare removal.
        var metaGroup = new JetStreamMetaGroup(3);
        // Both known peers are already in the group, so no replacement exists.
        metaGroup.AddKnownPeer("peer-1");
        metaGroup.AddKnownPeer("peer-2");
        metaGroup.AddStreamAssignment(new StreamAssignment
        {
            StreamName = "LOGS",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2"] },
        });

        var replaced = metaGroup.RemovePeerFromStream("LOGS", "peer-1");

        // No replacement was found, so the group simply shrinks.
        replaced.ShouldBeFalse();
        var peers = metaGroup.GetStreamAssignment("LOGS")!.Group.Peers;
        peers.ShouldNotContain("peer-1");
        peers.Count.ShouldBe(1);
    }

    // -----------------------------------------------------------------
    // RemapStreamAssignment
    // Go reference: jetstream_cluster.go:7077 remapStreamAssignment
    // -----------------------------------------------------------------

    [Fact]
    public void RemapStreamAssignment_selects_new_peers()
    {
        // Go reference: jetstream_cluster.go:7077 — keep survivors, pick a fresh candidate.
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = new StreamAssignment
        {
            StreamName = "EVENTS",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2", "peer-3"] },
        };
        metaGroup.AddStreamAssignment(assignment);

        var remapped = metaGroup.RemapStreamAssignment(
            assignment,
            new List<string> { "peer-1", "peer-2", "peer-3", "peer-4" },
            removePeer: "peer-3");

        remapped.ShouldBeTrue();
        var peers = metaGroup.GetStreamAssignment("EVENTS")!.Group.Peers;
        peers.ShouldNotContain("peer-3");
        peers.Count.ShouldBe(3);
        peers.ShouldContain("peer-4");
    }

    [Fact]
    public void RemapStreamAssignment_retains_existing_peers()
    {
        // Survivors (members other than the removed peer) stay in the new group.
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = new StreamAssignment
        {
            StreamName = "S",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2", "peer-3"] },
        };
        metaGroup.AddStreamAssignment(assignment);

        metaGroup.RemapStreamAssignment(
            assignment,
            new List<string> { "peer-1", "peer-2", "peer-3", "peer-4" },
            removePeer: "peer-1");

        var peers = metaGroup.GetStreamAssignment("S")!.Group.Peers;
        peers.ShouldContain("peer-2");
        peers.ShouldContain("peer-3");
    }

    [Fact]
    public void RemapStreamAssignment_returns_false_when_no_replacement()
    {
        // Go reference: jetstream_cluster.go:7098-7110 — no placement possible for R1.
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = new StreamAssignment
        {
            StreamName = "R1",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1"] },
        };
        metaGroup.AddStreamAssignment(assignment);

        // The only available peer is the one being removed — nothing can be added.
        var remapped = metaGroup.RemapStreamAssignment(
            assignment, new List<string> { "peer-1" }, removePeer: "peer-1");

        remapped.ShouldBeFalse();
    }

    [Fact]
    public void RemapStreamAssignment_empty_available_shrinks_group()
    {
        // With no available peers at all, the group just loses the removed member.
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = new StreamAssignment
        {
            StreamName = "S",
            Group = new RaftGroup { Name = "rg", Peers = ["peer-1", "peer-2"] },
        };
        metaGroup.AddStreamAssignment(assignment);

        var remapped = metaGroup.RemapStreamAssignment(assignment, [], removePeer: "peer-1");

        remapped.ShouldBeFalse();
        metaGroup.GetStreamAssignment("S")!.Group.Peers.ShouldNotContain("peer-1");
    }

    // -----------------------------------------------------------------
    // AddKnownPeer / RemoveKnownPeer
    // -----------------------------------------------------------------

    [Fact]
    public void AddKnownPeer_and_RemoveKnownPeer_are_consistent()
    {
        // Adding two peers and removing one leaves exactly the other behind.
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.AddKnownPeer("p1");
        metaGroup.AddKnownPeer("p2");
        metaGroup.RemoveKnownPeer("p1");

        var knownPeers = metaGroup.GetKnownPeers();
        knownPeers.ShouldNotContain("p1");
        knownPeers.ShouldContain("p2");
    }

    [Fact]
    public void RemoveKnownPeer_unknown_peer_is_noop()
    {
        // Removing a never-registered peer must not throw or change the set.
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.AddKnownPeer("p1");

        metaGroup.RemoveKnownPeer("p99"); // should not throw

        metaGroup.GetKnownPeers().Count.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,309 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:7212 selectPeerGroup
|
||||
// Covers: PlacementEngine peer selection with cluster affinity, tag filtering,
|
||||
// exclude-tag filtering, unavailable peer exclusion, storage-based ordering,
|
||||
// single replica selection, and combined policy filtering.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for PlacementEngine topology-aware peer selection.
|
||||
/// Go reference: jetstream_cluster.go:7212 selectPeerGroup.
|
||||
/// </summary>
|
||||
public class PlacementEngineTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// Basic selection with enough peers
|
||||
// Go reference: jetstream_cluster.go selectPeerGroup base case
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Basic_selection_with_enough_peers()
{
    // Five candidates, three replicas requested — selection succeeds.
    var candidates = CreatePeers(5);

    var selected = PlacementEngine.SelectPeerGroup("test-group", 3, candidates);

    selected.Name.ShouldBe("test-group");
    selected.Peers.Count.ShouldBe(3);
}
|
||||
|
||||
[Fact]
public void Selection_returns_exact_replica_count()
{
    // Exactly the requested replica count comes back, never more.
    var candidates = CreatePeers(10);

    PlacementEngine.SelectPeerGroup("exact", 5, candidates).Peers.Count.ShouldBe(5);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Insufficient peers throws
|
||||
// Go reference: jetstream_cluster.go not enough peers error
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Insufficient_peers_throws()
{
    // Go reference: jetstream_cluster.go not enough peers error
    var candidates = CreatePeers(2);

    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("fail", 5, candidates));
}
|
||||
|
||||
[Fact]
public void Zero_peers_with_replicas_throws()
{
    // An empty candidate list can never satisfy a replica request.
    // (Fixed: the thrown-exception result was previously captured in an
    // unused local named `group`, which was dead code.)
    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("empty", 1, []));
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Cluster affinity filtering
|
||||
// Go reference: jetstream_cluster.go cluster affinity in placement
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Cluster_affinity_selects_only_matching_cluster()
{
    // Only the three us-east peers are valid placement candidates.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Cluster = "us-east" },
        new() { PeerId = "p2", Cluster = "us-west" },
        new() { PeerId = "p3", Cluster = "us-east" },
        new() { PeerId = "p4", Cluster = "us-east" },
        new() { PeerId = "p5", Cluster = "eu-west" },
    };
    var policy = new PlacementPolicy { Cluster = "us-east" };

    var group = PlacementEngine.SelectPeerGroup("cluster", 3, peers, policy);

    group.Peers.Count.ShouldBe(3);
    // Fixed: the original `ShouldAllBe(id => id.StartsWith(...))` predicate was
    // looser than intended (a prefix match would also accept e.g. "p10");
    // assert exact membership in the matching-cluster set instead.
    group.Peers.ShouldBeSubsetOf(new[] { "p1", "p3", "p4" });
}
|
||||
|
||||
[Fact]
public void Cluster_affinity_is_case_insensitive()
{
    // "US-East" and "us-east" are treated as the same cluster.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Cluster = "US-East" },
        new() { PeerId = "p2", Cluster = "us-east" },
    };

    var group = PlacementEngine.SelectPeerGroup(
        "ci", 2, peers, new PlacementPolicy { Cluster = "us-east" });

    group.Peers.Count.ShouldBe(2);
}
|
||||
|
||||
[Fact]
public void Cluster_affinity_with_insufficient_matching_throws()
{
    // Only one peer lives in us-east, so two replicas cannot be placed there.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Cluster = "us-east" },
        new() { PeerId = "p2", Cluster = "us-west" },
    };
    var policy = new PlacementPolicy { Cluster = "us-east" };

    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("fail", 2, peers, policy));
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Tag filtering (include and exclude)
|
||||
// Go reference: jetstream_cluster.go tag-based filtering
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Tag_filtering_selects_peers_with_all_required_tags()
{
    // A peer qualifies only when it carries every required tag.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Tags = ["ssd", "fast"] },
        new() { PeerId = "p2", Tags = ["ssd"] },
        new() { PeerId = "p3", Tags = ["ssd", "fast", "gpu"] },
        new() { PeerId = "p4", Tags = ["hdd"] },
    };

    var group = PlacementEngine.SelectPeerGroup(
        "tags", 2, peers, new PlacementPolicy { Tags = ["ssd", "fast"] });

    // p1 and p3 are the only peers with both "ssd" and "fast".
    group.Peers.Count.ShouldBe(2);
    group.Peers.ShouldContain("p1");
    group.Peers.ShouldContain("p3");
}
|
||||
|
||||
[Fact]
public void Exclude_tag_filtering_removes_peers_with_excluded_tags()
{
    // Any peer carrying an excluded tag is dropped from the candidate pool.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Tags = ["ssd"] },
        new() { PeerId = "p2", Tags = ["ssd", "deprecated"] },
        new() { PeerId = "p3", Tags = ["ssd"] },
    };

    var group = PlacementEngine.SelectPeerGroup(
        "excl", 2, peers, new PlacementPolicy { ExcludeTags = ["deprecated"] });

    group.Peers.Count.ShouldBe(2);
    group.Peers.ShouldNotContain("p2");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Unavailable peers excluded
|
||||
// Go reference: jetstream_cluster.go offline peer filter
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Unavailable_peers_are_excluded()
{
    // Offline peers never receive placements.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Available = true },
        new() { PeerId = "p2", Available = false },
        new() { PeerId = "p3", Available = true },
        new() { PeerId = "p4", Available = false },
    };

    var group = PlacementEngine.SelectPeerGroup("avail", 2, peers);

    // Only the two online peers are selected.
    group.Peers.Count.ShouldBe(2);
    group.Peers.ShouldContain("p1");
    group.Peers.ShouldContain("p3");
}
|
||||
|
||||
[Fact]
public void All_unavailable_throws()
{
    // With every peer offline, even a single replica cannot be placed.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Available = false },
        new() { PeerId = "p2", Available = false },
    };

    Should.Throw<InvalidOperationException>(
        () => PlacementEngine.SelectPeerGroup("fail", 1, peers));
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Peers ordered by available storage
|
||||
// Go reference: jetstream_cluster.go storage-based ordering
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Peers_ordered_by_available_storage_descending()
{
    // The peers with the most free storage win, in descending order.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "low", AvailableStorage = 100 },
        new() { PeerId = "high", AvailableStorage = 10000 },
        new() { PeerId = "mid", AvailableStorage = 5000 },
    };

    var group = PlacementEngine.SelectPeerGroup("storage", 2, peers);

    // Top two by storage: "high" (10000) first, then "mid" (5000).
    group.Peers[0].ShouldBe("high");
    group.Peers[1].ShouldBe("mid");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Single replica selection
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Single_replica_selection()
{
    // R1 placement: exactly one peer comes back.
    var candidates = CreatePeers(5);

    PlacementEngine.SelectPeerGroup("single", 1, candidates).Peers.Count.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Policy with all filters combined
|
||||
// Go reference: jetstream_cluster.go combined placement policy
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void Combined_policy_filters_applied_together()
{
    // Cluster, required-tag, exclude-tag, and availability filters all apply at once.
    var peers = new List<PeerInfo>
    {
        new() { PeerId = "p1", Cluster = "us-east", Tags = ["ssd"], Available = true, AvailableStorage = 5000 },
        new() { PeerId = "p2", Cluster = "us-east", Tags = ["ssd", "old"], Available = true, AvailableStorage = 8000 },
        new() { PeerId = "p3", Cluster = "us-west", Tags = ["ssd"], Available = true, AvailableStorage = 9000 },
        new() { PeerId = "p4", Cluster = "us-east", Tags = ["ssd"], Available = false, AvailableStorage = 10000 },
        new() { PeerId = "p5", Cluster = "us-east", Tags = ["ssd"], Available = true, AvailableStorage = 7000 },
        new() { PeerId = "p6", Cluster = "us-east", Tags = ["hdd"], Available = true, AvailableStorage = 12000 },
    };
    var policy = new PlacementPolicy
    {
        Cluster = "us-east",
        Tags = ["ssd"],
        ExcludeTags = ["old"],
    };

    // Survivors: p1 (5000) and p5 (7000). Filtered out: p2 ("old" tag),
    // p3 (wrong cluster), p4 (unavailable), p6 (missing "ssd" tag).
    var group = PlacementEngine.SelectPeerGroup("combined", 2, peers, policy);

    group.Peers.Count.ShouldBe(2);
    // Storage-descending order puts p5 (7000) ahead of p1 (5000).
    group.Peers[0].ShouldBe("p5");
    group.Peers[1].ShouldBe("p1");
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Null policy is allowed (no filtering)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Null_policy_selects_without_filtering()
|
||||
{
|
||||
var peers = CreatePeers(3);
|
||||
|
||||
var group = PlacementEngine.SelectPeerGroup("nofilter", 3, peers, policy: null);
|
||||
|
||||
group.Peers.Count.ShouldBe(3);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Empty policy fields are ignored
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Empty_policy_cluster_is_ignored()
|
||||
{
|
||||
var peers = new List<PeerInfo>
|
||||
{
|
||||
new() { PeerId = "p1", Cluster = "us-east" },
|
||||
new() { PeerId = "p2", Cluster = "us-west" },
|
||||
};
|
||||
var policy = new PlacementPolicy { Cluster = "" };
|
||||
|
||||
var group = PlacementEngine.SelectPeerGroup("empty-cluster", 2, peers, policy);
|
||||
|
||||
group.Peers.Count.ShouldBe(2);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
private static List<PeerInfo> CreatePeers(int count)
|
||||
{
|
||||
return Enumerable.Range(1, count)
|
||||
.Select(i => new PeerInfo
|
||||
{
|
||||
PeerId = $"peer-{i}",
|
||||
Available = true,
|
||||
AvailableStorage = long.MaxValue - i,
|
||||
})
|
||||
.ToList();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,260 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: RaftGroup member management, peer add/remove/preferred,
|
||||
// factory method via PlacementEngine, replication health properties,
|
||||
// and quorum size calculation.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for RaftGroup lifecycle: membership helpers, the factory method,
/// replication-status properties, and majority quorum size.
/// Go reference: jetstream_cluster.go:154-163 raftGroup struct and peer management.
/// </summary>
public class RaftGroupLifecycleTests
{
    // ---------------------------------------------------------------
    // IsMember — membership check
    // Go reference: jetstream_cluster.go isMember helper
    // ---------------------------------------------------------------

    [Fact]
    public void IsMember_returns_true_for_existing_peer()
    {
        // Go reference: jetstream_cluster.go isMember — true when id is in rg.Peers.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"] };

        var isMember = rg.IsMember("peer-2");

        isMember.ShouldBeTrue();
    }

    [Fact]
    public void IsMember_returns_false_for_non_member()
    {
        // Go reference: jetstream_cluster.go isMember — false when id is absent.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var isMember = rg.IsMember("peer-9");

        isMember.ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // SetPreferred — assign preferred peer
    // Go reference: jetstream_cluster.go setPreferred / rg.Preferred
    // ---------------------------------------------------------------

    [Fact]
    public void SetPreferred_sets_preferred_peer()
    {
        // Go reference: jetstream_cluster.go setPreferred — assigns rg.Preferred for a member.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"] };

        rg.SetPreferred("peer-3");

        rg.Preferred.ShouldBe("peer-3");
    }

    [Fact]
    public void SetPreferred_throws_for_non_member()
    {
        // Go reference: jetstream_cluster.go setPreferred — membership is validated first.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var act = () => rg.SetPreferred("peer-99");

        Should.Throw<InvalidOperationException>(act);
    }

    // ---------------------------------------------------------------
    // RemovePeer — remove a peer from the group
    // Go reference: jetstream_cluster.go removePeer
    // ---------------------------------------------------------------

    [Fact]
    public void RemovePeer_removes_existing_peer()
    {
        // Go reference: jetstream_cluster.go removePeer — drops the peer from rg.Peers.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"] };

        var removed = rg.RemovePeer("peer-2");

        removed.ShouldBeTrue();
        rg.Peers.Count.ShouldBe(2);
        rg.Peers.ShouldNotContain("peer-2");
    }

    [Fact]
    public void RemovePeer_clears_preferred_when_removing_preferred()
    {
        // Go reference: jetstream_cluster.go removePeer — rg.Preferred is reset
        // when it names the peer being removed.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"], Preferred = "peer-2" };

        rg.RemovePeer("peer-2");

        rg.Preferred.ShouldBe(string.Empty);
    }

    [Fact]
    public void RemovePeer_returns_false_for_non_member()
    {
        // Go reference: jetstream_cluster.go removePeer — no-op (false) for an unknown peer.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var removed = rg.RemovePeer("peer-99");

        removed.ShouldBeFalse();
        rg.Peers.Count.ShouldBe(2);
    }

    // ---------------------------------------------------------------
    // AddPeer — add a new peer to the group
    // Go reference: jetstream_cluster.go addPeer / expandGroup
    // ---------------------------------------------------------------

    [Fact]
    public void AddPeer_adds_new_peer()
    {
        // Go reference: jetstream_cluster.go addPeer — appends the peer to rg.Peers.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var added = rg.AddPeer("peer-3");

        added.ShouldBeTrue();
        rg.Peers.Count.ShouldBe(3);
        rg.Peers.ShouldContain("peer-3");
    }

    [Fact]
    public void AddPeer_returns_false_for_existing_peer()
    {
        // Go reference: jetstream_cluster.go addPeer — duplicates are skipped (false).
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2"] };

        var added = rg.AddPeer("peer-1");

        added.ShouldBeFalse();
        rg.Peers.Count.ShouldBe(2);
    }

    // ---------------------------------------------------------------
    // CreateRaftGroup factory — uses PlacementEngine
    // Go reference: jetstream_cluster.go:7212 selectPeerGroup called from createGroupForStream
    // ---------------------------------------------------------------

    [Fact]
    public void CreateRaftGroup_uses_placement_engine()
    {
        // Go reference: jetstream_cluster.go createGroupForStream — delegates to selectPeerGroup.
        List<PeerInfo> candidates =
        [
            new() { PeerId = "peer-A", Available = true, AvailableStorage = 9000 },
            new() { PeerId = "peer-B", Available = true, AvailableStorage = 8000 },
            new() { PeerId = "peer-C", Available = true, AvailableStorage = 7000 },
        ];

        var rg = RaftGroup.CreateRaftGroup("my-stream", 3, candidates);

        rg.Name.ShouldBe("my-stream");
        rg.Peers.Count.ShouldBe(3);
        rg.Peers.ShouldContain("peer-A");
        rg.Peers.ShouldContain("peer-B");
        rg.Peers.ShouldContain("peer-C");
    }

    [Fact]
    public void CreateRaftGroup_sets_desired_replicas()
    {
        // Go reference: jetstream_cluster.go rg.DesiredReplicas = replicas after group creation.
        List<PeerInfo> candidates =
        [
            new() { PeerId = "peer-X", Available = true },
            new() { PeerId = "peer-Y", Available = true },
            new() { PeerId = "peer-Z", Available = true },
        ];

        var rg = RaftGroup.CreateRaftGroup("replicated-stream", 3, candidates);

        rg.DesiredReplicas.ShouldBe(3);
        rg.HasDesiredReplicas.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // IsUnderReplicated — replication health
    // Go reference: jetstream_cluster.go missingPeers — len(Peers) < DesiredReplicas
    // ---------------------------------------------------------------

    [Fact]
    public void IsUnderReplicated_true_when_peers_less_than_desired()
    {
        // Go reference: jetstream_cluster.go:2284 sa.missingPeers().
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1"], DesiredReplicas = 3 };

        rg.IsUnderReplicated.ShouldBeTrue();
    }

    [Fact]
    public void IsUnderReplicated_false_when_peers_equal_desired()
    {
        // Go reference: jetstream_cluster.go:2284 sa.missingPeers() — no deficit at equality.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1", "peer-2", "peer-3"], DesiredReplicas = 3 };

        rg.IsUnderReplicated.ShouldBeFalse();
    }

    [Fact]
    public void IsUnderReplicated_false_when_no_desired_replicas_set()
    {
        // Go reference: jetstream_cluster.go — no DesiredReplicas means no under-replication.
        var rg = new RaftGroup { Name = "test", Peers = ["peer-1"] };

        rg.IsUnderReplicated.ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // IsOverReplicated — excess replication detection
    // Go reference: jetstream_cluster.go extraPeers — len(Peers) > DesiredReplicas
    // ---------------------------------------------------------------

    [Fact]
    public void IsOverReplicated_true_when_peers_more_than_desired()
    {
        // Go reference: jetstream_cluster.go extraPeers detection for scale-down.
        var rg = new RaftGroup { Name = "test", Peers = ["p1", "p2", "p3", "p4"], DesiredReplicas = 3 };

        rg.IsOverReplicated.ShouldBeTrue();
    }

    [Fact]
    public void IsOverReplicated_false_when_peers_equal_desired()
    {
        // Go reference: jetstream_cluster.go — no excess at equality.
        var rg = new RaftGroup { Name = "test", Peers = ["p1", "p2", "p3"], DesiredReplicas = 3 };

        rg.IsOverReplicated.ShouldBeFalse();
    }

    [Fact]
    public void IsOverReplicated_false_when_no_desired_replicas_set()
    {
        // Go reference: jetstream_cluster.go — no DesiredReplicas means no over-replication.
        var rg = new RaftGroup { Name = "test", Peers = ["p1", "p2", "p3", "p4"] };

        rg.IsOverReplicated.ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // QuorumSize — majority quorum calculation
    // Go reference: jetstream_cluster.go quorumNeeded — (n/2)+1
    // ---------------------------------------------------------------

    [Theory]
    [InlineData(1, 1)] // R=1 → quorum=1
    [InlineData(3, 2)] // R=3 → quorum=2
    [InlineData(5, 3)] // R=5 → quorum=3
    [InlineData(2, 2)] // R=2 → quorum=2 (degenerate, but formula consistent)
    [InlineData(4, 3)] // R=4 → quorum=3
    public void QuorumSize_correct_for_various_counts(int peerCount, int expectedQuorum)
    {
        // Go reference: jetstream_cluster.go quorumNeeded — (n/2)+1.
        var peerIds = new List<string>(peerCount);
        for (var i = 1; i <= peerCount; i++)
        {
            peerIds.Add($"peer-{i}");
        }

        var rg = new RaftGroup { Name = "quorum-test", Peers = peerIds };

        rg.QuorumSize.ShouldBe(expectedQuorum);
    }
}
|
||||
@@ -0,0 +1,196 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: Per-stream RAFT group message proposals, message count tracking,
|
||||
// sequence tracking, leader change events, replica status reporting,
|
||||
// and non-leader rejection.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for StreamReplicaGroup stream-specific RAFT apply logic:
/// message proposals, message counting, last-sequence tracking, leader
/// change events, and replica status reporting.
/// Go reference: jetstream_cluster.go processStreamMsg, processStreamEntries.
/// </summary>
public class StreamRaftGroupTests
{
    // ---------------------------------------------------------------
    // ProposeMessageAsync succeeds as leader
    // Go reference: jetstream_cluster.go processStreamMsg
    // ---------------------------------------------------------------

    [Fact]
    public async Task Propose_message_succeeds_as_leader()
    {
        var sut = new StreamReplicaGroup("MSGS", replicas: 3);

        var index = await sut.ProposeMessageAsync(
            "orders.new", ReadOnlyMemory<byte>.Empty, "hello"u8.ToArray(), default);

        // A successful proposal yields a positive log index.
        index.ShouldBeGreaterThan(0);
    }

    // ---------------------------------------------------------------
    // ProposeMessageAsync fails when not leader
    // Go reference: jetstream_cluster.go leader check
    // ---------------------------------------------------------------

    [Fact]
    public async Task Propose_message_fails_when_not_leader()
    {
        var sut = new StreamReplicaGroup("NOLEAD", replicas: 3);

        // Demote the current leader so the proposal hits the leader check.
        sut.Leader.RequestStepDown();

        await Should.ThrowAsync<InvalidOperationException>(async () =>
            await sut.ProposeMessageAsync(
                "test.sub", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray(), default));
    }

    // ---------------------------------------------------------------
    // Message count increments after proposal
    // Go reference: stream.go state.Msgs tracking
    // ---------------------------------------------------------------

    [Fact]
    public async Task Message_count_increments_after_proposal()
    {
        var sut = new StreamReplicaGroup("COUNT", replicas: 3);
        sut.MessageCount.ShouldBe(0);

        // Each accepted message bumps the count by exactly one.
        await sut.ProposeMessageAsync("a.1", ReadOnlyMemory<byte>.Empty, "m1"u8.ToArray(), default);
        sut.MessageCount.ShouldBe(1);

        await sut.ProposeMessageAsync("a.2", ReadOnlyMemory<byte>.Empty, "m2"u8.ToArray(), default);
        sut.MessageCount.ShouldBe(2);

        await sut.ProposeMessageAsync("a.3", ReadOnlyMemory<byte>.Empty, "m3"u8.ToArray(), default);
        sut.MessageCount.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Last sequence tracks correctly
    // Go reference: stream.go state.LastSeq
    // ---------------------------------------------------------------

    [Fact]
    public async Task Last_sequence_tracks_correctly()
    {
        var sut = new StreamReplicaGroup("SEQ", replicas: 3);
        sut.LastSequence.ShouldBe(0);

        // LastSequence must always equal the most recent proposal's index.
        var firstIndex = await sut.ProposeMessageAsync("s.1", ReadOnlyMemory<byte>.Empty, "d1"u8.ToArray(), default);
        sut.LastSequence.ShouldBe(firstIndex);

        var secondIndex = await sut.ProposeMessageAsync("s.2", ReadOnlyMemory<byte>.Empty, "d2"u8.ToArray(), default);
        sut.LastSequence.ShouldBe(secondIndex);

        // Sequence numbers are strictly increasing.
        secondIndex.ShouldBeGreaterThan(firstIndex);
    }

    // ---------------------------------------------------------------
    // Step down triggers leader change event
    // Go reference: jetstream_cluster.go leader change notification
    // ---------------------------------------------------------------

    [Fact]
    public async Task Step_down_triggers_leader_change_event()
    {
        var sut = new StreamReplicaGroup("EVENT", replicas: 3);
        var oldLeaderId = sut.Leader.Id;

        LeaderChangedEventArgs? observed = null;
        sut.LeaderChanged += (_, args) => observed = args;

        await sut.StepDownAsync(default);

        observed.ShouldNotBeNull();
        observed.PreviousLeaderId.ShouldBe(oldLeaderId);
        observed.NewLeaderId.ShouldNotBe(oldLeaderId);
        observed.NewTerm.ShouldBeGreaterThan(0);
    }

    [Fact]
    public async Task Multiple_stepdowns_fire_leader_changed_each_time()
    {
        var sut = new StreamReplicaGroup("MULTI_EVENT", replicas: 3);
        var fired = 0;
        sut.LeaderChanged += (_, _) => fired++;

        // Three consecutive stepdowns must raise the event three times.
        await sut.StepDownAsync(default);
        await sut.StepDownAsync(default);
        await sut.StepDownAsync(default);

        fired.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Replica status reports correct state
    // Go reference: jetstream_cluster.go stream replica status
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_status_reports_correct_state()
    {
        var sut = new StreamReplicaGroup("STATUS", replicas: 3);
        await sut.ProposeMessageAsync("x.1", ReadOnlyMemory<byte>.Empty, "m1"u8.ToArray(), default);
        await sut.ProposeMessageAsync("x.2", ReadOnlyMemory<byte>.Empty, "m2"u8.ToArray(), default);

        var status = sut.GetStatus();

        // The snapshot must reflect name, leadership, message state, and replica count.
        status.StreamName.ShouldBe("STATUS");
        status.LeaderId.ShouldBe(sut.Leader.Id);
        status.LeaderTerm.ShouldBeGreaterThan(0);
        status.MessageCount.ShouldBe(2);
        status.LastSequence.ShouldBeGreaterThan(0);
        status.ReplicaCount.ShouldBe(3);
    }

    [Fact]
    public void Initial_status_has_zero_messages()
    {
        var sut = new StreamReplicaGroup("EMPTY", replicas: 1);

        var status = sut.GetStatus();

        status.MessageCount.ShouldBe(0);
        status.LastSequence.ShouldBe(0);
        status.ReplicaCount.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Status updates after step down
    // ---------------------------------------------------------------

    [Fact]
    public async Task Status_reflects_new_leader_after_stepdown()
    {
        var sut = new StreamReplicaGroup("NEWLEAD", replicas: 3);
        var before = sut.GetStatus();

        await sut.StepDownAsync(default);

        // After a stepdown a different node must be reported as leader.
        var after = sut.GetStatus();
        after.LeaderId.ShouldNotBe(before.LeaderId);
    }

    // ---------------------------------------------------------------
    // ProposeAsync still works after ProposeMessageAsync
    // ---------------------------------------------------------------

    [Fact]
    public async Task ProposeAsync_and_ProposeMessageAsync_coexist()
    {
        var sut = new StreamReplicaGroup("COEXIST", replicas: 3);

        var rawIndex = await sut.ProposeAsync("PUB test.1", default);
        var messageIndex = await sut.ProposeMessageAsync("test.2", ReadOnlyMemory<byte>.Empty, "data"u8.ToArray(), default);

        // Both proposal paths share one log, so indices stay monotonic,
        // but only ProposeMessageAsync contributes to MessageCount.
        messageIndex.ShouldBeGreaterThan(rawIndex);
        sut.MessageCount.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,309 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go
|
||||
// Covers: StreamReplicaGroup construction from StreamAssignment, per-stream RAFT apply
|
||||
// logic (processStreamEntries), checkpoint/restore snapshot lifecycle, and commit/processed
|
||||
// index tracking through the group facade.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for B10: per-stream RAFT apply logic added to StreamReplicaGroup.
/// Covers construction from StreamAssignment, the apply loop, snapshot
/// checkpoint/restore, and the CommitIndex/ProcessedIndex/PendingCommits
/// facade properties.
/// Go reference: jetstream_cluster.go processStreamAssignment, processStreamEntries.
/// </summary>
public class StreamReplicaGroupApplyTests
{
    // ---------------------------------------------------------------
    // Go: jetstream_cluster.go processStreamAssignment — builds per-stream raft group
    // ---------------------------------------------------------------

    [Fact]
    public void Construction_from_assignment_creates_correct_number_of_nodes()
    {
        var assignment = new StreamAssignment
        {
            StreamName = "ORDERS",
            Group = new RaftGroup
            {
                Name = "orders-raft",
                Peers = ["n1", "n2", "n3"],
            },
        };

        var sut = new StreamReplicaGroup(assignment);

        // One RAFT node per assigned peer, and the assignment is retained.
        sut.Nodes.Count.ShouldBe(3);
        sut.StreamName.ShouldBe("ORDERS");
        sut.Assignment.ShouldNotBeNull();
        sut.Assignment!.StreamName.ShouldBe("ORDERS");
    }

    [Fact]
    public void Construction_from_assignment_uses_peer_ids_as_node_ids()
    {
        var assignment = new StreamAssignment
        {
            StreamName = "EVENTS",
            Group = new RaftGroup
            {
                Name = "events-raft",
                Peers = ["peer-a", "peer-b", "peer-c"],
            },
        };

        var sut = new StreamReplicaGroup(assignment);

        // Node identities must mirror the assignment's peer ids.
        var nodeIds = sut.Nodes.Select(n => n.Id).ToHashSet();
        nodeIds.ShouldContain("peer-a");
        nodeIds.ShouldContain("peer-b");
        nodeIds.ShouldContain("peer-c");
    }

    [Fact]
    public void Construction_from_assignment_elects_leader()
    {
        var assignment = new StreamAssignment
        {
            StreamName = "STREAM",
            Group = new RaftGroup
            {
                Name = "stream-raft",
                Peers = ["n1", "n2", "n3"],
            },
        };

        var sut = new StreamReplicaGroup(assignment);

        // Construction leaves the group with an elected leader.
        sut.Leader.ShouldNotBeNull();
        sut.Leader.IsLeader.ShouldBeTrue();
    }

    [Fact]
    public void Construction_from_assignment_with_no_peers_creates_single_node()
    {
        var assignment = new StreamAssignment
        {
            StreamName = "SOLO",
            Group = new RaftGroup { Name = "solo-raft" },
        };

        var sut = new StreamReplicaGroup(assignment);

        // An empty peer list degrades gracefully to a single-node group.
        sut.Nodes.Count.ShouldBe(1);
        sut.Leader.IsLeader.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: raft.go:150-160 (applied/processed fields) — commit index on proposal
    // ---------------------------------------------------------------

    [Fact]
    public async Task ProposeAsync_through_group_increments_commit_index()
    {
        var sut = new StreamReplicaGroup("TRACK", replicas: 3);
        sut.CommitIndex.ShouldBe(0);

        await sut.ProposeAsync("msg.1", default);

        sut.CommitIndex.ShouldBe(1);
    }

    [Fact]
    public async Task Multiple_proposals_increment_commit_index_monotonically()
    {
        var sut = new StreamReplicaGroup("MULTI", replicas: 3);

        await sut.ProposeAsync("msg.1", default);
        await sut.ProposeAsync("msg.2", default);
        await sut.ProposeAsync("msg.3", default);

        // Three proposals land at commit index 3.
        sut.CommitIndex.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Go: jetstream_cluster.go processStreamEntries — apply loop
    // ---------------------------------------------------------------

    [Fact]
    public async Task ApplyCommittedEntriesAsync_processes_pending_entries()
    {
        var sut = new StreamReplicaGroup("APPLY", replicas: 3);
        await sut.ProposeAsync("store.msg.1", default);
        await sut.ProposeAsync("store.msg.2", default);
        sut.PendingCommits.ShouldBe(2);

        await sut.ApplyCommittedEntriesAsync(default);

        // The apply loop drains the queue and advances the processed index.
        sut.PendingCommits.ShouldBe(0);
        sut.ProcessedIndex.ShouldBe(2);
    }

    [Fact]
    public async Task ApplyCommittedEntriesAsync_marks_regular_entries_as_processed()
    {
        var sut = new StreamReplicaGroup("MARK", replicas: 1);
        var proposedIndex = await sut.ProposeAsync("data.record", default);

        // Nothing is processed until the apply loop runs.
        sut.ProcessedIndex.ShouldBe(0);

        await sut.ApplyCommittedEntriesAsync(default);

        sut.ProcessedIndex.ShouldBe(proposedIndex);
    }

    [Fact]
    public async Task ApplyCommittedEntriesAsync_on_empty_queue_is_noop()
    {
        var sut = new StreamReplicaGroup("EMPTY", replicas: 3);

        // With no proposals queued, applying must neither throw nor advance state.
        await sut.ApplyCommittedEntriesAsync(default);

        sut.ProcessedIndex.ShouldBe(0);
        sut.PendingCommits.ShouldBe(0);
    }

    // ---------------------------------------------------------------
    // Go: raft.go CreateSnapshotCheckpoint — snapshot lifecycle
    // ---------------------------------------------------------------

    [Fact]
    public async Task CheckpointAsync_creates_snapshot_at_current_state()
    {
        var sut = new StreamReplicaGroup("SNAP", replicas: 3);
        await sut.ProposeAsync("entry.1", default);
        await sut.ProposeAsync("entry.2", default);

        var snapshot = await sut.CheckpointAsync(default);

        snapshot.ShouldNotBeNull();
        snapshot.LastIncludedIndex.ShouldBeGreaterThan(0);
    }

    [Fact]
    public async Task CheckpointAsync_snapshot_index_matches_applied_index()
    {
        var sut = new StreamReplicaGroup("SNAPIDX", replicas: 1);
        await sut.ProposeAsync("record.1", default);
        await sut.ProposeAsync("record.2", default);

        var snapshot = await sut.CheckpointAsync(default);

        // The checkpoint captures exactly the leader's applied position.
        snapshot.LastIncludedIndex.ShouldBe(sut.Leader.AppliedIndex);
    }

    // ---------------------------------------------------------------
    // Go: raft.go DrainAndReplaySnapshot — restore lifecycle
    // ---------------------------------------------------------------

    [Fact]
    public async Task RestoreFromSnapshotAsync_restores_state()
    {
        var sut = new StreamReplicaGroup("RESTORE", replicas: 3);
        await sut.ProposeAsync("pre.1", default);
        await sut.ProposeAsync("pre.2", default);
        var snapshot = await sut.CheckpointAsync(default);

        // Advance past the snapshot, then restore back to it.
        await sut.ProposeAsync("post.1", default);
        await sut.RestoreFromSnapshotAsync(snapshot, default);

        // Restore rolls the commit index back to the snapshot and drains the queue.
        sut.CommitIndex.ShouldBe(snapshot.LastIncludedIndex);
        sut.PendingCommits.ShouldBe(0);
    }

    [Fact]
    public async Task RestoreFromSnapshotAsync_drains_pending_commits()
    {
        var sut = new StreamReplicaGroup("DRAIN", replicas: 3);

        // Fill the commit queue before restoring.
        await sut.ProposeAsync("queued.1", default);
        await sut.ProposeAsync("queued.2", default);
        await sut.ProposeAsync("queued.3", default);
        sut.PendingCommits.ShouldBeGreaterThan(0);

        var snapshot = new RaftSnapshot
        {
            LastIncludedIndex = 3,
            LastIncludedTerm = sut.Leader.Term,
        };
        await sut.RestoreFromSnapshotAsync(snapshot, default);

        sut.PendingCommits.ShouldBe(0);
    }

    // ---------------------------------------------------------------
    // Go: raft.go:150-160 — PendingCommits reflects commit queue depth
    // ---------------------------------------------------------------

    [Fact]
    public async Task PendingCommits_reflects_commit_queue_depth()
    {
        var sut = new StreamReplicaGroup("QUEUE", replicas: 3);
        sut.PendingCommits.ShouldBe(0);

        // Depth grows with each proposal ...
        await sut.ProposeAsync("q.1", default);
        sut.PendingCommits.ShouldBe(1);

        await sut.ProposeAsync("q.2", default);
        sut.PendingCommits.ShouldBe(2);

        // ... and empties once the entries are applied.
        await sut.ApplyCommittedEntriesAsync(default);
        sut.PendingCommits.ShouldBe(0);
    }

    // ---------------------------------------------------------------
    // Go: raft.go applied/processed tracking — CommitIndex and ProcessedIndex
    // ---------------------------------------------------------------

    [Fact]
    public async Task CommitIndex_and_ProcessedIndex_track_through_the_group()
    {
        var sut = new StreamReplicaGroup("INDICES", replicas: 3);
        sut.CommitIndex.ShouldBe(0);
        sut.ProcessedIndex.ShouldBe(0);

        // Commit advances on proposal; processed lags until apply runs.
        await sut.ProposeAsync("step.1", default);
        sut.CommitIndex.ShouldBe(1);
        sut.ProcessedIndex.ShouldBe(0);

        await sut.ApplyCommittedEntriesAsync(default);
        sut.ProcessedIndex.ShouldBe(1);

        await sut.ProposeAsync("step.2", default);
        sut.CommitIndex.ShouldBe(2);
        sut.ProcessedIndex.ShouldBe(1); // second entry not yet applied

        await sut.ApplyCommittedEntriesAsync(default);
        sut.ProcessedIndex.ShouldBe(2);
    }

    [Fact]
    public void CommitIndex_initially_zero_for_fresh_group()
    {
        var sut = new StreamReplicaGroup("FRESH", replicas: 5);

        sut.CommitIndex.ShouldBe(0);
        sut.ProcessedIndex.ShouldBe(0);
        sut.PendingCommits.ShouldBe(0);
    }
}
|
||||
@@ -0,0 +1,381 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// Covers: per-stream RAFT groups, stream assignment proposal, replica count
|
||||
// enforcement, leader election for stream group, data replication across
|
||||
// stream replicas, placement scaling, stepdown behavior.
|
||||
using System.Collections.Concurrent;
|
||||
using System.Reflection;
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests covering per-stream RAFT groups: stream assignment proposal,
|
||||
/// replica count enforcement, leader election, data replication across
|
||||
/// replicas, placement scaling, and stepdown behavior.
|
||||
/// Ported from Go jetstream_cluster_1_test.go.
|
||||
/// </summary>
|
||||
public class StreamReplicaGroupTests
{
    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    [Fact]
    public void Replica_group_r3_creates_three_raft_nodes()
    {
        // R=3 must materialize one RAFT node per replica.
        var group = new StreamReplicaGroup("TEST", replicas: 3);

        group.Nodes.Count.ShouldBe(3);
        group.StreamName.ShouldBe("TEST");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterSingleReplicaStreams server/jetstream_cluster_1_test.go:223
    // ---------------------------------------------------------------

    [Fact]
    public void Replica_group_r1_creates_single_raft_node()
    {
        // R=1: a single node that is trivially its own leader.
        var group = new StreamReplicaGroup("R1S", replicas: 1);

        group.Nodes.Count.ShouldBe(1);
        group.Leader.IsLeader.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    [Fact]
    public void Replica_group_zero_replicas_creates_one_node()
    {
        // A replica count of zero is clamped up to a single node.
        var group = new StreamReplicaGroup("ZERO", replicas: 0);

        group.Nodes.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    [Fact]
    public void Replica_group_negative_replicas_creates_one_node()
    {
        // Negative replica counts are also clamped to a single node.
        var group = new StreamReplicaGroup("NEG", replicas: -1);

        group.Nodes.Count.ShouldBe(1);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    // ---------------------------------------------------------------

    [Fact]
    public void Replica_group_elects_initial_leader_on_creation()
    {
        // Construction runs an initial election: exactly one node ends up leader.
        var group = new StreamReplicaGroup("ELECT", replicas: 3);

        group.Leader.ShouldNotBeNull();
        group.Leader.IsLeader.ShouldBeTrue();
        group.Leader.Role.ShouldBe(RaftRole.Leader);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    // ---------------------------------------------------------------

    [Fact]
    public void Replica_group_leader_id_follows_naming_convention()
    {
        // Node ids are derived from the lower-cased stream name plus a replica suffix.
        var group = new StreamReplicaGroup("MY_STREAM", replicas: 3);

        group.Leader.Id.ShouldStartWith("my_stream-r");
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_stepdown_changes_leader()
    {
        // Stepdown must transfer leadership to a different node.
        var group = new StreamReplicaGroup("STEP", replicas: 3);
        var before = group.Leader.Id;

        await group.StepDownAsync(default);

        group.Leader.Id.ShouldNotBe(before);
        group.Leader.IsLeader.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_consecutive_stepdowns_cycle_leaders()
    {
        // Each consecutive stepdown yields a leader different from the previous one.
        var group = new StreamReplicaGroup("CYCLE", replicas: 3);
        var leaders = new List<string> { group.Leader.Id };

        await group.StepDownAsync(default);
        leaders.Add(group.Leader.Id);

        await group.StepDownAsync(default);
        leaders.Add(group.Leader.Id);

        leaders[1].ShouldNotBe(leaders[0]);
        leaders[2].ShouldNotBe(leaders[1]);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterLeader server/jetstream_cluster_1_test.go:73
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_stepdown_wraps_around()
    {
        // Six stepdowns over three nodes must visit every node at least once.
        var group = new StreamReplicaGroup("WRAP", replicas: 3);
        var ids = new HashSet<string>();

        for (var i = 0; i < 6; i++)
        {
            ids.Add(group.Leader.Id);
            await group.StepDownAsync(default);
        }

        // Should have cycled through all 3 nodes
        ids.Count.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_leader_accepts_proposals()
    {
        // A proposal through the leader returns a positive log index.
        var group = new StreamReplicaGroup("PROPOSE", replicas: 3);

        var index = await group.ProposeAsync("PUB test.1", default);

        index.ShouldBeGreaterThan(0);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_sequential_proposals_have_increasing_indices()
    {
        // Log indices are strictly monotonic across proposals.
        var group = new StreamReplicaGroup("SEQPROP", replicas: 3);

        var idx1 = await group.ProposeAsync("PUB test.1", default);
        var idx2 = await group.ProposeAsync("PUB test.2", default);
        var idx3 = await group.ProposeAsync("PUB test.3", default);

        idx2.ShouldBeGreaterThan(idx1);
        idx3.ShouldBeGreaterThan(idx2);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamNormalCatchup server/jetstream_cluster_1_test.go:1607
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_proposals_survive_stepdown()
    {
        // Leadership change must not stop the group from accepting new proposals.
        var group = new StreamReplicaGroup("SURVIVE", replicas: 3);

        await group.ProposeAsync("PUB a.1", default);
        await group.ProposeAsync("PUB a.2", default);

        await group.StepDownAsync(default);

        // New leader should accept proposals
        var idx = await group.ProposeAsync("PUB a.3", default);
        idx.ShouldBeGreaterThan(0);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_apply_placement_scales_up()
    {
        // Applying a larger placement grows the node set and keeps a live leader.
        var group = new StreamReplicaGroup("SCALEUP", replicas: 1);
        group.Nodes.Count.ShouldBe(1);

        await group.ApplyPlacementAsync([1, 2, 3], default);

        group.Nodes.Count.ShouldBe(3);
        group.Leader.IsLeader.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_apply_placement_scales_down()
    {
        // Applying a smaller placement shrinks the node set and keeps a live leader.
        var group = new StreamReplicaGroup("SCALEDN", replicas: 5);
        group.Nodes.Count.ShouldBe(5);

        await group.ApplyPlacementAsync([1, 2], default);

        group.Nodes.Count.ShouldBe(2);
        group.Leader.IsLeader.ShouldBeTrue();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterExpandCluster server/jetstream_cluster_1_test.go:86
    // ---------------------------------------------------------------

    [Fact]
    public async Task Replica_group_apply_same_size_is_noop()
    {
        var group = new StreamReplicaGroup("NOOP", replicas: 3);
        var leaderBefore = group.Leader.Id;

        await group.ApplyPlacementAsync([1, 2, 3], default);

        group.Nodes.Count.ShouldBe(3);
        // Leader should remain the same since placement is a no-op
        group.Leader.Id.ShouldBe(leaderBefore);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterMultiReplicaStreams server/jetstream_cluster_1_test.go:299
    // ---------------------------------------------------------------

    [Fact]
    public void Replica_group_all_nodes_share_cluster()
    {
        // Every node must see the same 3-member cluster view.
        var group = new StreamReplicaGroup("SHARED", replicas: 3);

        foreach (var node in group.Nodes)
            node.Members.Count.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamSynchedTimeStamps server/jetstream_cluster_1_test.go:977
    // ---------------------------------------------------------------

    // NOTE(review): previously declared `async Task` with no await (CS1998);
    // the body is fully synchronous, so it is now a plain void Fact.
    [Fact]
    public void Stream_manager_creates_replica_group_on_stream_create()
    {
        var meta = new JetStreamMetaGroup(3);
        var streamManager = new StreamManager(meta);

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "REPL",
            Subjects = ["repl.>"],
            Replicas = 3,
        });

        // Use reflection to verify internal replica group was created
        var groups = GetReplicaGroups(streamManager);

        groups.ContainsKey("REPL").ShouldBeTrue();
        groups["REPL"].Nodes.Count.ShouldBe(3);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamLeaderStepDown server/jetstream_cluster_1_test.go:4925
    // ---------------------------------------------------------------

    [Fact]
    public async Task Stream_leader_stepdown_via_stream_manager_changes_leader()
    {
        var meta = new JetStreamMetaGroup(3);
        var streamManager = new StreamManager(meta);

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "SD",
            Subjects = ["sd.>"],
            Replicas = 3,
        });

        var groups = GetReplicaGroups(streamManager);
        var leaderBefore = groups["SD"].Leader.Id;

        await streamManager.StepDownStreamLeaderAsync("SD", default);

        groups["SD"].Leader.Id.ShouldNotBe(leaderBefore);
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamDelete server/jetstream_cluster_1_test.go:472
    // ---------------------------------------------------------------

    [Fact]
    public void Stream_delete_removes_replica_group()
    {
        var meta = new JetStreamMetaGroup(3);
        var streamManager = new StreamManager(meta);

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "DELRG",
            Subjects = ["delrg.>"],
            Replicas = 3,
        });

        streamManager.Delete("DELRG").ShouldBeTrue();

        // Deleting the stream must also tear down its replica group.
        var groups = GetReplicaGroups(streamManager);

        groups.ContainsKey("DELRG").ShouldBeFalse();
    }

    // ---------------------------------------------------------------
    // Go: TestJetStreamClusterStreamUpdate server/jetstream_cluster_1_test.go:1433
    // ---------------------------------------------------------------

    [Fact]
    public void Stream_update_preserves_replica_group_when_replicas_unchanged()
    {
        var meta = new JetStreamMetaGroup(3);
        var streamManager = new StreamManager(meta);

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "UPD",
            Subjects = ["upd.>"],
            Replicas = 3,
        });

        var groups = GetReplicaGroups(streamManager);
        var groupBefore = groups["UPD"];

        streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = "UPD",
            Subjects = ["upd.>", "upd2.>"],
            Replicas = 3,
            MaxMsgs = 100,
        });

        // Same replica count means the group reference should be the same
        groups["UPD"].ShouldBeSameAs(groupBefore);
    }

    /// <summary>
    /// Reads the StreamManager's private <c>_replicaGroups</c> map via reflection
    /// so tests can observe per-stream RAFT group bookkeeping. Extracted because
    /// this snippet was previously duplicated verbatim in four tests.
    /// </summary>
    private static ConcurrentDictionary<string, StreamReplicaGroup> GetReplicaGroups(StreamManager streamManager)
    {
        var field = typeof(StreamManager)
            .GetField("_replicaGroups", BindingFlags.NonPublic | BindingFlags.Instance)!;
        return (ConcurrentDictionary<string, StreamReplicaGroup>)field.GetValue(streamManager)!;
    }
}
|
||||
@@ -0,0 +1,246 @@
|
||||
// Go parity: golang/nats-server/server/jetstream_cluster.go:7212 selectPeerGroup
|
||||
// Covers: UniqueTag enforcement, HA asset limits, weighted scoring by available resources.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for topology-aware placement: JetStreamUniqueTag enforcement,
|
||||
/// MaxAssetsPerPeer HA limits, and weighted scoring.
|
||||
/// Go reference: jetstream_cluster.go:7212 selectPeerGroup (uniqueTagPrefix, maxHaAssets, weighted sort).
|
||||
/// </summary>
|
||||
public class TopologyPlacementTests
{
    // ---------------------------------------------------------------
    // UniqueTag enforcement
    // Go reference: jetstream_cluster.go:7251 uniqueTagPrefix / checkUniqueTag
    // ---------------------------------------------------------------

    [Fact]
    public void UniqueTag_prevents_same_tag_value_replicas()
    {
        // Three candidates: two share az:us-east-1a, one sits in az:us-east-1b.
        // An R=2 placement with UniqueTag="az" must land one replica per AZ.
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", Tags = ["az:us-east-1a"], AvailableStorage = 1000 },
            new() { PeerId = "p2", Tags = ["az:us-east-1a"], AvailableStorage = 2000 },
            new() { PeerId = "p3", Tags = ["az:us-east-1b"], AvailableStorage = 900 },
        };
        var uniquePolicy = new PlacementPolicy { UniqueTag = "az" };

        var selection = PlacementEngine.SelectPeerGroup("az-group", 2, candidates, uniquePolicy);

        selection.Peers.Count.ShouldBe(2);
        // Exactly one chosen peer per distinct AZ value.
        var chosen = candidates.Where(p => selection.Peers.Contains(p.PeerId)).ToList();
        var azTags = chosen
            .SelectMany(p => p.Tags)
            .Where(t => t.StartsWith("az:", StringComparison.OrdinalIgnoreCase))
            .ToList();
        azTags.Distinct(StringComparer.OrdinalIgnoreCase).Count().ShouldBe(2);
    }

    [Fact]
    public void UniqueTag_throws_when_not_enough_unique_values()
    {
        // Every candidate carries the same AZ tag, so R=2 with UniqueTag="az"
        // cannot be satisfied and must fail loudly.
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p2", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p3", Tags = ["az:us-east-1a"] },
        };
        var uniquePolicy = new PlacementPolicy { UniqueTag = "az" };

        Should.Throw<InvalidOperationException>(
            () => PlacementEngine.SelectPeerGroup("fail", 2, candidates, uniquePolicy));
    }

    [Fact]
    public void Tag_prefix_matching_for_unique_constraint()
    {
        // UniqueTag="az" matches by prefix ("az:us-east-1a", "az:us-west-2b", ...),
        // ignoring unrelated tags such as "ssd".
        // Go reference: jetstream_cluster.go:7265 strings.HasPrefix(t, uniqueTagPrefix)
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", Tags = ["az:us-east-1a", "ssd"] },
            new() { PeerId = "p2", Tags = ["az:us-west-2b", "ssd"] },
            new() { PeerId = "p3", Tags = ["az:eu-central-1a", "ssd"] },
        };
        var uniquePolicy = new PlacementPolicy { UniqueTag = "az" };

        var selection = PlacementEngine.SelectPeerGroup("prefix", 3, candidates, uniquePolicy);

        selection.Peers.Count.ShouldBe(3);
        selection.Peers.ShouldContain("p1");
        selection.Peers.ShouldContain("p2");
        selection.Peers.ShouldContain("p3");
    }

    [Fact]
    public void Empty_unique_tag_ignored()
    {
        // A null or empty UniqueTag disables the uniqueness constraint entirely.
        // Go reference: jetstream_cluster.go:7252 if uniqueTagPrefix != _EMPTY_
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p2", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p3", Tags = ["az:us-east-1a"] },
        };

        // Null policy: all three candidates are eligible, so R=3 succeeds.
        var nullPolicySelection = PlacementEngine.SelectPeerGroup("no-unique-null", 3, candidates, policy: null);
        nullPolicySelection.Peers.Count.ShouldBe(3);

        // Empty-string UniqueTag must behave identically to no constraint.
        var emptyTagPolicy = new PlacementPolicy { UniqueTag = "" };
        var emptyPolicySelection = PlacementEngine.SelectPeerGroup("no-unique-empty", 3, candidates, emptyTagPolicy);
        emptyPolicySelection.Peers.Count.ShouldBe(3);
    }

    [Fact]
    public void UniqueTag_combined_with_cluster_filter()
    {
        // The cluster filter and UniqueTag compose: filter to us-east first,
        // then require distinct AZ values among the survivors.
        // Go reference: jetstream_cluster.go:7346 cluster check before uniqueTag check
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", Cluster = "us-east", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p2", Cluster = "us-east", Tags = ["az:us-east-1a"] },
            new() { PeerId = "p3", Cluster = "us-east", Tags = ["az:us-east-1b"] },
            new() { PeerId = "p4", Cluster = "us-west", Tags = ["az:us-west-2a"] },
        };
        var combinedPolicy = new PlacementPolicy { Cluster = "us-east", UniqueTag = "az" };

        // Only p1/p2/p3 survive the cluster filter; one from 1a and one from 1b win.
        var selection = PlacementEngine.SelectPeerGroup("combo", 2, candidates, combinedPolicy);

        selection.Peers.Count.ShouldBe(2);
        selection.Peers.ShouldNotContain("p4");
        var chosen = candidates.Where(p => selection.Peers.Contains(p.PeerId)).ToList();
        var distinctAzTags = chosen
            .SelectMany(p => p.Tags)
            .Where(t => t.StartsWith("az:", StringComparison.OrdinalIgnoreCase))
            .Distinct(StringComparer.OrdinalIgnoreCase)
            .ToList();
        distinctAzTags.Count.ShouldBe(2);
    }

    // ---------------------------------------------------------------
    // MaxAssetsPerPeer HA limit deprioritization
    // Go reference: jetstream_cluster.go:7428 maxHaAssets check (deprioritize vs hard exclude)
    // ---------------------------------------------------------------

    [Fact]
    public void MaxAssetsPerPeer_deprioritizes_overloaded_peers()
    {
        // p1 sits at its asset cap; p2 and p3 do not. With enough candidates
        // under the cap, the overloaded peer must lose out.
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", AvailableStorage = 10_000, CurrentAssets = 5, MaxAssetsPerPeer = 5 },
            new() { PeerId = "p2", AvailableStorage = 8_000, CurrentAssets = 1, MaxAssetsPerPeer = 5 },
            new() { PeerId = "p3", AvailableStorage = 6_000, CurrentAssets = 0, MaxAssetsPerPeer = 5 },
        };

        var selection = PlacementEngine.SelectPeerGroup("ha-limit", 2, candidates);

        // p1 is deprioritized (at max), so p2 and p3 win despite p1's storage lead.
        selection.Peers.Count.ShouldBe(2);
        selection.Peers.ShouldContain("p2");
        selection.Peers.ShouldContain("p3");
        selection.Peers.ShouldNotContain("p1");
    }

    [Fact]
    public void MaxAssetsPerPeer_still_used_when_no_alternatives()
    {
        // Overloaded peers are deprioritized (moved to the end), never hard-excluded:
        // when everyone is at the cap, selection must still succeed.
        // Go reference: jetstream_cluster.go — deprioritize (move to end), not hard exclude.
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", AvailableStorage = 1000, CurrentAssets = 3, MaxAssetsPerPeer = 3 },
            new() { PeerId = "p2", AvailableStorage = 900, CurrentAssets = 3, MaxAssetsPerPeer = 3 },
        };

        var selection = PlacementEngine.SelectPeerGroup("ha-fallback", 2, candidates);

        selection.Peers.Count.ShouldBe(2);
        selection.Peers.ShouldContain("p1");
        selection.Peers.ShouldContain("p2");
    }

    [Fact]
    public void Zero_MaxAssets_means_unlimited()
    {
        // MaxAssetsPerPeer=0 disables the cap: peers are never treated as
        // overloaded no matter how many assets they already host.
        var candidates = new List<PeerInfo>
        {
            new() { PeerId = "p1", AvailableStorage = 5000, CurrentAssets = 100, MaxAssetsPerPeer = 0 },
            new() { PeerId = "p2", AvailableStorage = 4000, CurrentAssets = 200, MaxAssetsPerPeer = 0 },
        };

        var selection = PlacementEngine.SelectPeerGroup("unlimited", 2, candidates);

        selection.Peers.Count.ShouldBe(2);
        selection.Peers.ShouldContain("p1");
        selection.Peers.ShouldContain("p2");
    }

    // ---------------------------------------------------------------
    // Weighted score = AvailableStorage - (CurrentAssets * AssetCostWeight)
    // Go reference: jetstream_cluster.go:7469 sort by avail then ns (stream count)
    // ---------------------------------------------------------------

    [Fact]
    public void Weighted_score_prefers_less_loaded_peers()
    {
        // With DefaultAssetCostWeight = 1 GiB per asset, a lightly loaded peer
        // can outscore one with more raw storage.
        const long gb = PlacementEngine.DefaultAssetCostWeight; // 1_073_741_824L
        var candidates = new List<PeerInfo>
        {
            // p1: score = 10*GB - 5*GB = 5*GB
            new() { PeerId = "p1", AvailableStorage = 10 * gb, CurrentAssets = 5 },
            // p2: score = 9*GB - 1*GB = 8*GB (wins despite less raw storage)
            new() { PeerId = "p2", AvailableStorage = 9 * gb, CurrentAssets = 1 },
            // p3: score = 3*GB - 0 = 3*GB
            new() { PeerId = "p3", AvailableStorage = 3 * gb, CurrentAssets = 0 },
        };

        var selection = PlacementEngine.SelectPeerGroup("weighted", 2, candidates);

        // Best score first: p2 (8*GB), then p1 (5*GB).
        selection.Peers.Count.ShouldBe(2);
        selection.Peers[0].ShouldBe("p2");
        selection.Peers[1].ShouldBe("p1");
    }

    [Fact]
    public void Weighted_score_with_custom_cost_weight()
    {
        // Pin the formula score = AvailableStorage - (CurrentAssets * AssetCostWeight)
        // with a deliberately tiny weight so the arithmetic is obvious.
        const long costWeight = 1000L;
        var candidates = new List<PeerInfo>
        {
            // score = 5000 - (3 * 1000) = 2000
            new() { PeerId = "p1", AvailableStorage = 5000, CurrentAssets = 3 },
            // score = 4000 - (0 * 1000) = 4000 (wins)
            new() { PeerId = "p2", AvailableStorage = 4000, CurrentAssets = 0 },
            // score = 6000 - (5 * 1000) = 1000 (loses)
            new() { PeerId = "p3", AvailableStorage = 6000, CurrentAssets = 5 },
        };

        var selection = PlacementEngine.SelectPeerGroup("custom-weight", 2, candidates, assetCostWeight: costWeight);

        selection.Peers.Count.ShouldBe(2);
        selection.Peers[0].ShouldBe("p2"); // score 4000
        selection.Peers[1].ShouldBe("p1"); // score 2000
    }
}
|
||||
@@ -0,0 +1,222 @@
|
||||
// Go parity: jetstream_cluster.go — version-incompatible stream/consumer assignment handling.
|
||||
// Covers: future-version SA/CA rejection, unknown MetaEntryType graceful handling,
|
||||
// SkippedUnsupportedEntries counter, mixed-version batch partial apply.
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for graceful handling of version-incompatible stream/consumer assignments
|
||||
/// in JetStreamMetaGroup (Gap 2.11).
|
||||
/// Go reference: jetstream_cluster.go — versioned assignment processing, unknown entry fallback.
|
||||
/// </summary>
|
||||
public class UnsupportedAssetTests
|
||||
{
|
||||
// ---------------------------------------------------------------
|
||||
// ProcessStreamAssignment — version checks
|
||||
// Go reference: jetstream_cluster.go:4541 processStreamAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ProcessStreamAssignment_skips_future_version()
{
    // Entries stamped with a version this node does not understand are
    // skipped (not applied, not fatal) — mirrors Go's forward-compat handling.
    var metaGroup = new JetStreamMetaGroup(3);
    var assignment = new StreamAssignment
    {
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "orders-group" },
        Version = 2, // future version — beyond CurrentVersion
    };

    var applied = metaGroup.ProcessStreamAssignment(assignment);

    applied.ShouldBeFalse();
    metaGroup.StreamCount.ShouldBe(0);
    metaGroup.SkippedUnsupportedEntries.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public void ProcessStreamAssignment_accepts_current_version()
{
    // An entry stamped with exactly CurrentVersion is processed normally.
    var metaGroup = new JetStreamMetaGroup(3);
    var assignment = new StreamAssignment
    {
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "orders-group" },
        Version = JetStreamMetaGroup.CurrentVersion,
    };

    var applied = metaGroup.ProcessStreamAssignment(assignment);

    applied.ShouldBeTrue();
    metaGroup.StreamCount.ShouldBe(1);
    metaGroup.SkippedUnsupportedEntries.ShouldBe(0);
}
|
||||
|
||||
[Fact]
public void ProcessStreamAssignment_accepts_default_version()
{
    // Version 0 (the default int value, i.e. a pre-versioned assignment)
    // must be treated as the current version for backward compatibility.
    var metaGroup = new JetStreamMetaGroup(3);
    var legacyAssignment = new StreamAssignment
    {
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "orders-group" },
        // Version deliberately left at its default of 0.
    };

    var applied = metaGroup.ProcessStreamAssignment(legacyAssignment);

    applied.ShouldBeTrue();
    metaGroup.StreamCount.ShouldBe(1);
    metaGroup.SkippedUnsupportedEntries.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ProcessConsumerAssignment — version checks
|
||||
// Go reference: jetstream_cluster.go:5300 processConsumerAssignment
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ProcessConsumerAssignment_skips_future_version()
{
    // A consumer assignment stamped with a future version is skipped even
    // when its parent stream exists at a supported version.
    var metaGroup = new JetStreamMetaGroup(3);

    // Register the parent stream first (current version).
    metaGroup.ProcessStreamAssignment(new StreamAssignment
    {
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "orders-group" },
    });

    var consumerAssignment = new ConsumerAssignment
    {
        ConsumerName = "my-consumer",
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "consumer-group" },
        Version = 2, // future version
    };

    var applied = metaGroup.ProcessConsumerAssignment(consumerAssignment);

    applied.ShouldBeFalse();
    metaGroup.ConsumerCount.ShouldBe(0);
    metaGroup.SkippedUnsupportedEntries.ShouldBe(1);
}
|
||||
|
||||
[Fact]
public void ProcessConsumerAssignment_accepts_current_version()
{
    // A current-version consumer assignment on an existing stream applies cleanly.
    var metaGroup = new JetStreamMetaGroup(3);

    metaGroup.ProcessStreamAssignment(new StreamAssignment
    {
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "orders-group" },
    });

    var consumerAssignment = new ConsumerAssignment
    {
        ConsumerName = "my-consumer",
        StreamName = "ORDERS",
        Group = new RaftGroup { Name = "consumer-group" },
        Version = JetStreamMetaGroup.CurrentVersion,
    };

    var applied = metaGroup.ProcessConsumerAssignment(consumerAssignment);

    applied.ShouldBeTrue();
    metaGroup.ConsumerCount.ShouldBe(1);
    metaGroup.SkippedUnsupportedEntries.ShouldBe(0);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyEntry — unknown entry type
|
||||
// Go reference: jetstream_cluster.go — unknown entry type fallback (no crash)
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void ApplyEntry_unknown_type_does_not_crash()
{
    // Unknown entry types must never crash the cluster; they are counted
    // and skipped so newer peers can coexist with older ones.
    var metaGroup = new JetStreamMetaGroup(3);

    // Must complete without throwing.
    metaGroup.ApplyEntry(MetaEntryType.Unknown, "something");

    metaGroup.SkippedUnsupportedEntries.ShouldBe(1);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// SkippedUnsupportedEntries counter accumulation
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
public void SkippedUnsupportedEntries_count_increments_for_each_skip()
{
    // The skip counter accumulates across every unsupported entry — two
    // future-version streams plus one unknown entry type — so operators
    // can detect version skew in the cluster.
    var metaGroup = new JetStreamMetaGroup(3);

    metaGroup.ProcessStreamAssignment(new StreamAssignment
    {
        StreamName = "STREAM-A",
        Group = new RaftGroup { Name = "g1" },
        Version = 99,
    });
    metaGroup.ProcessStreamAssignment(new StreamAssignment
    {
        StreamName = "STREAM-B",
        Group = new RaftGroup { Name = "g2" },
        Version = 99,
    });
    metaGroup.ApplyEntry(MetaEntryType.Unknown, "x");

    metaGroup.SkippedUnsupportedEntries.ShouldBe(3);
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Mixed-version batch: only v1 assignments applied
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public void Mixed_versions_partial_apply()
|
||||
{
|
||||
// Go ref: jetstream_cluster.go — when replaying a RAFT log with mixed-version entries,
|
||||
// supported entries are applied and future-version entries are skipped without affecting
|
||||
// correctly versioned entries.
|
||||
var meta = new JetStreamMetaGroup(3);
|
||||
|
||||
var streams = new[]
|
||||
{
|
||||
new StreamAssignment { StreamName = "S1", Group = new RaftGroup { Name = "g1" }, Version = 1 },
|
||||
new StreamAssignment { StreamName = "S2", Group = new RaftGroup { Name = "g2" }, Version = 2 }, // future
|
||||
new StreamAssignment { StreamName = "S3", Group = new RaftGroup { Name = "g3" }, Version = 1 },
|
||||
new StreamAssignment { StreamName = "S4", Group = new RaftGroup { Name = "g4" }, Version = 3 }, // future
|
||||
new StreamAssignment { StreamName = "S5", Group = new RaftGroup { Name = "g5" }, Version = 0 }, // default = current
|
||||
};
|
||||
|
||||
foreach (var sa in streams)
|
||||
meta.ProcessStreamAssignment(sa);
|
||||
|
||||
// S1, S3, S5 should be applied; S2, S4 skipped
|
||||
meta.StreamCount.ShouldBe(3);
|
||||
meta.GetStreamAssignment("S1").ShouldNotBeNull();
|
||||
meta.GetStreamAssignment("S2").ShouldBeNull();
|
||||
meta.GetStreamAssignment("S3").ShouldNotBeNull();
|
||||
meta.GetStreamAssignment("S4").ShouldBeNull();
|
||||
meta.GetStreamAssignment("S5").ShouldNotBeNull();
|
||||
meta.SkippedUnsupportedEntries.ShouldBe(2);
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user