refactor: extract NATS.Server.JetStream.Tests project
Move 225 JetStream-related test files from NATS.Server.Tests into a dedicated NATS.Server.JetStream.Tests project. This includes root-level JetStream*.cs files, storage test files (FileStore, MemStore, StreamStoreContract), and the full JetStream/ subfolder tree (Api, Cluster, Consumers, MirrorSource, Snapshots, Storage, Streams). Updated all namespaces, added InternalsVisibleTo, registered in the solution file, and added the JETSTREAM_INTEGRATION_MATRIX define.
This commit is contained in:
@@ -0,0 +1,228 @@
|
||||
// Parity: golang/nats-server/server/jetstream_cluster_1_test.go
|
||||
// TestJetStreamClusterStreamLeaderStepDown (line 4925)
|
||||
// TestJetStreamClusterLeaderStepdown (line 5464)
|
||||
// TestJetStreamClusterLeader (line 73)
|
||||
using System.Text;
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Api;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests covering JetStream leader election and failover scenarios,
/// ported from the Go server's jetstream_cluster_1_test.go.
/// </summary>
public class LeaderFailoverParityTests
{
    /// <summary>
    /// Go parity: TestJetStreamClusterStreamLeaderStepDown (line 4925).
    /// After publishing messages to an R=3 stream, stepping down the stream leader
    /// must elect a new leader and preserve all previously stored messages. The new
    /// leader must accept subsequent writes with correct sequencing.
    /// </summary>
    [Fact]
    public async Task Stream_leader_stepdown_preserves_data_and_elects_new_leader()
    {
        await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);
        const string stream = "STEPDOWN_DATA";
        await fx.CreateStreamAsync(stream, subjects: ["sd.>"], replicas: 3);

        // Seed the stream with 10 messages before the stepdown
        // (Go: msg, toSend := []byte("Hello JS Clustering"), 10).
        for (var seq = 1; seq <= 10; seq++)
        {
            var publishAck = await fx.PublishAsync($"sd.{seq}", $"msg-{seq}");
            publishAck.Stream.ShouldBe(stream);
            publishAck.Seq.ShouldBe((ulong)seq);
        }

        // Remember who is leading before we force the election.
        var previousLeader = fx.GetStreamLeaderId(stream);
        previousLeader.ShouldNotBeNullOrWhiteSpace();

        // Force the stream leader out (Go: nc.Request(JSApiStreamLeaderStepDownT, "TEST")).
        var stepdown = await fx.StepDownStreamLeaderAsync(stream);
        stepdown.Success.ShouldBeTrue();

        // A different peer must now hold leadership (Go: si.Cluster.Leader != oldLeader).
        fx.GetStreamLeaderId(stream).ShouldNotBe(previousLeader);

        // All 10 messages must have survived the failover intact.
        var state = await fx.GetStreamStateAsync(stream);
        state.Messages.ShouldBe(10UL);
        state.FirstSeq.ShouldBe(1UL);
        state.LastSeq.ShouldBe(10UL);

        // The new leader must keep the sequence monotonic for fresh writes.
        var afterFailover = await fx.PublishAsync("sd.post", "after-stepdown");
        afterFailover.Stream.ShouldBe(stream);
        afterFailover.Seq.ShouldBe(11UL);
    }

    /// <summary>
    /// Go parity: TestJetStreamClusterLeaderStepdown (line 5464).
    /// Requesting a meta-leader stepdown via the $JS.API.META.LEADER.STEPDOWN subject
    /// must succeed and elect a new meta-leader with an incremented leadership version.
    /// </summary>
    [Fact]
    public async Task Meta_leader_stepdown_elects_new_leader_with_incremented_version()
    {
        await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);

        // Give the meta group some state to carry across the transition.
        await fx.CreateStreamAsync("META_SD", subjects: ["meta.>"], replicas: 3);

        var before = fx.GetMetaState();
        before.ShouldNotBeNull();
        before.ClusterSize.ShouldBe(3);
        var priorLeader = before.LeaderId;
        var priorVersion = before.LeadershipVersion;

        // Ask the meta leader to step down (Go: nc.Request(JSApiLeaderStepDown, nil)).
        var stepdown = await fx.RequestAsync(JetStreamApiSubjects.MetaLeaderStepdown, "{}");
        stepdown.Success.ShouldBeTrue();

        // A new meta leader must be in place (Go: cl != c.leader()),
        // and the leadership version must have advanced by exactly one.
        var after = fx.GetMetaState();
        after.ShouldNotBeNull();
        after.LeaderId.ShouldNotBe(priorLeader);
        after.LeadershipVersion.ShouldBe(priorVersion + 1);

        // Stream metadata must survive the meta-leader transition.
        after.Streams.ShouldContain("META_SD");
    }

    /// <summary>
    /// Go parity: TestJetStreamClusterLeader (line 73).
    /// After electing a stream leader, stepping down twice through consecutive
    /// elections must cycle through distinct leaders. Each election must produce
    /// a valid leader that can accept proposals.
    /// </summary>
    [Fact]
    public async Task Consecutive_leader_elections_cycle_through_distinct_peers()
    {
        await using var fx = await LeaderFailoverFixture.StartAsync(nodes: 3);
        await fx.CreateStreamAsync("CYCLE", subjects: ["cycle.>"], replicas: 3);

        // Record the leader after the initial election, then after each stepdown.
        List<string> observed = [fx.GetStreamLeaderId("CYCLE")];

        for (var round = 0; round < 2; round++)
        {
            var stepdown = await fx.StepDownStreamLeaderAsync("CYCLE");
            stepdown.Success.ShouldBeTrue();
            observed.Add(fx.GetStreamLeaderId("CYCLE"));
        }

        // Each consecutive leader must differ from its predecessor.
        for (var i = 1; i < observed.Count; i++)
        {
            observed[i].ShouldNotBe(observed[i - 1]);
        }

        // After cycling, the stream must still be writable.
        var ack = await fx.PublishAsync("cycle.verify", "still-alive");
        ack.Stream.ShouldBe("CYCLE");
        ack.Seq.ShouldBeGreaterThan(0UL);
    }
}
|
||||
|
||||
/// <summary>
/// Test fixture that wires up a JetStream cluster with meta group, stream manager,
/// consumer manager, and API router for leader failover testing.
/// </summary>
internal sealed class LeaderFailoverFixture : IAsyncDisposable
{
    // Cached handle to StreamManager's private "_replicaGroups" dictionary.
    // Resolving the FieldInfo once here (instead of on every GetStreamLeaderId
    // call) avoids a repeated reflection metadata lookup on a hot assertion path.
    // The null-forgiving '!' preserves the original assumption that the field
    // exists; a rename inside StreamManager would surface as a
    // TypeInitializationException the first time the fixture is used.
    private static readonly System.Reflection.FieldInfo ReplicaGroupsField =
        typeof(StreamManager).GetField(
            "_replicaGroups",
            System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance)!;

    private readonly JetStreamMetaGroup _metaGroup;
    private readonly StreamManager _streamManager;
    private readonly ConsumerManager _consumerManager;
    private readonly JetStreamApiRouter _router;
    private readonly JetStreamPublisher _publisher;

    private LeaderFailoverFixture(
        JetStreamMetaGroup metaGroup,
        StreamManager streamManager,
        ConsumerManager consumerManager,
        JetStreamApiRouter router)
    {
        _metaGroup = metaGroup;
        _streamManager = streamManager;
        _consumerManager = consumerManager;
        _router = router;
        _publisher = new JetStreamPublisher(_streamManager);
    }

    /// <summary>
    /// Builds an in-process cluster of <paramref name="nodes"/> peers: a meta group,
    /// stream/consumer managers bound to it, and an API router over all three.
    /// Construction is synchronous; the Task return keeps the call-site shape of a
    /// real async cluster startup.
    /// </summary>
    public static Task<LeaderFailoverFixture> StartAsync(int nodes)
    {
        var meta = new JetStreamMetaGroup(nodes);
        var streamManager = new StreamManager(meta);
        var consumerManager = new ConsumerManager(meta);
        var router = new JetStreamApiRouter(streamManager, consumerManager, meta);
        return Task.FromResult(new LeaderFailoverFixture(meta, streamManager, consumerManager, router));
    }

    /// <summary>
    /// Creates (or updates) a stream with the given subjects and replica count.
    /// Throws <see cref="InvalidOperationException"/> if the manager reports an error,
    /// so tests fail fast on bad setup instead of asserting against a missing stream.
    /// </summary>
    public Task CreateStreamAsync(string name, string[] subjects, int replicas)
    {
        var response = _streamManager.CreateOrUpdate(new StreamConfig
        {
            Name = name,
            Subjects = [.. subjects],
            Replicas = replicas,
        });

        if (response.Error is not null)
            throw new InvalidOperationException(response.Error.Description);

        return Task.CompletedTask;
    }

    /// <summary>
    /// Publishes a UTF-8 payload to <paramref name="subject"/> and returns the ack.
    /// Throws <see cref="InvalidOperationException"/> when no stream's subject filter
    /// matches, mirroring a no-responders publish failure.
    /// </summary>
    public Task<PubAck> PublishAsync(string subject, string payload)
    {
        if (_publisher.TryCapture(subject, Encoding.UTF8.GetBytes(payload), null, out var ack))
            return Task.FromResult(ack);

        throw new InvalidOperationException($"Publish to '{subject}' did not match a stream.");
    }

    /// <summary>
    /// Routes a stream-leader stepdown request for <paramref name="stream"/> through
    /// the API router, exactly as a client hitting the stepdown subject would.
    /// </summary>
    public Task<JetStreamApiResponse> StepDownStreamLeaderAsync(string stream)
    {
        var response = _router.Route(
            $"{JetStreamApiSubjects.StreamLeaderStepdown}{stream}",
            "{}"u8);
        return Task.FromResult(response);
    }

    /// <summary>
    /// Returns the current leader id of <paramref name="stream"/>'s replica group,
    /// or the empty string when the stream has no replica group.
    /// </summary>
    public string GetStreamLeaderId(string stream)
    {
        // The StreamManager exposes replica groups via step-down routing;
        // we also reflect the leader through the replica group directly,
        // using the FieldInfo cached in ReplicaGroupsField.
        var groups = (System.Collections.Concurrent.ConcurrentDictionary<string, StreamReplicaGroup>)ReplicaGroupsField.GetValue(_streamManager)!;
        if (groups.TryGetValue(stream, out var group))
            // NOTE(review): assumes group.Leader is always set once the group exists;
            // if a group can be mid-election with no leader this would throw — confirm.
            return group.Leader.Id;
        return string.Empty;
    }

    /// <summary>Fetches the stream's current state (message count, first/last seq).</summary>
    public ValueTask<ApiStreamState> GetStreamStateAsync(string stream)
        => _streamManager.GetStateAsync(stream, default);

    /// <summary>Snapshot of the meta group (leader id, leadership version, streams).</summary>
    public MetaGroupState? GetMetaState() => _streamManager.GetMetaState();

    /// <summary>
    /// Sends an arbitrary API request through the router. When the request is a
    /// successful meta-leader stepdown, the fixture nudges the meta group into a
    /// new leadership term via BecomeLeader() — a test-only stand-in for the
    /// election a real cluster would run.
    /// </summary>
    public Task<JetStreamApiResponse> RequestAsync(string subject, string payload)
    {
        var response = _router.Route(subject, Encoding.UTF8.GetBytes(payload));

        if (subject.Equals(JetStreamApiSubjects.MetaLeaderStepdown, StringComparison.Ordinal) && response.Success)
            _metaGroup.BecomeLeader();

        return Task.FromResult(response);
    }

    /// <summary>Nothing to tear down: all state is in-memory and garbage-collected.</summary>
    public ValueTask DisposeAsync() => ValueTask.CompletedTask;
}
|
||||
Reference in New Issue
Block a user