Move 43 Raft consensus test files (8 root-level + 35 in Raft/ subfolder) from NATS.Server.Tests into a dedicated NATS.Server.Raft.Tests project. Update namespaces, add InternalsVisibleTo, and fix timing/exception handling issues in moved test files.
394 lines
14 KiB
C#
using NATS.Server.Raft;
|
|
|
|
namespace NATS.Server.Raft.Tests.Raft;
|
|
|
|
/// <summary>
|
|
/// Tests for B4 (membership change proposals), B5 (snapshot checkpoints and log compaction),
|
|
/// and verifying the pre-vote absence (B6).
|
|
/// Go reference: raft.go:961-1019 (proposeAddPeer/proposeRemovePeer),
|
|
/// raft.go CreateSnapshotCheckpoint, raft.go DrainAndReplaySnapshot.
|
|
/// </summary>
|
|
public class RaftMembershipAndSnapshotTests
|
|
{
|
|
// -- Helpers (self-contained) --
|
|
|
|
private static (RaftNode leader, RaftNode[] followers) CreateCluster(int size)
|
|
{
|
|
var nodes = Enumerable.Range(1, size)
|
|
.Select(i => new RaftNode($"n{i}"))
|
|
.ToArray();
|
|
foreach (var node in nodes)
|
|
node.ConfigureCluster(nodes);
|
|
|
|
var candidate = nodes[0];
|
|
candidate.StartElection(size);
|
|
foreach (var voter in nodes.Skip(1))
|
|
candidate.ReceiveVote(voter.GrantVote(candidate.Term, candidate.Id), size);
|
|
|
|
return (candidate, nodes.Skip(1).ToArray());
|
|
}
|
|
|
|
// =====================================================================
|
|
// B4: ProposeAddPeerAsync
|
|
// Go reference: raft.go:961-990 (proposeAddPeer)
|
|
// =====================================================================
|
|
|
|
// Go: raft.go proposeAddPeer — adds member after quorum confirmation
|
|
[Fact]
|
|
public async Task ProposeAddPeerAsync_adds_member_after_quorum()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
leader.Members.ShouldNotContain("n4");
|
|
|
|
await leader.ProposeAddPeerAsync("n4", default);
|
|
|
|
leader.Members.ShouldContain("n4");
|
|
}
|
|
|
|
// Go: raft.go proposeAddPeer — log entry has correct command format
|
|
[Fact]
|
|
public async Task ProposeAddPeerAsync_appends_entry_with_plus_peer_command()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
var initialLogCount = leader.Log.Entries.Count;
|
|
|
|
await leader.ProposeAddPeerAsync("n4", default);
|
|
|
|
leader.Log.Entries.Count.ShouldBe(initialLogCount + 1);
|
|
leader.Log.Entries[^1].Command.ShouldBe("+peer:n4");
|
|
}
|
|
|
|
// Go: raft.go proposeAddPeer — commit index advances
|
|
[Fact]
|
|
public async Task ProposeAddPeerAsync_advances_commit_and_applied_index()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
|
|
var index = await leader.ProposeAddPeerAsync("n4", default);
|
|
|
|
leader.CommitIndex.ShouldBe(index);
|
|
leader.AppliedIndex.ShouldBe(index);
|
|
}
|
|
|
|
// Go: raft.go proposeAddPeer — commit queue receives the entry
|
|
[Fact]
|
|
public async Task ProposeAddPeerAsync_enqueues_entry_to_commit_queue()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
|
|
await leader.ProposeAddPeerAsync("n4", default);
|
|
|
|
// The commit queue should contain the membership change entry
|
|
leader.CommitQueue.Count.ShouldBeGreaterThanOrEqualTo(1);
|
|
}
|
|
|
|
// =====================================================================
|
|
// B4: ProposeRemovePeerAsync
|
|
// Go reference: raft.go:992-1019 (proposeRemovePeer)
|
|
// =====================================================================
|
|
|
|
// Go: raft.go proposeRemovePeer — removes member after quorum
|
|
[Fact]
|
|
public async Task ProposeRemovePeerAsync_removes_member_after_quorum()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
leader.Members.ShouldContain("n2");
|
|
|
|
await leader.ProposeRemovePeerAsync("n2", default);
|
|
|
|
leader.Members.ShouldNotContain("n2");
|
|
}
|
|
|
|
// Go: raft.go proposeRemovePeer — log entry has correct command format
|
|
[Fact]
|
|
public async Task ProposeRemovePeerAsync_appends_entry_with_minus_peer_command()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
var initialLogCount = leader.Log.Entries.Count;
|
|
|
|
await leader.ProposeRemovePeerAsync("n2", default);
|
|
|
|
leader.Log.Entries.Count.ShouldBe(initialLogCount + 1);
|
|
leader.Log.Entries[^1].Command.ShouldBe("-peer:n2");
|
|
}
|
|
|
|
// Go: raft.go proposeRemovePeer — commit index advances
|
|
[Fact]
|
|
public async Task ProposeRemovePeerAsync_advances_commit_and_applied_index()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
|
|
var index = await leader.ProposeRemovePeerAsync("n2", default);
|
|
|
|
leader.CommitIndex.ShouldBe(index);
|
|
leader.AppliedIndex.ShouldBe(index);
|
|
}
|
|
|
|
// =====================================================================
|
|
// B4: MembershipChangeInProgress guard
|
|
// Go reference: raft.go:961-1019 single-change invariant
|
|
// =====================================================================
|
|
|
|
// Go: raft.go single-change invariant — cannot remove the last member
|
|
[Fact]
|
|
public async Task ProposeRemovePeerAsync_throws_when_only_one_member_remains()
|
|
{
|
|
// Create a lone leader (not in a cluster — self is the only member)
|
|
var lone = new RaftNode("solo");
|
|
// Manually make it leader by running election against itself
|
|
lone.StartElection(1);
|
|
|
|
lone.Members.Count.ShouldBe(1);
|
|
|
|
await Should.ThrowAsync<InvalidOperationException>(
|
|
() => lone.ProposeRemovePeerAsync("solo", default).AsTask());
|
|
}
|
|
|
|
// Go: raft.go proposeAddPeer — only leader can propose
|
|
[Fact]
|
|
public async Task ProposeAddPeerAsync_throws_when_node_is_not_leader()
|
|
{
|
|
var (_, followers) = CreateCluster(3);
|
|
var follower = followers[0];
|
|
follower.IsLeader.ShouldBeFalse();
|
|
|
|
await Should.ThrowAsync<InvalidOperationException>(
|
|
() => follower.ProposeAddPeerAsync("n4", default).AsTask());
|
|
}
|
|
|
|
// Go: raft.go proposeRemovePeer — only leader can propose
|
|
[Fact]
|
|
public async Task ProposeRemovePeerAsync_throws_when_node_is_not_leader()
|
|
{
|
|
var (_, followers) = CreateCluster(3);
|
|
var follower = followers[0];
|
|
follower.IsLeader.ShouldBeFalse();
|
|
|
|
await Should.ThrowAsync<InvalidOperationException>(
|
|
() => follower.ProposeRemovePeerAsync("n1", default).AsTask());
|
|
}
|
|
|
|
// Go: raft.go single-change invariant — MembershipChangeInProgress cleared after proposal
|
|
[Fact]
|
|
public async Task MembershipChangeInProgress_is_false_after_proposal_completes()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
|
|
await leader.ProposeAddPeerAsync("n4", default);
|
|
|
|
// After the proposal completes the flag must be cleared
|
|
leader.MembershipChangeInProgress.ShouldBeFalse();
|
|
}
|
|
|
|
// Go: raft.go single-change invariant — two sequential proposals both succeed
|
|
[Fact]
|
|
public async Task Two_sequential_membership_changes_both_succeed()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
|
|
await leader.ProposeAddPeerAsync("n4", default);
|
|
// First change must be cleared before second can proceed
|
|
leader.MembershipChangeInProgress.ShouldBeFalse();
|
|
|
|
await leader.ProposeAddPeerAsync("n5", default);
|
|
|
|
leader.Members.ShouldContain("n4");
|
|
leader.Members.ShouldContain("n5");
|
|
}
|
|
|
|
// =====================================================================
|
|
// B5: RaftLog.Compact
|
|
// Go reference: raft.go WAL compact / compactLog
|
|
// =====================================================================
|
|
|
|
// Go: raft.go compactLog — removes entries up to given index
|
|
[Fact]
|
|
public void Log_Compact_removes_entries_up_to_index()
|
|
{
|
|
var log = new RaftLog();
|
|
log.Append(term: 1, command: "a"); // index 1
|
|
log.Append(term: 1, command: "b"); // index 2
|
|
log.Append(term: 1, command: "c"); // index 3
|
|
log.Append(term: 1, command: "d"); // index 4
|
|
|
|
log.Compact(upToIndex: 2);
|
|
|
|
log.Entries.Count.ShouldBe(2);
|
|
log.Entries[0].Index.ShouldBe(3);
|
|
log.Entries[1].Index.ShouldBe(4);
|
|
}
|
|
|
|
// Go: raft.go compactLog — base index advances after compact
|
|
[Fact]
|
|
public void Log_Compact_advances_base_index()
|
|
{
|
|
var log = new RaftLog();
|
|
log.Append(term: 1, command: "a"); // index 1
|
|
log.Append(term: 1, command: "b"); // index 2
|
|
log.Append(term: 1, command: "c"); // index 3
|
|
|
|
log.Compact(upToIndex: 2);
|
|
|
|
// New entries should be indexed from the new base
|
|
var next = log.Append(term: 1, command: "d");
|
|
next.Index.ShouldBe(4);
|
|
}
|
|
|
|
// Go: raft.go compactLog — compact all entries yields empty log
|
|
[Fact]
|
|
public void Log_Compact_all_entries_leaves_empty_log()
|
|
{
|
|
var log = new RaftLog();
|
|
log.Append(term: 1, command: "x"); // index 1
|
|
log.Append(term: 1, command: "y"); // index 2
|
|
|
|
log.Compact(upToIndex: 2);
|
|
|
|
log.Entries.Count.ShouldBe(0);
|
|
}
|
|
|
|
// Go: raft.go compactLog — compact with index beyond all entries is safe
|
|
[Fact]
|
|
public void Log_Compact_beyond_all_entries_removes_everything()
|
|
{
|
|
var log = new RaftLog();
|
|
log.Append(term: 1, command: "p"); // index 1
|
|
log.Append(term: 1, command: "q"); // index 2
|
|
|
|
log.Compact(upToIndex: 999);
|
|
|
|
log.Entries.Count.ShouldBe(0);
|
|
}
|
|
|
|
// Go: raft.go compactLog — compact with index 0 is a no-op
|
|
[Fact]
|
|
public void Log_Compact_index_zero_is_noop()
|
|
{
|
|
var log = new RaftLog();
|
|
log.Append(term: 1, command: "r"); // index 1
|
|
log.Append(term: 1, command: "s"); // index 2
|
|
|
|
log.Compact(upToIndex: 0);
|
|
|
|
log.Entries.Count.ShouldBe(2);
|
|
}
|
|
|
|
// =====================================================================
|
|
// B5: CreateSnapshotCheckpointAsync
|
|
// Go reference: raft.go CreateSnapshotCheckpoint
|
|
// =====================================================================
|
|
|
|
// Go: raft.go CreateSnapshotCheckpoint — captures applied index and compacts log
|
|
[Fact]
|
|
public async Task CreateSnapshotCheckpointAsync_creates_snapshot_and_compacts_log()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
await leader.ProposeAsync("cmd-1", default);
|
|
await leader.ProposeAsync("cmd-2", default);
|
|
await leader.ProposeAsync("cmd-3", default);
|
|
|
|
var logCountBefore = leader.Log.Entries.Count;
|
|
var snapshot = await leader.CreateSnapshotCheckpointAsync(default);
|
|
|
|
snapshot.LastIncludedIndex.ShouldBe(leader.AppliedIndex);
|
|
snapshot.LastIncludedTerm.ShouldBe(leader.Term);
|
|
// The log should have been compacted — entries up to applied index removed
|
|
leader.Log.Entries.Count.ShouldBeLessThan(logCountBefore);
|
|
}
|
|
|
|
// Go: raft.go CreateSnapshotCheckpoint — log is empty after compacting all entries
|
|
[Fact]
|
|
public async Task CreateSnapshotCheckpointAsync_with_all_entries_applied_empties_log()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
await leader.ProposeAsync("alpha", default);
|
|
await leader.ProposeAsync("beta", default);
|
|
|
|
// AppliedIndex should equal the last entry's index after ProposeAsync
|
|
var snapshot = await leader.CreateSnapshotCheckpointAsync(default);
|
|
|
|
snapshot.LastIncludedIndex.ShouldBeGreaterThan(0);
|
|
leader.Log.Entries.Count.ShouldBe(0);
|
|
}
|
|
|
|
// Go: raft.go CreateSnapshotCheckpoint — new entries continue from correct index after checkpoint
|
|
[Fact]
|
|
public async Task CreateSnapshotCheckpointAsync_new_entries_start_after_snapshot()
|
|
{
|
|
var (leader, _) = CreateCluster(3);
|
|
await leader.ProposeAsync("first", default);
|
|
await leader.ProposeAsync("second", default);
|
|
|
|
var snapshot = await leader.CreateSnapshotCheckpointAsync(default);
|
|
var snapshotIndex = snapshot.LastIncludedIndex;
|
|
|
|
// Append directly to the log (bypasses quorum for index continuity test)
|
|
var nextEntry = leader.Log.Append(term: leader.Term, command: "third");
|
|
|
|
nextEntry.Index.ShouldBe(snapshotIndex + 1);
|
|
}
|
|
|
|
// =====================================================================
|
|
// B5: DrainAndReplaySnapshotAsync
|
|
// Go reference: raft.go DrainAndReplaySnapshot
|
|
// =====================================================================
|
|
|
|
// Go: raft.go DrainAndReplaySnapshot — installs snapshot, updates commit and applied index
|
|
[Fact]
|
|
public async Task DrainAndReplaySnapshotAsync_installs_snapshot_and_updates_indices()
|
|
{
|
|
var (leader, followers) = CreateCluster(3);
|
|
await leader.ProposeAsync("entry-1", default);
|
|
await leader.ProposeAsync("entry-2", default);
|
|
|
|
var snapshot = new RaftSnapshot
|
|
{
|
|
LastIncludedIndex = 100,
|
|
LastIncludedTerm = 5,
|
|
};
|
|
|
|
var follower = followers[0];
|
|
await follower.DrainAndReplaySnapshotAsync(snapshot, default);
|
|
|
|
follower.AppliedIndex.ShouldBe(100);
|
|
follower.CommitIndex.ShouldBe(100);
|
|
}
|
|
|
|
// Go: raft.go DrainAndReplaySnapshot — drains pending commit queue entries
|
|
[Fact]
|
|
public async Task DrainAndReplaySnapshotAsync_drains_commit_queue()
|
|
{
|
|
var node = new RaftNode("n1");
|
|
// Manually stuff some entries into the commit queue to simulate pending work
|
|
var fakeEntry1 = new RaftLogEntry(1, 1, "fake-1");
|
|
var fakeEntry2 = new RaftLogEntry(2, 1, "fake-2");
|
|
await node.CommitQueue.EnqueueAsync(fakeEntry1, default);
|
|
await node.CommitQueue.EnqueueAsync(fakeEntry2, default);
|
|
node.CommitQueue.Count.ShouldBe(2);
|
|
|
|
var snapshot = new RaftSnapshot { LastIncludedIndex = 50, LastIncludedTerm = 3 };
|
|
await node.DrainAndReplaySnapshotAsync(snapshot, default);
|
|
|
|
// Queue should be empty after drain
|
|
node.CommitQueue.Count.ShouldBe(0);
|
|
}
|
|
|
|
// Go: raft.go DrainAndReplaySnapshot — log is replaced with snapshot baseline
|
|
[Fact]
|
|
public async Task DrainAndReplaySnapshotAsync_replaces_log_with_snapshot_baseline()
|
|
{
|
|
var node = new RaftNode("n1");
|
|
node.Log.Append(term: 1, command: "stale-a");
|
|
node.Log.Append(term: 1, command: "stale-b");
|
|
node.Log.Entries.Count.ShouldBe(2);
|
|
|
|
var snapshot = new RaftSnapshot { LastIncludedIndex = 77, LastIncludedTerm = 4 };
|
|
await node.DrainAndReplaySnapshotAsync(snapshot, default);
|
|
|
|
node.Log.Entries.Count.ShouldBe(0);
|
|
// New entries should start from the snapshot base
|
|
var next = node.Log.Append(term: 5, command: "fresh");
|
|
next.Index.ShouldBe(78);
|
|
}
|
|
}
|