Compare commits
28 Commits
6c268c4143
...
502481b6ba
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
502481b6ba | ||
|
|
bfe7a71fcd | ||
|
|
e09835ca70 | ||
|
|
b7bac8e68e | ||
|
|
7468401bd0 | ||
|
|
494d327282 | ||
|
|
36e23fa31d | ||
|
|
8fa16d59d2 | ||
|
|
955d568423 | ||
|
|
2eaa736b21 | ||
|
|
dcc3e4460e | ||
|
|
8fb80acafe | ||
|
|
3183fd2dc7 | ||
|
|
55de052009 | ||
|
|
7e4a23a0b7 | ||
|
|
63d4e43178 | ||
|
|
1a9b6a9175 | ||
|
|
7f9ee493b6 | ||
|
|
9fdc931ff5 | ||
|
|
e0f5fe7150 | ||
|
|
a0894e7321 | ||
|
|
c9ac4b9918 | ||
|
|
3ab683489e | ||
|
|
be432c3224 | ||
|
|
f402fd364f | ||
|
|
f031edb97e | ||
|
|
10e2c4ef22 | ||
|
|
f143295392 |
Binary file not shown.
@@ -398,6 +398,30 @@ public static class ConfigReloader
|
||||
}, ct);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Compares two options directly (without reading from a config file) and returns
|
||||
/// a reload result indicating whether the change is valid.
|
||||
/// Go reference: server/reload.go — Reload with in-memory options comparison.
|
||||
/// </summary>
|
||||
public static Task<ReloadFromOptionsResult> ReloadFromOptionsAsync(NatsOptions original, NatsOptions updated)
|
||||
{
|
||||
var changes = Diff(original, updated);
|
||||
var errors = Validate(changes);
|
||||
var rejectedChanges = new List<string>();
|
||||
|
||||
foreach (var change in changes)
|
||||
{
|
||||
if (change.IsNonReloadable)
|
||||
{
|
||||
rejectedChanges.Add($"{change.Name} cannot be changed at runtime");
|
||||
}
|
||||
}
|
||||
|
||||
return Task.FromResult(new ReloadFromOptionsResult(
|
||||
Success: rejectedChanges.Count == 0 && errors.Count == 0,
|
||||
RejectedChanges: rejectedChanges));
|
||||
}
|
||||
|
||||
// ─── Comparison helpers ─────────────────────────────────────────
|
||||
|
||||
private static void CompareAndAdd<T>(List<IConfigChange> changes, string name, T oldVal, T newVal)
|
||||
@@ -524,3 +548,8 @@ public sealed class ConfigReloadResult
|
||||
|
||||
public bool HasErrors => Errors is { Count: > 0 };
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Result of an in-memory options comparison for reload validation.
|
||||
/// </summary>
|
||||
public sealed record ReloadFromOptionsResult(bool Success, List<string> RejectedChanges);
|
||||
|
||||
41
src/NATS.Server/Configuration/SignalHandler.cs
Normal file
41
src/NATS.Server/Configuration/SignalHandler.cs
Normal file
@@ -0,0 +1,41 @@
|
||||
using System.Runtime.InteropServices;
|
||||
|
||||
namespace NATS.Server.Configuration;
|
||||
|
||||
/// <summary>
|
||||
/// Registers POSIX signal handlers for config reload.
|
||||
/// Go reference: server/signal_unix.go, server/opts.go reload logic.
|
||||
/// On SIGHUP, triggers config reload via ConfigReloader.
|
||||
/// </summary>
|
||||
public static class SignalHandler
|
||||
{
|
||||
private static PosixSignalRegistration? _registration;
|
||||
|
||||
/// <summary>
|
||||
/// Registers a SIGHUP handler that will call the provided reload callback.
|
||||
/// Go reference: server/signal_unix.go — handleSignals goroutine.
|
||||
/// </summary>
|
||||
/// <param name="onReload">Callback invoked when SIGHUP is received.</param>
|
||||
public static void Register(Action onReload)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(onReload);
|
||||
_registration = PosixSignalRegistration.Create(PosixSignal.SIGHUP, _ =>
|
||||
{
|
||||
onReload();
|
||||
});
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Unregisters the SIGHUP handler.
|
||||
/// </summary>
|
||||
public static void Unregister()
|
||||
{
|
||||
_registration?.Dispose();
|
||||
_registration = null;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Whether a SIGHUP handler is currently registered.
|
||||
/// </summary>
|
||||
public static bool IsRegistered => _registration is not null;
|
||||
}
|
||||
14
src/NATS.Server/Gateways/GatewayInfo.cs
Normal file
14
src/NATS.Server/Gateways/GatewayInfo.cs
Normal file
@@ -0,0 +1,14 @@
|
||||
namespace NATS.Server.Gateways;
|
||||
|
||||
/// <summary>
|
||||
/// Information about a remote gateway cluster received during implicit discovery.
|
||||
/// Go reference: server/gateway.go — implicit gateway discovery via INFO gossip.
|
||||
/// </summary>
|
||||
public sealed record GatewayInfo
|
||||
{
|
||||
/// <summary>Name of the remote gateway cluster.</summary>
|
||||
public required string Name { get; init; }
|
||||
|
||||
/// <summary>URLs for connecting to the remote gateway cluster.</summary>
|
||||
public required string[] Urls { get; init; }
|
||||
}
|
||||
@@ -16,6 +16,7 @@ public sealed class GatewayManager : IAsyncDisposable
|
||||
private readonly Action<GatewayMessage> _messageSink;
|
||||
private readonly ILogger<GatewayManager> _logger;
|
||||
private readonly ConcurrentDictionary<string, GatewayConnection> _connections = new(StringComparer.Ordinal);
|
||||
private readonly HashSet<string> _discoveredGateways = new(StringComparer.OrdinalIgnoreCase);
|
||||
private long _forwardedJetStreamClusterMessages;
|
||||
|
||||
private CancellationTokenSource? _cts;
|
||||
@@ -44,6 +45,29 @@ public sealed class GatewayManager : IAsyncDisposable
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Gateway clusters auto-discovered via INFO gossip.
|
||||
/// Go reference: server/gateway.go processImplicitGateway.
|
||||
/// </summary>
|
||||
public IReadOnlyCollection<string> DiscoveredGateways
|
||||
{
|
||||
get { lock (_discoveredGateways) return _discoveredGateways.ToList(); }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Processes a gateway info message from a peer, discovering new gateway clusters.
|
||||
/// Go reference: server/gateway.go:800-850 (processImplicitGateway).
|
||||
/// </summary>
|
||||
public void ProcessImplicitGateway(GatewayInfo gwInfo)
|
||||
{
|
||||
ArgumentNullException.ThrowIfNull(gwInfo);
|
||||
|
||||
lock (_discoveredGateways)
|
||||
{
|
||||
_discoveredGateways.Add(gwInfo.Name);
|
||||
}
|
||||
}
|
||||
|
||||
public Task StartAsync(CancellationToken ct)
|
||||
{
|
||||
_cts = CancellationTokenSource.CreateLinkedTokenSource(ct);
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
using System.Text.Json.Serialization;
|
||||
|
||||
namespace NATS.Server.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
@@ -30,6 +32,13 @@ public sealed class StreamAssignment
|
||||
public bool Responded { get; set; }
|
||||
public bool Recovering { get; set; }
|
||||
public bool Reassigning { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// Consumer assignments keyed by consumer name.
|
||||
/// Uses <see cref="JsonObjectCreationHandling.Populate"/> so the deserializer populates
|
||||
/// the existing dictionary instance rather than replacing it (the property has no setter).
|
||||
/// </summary>
|
||||
[JsonObjectCreationHandling(JsonObjectCreationHandling.Populate)]
|
||||
public Dictionary<string, ConsumerAssignment> Consumers { get; } = new(StringComparer.Ordinal);
|
||||
}
|
||||
|
||||
|
||||
211
src/NATS.Server/JetStream/Cluster/JetStreamClusterMonitor.cs
Normal file
211
src/NATS.Server/JetStream/Cluster/JetStreamClusterMonitor.cs
Normal file
@@ -0,0 +1,211 @@
|
||||
using System.Text.Json;
|
||||
using System.Threading.Channels;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.JetStream.Cluster;
|
||||
|
||||
/// <summary>
|
||||
/// Background loop consuming meta RAFT entries and dispatching cluster state changes.
|
||||
/// Reads <see cref="RaftLogEntry"/> items from a channel, parses the JSON command payload,
|
||||
/// and applies the corresponding mutation to the <see cref="JetStreamMetaGroup"/>.
|
||||
/// Go reference: jetstream_cluster.go:1455-1825 (monitorCluster).
|
||||
/// </summary>
|
||||
public sealed class JetStreamClusterMonitor
|
||||
{
|
||||
private readonly JetStreamMetaGroup _meta;
|
||||
private readonly ChannelReader<RaftLogEntry> _entries;
|
||||
private readonly ILogger<JetStreamClusterMonitor> _logger;
|
||||
|
||||
// Monotonic counter incremented after each entry (including malformed ones).
|
||||
// Protected by _processedLock so WaitForProcessedAsync can wait for a target
|
||||
// without races between the count read and the wait.
|
||||
private readonly object _processedLock = new();
|
||||
private int _processedCount;
|
||||
|
||||
/// <summary>
|
||||
/// Total number of entries dequeued from the channel and processed (including
|
||||
/// malformed entries that were skipped). Useful for test synchronisation.
|
||||
/// </summary>
|
||||
public int ProcessedCount { get { lock (_processedLock) return _processedCount; } }
|
||||
|
||||
public JetStreamClusterMonitor(JetStreamMetaGroup meta, ChannelReader<RaftLogEntry> entries)
|
||||
: this(meta, entries, NullLogger<JetStreamClusterMonitor>.Instance)
|
||||
{
|
||||
}
|
||||
|
||||
public JetStreamClusterMonitor(
|
||||
JetStreamMetaGroup meta,
|
||||
ChannelReader<RaftLogEntry> entries,
|
||||
ILogger<JetStreamClusterMonitor> logger)
|
||||
{
|
||||
_meta = meta;
|
||||
_entries = entries;
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Starts consuming entries from the channel until the token is cancelled.
|
||||
/// Each entry is applied synchronously before the next is read.
|
||||
/// Returns normally (without throwing) when <paramref name="ct"/> is cancelled.
|
||||
/// </summary>
|
||||
public async Task StartAsync(CancellationToken ct)
|
||||
{
|
||||
try
|
||||
{
|
||||
await foreach (var entry in _entries.ReadAllAsync(ct))
|
||||
{
|
||||
ApplyMetaEntry(entry);
|
||||
lock (_processedLock)
|
||||
{
|
||||
_processedCount++;
|
||||
Monitor.PulseAll(_processedLock);
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (OperationCanceledException) when (ct.IsCancellationRequested)
|
||||
{
|
||||
_logger.LogDebug("JetStreamClusterMonitor stopped via cancellation.");
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Waits until the cumulative <see cref="ProcessedCount"/> reaches at least
|
||||
/// <paramref name="targetCount"/>. Returns immediately when the target is already met.
|
||||
/// Used by tests to synchronise without sleeping.
|
||||
/// </summary>
|
||||
public Task WaitForProcessedAsync(int targetCount, CancellationToken ct)
|
||||
{
|
||||
// Fast path — already done.
|
||||
lock (_processedLock)
|
||||
{
|
||||
if (_processedCount >= targetCount)
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
// Slow path — offload the blocking Monitor.Wait to a thread-pool thread so the
|
||||
// calling async context is not blocked. Monitor.Wait releases the lock atomically
|
||||
// while waiting, eliminating the TOCTOU race between reading the count and waiting.
|
||||
return Task.Run(() =>
|
||||
{
|
||||
lock (_processedLock)
|
||||
{
|
||||
while (_processedCount < targetCount)
|
||||
{
|
||||
ct.ThrowIfCancellationRequested();
|
||||
// Wait up to 50 ms so we can check cancellation periodically.
|
||||
Monitor.Wait(_processedLock, millisecondsTimeout: 50);
|
||||
}
|
||||
}
|
||||
}, ct);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Entry dispatch
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
private void ApplyMetaEntry(RaftLogEntry entry)
|
||||
{
|
||||
try
|
||||
{
|
||||
using var doc = JsonDocument.Parse(entry.Command);
|
||||
var root = doc.RootElement;
|
||||
|
||||
if (!root.TryGetProperty("Op", out var opElement))
|
||||
return;
|
||||
|
||||
switch (opElement.GetString())
|
||||
{
|
||||
case "assignStream":
|
||||
ProcessStreamAssignment(root);
|
||||
break;
|
||||
case "removeStream":
|
||||
ProcessStreamRemoval(root);
|
||||
break;
|
||||
case "assignConsumer":
|
||||
ProcessConsumerAssignment(root);
|
||||
break;
|
||||
case "removeConsumer":
|
||||
ProcessConsumerRemoval(root);
|
||||
break;
|
||||
case "snapshot":
|
||||
ApplyMetaSnapshot(root);
|
||||
break;
|
||||
// Unknown ops are silently ignored — forward compatibility.
|
||||
}
|
||||
}
|
||||
catch (JsonException ex)
|
||||
{
|
||||
_logger.LogWarning(
|
||||
ex,
|
||||
"Skipping malformed meta RAFT entry at index {Index}: {Message}",
|
||||
entry.Index,
|
||||
ex.Message);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Per-op processors
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
private void ProcessStreamAssignment(JsonElement root)
|
||||
{
|
||||
var streamName = root.GetProperty("StreamName").GetString()!;
|
||||
var peers = root.GetProperty("Peers").EnumerateArray()
|
||||
.Select(p => p.GetString()!)
|
||||
.ToList();
|
||||
var config = root.TryGetProperty("Config", out var cfg) ? cfg.GetString() ?? "{}" : "{}";
|
||||
|
||||
var sa = new StreamAssignment
|
||||
{
|
||||
StreamName = streamName,
|
||||
Group = new RaftGroup { Name = streamName, Peers = peers },
|
||||
ConfigJson = config,
|
||||
};
|
||||
_meta.AddStreamAssignment(sa);
|
||||
_logger.LogDebug("Applied stream assignment for {StreamName}", streamName);
|
||||
}
|
||||
|
||||
private void ProcessStreamRemoval(JsonElement root)
|
||||
{
|
||||
var streamName = root.GetProperty("StreamName").GetString()!;
|
||||
_meta.RemoveStreamAssignment(streamName);
|
||||
_logger.LogDebug("Applied stream removal for {StreamName}", streamName);
|
||||
}
|
||||
|
||||
private void ProcessConsumerAssignment(JsonElement root)
|
||||
{
|
||||
var streamName = root.GetProperty("StreamName").GetString()!;
|
||||
var consumerName = root.GetProperty("ConsumerName").GetString()!;
|
||||
var peers = root.GetProperty("Peers").EnumerateArray()
|
||||
.Select(p => p.GetString()!)
|
||||
.ToList();
|
||||
|
||||
var ca = new ConsumerAssignment
|
||||
{
|
||||
ConsumerName = consumerName,
|
||||
StreamName = streamName,
|
||||
Group = new RaftGroup { Name = consumerName, Peers = peers },
|
||||
};
|
||||
_meta.AddConsumerAssignment(streamName, ca);
|
||||
_logger.LogDebug("Applied consumer assignment {ConsumerName} on {StreamName}", consumerName, streamName);
|
||||
}
|
||||
|
||||
private void ProcessConsumerRemoval(JsonElement root)
|
||||
{
|
||||
var streamName = root.GetProperty("StreamName").GetString()!;
|
||||
var consumerName = root.GetProperty("ConsumerName").GetString()!;
|
||||
_meta.RemoveConsumerAssignment(streamName, consumerName);
|
||||
_logger.LogDebug("Applied consumer removal {ConsumerName} from {StreamName}", consumerName, streamName);
|
||||
}
|
||||
|
||||
private void ApplyMetaSnapshot(JsonElement root)
|
||||
{
|
||||
var dataB64 = root.GetProperty("Data").GetString()!;
|
||||
var data = Convert.FromBase64String(dataB64);
|
||||
var assignments = MetaSnapshotCodec.Decode(data);
|
||||
_meta.ReplaceAllAssignments(assignments);
|
||||
_logger.LogInformation("Applied meta snapshot: {StreamCount} streams restored", assignments.Count);
|
||||
}
|
||||
}
|
||||
@@ -22,10 +22,12 @@ public sealed class JetStreamMetaGroup
|
||||
private readonly ConcurrentDictionary<string, StreamAssignment> _assignments =
|
||||
new(StringComparer.Ordinal);
|
||||
|
||||
// B8: Inflight proposal tracking -- entries that have been proposed but not yet committed.
|
||||
// Go reference: jetstream_cluster.go inflight tracking for proposals.
|
||||
private readonly ConcurrentDictionary<string, string> _inflightStreams = new(StringComparer.Ordinal);
|
||||
private readonly ConcurrentDictionary<string, string> _inflightConsumers = new(StringComparer.Ordinal);
|
||||
// Account-scoped inflight proposal tracking -- entries proposed but not yet committed.
|
||||
// Go reference: jetstream_cluster.go inflight tracking for proposals (jetstream_cluster.go:1193-1278).
|
||||
// Outer key: account name. Inner key: stream name → InflightInfo.
|
||||
private readonly ConcurrentDictionary<string, Dictionary<string, InflightInfo>> _inflightStreams = new(StringComparer.Ordinal);
|
||||
// Outer key: account name. Inner key: "stream/consumer" → InflightInfo.
|
||||
private readonly ConcurrentDictionary<string, Dictionary<string, InflightInfo>> _inflightConsumers = new(StringComparer.Ordinal);
|
||||
|
||||
// Running count of consumers across all stream assignments.
|
||||
private int _totalConsumerCount;
|
||||
@@ -74,14 +76,152 @@ public sealed class JetStreamMetaGroup
|
||||
public int ConsumerCount => _totalConsumerCount;
|
||||
|
||||
/// <summary>
|
||||
/// Number of inflight stream proposals.
|
||||
/// Total number of inflight stream proposals across all accounts.
|
||||
/// </summary>
|
||||
public int InflightStreamCount => _inflightStreams.Count;
|
||||
public int InflightStreamCount => _inflightStreams.Values.Sum(d => d.Count);
|
||||
|
||||
/// <summary>
|
||||
/// Number of inflight consumer proposals.
|
||||
/// Total number of inflight consumer proposals across all accounts.
|
||||
/// </summary>
|
||||
public int InflightConsumerCount => _inflightConsumers.Count;
|
||||
public int InflightConsumerCount => _inflightConsumers.Values.Sum(d => d.Count);
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Inflight proposal tracking — public API
|
||||
// Go reference: jetstream_cluster.go:1193-1278 inflight proposal management.
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
/// <summary>
|
||||
/// Tracks a stream proposal as inflight for the given account.
|
||||
/// Increments OpsCount on duplicate proposals for the same stream name.
|
||||
/// Go reference: jetstream_cluster.go inflight proposal tracking.
|
||||
/// </summary>
|
||||
public void TrackInflightStreamProposal(string account, StreamAssignment sa)
|
||||
{
|
||||
var accountDict = _inflightStreams.GetOrAdd(account, _ => new Dictionary<string, InflightInfo>(StringComparer.Ordinal));
|
||||
lock (accountDict)
|
||||
{
|
||||
if (accountDict.TryGetValue(sa.StreamName, out var existing))
|
||||
accountDict[sa.StreamName] = existing with { OpsCount = existing.OpsCount + 1 };
|
||||
else
|
||||
accountDict[sa.StreamName] = new InflightInfo(OpsCount: 1, Deleted: false, Assignment: sa);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Decrements OpsCount for a stream proposal. Removes the entry when OpsCount reaches zero.
|
||||
/// Removes the account entry when its dictionary becomes empty.
|
||||
/// Go reference: jetstream_cluster.go inflight proposal tracking.
|
||||
/// </summary>
|
||||
public void RemoveInflightStreamProposal(string account, string streamName)
|
||||
{
|
||||
if (!_inflightStreams.TryGetValue(account, out var accountDict))
|
||||
return;
|
||||
|
||||
lock (accountDict)
|
||||
{
|
||||
if (!accountDict.TryGetValue(streamName, out var existing))
|
||||
return;
|
||||
|
||||
if (existing.OpsCount <= 1)
|
||||
{
|
||||
accountDict.Remove(streamName);
|
||||
if (accountDict.Count == 0)
|
||||
_inflightStreams.TryRemove(account, out _);
|
||||
}
|
||||
else
|
||||
{
|
||||
accountDict[streamName] = existing with { OpsCount = existing.OpsCount - 1 };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns true if the given stream is currently tracked as inflight for the account.
|
||||
/// Go reference: jetstream_cluster.go inflight check.
|
||||
/// </summary>
|
||||
public bool IsStreamInflight(string account, string streamName)
|
||||
{
|
||||
if (!_inflightStreams.TryGetValue(account, out var accountDict))
|
||||
return false;
|
||||
|
||||
lock (accountDict)
|
||||
{
|
||||
return accountDict.TryGetValue(streamName, out var info) && info.OpsCount > 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Tracks a consumer proposal as inflight for the given account.
|
||||
/// Increments OpsCount on duplicate proposals for the same stream/consumer key.
|
||||
/// Go reference: jetstream_cluster.go inflight consumer proposal tracking.
|
||||
/// </summary>
|
||||
public void TrackInflightConsumerProposal(string account, string streamName, string consumerName, ConsumerAssignment? ca = null)
|
||||
{
|
||||
var key = $"{streamName}/{consumerName}";
|
||||
var accountDict = _inflightConsumers.GetOrAdd(account, _ => new Dictionary<string, InflightInfo>(StringComparer.Ordinal));
|
||||
lock (accountDict)
|
||||
{
|
||||
if (accountDict.TryGetValue(key, out var existing))
|
||||
accountDict[key] = existing with { OpsCount = existing.OpsCount + 1 };
|
||||
else
|
||||
accountDict[key] = new InflightInfo(OpsCount: 1, Deleted: false, Assignment: null);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Decrements OpsCount for a consumer proposal. Removes the entry when OpsCount reaches zero.
|
||||
/// Removes the account entry when its dictionary becomes empty.
|
||||
/// Go reference: jetstream_cluster.go inflight consumer proposal tracking.
|
||||
/// </summary>
|
||||
public void RemoveInflightConsumerProposal(string account, string streamName, string consumerName)
|
||||
{
|
||||
var key = $"{streamName}/{consumerName}";
|
||||
if (!_inflightConsumers.TryGetValue(account, out var accountDict))
|
||||
return;
|
||||
|
||||
lock (accountDict)
|
||||
{
|
||||
if (!accountDict.TryGetValue(key, out var existing))
|
||||
return;
|
||||
|
||||
if (existing.OpsCount <= 1)
|
||||
{
|
||||
accountDict.Remove(key);
|
||||
if (accountDict.Count == 0)
|
||||
_inflightConsumers.TryRemove(account, out _);
|
||||
}
|
||||
else
|
||||
{
|
||||
accountDict[key] = existing with { OpsCount = existing.OpsCount - 1 };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns true if the given consumer is currently tracked as inflight for the account.
|
||||
/// Go reference: jetstream_cluster.go inflight check.
|
||||
/// </summary>
|
||||
public bool IsConsumerInflight(string account, string streamName, string consumerName)
|
||||
{
|
||||
var key = $"{streamName}/{consumerName}";
|
||||
if (!_inflightConsumers.TryGetValue(account, out var accountDict))
|
||||
return false;
|
||||
|
||||
lock (accountDict)
|
||||
{
|
||||
return accountDict.TryGetValue(key, out var info) && info.OpsCount > 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Clears all inflight stream and consumer proposals across all accounts.
|
||||
/// Go reference: jetstream_cluster.go — inflight cleared on shutdown/reset.
|
||||
/// </summary>
|
||||
public void ClearAllInflight()
|
||||
{
|
||||
_inflightStreams.Clear();
|
||||
_inflightConsumers.Clear();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Stream proposals
|
||||
@@ -104,14 +244,16 @@ public sealed class JetStreamMetaGroup
|
||||
{
|
||||
_ = ct;
|
||||
|
||||
var resolvedGroup = group ?? new RaftGroup { Name = config.Name };
|
||||
|
||||
// Track as inflight
|
||||
_inflightStreams[config.Name] = config.Name;
|
||||
TrackInflightStreamProposal("$G", new StreamAssignment { StreamName = config.Name, Group = resolvedGroup });
|
||||
|
||||
// Apply the entry (idempotent via AddOrUpdate)
|
||||
ApplyStreamCreate(config.Name, group ?? new RaftGroup { Name = config.Name });
|
||||
ApplyStreamCreate(config.Name, resolvedGroup);
|
||||
|
||||
// Clear inflight
|
||||
_inflightStreams.TryRemove(config.Name, out _);
|
||||
RemoveInflightStreamProposal("$G", config.Name);
|
||||
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
@@ -131,14 +273,16 @@ public sealed class JetStreamMetaGroup
|
||||
if (_assignments.ContainsKey(config.Name))
|
||||
throw new InvalidOperationException($"Stream '{config.Name}' already exists.");
|
||||
|
||||
var resolvedGroup = group ?? new RaftGroup { Name = config.Name };
|
||||
|
||||
// Track as inflight
|
||||
_inflightStreams[config.Name] = config.Name;
|
||||
TrackInflightStreamProposal("$G", new StreamAssignment { StreamName = config.Name, Group = resolvedGroup });
|
||||
|
||||
// Apply the entry
|
||||
ApplyStreamCreate(config.Name, group ?? new RaftGroup { Name = config.Name });
|
||||
ApplyStreamCreate(config.Name, resolvedGroup);
|
||||
|
||||
// Clear inflight
|
||||
_inflightStreams.TryRemove(config.Name, out _);
|
||||
RemoveInflightStreamProposal("$G", config.Name);
|
||||
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
@@ -187,14 +331,13 @@ public sealed class JetStreamMetaGroup
|
||||
_ = ct;
|
||||
|
||||
// Track as inflight
|
||||
var inflightKey = $"{streamName}/{consumerName}";
|
||||
_inflightConsumers[inflightKey] = inflightKey;
|
||||
TrackInflightConsumerProposal("$G", streamName, consumerName);
|
||||
|
||||
// Apply the entry (silently ignored if stream does not exist)
|
||||
ApplyConsumerCreate(streamName, consumerName, group);
|
||||
|
||||
// Clear inflight
|
||||
_inflightConsumers.TryRemove(inflightKey, out _);
|
||||
RemoveInflightConsumerProposal("$G", streamName, consumerName);
|
||||
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
@@ -219,14 +362,13 @@ public sealed class JetStreamMetaGroup
|
||||
throw new InvalidOperationException($"Stream '{streamName}' not found.");
|
||||
|
||||
// Track as inflight
|
||||
var inflightKey = $"{streamName}/{consumerName}";
|
||||
_inflightConsumers[inflightKey] = inflightKey;
|
||||
TrackInflightConsumerProposal("$G", streamName, consumerName);
|
||||
|
||||
// Apply the entry
|
||||
ApplyConsumerCreate(streamName, consumerName, group);
|
||||
|
||||
// Clear inflight
|
||||
_inflightConsumers.TryRemove(inflightKey, out _);
|
||||
RemoveInflightConsumerProposal("$G", streamName, consumerName);
|
||||
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
@@ -264,6 +406,166 @@ public sealed class JetStreamMetaGroup
|
||||
return Task.CompletedTask;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Validated assignment processing
|
||||
// Go reference: jetstream_cluster.go:4541-5925
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
/// <summary>
|
||||
/// Validates and processes a stream assignment.
|
||||
/// Returns false if the assignment is invalid (empty name, null group).
|
||||
/// Idempotent: duplicate assignments for the same stream name are accepted.
|
||||
/// Go reference: jetstream_cluster.go:4541 processStreamAssignment.
|
||||
/// </summary>
|
||||
public bool ProcessStreamAssignment(StreamAssignment sa)
|
||||
{
|
||||
if (string.IsNullOrEmpty(sa.StreamName) || sa.Group == null)
|
||||
return false;
|
||||
|
||||
AddStreamAssignment(sa);
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Updates an existing stream assignment's configuration.
|
||||
/// Returns false if the stream does not exist.
|
||||
/// Go reference: jetstream_cluster.go processUpdateStreamAssignment.
|
||||
/// </summary>
|
||||
public bool ProcessUpdateStreamAssignment(StreamAssignment sa)
|
||||
{
|
||||
if (!_assignments.TryGetValue(sa.StreamName, out var existing))
|
||||
return false;
|
||||
|
||||
// Update the config while preserving consumers and other state
|
||||
var updated = new StreamAssignment
|
||||
{
|
||||
StreamName = sa.StreamName,
|
||||
Group = sa.Group,
|
||||
ConfigJson = sa.ConfigJson,
|
||||
Created = existing.Created,
|
||||
SyncSubject = existing.SyncSubject,
|
||||
};
|
||||
// Copy consumers from old to new
|
||||
foreach (var (name, ca) in existing.Consumers)
|
||||
updated.Consumers[name] = ca;
|
||||
|
||||
_assignments[sa.StreamName] = updated;
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Removes a stream and all its consumers.
|
||||
/// Returns false if stream didn't exist. Returns true if removed.
|
||||
/// Go reference: jetstream_cluster.go processStreamRemoval.
|
||||
/// </summary>
|
||||
public bool ProcessStreamRemoval(string streamName)
|
||||
{
|
||||
if (!_assignments.ContainsKey(streamName))
|
||||
return false;
|
||||
|
||||
RemoveStreamAssignment(streamName);
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Validates and processes a consumer assignment.
|
||||
/// Returns false if the parent stream does not exist or consumer name is empty.
|
||||
/// Go reference: jetstream_cluster.go:5300 processConsumerAssignment.
|
||||
/// </summary>
|
||||
public bool ProcessConsumerAssignment(ConsumerAssignment ca)
|
||||
{
|
||||
if (string.IsNullOrEmpty(ca.ConsumerName) || string.IsNullOrEmpty(ca.StreamName))
|
||||
return false;
|
||||
|
||||
if (!_assignments.ContainsKey(ca.StreamName))
|
||||
return false;
|
||||
|
||||
AddConsumerAssignment(ca.StreamName, ca);
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Removes a consumer assignment.
|
||||
/// Returns false if stream or consumer doesn't exist.
|
||||
/// Go reference: jetstream_cluster.go processConsumerRemoval.
|
||||
/// </summary>
|
||||
public bool ProcessConsumerRemoval(string streamName, string consumerName)
|
||||
{
|
||||
if (!_assignments.TryGetValue(streamName, out var sa))
|
||||
return false;
|
||||
|
||||
if (!sa.Consumers.ContainsKey(consumerName))
|
||||
return false;
|
||||
|
||||
RemoveConsumerAssignment(streamName, consumerName);
|
||||
return true;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// Monitor-facing mutation methods
|
||||
// Called by JetStreamClusterMonitor when processing committed RAFT entries.
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
/// <summary>
|
||||
/// Directly adds a stream assignment to the meta-group state.
|
||||
/// Used by the cluster monitor when processing RAFT entries.
|
||||
/// </summary>
|
||||
public void AddStreamAssignment(StreamAssignment sa)
|
||||
{
|
||||
_streams[sa.StreamName] = 0;
|
||||
_assignments[sa.StreamName] = sa;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Removes a stream assignment from the meta-group state.
|
||||
/// Used by the cluster monitor when processing RAFT entries.
|
||||
/// </summary>
|
||||
public void RemoveStreamAssignment(string streamName)
|
||||
{
|
||||
ApplyStreamDelete(streamName);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Adds a consumer assignment to a stream's assignment.
|
||||
/// Increments the total consumer count if the consumer is new.
|
||||
/// </summary>
|
||||
public void AddConsumerAssignment(string streamName, ConsumerAssignment ca)
|
||||
{
|
||||
if (_assignments.TryGetValue(streamName, out var sa))
|
||||
{
|
||||
var isNew = !sa.Consumers.ContainsKey(ca.ConsumerName);
|
||||
sa.Consumers[ca.ConsumerName] = ca;
|
||||
if (isNew)
|
||||
Interlocked.Increment(ref _totalConsumerCount);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Removes a consumer assignment from a stream.
|
||||
/// </summary>
|
||||
public void RemoveConsumerAssignment(string streamName, string consumerName)
|
||||
{
|
||||
ApplyConsumerDelete(streamName, consumerName);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Replaces all assignments atomically (used for snapshot apply).
|
||||
/// Go reference: jetstream_cluster.go meta snapshot restore.
|
||||
/// </summary>
|
||||
public void ReplaceAllAssignments(Dictionary<string, StreamAssignment> newState)
|
||||
{
|
||||
_assignments.Clear();
|
||||
_streams.Clear();
|
||||
_totalConsumerCount = 0;
|
||||
|
||||
foreach (var (name, sa) in newState)
|
||||
{
|
||||
_assignments[name] = sa;
|
||||
_streams[name] = 0;
|
||||
_totalConsumerCount += sa.Consumers.Count;
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
// ApplyEntry dispatch
|
||||
// Go reference: jetstream_cluster.go RAFT apply for meta group
|
||||
@@ -347,9 +649,32 @@ public sealed class JetStreamMetaGroup
|
||||
};
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Fired when leadership changes. Argument is true when becoming leader, false when stepping down.
|
||||
/// Go reference: jetstream_cluster.go processLeaderChange callback.
|
||||
/// </summary>
|
||||
public event Action<bool>? OnLeaderChange;
|
||||
|
||||
/// <summary>
/// Processes a leadership change event.
/// Stepping down clears all inflight proposals; in every case the
/// <see cref="OnLeaderChange"/> event is raised afterwards.
/// Go reference: jetstream_cluster.go:7001-7074 processLeaderChange.
/// </summary>
public void ProcessLeaderChange(bool isLeader)
{
    // Pending proposals from a demoted leader can no longer be committed
    // by this node — drop them before notifying subscribers.
    if (!isLeader)
        ClearAllInflight();

    OnLeaderChange?.Invoke(isLeader);
}
|
||||
|
||||
/// <summary>
|
||||
/// Steps down the current leader, rotating to the next node.
|
||||
/// Clears all inflight proposals on leader change.
|
||||
/// Clears all inflight proposals on leader change via ProcessLeaderChange.
|
||||
/// Go reference: jetstream_cluster.go leader stepdown, clear inflight.
|
||||
/// </summary>
|
||||
public void StepDown()
|
||||
@@ -360,10 +685,7 @@ public sealed class JetStreamMetaGroup
|
||||
|
||||
Interlocked.Increment(ref _leadershipVersion);
|
||||
|
||||
// Clear inflight on leader change
|
||||
// Go reference: jetstream_cluster.go -- inflight entries are cleared when leadership changes.
|
||||
_inflightStreams.Clear();
|
||||
_inflightConsumers.Clear();
|
||||
ProcessLeaderChange(isLeader: false);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
@@ -452,3 +774,11 @@ public sealed class MetaGroupState
|
||||
/// </summary>
|
||||
public int ConsumerCount { get; init; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Tracks an inflight stream or consumer proposal with ops counting.
|
||||
/// OpsCount increments on duplicate proposals so that each proposer must
|
||||
/// independently call Remove before the entry is cleared.
|
||||
/// Go reference: jetstream_cluster.go inflight proposal tracking.
|
||||
/// </summary>
|
||||
public record InflightInfo(int OpsCount, bool Deleted, StreamAssignment? Assignment);
|
||||
|
||||
60
src/NATS.Server/JetStream/Cluster/MetaSnapshotCodec.cs
Normal file
60
src/NATS.Server/JetStream/Cluster/MetaSnapshotCodec.cs
Normal file
@@ -0,0 +1,60 @@
|
||||
using System.Buffers.Binary;
|
||||
using System.Text.Json;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Binary codec for meta-group snapshots.
/// Layout: a 2-byte little-endian version header followed by the
/// S2-compressed JSON encoding of the stream assignment map.
/// Go reference: jetstream_cluster.go:2075-2145 (encodeMetaSnapshot/decodeMetaSnapshot)
/// </summary>
internal static class MetaSnapshotCodec
{
    private const ushort CurrentVersion = 1;

    // Populate lets System.Text.Json fill the getter-only Consumers dictionary
    // on StreamAssignment in place instead of requiring a settable property.
    // Go reference: jetstream_cluster.go streamAssignment consumers map restoration.
    private static readonly JsonSerializerOptions SerializerOptions = new()
    {
        PreferredObjectCreationHandling = System.Text.Json.Serialization.JsonObjectCreationHandling.Populate,
    };

    /// <summary>
    /// Serializes <paramref name="assignments"/> to JSON, compresses with S2,
    /// and prefixes the 2-byte version header.
    /// Go reference: jetstream_cluster.go:2075 encodeMetaSnapshot.
    /// </summary>
    public static byte[] Encode(Dictionary<string, StreamAssignment> assignments)
    {
        var payload = S2Codec.Compress(JsonSerializer.SerializeToUtf8Bytes(assignments, SerializerOptions));

        var buffer = new byte[payload.Length + 2];
        BinaryPrimitives.WriteUInt16LittleEndian(buffer, CurrentVersion);
        payload.CopyTo(buffer, 2);
        return buffer;
    }

    /// <summary>
    /// Reverses <see cref="Encode"/>: validates the version header, decompresses,
    /// and deserializes the assignment map (an empty map when JSON yields null).
    /// Go reference: jetstream_cluster.go:2100 decodeMetaSnapshot.
    /// </summary>
    /// <exception cref="InvalidOperationException">
    /// Thrown when <paramref name="data"/> is too short or carries an unrecognised version.
    /// </exception>
    public static Dictionary<string, StreamAssignment> Decode(byte[] data)
    {
        if (data.Length < 2)
            throw new InvalidOperationException("Meta snapshot too short to contain version header.");

        var version = BinaryPrimitives.ReadUInt16LittleEndian(data);
        if (version != CurrentVersion)
            throw new InvalidOperationException($"Unknown meta snapshot version: {version}");

        var json = S2Codec.Decompress(data.AsSpan(2));
        var map = JsonSerializer.Deserialize<Dictionary<string, StreamAssignment>>(json, SerializerOptions);
        return map ?? new Dictionary<string, StreamAssignment>();
    }
}
|
||||
@@ -8,14 +8,21 @@ using NATS.Server.Subscriptions;
|
||||
|
||||
namespace NATS.Server.JetStream;
|
||||
|
||||
public sealed class ConsumerManager
|
||||
public sealed class ConsumerManager : IDisposable
|
||||
{
|
||||
private readonly JetStreamMetaGroup? _metaGroup;
|
||||
private readonly ConcurrentDictionary<(string Stream, string Name), ConsumerHandle> _consumers = new();
|
||||
private readonly ConcurrentDictionary<string, ulong> _ackFloors = new(StringComparer.Ordinal);
|
||||
private readonly ConcurrentDictionary<(string Stream, string Name), Timer> _resumeTimers = new();
|
||||
private readonly PullConsumerEngine _pullConsumerEngine = new();
|
||||
private readonly PushConsumerEngine _pushConsumerEngine = new();
|
||||
|
||||
/// <summary>
|
||||
/// Raised when a consumer is automatically resumed by the deadline timer.
|
||||
/// Arguments are (stream, durableName).
|
||||
/// </summary>
|
||||
public event EventHandler<(string Stream, string Name)>? OnAutoResumed;
|
||||
|
||||
public ConsumerManager(JetStreamMetaGroup? metaGroup = null)
|
||||
{
|
||||
_metaGroup = metaGroup;
|
||||
@@ -77,6 +84,7 @@ public sealed class ConsumerManager
|
||||
|
||||
/// <summary>
/// Deletes a consumer, cancelling any pending auto-resume timer first.
/// Returns false when the consumer was not registered.
/// </summary>
public bool Delete(string stream, string durableName)
{
    var key = (stream, durableName);
    CancelResumeTimer(key);
    return _consumers.TryRemove(key, out _);
}
|
||||
|
||||
@@ -93,9 +101,120 @@ public sealed class ConsumerManager
|
||||
return false;
|
||||
|
||||
handle.Paused = paused;
|
||||
if (!paused)
|
||||
{
|
||||
handle.PauseUntilUtc = null;
|
||||
CancelResumeTimer((stream, durableName));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/// <summary>
/// Pause a consumer until <paramref name="pauseUntilUtc"/>.
/// A one-shot background timer auto-resumes the consumer once the deadline
/// passes; a deadline already in the past resumes immediately.
/// Returns false when the consumer is unknown.
/// Go reference: consumer.go (pauseConsumer).
/// </summary>
public bool Pause(string stream, string durableName, DateTime pauseUntilUtc)
{
    if (!_consumers.TryGetValue((stream, durableName), out var handle))
        return false;

    handle.Paused = true;
    handle.PauseUntilUtc = pauseUntilUtc;

    // Cancel any existing timer for this consumer before scheduling a new one.
    CancelResumeTimer((stream, durableName));

    var delay = pauseUntilUtc - DateTime.UtcNow;
    if (delay <= TimeSpan.Zero)
    {
        // Deadline already passed — resume immediately.
        AutoResume(stream, durableName);
    }
    else
    {
        var key = (stream, durableName);
        // Fix: create the timer disarmed, register it, then arm it. Arming
        // before registration lets a very short deadline fire AutoResume
        // before the timer is in _resumeTimers, leaving a stale entry behind.
        var timer = new Timer(_ => AutoResume(key.stream, key.durableName),
            state: null, dueTime: Timeout.InfiniteTimeSpan, period: Timeout.InfiniteTimeSpan);
        _resumeTimers[key] = timer;
        timer.Change(delay, Timeout.InfiniteTimeSpan);
    }

    return true;
}
|
||||
|
||||
/// <summary>
/// Explicitly resume a paused consumer, cancelling any pending auto-resume timer.
/// Returns false when the consumer is unknown.
/// Go reference: consumer.go (resumeConsumer).
/// </summary>
public bool Resume(string stream, string durableName)
{
    var key = (stream, durableName);
    if (!_consumers.TryGetValue(key, out var handle))
        return false;

    handle.Paused = false;
    handle.PauseUntilUtc = null;
    CancelResumeTimer(key);
    return true;
}
|
||||
|
||||
/// <summary>
/// Returns true when the consumer is paused and the deadline (if set) has not
/// yet passed. A lapsed deadline auto-resumes the consumer and returns false.
/// Go reference: consumer.go (isPaused).
/// </summary>
public bool IsPaused(string stream, string durableName)
{
    if (!_consumers.TryGetValue((stream, durableName), out var handle) || !handle.Paused)
        return false;

    // Lazy expiry: a past deadline flips the consumer back to running here.
    if (handle.PauseUntilUtc is { } until && until <= DateTime.UtcNow)
    {
        AutoResume(stream, durableName);
        return false;
    }

    return true;
}
|
||||
|
||||
/// <summary>
/// Returns the UTC deadline until which the consumer is paused, or null when
/// the consumer is unknown or has no deadline.
/// Go reference: consumer.go (pauseUntil).
/// </summary>
public DateTime? GetPauseUntil(string stream, string durableName)
    => _consumers.TryGetValue((stream, durableName), out var handle)
        ? handle.PauseUntilUtc
        : null;
|
||||
|
||||
// Resumes a consumer on behalf of the deadline timer (or an already-lapsed
// deadline), then notifies OnAutoResumed listeners.
private void AutoResume(string stream, string durableName)
{
    var key = (stream, durableName);
    if (!_consumers.TryGetValue(key, out var handle))
        return;

    handle.Paused = false;
    handle.PauseUntilUtc = null;
    CancelResumeTimer(key);
    OnAutoResumed?.Invoke(this, key);
}
|
||||
|
||||
// Removes and disposes the auto-resume timer for a consumer, if one exists.
private void CancelResumeTimer((string Stream, string Name) key)
{
    if (!_resumeTimers.TryRemove(key, out var timer))
        return;

    timer.Dispose();
}
|
||||
|
||||
// Releases every outstanding auto-resume timer.
public void Dispose()
{
    foreach (var key in _resumeTimers.Keys)
        CancelResumeTimer(key);
    _resumeTimers.Clear();
}
|
||||
|
||||
public bool Reset(string stream, string durableName)
|
||||
{
|
||||
if (!_consumers.TryGetValue((stream, durableName), out var handle))
|
||||
@@ -191,6 +310,12 @@ public sealed record ConsumerHandle(string Stream, ConsumerConfig Config)
|
||||
{
|
||||
public ulong NextSequence { get; set; } = 1;
|
||||
public bool Paused { get; set; }
|
||||
|
||||
/// <summary>
|
||||
/// UTC deadline until which this consumer is paused. Null means pause indefinitely
|
||||
/// (until explicitly resumed). Go reference: consumer.go pauseUntil field.
|
||||
/// </summary>
|
||||
public DateTime? PauseUntilUtc { get; set; }
|
||||
public Queue<StoredMessage> Pending { get; } = new();
|
||||
public Queue<PushFrame> PushFrames { get; } = new();
|
||||
public AckProcessor AckProcessor { get; } = new();
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
// Go: consumer.go (processAckMsg, processNak, processTerm, processAckProgress)
|
||||
namespace NATS.Server.JetStream.Consumers;
|
||||
|
||||
// Go: consumer.go:2550 — ack type prefix constants (+ACK, -NAK, +TERM, +WPI).
// Unknown is returned by ParseAckType for unrecognized payload prefixes.
public enum AckType { Ack, Nak, Term, Progress, Unknown }
|
||||
|
||||
public sealed class AckProcessor
|
||||
{
|
||||
// Go: consumer.go — ackTerminatedFlag marks sequences that must not be redelivered
|
||||
@@ -9,6 +12,13 @@ public sealed class AckProcessor
|
||||
private readonly int[]? _backoffMs;
|
||||
private int _ackWaitMs;
|
||||
|
||||
// Fields for the RedeliveryTracker constructor overload
|
||||
private readonly RedeliveryTracker? _tracker;
|
||||
private readonly int _maxAckPending;
|
||||
|
||||
// Stores deliver subjects keyed by sequence for tracker-based registrations
|
||||
private readonly Dictionary<ulong, string>? _deliverSubjects;
|
||||
|
||||
public ulong AckFloor { get; private set; }
|
||||
public int TerminatedCount { get; private set; }
|
||||
|
||||
@@ -17,6 +27,15 @@ public sealed class AckProcessor
|
||||
_backoffMs = backoffMs;
|
||||
}
|
||||
|
||||
// Go: consumer.go — ConsumerConfig maxAckPending + RedeliveryTracker integration.
// Backoff is delegated to the tracker, so no local backoff array is kept.
public AckProcessor(RedeliveryTracker tracker, int maxAckPending = 0)
{
    _tracker = tracker;
    _maxAckPending = maxAckPending;
    _backoffMs = null;
    _deliverSubjects = new Dictionary<ulong, string>();
}
|
||||
|
||||
public void Register(ulong sequence, int ackWaitMs)
|
||||
{
|
||||
if (sequence <= AckFloor)
|
||||
@@ -34,6 +53,52 @@ public sealed class AckProcessor
|
||||
};
|
||||
}
|
||||
|
||||
// Go: consumer.go — register with deliver subject; ackWait comes from the tracker.
public void Register(ulong sequence, string deliverSubject)
{
    if (_tracker is null)
        throw new InvalidOperationException("Register(ulong, string) requires a RedeliveryTracker constructor.");

    // First delivery — clamp the tracker's backoff to at least 1 ms.
    var waitMs = (int)Math.Max(_tracker.GetBackoffDelay(1), 1);
    Register(sequence, waitMs);

    if (_deliverSubjects is { } subjects)
        subjects[sequence] = deliverSubject;
}
|
||||
|
||||
// Go: consumer.go — processAck without payload: plain +ACK, also notifies tracker.
// Advances local ack state via AckSequence, then removes the sequence from the
// redelivery tracker when one was supplied at construction.
public void ProcessAck(ulong seq)
{
    AckSequence(seq);
    _tracker?.Acknowledge(seq);
}
|
||||
|
||||
// Go: consumer.go — returns ack deadline for a pending sequence; MinValue if not tracked.
public DateTimeOffset GetDeadline(ulong seq)
    => _pending.TryGetValue(seq, out var state)
        ? new DateTimeOffset(state.DeadlineUtc, TimeSpan.Zero)
        : DateTimeOffset.MinValue;
|
||||
|
||||
// Go: consumer.go — maxAckPending=0 means unlimited; otherwise cap pending registrations.
public bool CanRegister()
{
    if (_maxAckPending <= 0)
        return true;

    return _pending.Count < _maxAckPending;
}
|
||||
|
||||
// Go: consumer.go:2550 — parse ack type prefix from raw payload bytes.
// None of the four prefixes is a prefix of another, so check order is free.
public static AckType ParseAckType(ReadOnlySpan<byte> data)
{
    if (data.StartsWith("+WPI"u8))
        return AckType.Progress;
    if (data.StartsWith("+TERM"u8))
        return AckType.Term;
    if (data.StartsWith("-NAK"u8))
        return AckType.Nak;
    if (data.StartsWith("+ACK"u8))
        return AckType.Ack;

    return AckType.Unknown;
}
|
||||
|
||||
public bool TryGetExpired(out ulong sequence, out int deliveries)
|
||||
{
|
||||
foreach (var (seq, state) in _pending)
|
||||
|
||||
@@ -92,10 +92,61 @@ public sealed class PriorityGroupManager
|
||||
return active != null && string.Equals(active, consumerId, StringComparison.Ordinal);
|
||||
}
|
||||
|
||||
/// <summary>
/// Assign a new pin ID to the named group, replacing any existing pin.
/// Go reference: consumer.go (assignNewPinId).
/// </summary>
/// <returns>The newly generated 22-character pin ID, or empty when the group is unknown.</returns>
public string AssignPinId(string groupName, string consumerId)
{
    // NOTE(review): consumerId is not used here — presumably kept for parity
    // with the Go signature; confirm before removing.
    if (!_groups.TryGetValue(groupName, out var group))
        return string.Empty;

    // 32 hex chars from the GUID, truncated to the 22-char pin length.
    var pinId = Guid.NewGuid().ToString("N")[..22];
    lock (group.Lock)
    {
        group.CurrentPinId = pinId;
    }

    return pinId;
}
|
||||
|
||||
/// <summary>
/// Returns <c>true</c> if the group exists and its current pin ID equals <paramref name="pinId"/>.
/// Go reference: consumer.go (setPinnedTimer).
/// </summary>
public bool ValidatePinId(string groupName, string pinId)
{
    if (!_groups.TryGetValue(groupName, out var group))
        return false;

    lock (group.Lock)
    {
        var current = group.CurrentPinId;
        return current is not null && string.Equals(current, pinId, StringComparison.Ordinal);
    }
}
|
||||
|
||||
/// <summary>
/// Clear the current pin ID for the named group. No-op if the group does not exist.
/// Go reference: consumer.go (setPinnedTimer).
/// </summary>
public void UnassignPinId(string groupName)
{
    if (_groups.TryGetValue(groupName, out var group))
    {
        lock (group.Lock)
        {
            group.CurrentPinId = null;
        }
    }
}
|
||||
|
||||
// Per-group state: registered members and the currently pinned consumer.
private sealed class PriorityGroup
{
    // Dedicated gate object — callers lock this, never the group instance.
    public object Lock { get; } = new();
    public List<PriorityMember> Members { get; } = [];
    // Null when no consumer is currently pinned; guarded by Lock at the
    // visible call sites (AssignPinId/ValidatePinId/UnassignPinId).
    public string? CurrentPinId { get; set; }
}
|
||||
|
||||
// Value pair of a consumer ID and its priority within a group.
private record struct PriorityMember(string ConsumerId, int Priority);
|
||||
|
||||
@@ -7,14 +7,37 @@ namespace NATS.Server.JetStream.Consumers;
|
||||
public sealed class RedeliveryTracker
|
||||
{
|
||||
private readonly int[] _backoffMs;
|
||||
private readonly long[]? _backoffMsLong;
|
||||
|
||||
// Go: consumer.go — pending maps sseq → (deadline, deliveries)
|
||||
private readonly Dictionary<ulong, RedeliveryEntry> _entries = new();
|
||||
|
||||
// Go: consumer.go — rdc map tracks per-sequence delivery counts
|
||||
private readonly Dictionary<ulong, int> _deliveryCounts = new();
|
||||
|
||||
// Go: consumer.go — rdq priority queue ordered by deadline for efficient dispatch
|
||||
private readonly PriorityQueue<ulong, DateTimeOffset> _priorityQueue = new();
|
||||
|
||||
// Stored config for the new constructor overload
|
||||
private readonly int _maxDeliveries;
|
||||
private readonly long _ackWaitMs;
|
||||
|
||||
// Go: consumer.go:100 — BackOff []time.Duration in ConsumerConfig; empty falls back to ackWait.
public RedeliveryTracker(int[] backoffMs)
{
    _backoffMs = backoffMs;
    _backoffMsLong = null;
    _ackWaitMs = 0;
    _maxDeliveries = 0;
}
|
||||
|
||||
// Go: consumer.go — ConsumerConfig maxDeliver + ackWait + backoff, new overload storing config fields.
public RedeliveryTracker(int maxDeliveries, long ackWaitMs, long[]? backoffMs = null)
{
    _maxDeliveries = maxDeliveries;
    _ackWaitMs = ackWaitMs;
    _backoffMsLong = backoffMs;
    _backoffMs = [];
}
|
||||
|
||||
// Go: consumer.go:5540 — trackPending records delivery count and schedules deadline
|
||||
@@ -34,6 +57,13 @@ public sealed class RedeliveryTracker
|
||||
return deadline;
|
||||
}
|
||||
|
||||
// Go: consumer.go — schedule with an explicit deadline into the priority queue.
public void Schedule(ulong seq, DateTimeOffset deadline)
{
    // TryAdd ensures a delivery-count entry exists without resetting one.
    _deliveryCounts.TryAdd(seq, 0);
    _priorityQueue.Enqueue(seq, deadline);
}
|
||||
|
||||
// Go: consumer.go — rdq entries are dispatched once their deadline has passed
|
||||
public IReadOnlyList<ulong> GetDue()
|
||||
{
|
||||
@@ -52,8 +82,53 @@ public sealed class RedeliveryTracker
|
||||
return due ?? (IReadOnlyList<ulong>)[];
|
||||
}
|
||||
|
||||
// Go: consumer.go — drain the rdq priority queue of all entries whose deadline <= now,
// returning them in deadline order (earliest first).
public IEnumerable<ulong> GetDue(DateTimeOffset now)
{
    var due = new List<ulong>();
    List<(ulong Seq, DateTimeOffset Deadline)>? notYetDue = null;

    // PriorityQueue has no ordered in-place scan, so drain everything and
    // partition into due vs. future entries.
    while (_priorityQueue.TryDequeue(out var seq, out var deadline))
    {
        // Acknowledged sequences were dropped from _deliveryCounts and are
        // lazily skipped here.
        if (!_deliveryCounts.ContainsKey(seq))
            continue;

        if (deadline > now)
        {
            (notYetDue ??= []).Add((seq, deadline));
        }
        else
        {
            // Min-deadline dequeue order keeps `due` sorted earliest-first.
            due.Add(seq);
        }
    }

    // Re-enqueue the future entries with their original deadlines.
    if (notYetDue is not null)
    {
        foreach (var (seq, deadline) in notYetDue)
            _priorityQueue.Enqueue(seq, deadline);
    }

    return due;
}
|
||||
|
||||
// Go: consumer.go — acking a sequence removes it from the pending redelivery set
|
||||
public void Acknowledge(ulong seq) => _entries.Remove(seq);
|
||||
public void Acknowledge(ulong seq)
{
    _entries.Remove(seq);
    // Removing the delivery count makes GetDue lazily discard any stale
    // priority-queue entry for this sequence.
    _deliveryCounts.Remove(seq);
}
|
||||
|
||||
// Go: consumer.go — maxdeliver check: drop sequence once delivery count exceeds max
|
||||
public bool IsMaxDeliveries(ulong seq, int maxDeliver)
|
||||
@@ -67,6 +142,36 @@ public sealed class RedeliveryTracker
|
||||
return entry.DeliveryCount >= maxDeliver;
|
||||
}
|
||||
|
||||
// Go: consumer.go — maxdeliver check using the stored _maxDeliveries from new constructor.
public bool IsMaxDeliveries(ulong seq)
{
    if (_maxDeliveries <= 0)
        return false; // zero or negative means unlimited deliveries

    return _deliveryCounts.TryGetValue(seq, out var count) && count >= _maxDeliveries;
}
|
||||
|
||||
// Go: consumer.go — rdc map increment: track how many times a sequence has been delivered.
public void IncrementDeliveryCount(ulong seq)
{
    // Missing key reads as 0, so the first delivery stores 1.
    _deliveryCounts.TryGetValue(seq, out var current);
    _deliveryCounts[seq] = current + 1;
}
|
||||
|
||||
// Go: consumer.go — backoff delay lookup: index by deliveryCount, clamp to last entry,
// fall back to ackWait when no backoff array is configured.
public long GetBackoffDelay(int deliveryCount)
{
    // First delivery maps to index 0; counts past the end reuse the last delay.
    if (_backoffMsLong is { Length: > 0 })
    {
        var idx = Math.Clamp(deliveryCount - 1, 0, _backoffMsLong.Length - 1);
        return _backoffMsLong[idx];
    }

    // Fix: instances built with the int[] backoff constructor previously fell
    // straight through to _ackWaitMs (0 for that constructor) and their
    // configured backoff array was never consulted.
    if (_backoffMs is { Length: > 0 })
    {
        var idx = Math.Clamp(deliveryCount - 1, 0, _backoffMs.Length - 1);
        return _backoffMs[idx];
    }

    return _ackWaitMs;
}
|
||||
|
||||
// True while the sequence still has a pending (un-acked) entry.
public bool IsTracking(ulong seq) => _entries.ContainsKey(seq);

// Number of sequences currently pending acknowledgement.
public int TrackedCount => _entries.Count;
|
||||
|
||||
65
src/NATS.Server/JetStream/Consumers/WaitingRequestQueue.cs
Normal file
65
src/NATS.Server/JetStream/Consumers/WaitingRequestQueue.cs
Normal file
@@ -0,0 +1,65 @@
|
||||
namespace NATS.Server.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// A pull request with mutable batch and byte tracking for delivery fulfillment.
/// Go reference: consumer.go waitingRequest / processNextMsgRequest.
/// </summary>
public sealed record PullRequest(
    string ReplyTo,
    int Batch,
    long MaxBytes,
    DateTimeOffset Expires,
    bool NoWait,
    string? PinId = null)
{
    /// <summary>Messages still deliverable under this request's batch limit.</summary>
    public int RemainingBatch { get; private set; } = Batch;

    /// <summary>Bytes still deliverable; only enforced when MaxBytes &gt; 0.</summary>
    public long RemainingBytes { get; private set; } = MaxBytes;

    /// <summary>True once the batch, or the byte budget (when enforced), runs out.</summary>
    public bool IsExhausted
    {
        get
        {
            if (RemainingBatch <= 0)
                return true;
            return MaxBytes > 0 && RemainingBytes <= 0;
        }
    }

    /// <summary>Account for one delivered message.</summary>
    public void ConsumeBatch() => RemainingBatch -= 1;

    /// <summary>Account for <paramref name="bytes"/> delivered bytes.</summary>
    public void ConsumeBytes(long bytes) => RemainingBytes -= bytes;
}

/// <summary>
/// FIFO queue of pull requests with expiry support.
/// Unlike PullRequestWaitQueue (priority-based), this is a simple FIFO with
/// RemoveExpired cleanup and mutable request tracking.
/// Go reference: consumer.go waitQueue / processNextMsgRequest.
/// </summary>
public sealed class WaitingRequestQueue
{
    private readonly LinkedList<PullRequest> _queue = new();

    /// <summary>Number of queued requests.</summary>
    public int Count => _queue.Count;

    /// <summary>True when no requests are queued.</summary>
    public bool IsEmpty => Count == 0;

    /// <summary>Appends a request at the tail of the queue.</summary>
    public void Enqueue(PullRequest request) => _queue.AddLast(request);

    /// <summary>Removes and returns the oldest request, or null when empty.</summary>
    public PullRequest? TryDequeue()
    {
        var head = _queue.First;
        if (head is null)
            return null;

        _queue.Remove(head);
        return head.Value;
    }

    /// <summary>Drops every queued request whose Expires deadline is at or before <paramref name="now"/>.</summary>
    public void RemoveExpired(DateTimeOffset now)
    {
        for (var node = _queue.First; node is not null;)
        {
            var following = node.Next;
            if (node.Value.Expires <= now)
                _queue.Remove(node);
            node = following;
        }
    }
}
|
||||
75
src/NATS.Server/JetStream/InterestRetentionPolicy.cs
Normal file
75
src/NATS.Server/JetStream/InterestRetentionPolicy.cs
Normal file
@@ -0,0 +1,75 @@
|
||||
using NATS.Server.Subscriptions;
|
||||
|
||||
namespace NATS.Server.JetStream;
|
||||
|
||||
/// <summary>
/// Tracks per-consumer interest and determines when messages can be removed
/// under Interest retention policy. A message should be retained until all
/// interested consumers have acknowledged it.
/// Go reference: stream.go checkInterestState/noInterest.
/// </summary>
public sealed class InterestRetentionPolicy
{
    // consumer → filter subject pattern
    private readonly Dictionary<string, string> _interests = new(StringComparer.Ordinal);

    // seq → consumers that have acked that sequence
    private readonly Dictionary<ulong, HashSet<string>> _acks = new();

    /// <summary>Register a consumer's interest in a subject pattern.</summary>
    public void RegisterInterest(string consumer, string filterSubject)
        => _interests[consumer] = filterSubject;

    /// <summary>Remove a consumer's interest (e.g., on deletion).</summary>
    public void UnregisterInterest(string consumer) => _interests.Remove(consumer);

    /// <summary>Record that a consumer has acknowledged delivery of a sequence.</summary>
    public void AcknowledgeDelivery(string consumer, ulong seq)
    {
        if (_acks.TryGetValue(seq, out var ackedBy))
        {
            ackedBy.Add(consumer);
            return;
        }

        _acks[seq] = new HashSet<string>(StringComparer.Ordinal) { consumer };
    }

    /// <summary>
    /// Returns true if the message should be retained (i.e., at least one
    /// interested consumer has NOT yet acknowledged it).
    /// A consumer is "interested" if its filter subject matches the message subject.
    /// As a side effect, the per-sequence ack set is dropped once nothing
    /// requires retention.
    /// </summary>
    public bool ShouldRetain(ulong seq, string msgSubject)
    {
        _acks.TryGetValue(seq, out var ackedBy);

        foreach (var (consumer, filterSubject) in _interests)
        {
            // Only consumers whose filter matches the subject count.
            if (!SubjectMatch.MatchLiteral(msgSubject, filterSubject))
                continue;

            var hasAcked = ackedBy != null && ackedBy.Contains(consumer);
            if (!hasAcked)
                return true; // an interested consumer still owes an ack
        }

        // Every interested consumer has acked (or nobody was interested) —
        // ack tracking for this sequence is no longer needed.
        _acks.Remove(seq);
        return false;
    }

    /// <summary>Number of registered consumers.</summary>
    public int ConsumerCount => _interests.Count;
}
|
||||
@@ -58,12 +58,44 @@ public sealed class MirrorCoordinator : IAsyncDisposable
|
||||
/// </summary>
|
||||
public ulong Lag { get; private set; }
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Gap detection properties
|
||||
// Go reference: server/stream.go:2863-3014 (processInboundMirrorMsg gap handling)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// <summary>Whether a sequence gap has been detected in the origin stream.</summary>
|
||||
public bool HasGap => _hasGap;
|
||||
|
||||
/// <summary>First missing sequence number when a gap is detected.</summary>
|
||||
public ulong GapStart => _gapStart;
|
||||
|
||||
/// <summary>Last missing sequence number when a gap is detected.</summary>
|
||||
public ulong GapEnd => _gapEnd;
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Error state properties
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// <summary>Whether the coordinator is in an error state.</summary>
|
||||
public bool HasError => _errorMessage is not null;
|
||||
|
||||
/// <summary>Current error message, or null if no error.</summary>
|
||||
public string? ErrorMessage => _errorMessage;
|
||||
|
||||
// Go: mirror.sseq — stream sequence tracking for gap detection
|
||||
private ulong _expectedOriginSeq;
|
||||
|
||||
// Go: mirror.dseq — delivery sequence tracking
|
||||
private ulong _deliverySeq;
|
||||
|
||||
// Gap detection state
|
||||
private ulong _gapStart;
|
||||
private ulong _gapEnd;
|
||||
private bool _hasGap;
|
||||
|
||||
// Error state tracking
|
||||
private string? _errorMessage;
|
||||
|
||||
public MirrorCoordinator(IStreamStore targetStore)
|
||||
{
|
||||
_targetStore = targetStore;
|
||||
@@ -175,6 +207,74 @@ public sealed class MirrorCoordinator : IAsyncDisposable
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Public retry / failure tracking API
|
||||
// Go reference: server/stream.go:3478-3505 (calculateRetryBackoff),
|
||||
// server/stream.go:3125-3400 (setupMirrorConsumer retry logic)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// <summary>
/// Increments the consecutive failure counter; call when a sync attempt fails.
/// </summary>
public void RecordFailure()
{
    lock (_gate)
    {
        _consecutiveFailures++;
    }
}
|
||||
|
||||
/// <summary>
/// Resets the consecutive failure counter to zero; call when a sync attempt succeeds.
/// </summary>
public void RecordSuccess()
{
    lock (_gate)
    {
        _consecutiveFailures = 0;
    }
}
|
||||
|
||||
/// <summary>
/// Returns the exponential backoff delay for the current failure count,
/// without jitter so that tests get deterministic results.
/// Go reference: server/stream.go:3478-3505 (calculateRetryBackoff)
/// </summary>
public TimeSpan GetRetryDelay()
{
    var failures = ConsecutiveFailures;
    if (failures == 0)
        return InitialRetryDelay;

    // Exponent is capped at 10 and the result clamped to MaxRetryDelay.
    var exponent = Math.Min(failures - 1, 10);
    var delayMs = InitialRetryDelay.TotalMilliseconds * Math.Pow(2, exponent);
    return TimeSpan.FromMilliseconds(Math.Min(delayMs, MaxRetryDelay.TotalMilliseconds));
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Sequence gap detection
|
||||
// Go reference: server/stream.go:2863-3014 (processInboundMirrorMsg)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// <summary>
/// Records the next received sequence number from the origin stream.
/// Sets gap state when one or more sequences were skipped.
/// </summary>
public void RecordSourceSeq(ulong seq)
{
    var expectedNext = _expectedOriginSeq + 1;
    // A gap only exists once at least one sequence has been seen and the
    // new one jumps past the next expected value.
    if (_expectedOriginSeq > 0 && seq > expectedNext)
    {
        _hasGap = true;
        _gapStart = expectedNext;
        _gapEnd = seq - 1;
    }

    _expectedOriginSeq = seq;
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Error state management
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// <summary>Sets the coordinator into an error state with the given message.</summary>
/// <remarks>Single reference-field store; also flips <see cref="HasError"/> to true.</remarks>
public void SetError(string message) => _errorMessage = message;

/// <summary>Clears the error state (HasError becomes false).</summary>
public void ClearError() => _errorMessage = null;
|
||||
|
||||
/// <summary>
|
||||
/// Reports current health state for monitoring.
|
||||
/// Go reference: server/stream.go:2739-2743 (mirrorInfo), 2698-2736 (sourceInfo)
|
||||
|
||||
@@ -411,17 +411,41 @@ public sealed class SourceCoordinator : IAsyncDisposable
|
||||
// Deduplication helpers
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
private bool IsDuplicate(string msgId)
|
||||
/// <summary>
|
||||
/// Returns true if the given message ID is already present in the dedup window.
|
||||
/// Go reference: server/stream.go duplicate window check
|
||||
/// </summary>
|
||||
public bool IsDuplicate(string msgId)
|
||||
{
|
||||
PruneDedupWindowIfNeeded();
|
||||
return _dedupWindow.ContainsKey(msgId);
|
||||
}
|
||||
|
||||
private void RecordMsgId(string msgId)
|
||||
/// <summary>
|
||||
/// Records a message ID in the dedup window with the current timestamp.
|
||||
/// Go reference: server/stream.go duplicate window tracking
|
||||
/// </summary>
|
||||
public void RecordMsgId(string msgId)
|
||||
{
|
||||
_dedupWindow[msgId] = DateTime.UtcNow;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Removes all dedup window entries whose timestamps are earlier than the given cutoff.
|
||||
/// This is the explicit-cutoff variant intended for testing, as opposed to the internal
|
||||
/// time-based pruning done by <see cref="PruneDedupWindowIfNeeded"/>.
|
||||
/// Go reference: server/stream.go duplicate window pruning
|
||||
/// </summary>
|
||||
public void PruneDedupWindow(DateTimeOffset cutoff)
|
||||
{
|
||||
var cutoffDt = cutoff.UtcDateTime;
|
||||
foreach (var kvp in _dedupWindow)
|
||||
{
|
||||
if (kvp.Value < cutoffDt)
|
||||
_dedupWindow.TryRemove(kvp.Key, out _);
|
||||
}
|
||||
}
|
||||
|
||||
private void PruneDedupWindowIfNeeded()
|
||||
{
|
||||
if (_sourceConfig.DuplicateWindowMs <= 0)
|
||||
|
||||
@@ -35,6 +35,9 @@ public sealed class FileStore : IStreamStore, IAsyncDisposable, IDisposable
|
||||
private ulong _last;
|
||||
private ulong _first; // Go: first.seq — watermark for the first live or expected-first sequence
|
||||
|
||||
// Set to true after Stop() is called. Prevents further writes.
|
||||
private bool _stopped;
|
||||
|
||||
// Resolved at construction time: which format family to use.
|
||||
private readonly bool _useS2; // true -> S2Codec (FSV2 compression path)
|
||||
private readonly bool _useAead; // true -> AeadEncryptor (FSV2 encryption path)
|
||||
@@ -66,6 +69,9 @@ public sealed class FileStore : IStreamStore, IAsyncDisposable, IDisposable
|
||||
|
||||
public async ValueTask<ulong> AppendAsync(string subject, ReadOnlyMemory<byte> payload, CancellationToken ct)
|
||||
{
|
||||
if (_stopped)
|
||||
throw new ObjectDisposedException(nameof(FileStore), "Store has been stopped.");
|
||||
|
||||
// Go: check and remove expired messages before each append.
|
||||
// Reference: golang/nats-server/server/filestore.go — storeMsg, expire check.
|
||||
ExpireFromWheel();
|
||||
@@ -255,6 +261,9 @@ public sealed class FileStore : IStreamStore, IAsyncDisposable, IDisposable
|
||||
/// </summary>
|
||||
public (ulong Seq, long Ts) StoreMsg(string subject, byte[]? hdr, byte[] msg, long ttl)
|
||||
{
|
||||
if (_stopped)
|
||||
throw new ObjectDisposedException(nameof(FileStore), "Store has been stopped.");
|
||||
|
||||
// Go: expire check before each store (same as AppendAsync).
|
||||
// Reference: golang/nats-server/server/filestore.go:6793 (expireMsgs call).
|
||||
ExpireFromWheel();
|
||||
@@ -669,9 +678,9 @@ public sealed class FileStore : IStreamStore, IAsyncDisposable, IDisposable
|
||||
/// Stops the store and deletes all persisted data (blocks, index files).
|
||||
/// Reference: golang/nats-server/server/filestore.go — fileStore.Delete.
|
||||
/// </summary>
|
||||
public void Delete()
|
||||
public void Delete(bool inline = false)
|
||||
{
|
||||
DisposeAllBlocks();
|
||||
Stop();
|
||||
if (Directory.Exists(_options.Directory))
|
||||
{
|
||||
try { Directory.Delete(_options.Directory, recursive: true); }
|
||||
@@ -1183,7 +1192,17 @@ public sealed class FileStore : IStreamStore, IAsyncDisposable, IDisposable
|
||||
if ((flags & EncryptionFlag) != 0)
|
||||
{
|
||||
var key = NormalizeKey(_options.EncryptionKey);
|
||||
data = AeadEncryptor.Decrypt(data, key, _options.Cipher);
|
||||
try
|
||||
{
|
||||
data = AeadEncryptor.Decrypt(data, key, _options.Cipher);
|
||||
}
|
||||
catch (CryptographicException ex)
|
||||
{
|
||||
// AEAD tag verification failed — wrong key or corrupted data.
|
||||
// Wrap as InvalidDataException so RecoverBlocks propagates it
|
||||
// as a fatal key-mismatch error (same behaviour as FSV1 key-hash check).
|
||||
throw new InvalidDataException("AEAD decryption failed: wrong key or corrupted block.", ex);
|
||||
}
|
||||
}
|
||||
|
||||
if ((flags & CompressionFlag) != 0)
|
||||
@@ -1604,6 +1623,162 @@ public sealed class FileStore : IStreamStore, IAsyncDisposable, IDisposable
|
||||
}
|
||||
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Go-parity IStreamStore methods: StoreRawMsg, LoadPrevMsg, Type, Stop
|
||||
// Reference: golang/nats-server/server/filestore.go
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// <summary>
|
||||
/// Stores a message at a caller-specified sequence number and timestamp.
|
||||
/// Used for replication and mirroring — the caller (NRG, mirror source) controls
|
||||
/// the sequence/timestamp rather than the store auto-incrementing them.
|
||||
/// <para>Unlike <see cref="StoreMsg"/>, this does NOT call <c>ExpireFromWheel</c>
|
||||
/// or auto-increment <c>_last</c>. It updates <c>_last</c> via
|
||||
/// <c>Math.Max(_last, seq)</c> so the watermark reflects the highest stored
|
||||
/// sequence.</para>
|
||||
/// Reference: golang/nats-server/server/filestore.go:6756 (storeRawMsg).
|
||||
/// </summary>
|
||||
public void StoreRawMsg(string subject, byte[]? hdr, byte[] msg, ulong seq, long ts, long ttl, bool discardNewCheck)
|
||||
{
|
||||
if (_stopped)
|
||||
throw new ObjectDisposedException(nameof(FileStore), "Store has been stopped.");
|
||||
|
||||
// Combine headers and payload, same as StoreMsg.
|
||||
byte[] combined;
|
||||
if (hdr is { Length: > 0 })
|
||||
{
|
||||
combined = new byte[hdr.Length + msg.Length];
|
||||
hdr.CopyTo(combined, 0);
|
||||
msg.CopyTo(combined, hdr.Length);
|
||||
}
|
||||
else
|
||||
{
|
||||
combined = msg;
|
||||
}
|
||||
|
||||
var persistedPayload = TransformForPersist(combined.AsSpan());
|
||||
// Recover UTC DateTime from caller-supplied Unix nanosecond timestamp.
|
||||
var storedUtc = DateTimeOffset.FromUnixTimeMilliseconds(ts / 1_000_000L).UtcDateTime;
|
||||
|
||||
var stored = new StoredMessage
|
||||
{
|
||||
Sequence = seq,
|
||||
Subject = subject,
|
||||
Payload = combined,
|
||||
TimestampUtc = storedUtc,
|
||||
};
|
||||
_messages[seq] = stored;
|
||||
|
||||
// Go: update _last to the high-water mark — do not decrement.
|
||||
_last = Math.Max(_last, seq);
|
||||
|
||||
// Register TTL using the caller-supplied timestamp and TTL.
|
||||
var effectiveTtlNs = ttl > 0 ? ttl : (_options.MaxAgeMs > 0 ? (long)_options.MaxAgeMs * 1_000_000L : 0L);
|
||||
RegisterTtl(seq, ts, effectiveTtlNs);
|
||||
|
||||
EnsureActiveBlock();
|
||||
try
|
||||
{
|
||||
_activeBlock!.WriteAt(seq, subject, ReadOnlyMemory<byte>.Empty, persistedPayload, ts);
|
||||
}
|
||||
catch (InvalidOperationException)
|
||||
{
|
||||
RotateBlock();
|
||||
_activeBlock!.WriteAt(seq, subject, ReadOnlyMemory<byte>.Empty, persistedPayload, ts);
|
||||
}
|
||||
|
||||
if (_activeBlock!.IsSealed)
|
||||
RotateBlock();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Loads the message immediately before <paramref name="start"/> by walking
|
||||
/// backward from <c>start - 1</c> to <c>_first</c>.
|
||||
/// Throws <see cref="KeyNotFoundException"/> if no such message exists.
|
||||
/// Reference: golang/nats-server/server/filestore.go — LoadPrevMsg.
|
||||
/// </summary>
|
||||
public StoreMsg LoadPrevMsg(ulong start, StoreMsg? sm)
|
||||
{
|
||||
if (start == 0)
|
||||
throw new KeyNotFoundException("No message found before seq 0.");
|
||||
|
||||
var first = _messages.Count > 0 ? _messages.Keys.Min() : 1UL;
|
||||
|
||||
for (var seq = start - 1; seq >= first && seq <= _last; seq--)
|
||||
{
|
||||
if (_messages.TryGetValue(seq, out var stored))
|
||||
{
|
||||
sm ??= new StoreMsg();
|
||||
sm.Clear();
|
||||
sm.Subject = stored.Subject;
|
||||
sm.Data = stored.Payload.Length > 0 ? stored.Payload.ToArray() : null;
|
||||
sm.Sequence = stored.Sequence;
|
||||
sm.Timestamp = new DateTimeOffset(stored.TimestampUtc).ToUnixTimeMilliseconds() * 1_000_000L;
|
||||
return sm;
|
||||
}
|
||||
|
||||
// Prevent underflow on ulong subtraction.
|
||||
if (seq == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
throw new KeyNotFoundException($"No message found before seq {start}.");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns the storage backend type for this store instance.
|
||||
/// Reference: golang/nats-server/server/filestore.go — fileStore.Type.
|
||||
/// </summary>
|
||||
public StorageType Type() => StorageType.File;
|
||||
|
||||
/// <summary>
|
||||
/// Flushes the active block to disk and marks the store as stopped.
|
||||
/// After <c>Stop()</c> returns, calls to <see cref="StoreMsg"/> or
|
||||
/// <see cref="AppendAsync"/> will throw <see cref="ObjectDisposedException"/>.
|
||||
/// Blocks are NOT deleted — use <see cref="Delete"/> if data removal is needed.
|
||||
/// Reference: golang/nats-server/server/filestore.go — fileStore.Stop.
|
||||
/// </summary>
|
||||
public void Stop()
|
||||
{
|
||||
if (_stopped)
|
||||
return;
|
||||
|
||||
_stopped = true;
|
||||
|
||||
// Flush the active block to ensure all buffered writes reach disk.
|
||||
_activeBlock?.Flush();
|
||||
|
||||
// Dispose all blocks to release OS file handles. The files remain on disk.
|
||||
DisposeAllBlocks();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns a binary-encoded snapshot of the stream state. The <paramref name="failed"/>
|
||||
/// parameter indicates the number of failed apply operations (passed through for
|
||||
/// cluster consensus use). Currently returns an empty array — the full binary
|
||||
/// encoding will be added when the RAFT snapshot codec is implemented (Task 9).
|
||||
/// Reference: golang/nats-server/server/filestore.go — EncodedStreamState.
|
||||
/// </summary>
|
||||
public byte[] EncodedStreamState(ulong failed) => [];
|
||||
|
||||
/// <summary>
|
||||
/// Updates the stream configuration. Currently a no-op placeholder — config
|
||||
/// changes that affect storage (MaxMsgsPer, MaxAge, etc.) will be enforced
|
||||
/// when the stream engine is fully wired.
|
||||
/// Reference: golang/nats-server/server/filestore.go — UpdateConfig.
|
||||
/// </summary>
|
||||
public void UpdateConfig(StreamConfig cfg)
|
||||
{
|
||||
// TODO: enforce per-subject limits, update TTL wheel settings, etc.
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Resets internal cached state after a leadership transition or snapshot restore.
|
||||
/// Currently a no-op — the FileStore re-derives its state from blocks on construction.
|
||||
/// Reference: golang/nats-server/server/filestore.go — ResetState.
|
||||
/// </summary>
|
||||
public void ResetState() { }
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// ConsumerStore factory
|
||||
// Reference: golang/nats-server/server/filestore.go — fileStore.ConsumerStore
|
||||
@@ -1623,6 +1798,62 @@ public sealed class FileStore : IStreamStore, IAsyncDisposable, IDisposable
|
||||
return new ConsumerFileStore(stateFile, cfg);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FlushAllPending: flush buffered writes and checkpoint stream state.
|
||||
// Reference: golang/nats-server/server/filestore.go:5783-5842
|
||||
// (flushPendingWritesUnlocked / writeFullState)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/// <summary>
|
||||
/// Flushes any buffered writes in the active block to disk and atomically
|
||||
/// persists a lightweight stream state checkpoint (stream.state) so that a
|
||||
/// subsequent recovery after a crash can quickly identify the last known
|
||||
/// good sequence without re-scanning every block.
|
||||
/// Reference: golang/nats-server/server/filestore.go:5783 (flushPendingWritesUnlocked).
|
||||
/// </summary>
|
||||
public void FlushAllPending()
|
||||
{
|
||||
_activeBlock?.Flush();
|
||||
WriteStreamState();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Atomically persists a compact stream state snapshot to disk using the
|
||||
/// write-to-temp-then-rename pattern so that a partial write never leaves
|
||||
/// a corrupt state file.
|
||||
/// The file is written as JSON to <c>{Directory}/stream.state</c>.
|
||||
/// Reference: golang/nats-server/server/filestore.go:5820 (writeFullState).
|
||||
/// </summary>
|
||||
private void WriteStreamState()
|
||||
{
|
||||
var statePath = Path.Combine(_options.Directory, "stream.state");
|
||||
var tmpPath = statePath + ".tmp";
|
||||
|
||||
var snapshot = new StreamStateSnapshot
|
||||
{
|
||||
FirstSeq = _messages.Count > 0 ? _messages.Keys.Min() : 0UL,
|
||||
LastSeq = _last,
|
||||
Messages = (ulong)_messages.Count,
|
||||
Bytes = (ulong)_blocks.Sum(b => b.BytesUsed),
|
||||
};
|
||||
|
||||
var json = JsonSerializer.Serialize(snapshot);
|
||||
File.WriteAllText(tmpPath, json);
|
||||
File.Move(tmpPath, statePath, overwrite: true);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// StreamStateSnapshot — private checkpoint record written by WriteStreamState.
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
private sealed record StreamStateSnapshot
|
||||
{
|
||||
public ulong FirstSeq { get; init; }
|
||||
public ulong LastSeq { get; init; }
|
||||
public ulong Messages { get; init; }
|
||||
public ulong Bytes { get; init; }
|
||||
}
|
||||
|
||||
private sealed class FileRecord
|
||||
{
|
||||
public ulong Sequence { get; init; }
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
// QoS 2 flow — mqttProcessPubRec / mqttProcessPubRel / mqttProcessPubComp (~lines 1300–1400)
|
||||
|
||||
using System.Collections.Concurrent;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.Mqtt;
|
||||
|
||||
@@ -20,6 +21,28 @@ public sealed class MqttRetainedStore
|
||||
{
|
||||
private readonly ConcurrentDictionary<string, ReadOnlyMemory<byte>> _retained = new(StringComparer.Ordinal);
|
||||
|
||||
// Topics explicitly cleared in this session — prevents falling back to backing store for cleared topics.
|
||||
private readonly ConcurrentDictionary<string, bool> _cleared = new(StringComparer.Ordinal);
|
||||
|
||||
private readonly IStreamStore? _backingStore;
|
||||
|
||||
/// <summary>Backing store for JetStream persistence.</summary>
|
||||
public IStreamStore? BackingStore => _backingStore;
|
||||
|
||||
/// <summary>
|
||||
/// Initializes a new in-memory retained message store with no backing store.
|
||||
/// </summary>
|
||||
public MqttRetainedStore() : this(null) { }
|
||||
|
||||
/// <summary>
|
||||
/// Initializes a new retained message store with an optional JetStream backing store.
|
||||
/// </summary>
|
||||
/// <param name="backingStore">Optional JetStream stream store for persistence.</param>
|
||||
public MqttRetainedStore(IStreamStore? backingStore)
|
||||
{
|
||||
_backingStore = backingStore;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets (or clears) the retained message for a topic.
|
||||
/// An empty payload clears the retained message.
|
||||
@@ -30,9 +53,11 @@ public sealed class MqttRetainedStore
|
||||
if (payload.IsEmpty)
|
||||
{
|
||||
_retained.TryRemove(topic, out _);
|
||||
_cleared[topic] = true;
|
||||
return;
|
||||
}
|
||||
|
||||
_cleared.TryRemove(topic, out _);
|
||||
_retained[topic] = payload;
|
||||
}
|
||||
|
||||
@@ -64,6 +89,53 @@ public sealed class MqttRetainedStore
|
||||
return results;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets (or clears) the retained message and persists to backing store.
|
||||
/// Go reference: server/mqtt.go mqttHandleRetainedMsg with JetStream.
|
||||
/// </summary>
|
||||
public async Task SetRetainedAsync(string topic, ReadOnlyMemory<byte> payload, CancellationToken ct = default)
|
||||
{
|
||||
SetRetained(topic, payload);
|
||||
|
||||
if (_backingStore is not null)
|
||||
{
|
||||
if (payload.IsEmpty)
|
||||
{
|
||||
// Clear — the in-memory clear above is sufficient for this implementation.
|
||||
// A full implementation would publish a tombstone to JetStream.
|
||||
return;
|
||||
}
|
||||
await _backingStore.AppendAsync($"$MQTT.rmsgs.{topic}", payload, ct);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Gets the retained message, checking backing store if not in memory.
|
||||
/// Returns null if the topic was explicitly cleared in this session.
|
||||
/// </summary>
|
||||
public async Task<byte[]?> GetRetainedAsync(string topic, CancellationToken ct = default)
|
||||
{
|
||||
var mem = GetRetained(topic);
|
||||
if (mem.HasValue)
|
||||
return mem.Value.ToArray();
|
||||
|
||||
// Don't consult the backing store if this topic was explicitly cleared in this session.
|
||||
if (_cleared.ContainsKey(topic))
|
||||
return null;
|
||||
|
||||
if (_backingStore is not null)
|
||||
{
|
||||
var messages = await _backingStore.ListAsync(ct);
|
||||
foreach (var msg in messages)
|
||||
{
|
||||
if (msg.Subject == $"$MQTT.rmsgs.{topic}")
|
||||
return msg.Payload.ToArray();
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Matches an MQTT topic against a filter pattern.
|
||||
/// '+' matches exactly one level, '#' matches zero or more levels (must be last).
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
// Flapper detection — mqttCheckFlapper (lines ~300–360)
|
||||
|
||||
using System.Collections.Concurrent;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.Mqtt;
|
||||
|
||||
@@ -39,6 +40,10 @@ public sealed class MqttSessionStore
|
||||
private readonly int _flapThreshold;
|
||||
private readonly TimeSpan _flapBackoff;
|
||||
private readonly TimeProvider _timeProvider;
|
||||
private readonly IStreamStore? _backingStore;
|
||||
|
||||
/// <summary>Backing store for JetStream persistence. Null for in-memory only.</summary>
|
||||
public IStreamStore? BackingStore => _backingStore;
|
||||
|
||||
/// <summary>
|
||||
/// Initializes a new session store.
|
||||
@@ -59,6 +64,25 @@ public sealed class MqttSessionStore
|
||||
_timeProvider = timeProvider ?? TimeProvider.System;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Initializes a new session store with an optional JetStream backing store.
|
||||
/// </summary>
|
||||
/// <param name="backingStore">Optional JetStream stream store for persistence.</param>
|
||||
/// <param name="flapWindow">Window in which repeated connects trigger flap detection. Default 10 seconds.</param>
|
||||
/// <param name="flapThreshold">Number of connects within the window to trigger backoff. Default 3.</param>
|
||||
/// <param name="flapBackoff">Backoff delay to apply when flapping. Default 1 second.</param>
|
||||
/// <param name="timeProvider">Optional time provider for testing. Default uses system clock.</param>
|
||||
public MqttSessionStore(
|
||||
IStreamStore? backingStore,
|
||||
TimeSpan? flapWindow = null,
|
||||
int flapThreshold = 3,
|
||||
TimeSpan? flapBackoff = null,
|
||||
TimeProvider? timeProvider = null)
|
||||
: this(flapWindow, flapThreshold, flapBackoff, timeProvider)
|
||||
{
|
||||
_backingStore = backingStore;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Saves (or overwrites) session data for the given client.
|
||||
/// Go reference: server/mqtt.go mqttStoreSession.
|
||||
@@ -130,4 +154,75 @@ public sealed class MqttSessionStore
|
||||
return history.Count >= _flapThreshold ? _flapBackoff : TimeSpan.Zero;
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Connects a client session. If cleanSession is false, loads existing session from backing store.
|
||||
/// If cleanSession is true, deletes existing session data.
|
||||
/// Go reference: server/mqtt.go mqttInitSessionStore.
|
||||
/// </summary>
|
||||
public async Task ConnectAsync(string clientId, bool cleanSession, CancellationToken ct = default)
|
||||
{
|
||||
if (cleanSession)
|
||||
{
|
||||
DeleteSession(clientId);
|
||||
// For now the in-memory delete is sufficient; a full implementation would
|
||||
// publish a tombstone or use sequence lookup to remove from JetStream.
|
||||
return;
|
||||
}
|
||||
|
||||
// Try to load from backing store
|
||||
if (_backingStore is not null)
|
||||
{
|
||||
var messages = await _backingStore.ListAsync(ct);
|
||||
foreach (var msg in messages)
|
||||
{
|
||||
if (msg.Subject == $"$MQTT.sess.{clientId}")
|
||||
{
|
||||
var data = System.Text.Json.JsonSerializer.Deserialize<MqttSessionData>(msg.Payload.Span);
|
||||
if (data is not null)
|
||||
{
|
||||
SaveSession(data);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Adds a subscription to the client's session.
|
||||
/// </summary>
|
||||
public void AddSubscription(string clientId, string topic, int qos)
|
||||
{
|
||||
var session = LoadSession(clientId);
|
||||
if (session is null)
|
||||
{
|
||||
session = new MqttSessionData { ClientId = clientId };
|
||||
}
|
||||
session.Subscriptions[topic] = qos;
|
||||
SaveSession(session);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Saves the session to the backing JetStream store if available.
|
||||
/// Go reference: server/mqtt.go mqttStoreSession.
|
||||
/// </summary>
|
||||
public async Task SaveSessionAsync(string clientId, CancellationToken ct = default)
|
||||
{
|
||||
var session = LoadSession(clientId);
|
||||
if (session is null || _backingStore is null)
|
||||
return;
|
||||
|
||||
var json = System.Text.Json.JsonSerializer.SerializeToUtf8Bytes(session);
|
||||
await _backingStore.AppendAsync($"$MQTT.sess.{clientId}", json, ct);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns subscriptions for the given client, or an empty dictionary.
|
||||
/// </summary>
|
||||
public IReadOnlyDictionary<string, int> GetSubscriptions(string clientId)
|
||||
{
|
||||
var session = LoadSession(clientId);
|
||||
return session?.Subscriptions ?? new Dictionary<string, int>();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -171,11 +171,55 @@ public sealed class NatsClient : INatsClient, IDisposable
|
||||
return false;
|
||||
}
|
||||
|
||||
SignalFlushPending();
|
||||
return true;
|
||||
}
|
||||
|
||||
public long PendingBytes => Interlocked.Read(ref _pendingBytes);
|
||||
|
||||
/// <summary>
|
||||
/// Maximum number of pending flush signals before forcing a flush.
|
||||
/// Go reference: server/client.go (maxFlushPending, pcd)
|
||||
/// </summary>
|
||||
public const int MaxFlushPending = 10;
|
||||
|
||||
/// <summary>
|
||||
/// Current pending flush signal count. When the write loop drains queued data
|
||||
/// and _flushSignalsPending is below MaxFlushPending, it can briefly coalesce
|
||||
/// additional writes before flushing to reduce syscalls.
|
||||
/// </summary>
|
||||
private int _flushSignalsPending;
|
||||
|
||||
/// <summary>
|
||||
/// Records that a flush signal has been posted. Called after each QueueOutbound write.
|
||||
/// Go reference: server/client.go pcd (post-channel-data) flush signaling.
|
||||
/// </summary>
|
||||
public void SignalFlushPending()
|
||||
{
|
||||
Interlocked.Increment(ref _flushSignalsPending);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Resets the flush signal counter after a flush completes.
|
||||
/// </summary>
|
||||
public void ResetFlushPending()
|
||||
{
|
||||
Interlocked.Exchange(ref _flushSignalsPending, 0);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Current number of pending flush signals.
|
||||
/// </summary>
|
||||
public int FlushSignalsPending => Volatile.Read(ref _flushSignalsPending);
|
||||
|
||||
/// <summary>
|
||||
/// Whether more writes should be coalesced before flushing.
|
||||
/// Returns true when pending flush signals are below MaxFlushPending,
|
||||
/// indicating the write loop may briefly wait for more data.
|
||||
/// Go reference: server/client.go — fsp (flush signal pending) check.
|
||||
/// </summary>
|
||||
public bool ShouldCoalesceFlush => FlushSignalsPending < MaxFlushPending;
|
||||
|
||||
public async Task RunAsync(CancellationToken ct)
|
||||
{
|
||||
_clientCts = CancellationTokenSource.CreateLinkedTokenSource(ct);
|
||||
@@ -758,6 +802,7 @@ public sealed class NatsClient : INatsClient, IDisposable
|
||||
try
|
||||
{
|
||||
await _stream.FlushAsync(flushCts.Token);
|
||||
ResetFlushPending();
|
||||
}
|
||||
catch (OperationCanceledException) when (!ct.IsCancellationRequested)
|
||||
{
|
||||
@@ -921,4 +966,122 @@ public sealed class NatsClient : INatsClient, IDisposable
|
||||
_stream.Dispose();
|
||||
_socket.Dispose();
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Policy for handling write timeouts based on client kind.
|
||||
/// Go reference: server/client.go — CLIENT connections close on timeout,
|
||||
/// ROUTER/GATEWAY/LEAF connections attempt TCP-level flush recovery.
|
||||
/// </summary>
|
||||
public enum WriteTimeoutPolicy
|
||||
{
|
||||
/// <summary>Close the connection on write timeout (used for CLIENT kind).</summary>
|
||||
Close,
|
||||
|
||||
/// <summary>Attempt TCP-level flush and continue (used for ROUTER, GATEWAY, LEAF).</summary>
|
||||
TcpFlush,
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns the write timeout policy for the given client kind.
|
||||
/// Go reference: server/client.go — routes/gateways/leafnodes get TcpFlush,
|
||||
/// regular clients get Close.
|
||||
/// </summary>
|
||||
public static WriteTimeoutPolicy GetWriteTimeoutPolicy(ClientKind kind) => kind switch
|
||||
{
|
||||
ClientKind.Client => WriteTimeoutPolicy.Close,
|
||||
ClientKind.Router => WriteTimeoutPolicy.TcpFlush,
|
||||
ClientKind.Gateway => WriteTimeoutPolicy.TcpFlush,
|
||||
ClientKind.Leaf => WriteTimeoutPolicy.TcpFlush,
|
||||
_ => WriteTimeoutPolicy.Close,
|
||||
};
|
||||
|
||||
/// <summary>
|
||||
/// Result of a flush operation, tracking partial write progress.
|
||||
/// Go reference: server/client.go — partial write handling for routes and gateways.
|
||||
/// </summary>
|
||||
public readonly record struct FlushResult(long BytesAttempted, long BytesWritten)
|
||||
{
|
||||
/// <summary>Whether the flush was only partially completed.</summary>
|
||||
public bool IsPartial => BytesWritten < BytesAttempted;
|
||||
|
||||
/// <summary>Number of bytes remaining to be written.</summary>
|
||||
public long BytesRemaining => BytesAttempted - BytesWritten;
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Blocks producers when the client's outbound buffer is near capacity.
|
||||
/// Go reference: server/client.go (stc channel, stalledRoute handling).
|
||||
/// When pending bytes exceed 75% of maxPending, producers must wait until
|
||||
/// the write loop drains enough data.
|
||||
/// </summary>
|
||||
public sealed class StallGate
|
||||
{
|
||||
private readonly long _threshold;
|
||||
private volatile SemaphoreSlim? _semaphore;
|
||||
private readonly Lock _gate = new();
|
||||
|
||||
/// <summary>
|
||||
/// Creates a stall gate with the given maxPending capacity.
|
||||
/// The stall threshold is set at 75% of maxPending.
|
||||
/// Go reference: server/client.go stc channel creation.
|
||||
/// </summary>
|
||||
public StallGate(long maxPending)
|
||||
{
|
||||
_threshold = maxPending * 3 / 4;
|
||||
}
|
||||
|
||||
/// <summary>Whether producers are currently being stalled.</summary>
|
||||
public bool IsStalled
|
||||
{
|
||||
get { lock (_gate) return _semaphore is not null; }
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Updates pending byte count and activates/deactivates the stall gate.
|
||||
/// Go reference: server/client.go stalledRoute check.
|
||||
/// </summary>
|
||||
public void UpdatePending(long pending)
|
||||
{
|
||||
lock (_gate)
|
||||
{
|
||||
if (pending >= _threshold && _semaphore is null)
|
||||
{
|
||||
_semaphore = new SemaphoreSlim(0, 1);
|
||||
}
|
||||
else if (pending < _threshold && _semaphore is not null)
|
||||
{
|
||||
Release();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Waits for the stall gate to release. Returns true if released,
|
||||
/// false if timed out (indicating the client should be closed as slow consumer).
|
||||
/// Go reference: server/client.go stc channel receive with timeout.
|
||||
/// </summary>
|
||||
public async Task<bool> WaitAsync(TimeSpan timeout)
|
||||
{
|
||||
SemaphoreSlim? sem;
|
||||
lock (_gate) sem = _semaphore;
|
||||
if (sem is null) return true;
|
||||
return await sem.WaitAsync(timeout);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Releases any blocked producers. Called when the write loop has drained
|
||||
/// enough data to bring pending bytes below the threshold.
|
||||
/// </summary>
|
||||
public void Release()
|
||||
{
|
||||
lock (_gate)
|
||||
{
|
||||
if (_semaphore is not null)
|
||||
{
|
||||
_semaphore.Release();
|
||||
_semaphore = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,6 +89,10 @@ public sealed class ServerInfo
|
||||
[JsonPropertyName("tls_available")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
|
||||
public bool TlsAvailable { get; set; }
|
||||
|
||||
[JsonPropertyName("connect_urls")]
|
||||
[JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
|
||||
public string[]? ConnectUrls { get; set; }
|
||||
}
|
||||
|
||||
public sealed class ClientOptions
|
||||
|
||||
@@ -23,6 +23,13 @@ public sealed class RaftNode : IDisposable
|
||||
// Go reference: raft.go:961-1019 (proposeAddPeer / proposeRemovePeer, single-change invariant)
|
||||
private long _membershipChangeIndex;
|
||||
|
||||
// Joint consensus (two-phase membership change) per Raft paper Section 4.
|
||||
// During the joint phase both the old config (Cold) and new config (Cnew) are stored.
|
||||
// A quorum decision requires majority from BOTH configurations simultaneously.
|
||||
// Go reference: raft.go joint consensus / two-phase membership transitions.
|
||||
private HashSet<string>? _jointOldMembers;
|
||||
private HashSet<string>? _jointNewMembers;
|
||||
|
||||
// Pre-vote: Go NATS server does not implement pre-vote (RFC 5849 §9.6). Skipped for parity.
|
||||
|
||||
public string Id { get; }
|
||||
@@ -54,6 +61,19 @@ public sealed class RaftNode : IDisposable
|
||||
// Go reference: raft.go:961-1019 single-change invariant.
|
||||
public bool MembershipChangeInProgress => Interlocked.Read(ref _membershipChangeIndex) > 0;
|
||||
|
||||
/// <summary>
|
||||
/// True when this node is in the joint consensus phase (transitioning between
|
||||
/// two membership configurations).
|
||||
/// Go reference: raft.go joint consensus / two-phase membership transitions.
|
||||
/// </summary>
|
||||
public bool InJointConsensus => _jointNewMembers != null;
|
||||
|
||||
/// <summary>
|
||||
/// The new (Cnew) member set stored during a joint configuration transition,
|
||||
/// or null when not in joint consensus. Exposed for testing.
|
||||
/// </summary>
|
||||
public IReadOnlyCollection<string>? JointNewMembers => _jointNewMembers;
|
||||
|
||||
public RaftNode(string id, IRaftTransport? transport = null, string? persistDirectory = null)
|
||||
{
|
||||
Id = id;
|
||||
@@ -273,6 +293,62 @@ public sealed class RaftNode : IDisposable
|
||||
return entry.Index;
|
||||
}
|
||||
|
||||
// Joint consensus (Raft paper Section 4) — two-phase membership transitions.
|
||||
// Go reference: raft.go joint consensus / two-phase membership transitions.
|
||||
|
||||
/// <summary>
/// Enters the joint consensus phase (Raft paper Section 4) with the given old (Cold)
/// and new (Cnew) configurations. While joint, quorum decisions must hold in BOTH
/// configurations; the active member set becomes the union of Cold and Cnew so that
/// entries replicate to every node participating in either configuration.
/// Go reference: raft.go Section 4 (joint consensus).
/// </summary>
public void BeginJointConsensus(IReadOnlyCollection<string> cold, IReadOnlyCollection<string> cnew)
{
    _jointOldMembers = new HashSet<string>(cold, StringComparer.Ordinal);
    _jointNewMembers = new HashSet<string>(cnew, StringComparer.Ordinal);

    // Union Cnew into the active set; Cold is the current configuration and is
    // presumably already present in _members — TODO confirm at the call site.
    foreach (var peer in cnew)
    {
        _members.Add(peer);
    }
}
|
||||
|
||||
/// <summary>
/// Finalizes a joint configuration transition: Cnew becomes the sole active member
/// set and both stored configurations (Cold and Cnew) are cleared. Call this once
/// the Cnew log entry has reached quorum in both configurations. No-op when the
/// node is not currently in joint consensus.
/// Go reference: raft.go joint consensus commit.
/// </summary>
public void CommitJointConsensus()
{
    // Not in a joint transition — nothing to finalize.
    if (_jointNewMembers is null)
        return;

    _members.Clear();
    foreach (var peer in _jointNewMembers)
    {
        _members.Add(peer);
    }

    _jointOldMembers = null;
    _jointNewMembers = null;
}
|
||||
|
||||
/// <summary>
/// During joint consensus, checks whether the acknowledging voters satisfy a strict
/// majority in BOTH the old configuration (Cold) and the new configuration (Cnew).
/// Returns false when the node is not in joint consensus.
/// Go reference: raft.go Section 4 — joint config quorum calculation.
/// </summary>
public bool CalculateJointQuorum(
    IReadOnlyCollection<string> coldVoters,
    IReadOnlyCollection<string> cnewVoters)
{
    var cold = _jointOldMembers;
    var cnew = _jointNewMembers;
    if (cold is null || cnew is null)
        return false; // Not in joint consensus.

    // Strict integer majority: count > n/2 is equivalent to count >= n/2 + 1.
    var hasOldMajority = coldVoters.Count > cold.Count / 2;
    var hasNewMajority = cnewVoters.Count > cnew.Count / 2;
    return hasOldMajority && hasNewMajority;
}
|
||||
|
||||
// B5: Snapshot checkpoints and log compaction
|
||||
// Go reference: raft.go CreateSnapshotCheckpoint, DrainAndReplaySnapshot
|
||||
|
||||
@@ -610,8 +686,19 @@ public sealed class RaftNode : IDisposable
|
||||
var dir = _persistDirectory ?? Path.Combine(Path.GetTempPath(), "natsdotnet-raft", Id);
|
||||
Directory.CreateDirectory(dir);
|
||||
await Log.PersistAsync(Path.Combine(dir, "log.json"), ct);
|
||||
await File.WriteAllTextAsync(Path.Combine(dir, "term.txt"), TermState.CurrentTerm.ToString(), ct);
|
||||
await File.WriteAllTextAsync(Path.Combine(dir, "applied.txt"), AppliedIndex.ToString(), ct);
|
||||
|
||||
// Persist term and VotedFor together in meta.json for atomic durable state.
|
||||
// Go reference: raft.go storeMeta / writeTermVote (term + votedFor written atomically)
|
||||
var meta = new RaftMetaState
|
||||
{
|
||||
CurrentTerm = TermState.CurrentTerm,
|
||||
VotedFor = TermState.VotedFor,
|
||||
};
|
||||
await File.WriteAllTextAsync(
|
||||
Path.Combine(dir, "meta.json"),
|
||||
System.Text.Json.JsonSerializer.Serialize(meta),
|
||||
ct);
|
||||
}
|
||||
|
||||
public async Task LoadPersistedStateAsync(CancellationToken ct)
|
||||
@@ -619,9 +706,25 @@ public sealed class RaftNode : IDisposable
|
||||
var dir = _persistDirectory ?? Path.Combine(Path.GetTempPath(), "natsdotnet-raft", Id);
|
||||
Log = await RaftLog.LoadAsync(Path.Combine(dir, "log.json"), ct);
|
||||
|
||||
var termPath = Path.Combine(dir, "term.txt");
|
||||
if (File.Exists(termPath) && int.TryParse(await File.ReadAllTextAsync(termPath, ct), out var term))
|
||||
TermState.CurrentTerm = term;
|
||||
// Load from meta.json first (includes VotedFor); fall back to legacy term.txt
|
||||
var metaPath = Path.Combine(dir, "meta.json");
|
||||
if (File.Exists(metaPath))
|
||||
{
|
||||
var json = await File.ReadAllTextAsync(metaPath, ct);
|
||||
var meta = System.Text.Json.JsonSerializer.Deserialize<RaftMetaState>(json);
|
||||
if (meta is not null)
|
||||
{
|
||||
TermState.CurrentTerm = meta.CurrentTerm;
|
||||
TermState.VotedFor = meta.VotedFor;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Legacy: term.txt only (no VotedFor)
|
||||
var termPath = Path.Combine(dir, "term.txt");
|
||||
if (File.Exists(termPath) && int.TryParse(await File.ReadAllTextAsync(termPath, ct), out var term))
|
||||
TermState.CurrentTerm = term;
|
||||
}
|
||||
|
||||
var appliedPath = Path.Combine(dir, "applied.txt");
|
||||
if (File.Exists(appliedPath) && long.TryParse(await File.ReadAllTextAsync(appliedPath, ct), out var applied))
|
||||
@@ -630,6 +733,13 @@ public sealed class RaftNode : IDisposable
|
||||
AppliedIndex = Log.Entries[^1].Index;
|
||||
}
|
||||
|
||||
/// <summary>
/// Durable term + vote metadata written alongside the log.
/// Serialized to meta.json so term and vote are persisted together.
/// Go reference: raft.go storeMeta / writeTermVote.
/// </summary>
private sealed class RaftMetaState
{
    // Latest term this node has observed; restored on load so restarts do not regress the term.
    public int CurrentTerm { get; set; }

    // Candidate this node voted for in CurrentTerm, or null when no vote was cast.
    public string? VotedFor { get; set; }
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
StopElectionTimer();
|
||||
|
||||
247
src/NATS.Server/Raft/RaftWal.cs
Normal file
247
src/NATS.Server/Raft/RaftWal.cs
Normal file
@@ -0,0 +1,247 @@
|
||||
using System.Buffers.Binary;
|
||||
using System.IO.Hashing;
|
||||
using System.Text;
|
||||
|
||||
namespace NATS.Server.Raft;
|
||||
|
||||
/// <summary>
/// Binary write-ahead log for RAFT log entries.
///
/// File layout:
///   Header:  [4:magic="NWAL"][4:version_le=1]
///   Records (repeated):
///     [4:length_le][8:index_le][4:term_le][4:crc32_le][N:utf8_command]
///   where length = 8 + 4 + 4 + N (payload bytes counted after the length field itself)
///
/// CRC32 is computed over the index, term, and command bytes of each record.
///
/// Go reference: server/raft.go WAL / wal.go (binary WAL format)
/// </summary>
public sealed class RaftWal : IDisposable
{
    private static ReadOnlySpan<byte> Magic => "NWAL"u8;
    private const int Version = 1;

    // Header: 4 bytes magic + 4 bytes version
    private const int HeaderSize = 8;

    // After the 4-byte length field, each record payload contains:
    //   8 bytes index + 4 bytes term + 4 bytes crc32 + N bytes command
    private const int LengthFieldSize = 4;
    private const int IndexFieldSize = 8;
    private const int TermFieldSize = 4;
    private const int Crc32FieldSize = 4;
    private const int RecordFixedPayloadSize = IndexFieldSize + TermFieldSize + Crc32FieldSize; // 16

    private readonly string _path;
    private FileStream _stream;
    private readonly List<RaftLogEntry> _entries = [];

    /// <summary>
    /// Opens or creates a WAL file at the given path. Writes the file header if the file is new.
    /// The file is opened with FileShare.Read so readers (Load) can access it concurrently.
    /// </summary>
    public RaftWal(string path)
    {
        _path = path;
        var isNew = !File.Exists(path);
        _stream = OpenWriteStream(path, isNew ? FileMode.Create : FileMode.Open);

        if (isNew)
        {
            WriteHeaderTo(_stream);
            _stream.Flush(flushToDisk: true);
        }
        else
        {
            _stream.Seek(0, SeekOrigin.End);
        }
    }

    private RaftWal(string path, FileStream stream, List<RaftLogEntry> entries)
    {
        _path = path;
        _stream = stream;
        _entries = entries;
    }

    /// <summary>All in-memory entries loaded from or appended to this WAL.</summary>
    public IReadOnlyList<RaftLogEntry> Entries => _entries;

    /// <summary>
    /// Appends a single RAFT log entry to the WAL file and the in-memory list.
    /// Data is buffered; call <see cref="SyncAsync"/> to force it to disk.
    /// </summary>
    public async Task AppendAsync(RaftLogEntry entry)
    {
        await WriteEntryTo(_stream, entry);
        _entries.Add(entry);
    }

    /// <summary>Flushes the WAL to disk (fsync).</summary>
    public Task SyncAsync()
    {
        _stream.Flush(flushToDisk: true);
        return Task.CompletedTask;
    }

    /// <summary>
    /// Rewrites the WAL keeping only entries with index > upToIndex, using a temp file and
    /// atomic rename. Also updates the in-memory list.
    /// </summary>
    public async Task CompactAsync(long upToIndex)
    {
        var remaining = _entries.Where(e => e.Index > upToIndex).ToList();
        var tmpPath = _path + ".tmp";

        await using (var tmp = new FileStream(tmpPath, FileMode.Create, FileAccess.Write,
                         FileShare.None, bufferSize: 4096, useAsync: true))
        {
            WriteHeaderTo(tmp);
            foreach (var entry in remaining)
                await WriteEntryTo(tmp, entry);
            tmp.Flush(flushToDisk: true);
        }

        // Atomic replace: close current stream, rename temp over original, reopen.
        _stream.Dispose();
        File.Move(tmpPath, _path, overwrite: true);
        _stream = OpenWriteStream(_path, FileMode.Open);
        _stream.Seek(0, SeekOrigin.End);

        _entries.Clear();
        _entries.AddRange(remaining);
    }

    /// <summary>
    /// Scans a WAL file, validates records via CRC32, stops at the first corrupt or truncated
    /// record, truncates the corrupt tail so that subsequent appends remain recoverable, and
    /// returns a populated RaftWal ready for append.
    /// </summary>
    public static RaftWal Load(string path)
    {
        if (!File.Exists(path))
            return new RaftWal(path); // creates new file with header

        // Read the file content while allowing concurrent writers (FileShare.ReadWrite).
        byte[] bytes;
        using (var fs = new FileStream(path, FileMode.Open, FileAccess.Read,
                   FileShare.ReadWrite, bufferSize: 4096, useAsync: false))
        {
            bytes = new byte[fs.Length];
            var read = 0;
            while (read < bytes.Length)
                read += fs.Read(bytes, read, bytes.Length - read);
        }

        var (entries, validLength) = ParseEntries(bytes);

        var stream = OpenWriteStream(path, FileMode.Open);

        // BUG FIX: previously this seeked straight to the physical end of the file, so a
        // corrupt/truncated tail stayed in place and new appends were written AFTER it —
        // making those appends unreadable on the next Load (parsing stops at the first bad
        // record). Truncate the invalid tail first. Files with an unrecognized header
        // (validLength == 0) are left untouched, preserving the previous behavior.
        if (validLength >= HeaderSize && validLength < bytes.Length)
            stream.SetLength(validLength);

        stream.Seek(0, SeekOrigin.End);
        return new RaftWal(path, stream, entries);
    }

    public void Dispose()
    {
        _stream.Dispose();
    }

    // --- Private helpers ---

    /// <summary>
    /// Parses all valid records from a WAL byte buffer, stopping at the first corrupt or
    /// truncated record. Returns the parsed entries and the byte offset just past the last
    /// valid record (0 when the header itself is missing or unrecognized).
    /// </summary>
    private static (List<RaftLogEntry> Entries, long ValidLength) ParseEntries(byte[] bytes)
    {
        var entries = new List<RaftLogEntry>();

        // Validate magic header.
        if (bytes.Length < HeaderSize)
            return (entries, 0);

        if (bytes[0] != Magic[0] || bytes[1] != Magic[1] ||
            bytes[2] != Magic[2] || bytes[3] != Magic[3])
            return (entries, 0); // Unrecognized file — return empty

        // Skip version field (bytes 4..7).
        var pos = HeaderSize;
        var validLength = (long)HeaderSize;

        while (pos < bytes.Length)
        {
            // Need at least the length field.
            if (pos + LengthFieldSize > bytes.Length)
                break;

            var recordPayloadLength = BinaryPrimitives.ReadInt32LittleEndian(bytes.AsSpan(pos, LengthFieldSize));
            pos += LengthFieldSize;

            // Sanity-check payload length — cast to long to guard against integer overflow.
            if (recordPayloadLength < RecordFixedPayloadSize || (long)pos + recordPayloadLength > bytes.Length)
                break; // Truncated or corrupt record — stop

            var indexSpan = bytes.AsSpan(pos, IndexFieldSize);
            var termSpan = bytes.AsSpan(pos + IndexFieldSize, TermFieldSize);
            var storedCrc = BinaryPrimitives.ReadUInt32LittleEndian(
                bytes.AsSpan(pos + IndexFieldSize + TermFieldSize, Crc32FieldSize));
            var commandLength = recordPayloadLength - RecordFixedPayloadSize;
            var commandSpan = bytes.AsSpan(pos + RecordFixedPayloadSize, commandLength);

            if (ComputeCrc(indexSpan, termSpan, commandSpan) != storedCrc)
                break; // CRC mismatch — corrupt record, truncate from here

            var index = BinaryPrimitives.ReadInt64LittleEndian(indexSpan);
            var term = BinaryPrimitives.ReadInt32LittleEndian(termSpan);
            var command = Encoding.UTF8.GetString(commandSpan);

            entries.Add(new RaftLogEntry(index, term, command));
            pos += recordPayloadLength;
            validLength = pos; // Advance the high-water mark past this valid record.
        }

        return (entries, validLength);
    }

    /// <summary>Opens the WAL file for reading and appending with FileShare.Read to allow concurrent readers.</summary>
    private static FileStream OpenWriteStream(string path, FileMode mode) =>
        new(path, mode, FileAccess.ReadWrite, FileShare.Read, bufferSize: 4096, useAsync: true);

    /// <summary>Writes the 8-byte file header (magic + little-endian version).</summary>
    private static void WriteHeaderTo(Stream stream)
    {
        stream.Write(Magic);
        Span<byte> version = stackalloc byte[4];
        BinaryPrimitives.WriteInt32LittleEndian(version, Version);
        stream.Write(version);
    }

    /// <summary>Serializes one record (length, index, term, crc32, command) and writes it to the stream.</summary>
    private static async Task WriteEntryTo(Stream stream, RaftLogEntry entry)
    {
        var commandBytes = Encoding.UTF8.GetBytes(entry.Command);
        var recordPayloadLength = RecordFixedPayloadSize + commandBytes.Length;
        var record = new byte[LengthFieldSize + recordPayloadLength];
        var span = record.AsSpan();

        BinaryPrimitives.WriteInt32LittleEndian(span[..4], recordPayloadLength);
        BinaryPrimitives.WriteInt64LittleEndian(span[4..12], entry.Index);
        BinaryPrimitives.WriteInt32LittleEndian(span[12..16], entry.Term);

        uint crc = ComputeCrc(span[4..12], span[12..16], commandBytes);
        BinaryPrimitives.WriteUInt32LittleEndian(span[16..20], crc);

        commandBytes.CopyTo(record, 20);
        await stream.WriteAsync(record);
    }

    /// <summary>
    /// Computes CRC32 incrementally over index, term, and command bytes without allocating
    /// a contiguous buffer. Uses System.IO.Hashing.Crc32 (IEEE 802.3 polynomial).
    /// </summary>
    private static uint ComputeCrc(ReadOnlySpan<byte> indexBytes, ReadOnlySpan<byte> termBytes, ReadOnlySpan<byte> commandBytes)
    {
        var crc = new Crc32();
        crc.Append(indexBytes);
        crc.Append(termBytes);
        crc.Append(commandBytes);
        return crc.GetCurrentHashAsUInt32();
    }
}
|
||||
@@ -3,6 +3,7 @@ using System.Net;
|
||||
using System.Net.Sockets;
|
||||
using Microsoft.Extensions.Logging;
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server.Protocol;
|
||||
using NATS.Server.Subscriptions;
|
||||
|
||||
namespace NATS.Server.Routes;
|
||||
@@ -18,6 +19,8 @@ public sealed class RouteManager : IAsyncDisposable
|
||||
private readonly Action<RouteMessage> _routedMessageSink;
|
||||
private readonly ConcurrentDictionary<string, RouteConnection> _routes = new(StringComparer.Ordinal);
|
||||
private readonly ConcurrentDictionary<string, byte> _connectedServerIds = new(StringComparer.Ordinal);
|
||||
private readonly HashSet<string> _discoveredRoutes = new(StringComparer.OrdinalIgnoreCase);
|
||||
private readonly HashSet<string> _knownRouteUrls = new(StringComparer.OrdinalIgnoreCase);
|
||||
|
||||
private CancellationTokenSource? _cts;
|
||||
private Socket? _listener;
|
||||
@@ -49,6 +52,62 @@ public sealed class RouteManager : IAsyncDisposable
|
||||
_logger = logger;
|
||||
}
|
||||
|
||||
/// <summary>
/// Snapshot of routes auto-discovered via INFO gossip from peers.
/// Go reference: server/route.go processImplicitRoute.
/// </summary>
public IReadOnlyCollection<string> DiscoveredRoutes
{
    get
    {
        // Copy under the lock so callers never observe a mutating set.
        lock (_discoveredRoutes)
        {
            return new List<string>(_discoveredRoutes);
        }
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Event raised when new route info should be forwarded to known peers.
|
||||
/// Go reference: server/route.go forwardNewRouteInfoToKnownServers.
|
||||
/// </summary>
|
||||
public event Action<List<string>>? OnForwardInfo;
|
||||
|
||||
/// <summary>
/// Processes connect_urls from a peer's INFO message. Any URLs not already
/// known are added to DiscoveredRoutes for solicited connection.
/// Go reference: server/route.go:1500-1550 (processImplicitRoute).
/// </summary>
public void ProcessImplicitRoute(ServerInfo serverInfo)
{
    var urls = serverInfo.ConnectUrls;
    if (urls is not { Length: > 0 })
        return; // Nothing gossiped — no-op.

    // _discoveredRoutes guards both sets (AddKnownRoute takes the same lock).
    lock (_discoveredRoutes)
    {
        foreach (var url in urls)
        {
            if (_knownRouteUrls.Contains(url))
                continue; // Already known — skip.

            _discoveredRoutes.Add(url);
        }
    }
}
|
||||
|
||||
/// <summary>
/// Forwards new peer URL information to all known route connections by raising
/// the <see cref="OnForwardInfo"/> event (no-op when nothing is subscribed).
/// Go reference: server/route.go forwardNewRouteInfoToKnownServers.
/// </summary>
public void ForwardNewRouteInfoToKnownServers(string newPeerUrl)
{
    var handler = OnForwardInfo;
    handler?.Invoke(new List<string> { newPeerUrl });
}
|
||||
|
||||
/// <summary>
/// Registers a URL as already-known so implicit discovery skips it.
/// Used during initialization and testing.
/// </summary>
public void AddKnownRoute(string url)
{
    // _discoveredRoutes is the shared gate for both sets so that discovery
    // and registration cannot interleave (see ProcessImplicitRoute).
    lock (_discoveredRoutes)
    {
        _knownRouteUrls.Add(url);
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Returns a route pool index for the given account name, matching Go's
|
||||
|
||||
104
tests/NATS.Server.Tests/FileStoreEncryptionTests.cs
Normal file
104
tests/NATS.Server.Tests/FileStoreEncryptionTests.cs
Normal file
@@ -0,0 +1,104 @@
|
||||
// Go: TestFileStoreEncryption server/filestore_test.go
|
||||
// Reference: golang/nats-server/server/filestore.go:816-907 (genEncryptionKeys, recoverAEK, setupAEK)
|
||||
// Tests that block files are encrypted at rest and can be recovered with the same key.
|
||||
|
||||
using System.Security.Cryptography;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
/// <summary>
/// Encryption-at-rest tests for the JetStream file store.
/// Go: TestFileStoreEncryption server/filestore_test.go
/// Reference: golang/nats-server/server/filestore.go:816-907 (genEncryptionKeys, recoverAEK, setupAEK)
/// FIX: each test previously leaked its temp directory; they now clean up in a finally block.
/// </summary>
public class FileStoreEncryptionTests
{
    [Fact]
    public async Task Encrypted_block_round_trips_message()
    {
        // Go: TestFileStoreEncryption server/filestore_test.go
        var dir = Directory.CreateTempSubdirectory();
        try
        {
            var key = new byte[32];
            RandomNumberGenerator.Fill(key);

            await using (var store = new FileStore(new FileStoreOptions
            {
                Directory = dir.FullName,
                Cipher = StoreCipher.ChaCha,
                EncryptionKey = key,
            }))
            {
                await store.AppendAsync("test.subj", "hello encrypted"u8.ToArray(), default);
            }

            // Raw block file should NOT contain plaintext
            var blkFiles = Directory.GetFiles(dir.FullName, "*.blk");
            blkFiles.ShouldNotBeEmpty();
            var raw = File.ReadAllBytes(blkFiles[0]);
            System.Text.Encoding.UTF8.GetString(raw).ShouldNotContain("hello encrypted");

            // Recover with same key should return plaintext
            await using var recovered = new FileStore(new FileStoreOptions
            {
                Directory = dir.FullName,
                Cipher = StoreCipher.ChaCha,
                EncryptionKey = key,
            });
            var msg = await recovered.LoadAsync(1, default);
            msg.ShouldNotBeNull();
            System.Text.Encoding.UTF8.GetString(msg.Payload.Span).ShouldBe("hello encrypted");
        }
        finally
        {
            dir.Delete(recursive: true);
        }
    }

    [Fact]
    public async Task Encrypted_block_with_aes_round_trips()
    {
        var dir = Directory.CreateTempSubdirectory();
        try
        {
            var key = new byte[32];
            RandomNumberGenerator.Fill(key);

            await using (var store = new FileStore(new FileStoreOptions
            {
                Directory = dir.FullName,
                Cipher = StoreCipher.Aes,
                EncryptionKey = key,
            }))
            {
                await store.AppendAsync("aes.subj", "aes payload"u8.ToArray(), default);
            }

            await using var recovered = new FileStore(new FileStoreOptions
            {
                Directory = dir.FullName,
                Cipher = StoreCipher.Aes,
                EncryptionKey = key,
            });
            var msg = await recovered.LoadAsync(1, default);
            msg.ShouldNotBeNull();
            System.Text.Encoding.UTF8.GetString(msg.Payload.Span).ShouldBe("aes payload");
        }
        finally
        {
            dir.Delete(recursive: true);
        }
    }

    [Fact]
    public async Task Wrong_key_fails_to_decrypt()
    {
        var dir = Directory.CreateTempSubdirectory();
        try
        {
            var key1 = new byte[32];
            var key2 = new byte[32];
            RandomNumberGenerator.Fill(key1);
            RandomNumberGenerator.Fill(key2);

            await using (var store = new FileStore(new FileStoreOptions
            {
                Directory = dir.FullName,
                Cipher = StoreCipher.ChaCha,
                EncryptionKey = key1,
            }))
            {
                await store.AppendAsync("secret", "data"u8.ToArray(), default);
            }

            // Recovery with wrong key should throw InvalidDataException (from CryptographicException)
            var act = () => new FileStore(new FileStoreOptions
            {
                Directory = dir.FullName,
                Cipher = StoreCipher.ChaCha,
                EncryptionKey = key2,
            });
            Should.Throw<InvalidDataException>(act);
        }
        finally
        {
            dir.Delete(recursive: true);
        }
    }
}
|
||||
50
tests/NATS.Server.Tests/FlushCoalescingTests.cs
Normal file
50
tests/NATS.Server.Tests/FlushCoalescingTests.cs
Normal file
@@ -0,0 +1,50 @@
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
// Go reference: server/client.go (maxFlushPending, pcd, flush signal coalescing)
|
||||
|
||||
/// <summary>
/// Unit tests for flush-signal coalescing thresholds.
/// Go reference: server/client.go (maxFlushPending, pcd, flush signal coalescing).
/// </summary>
public class FlushCoalescingTests
{
    [Fact]
    public void MaxFlushPending_defaults_to_10()
    {
        // Go reference: server/client.go maxFlushPending constant
        NatsClient.MaxFlushPending.ShouldBe(10);
    }

    [Fact]
    public void ShouldCoalesceFlush_true_when_below_max()
    {
        // Below the threshold, coalescing of flush signals is allowed.
        // Go reference: server/client.go fsp < maxFlushPending check
        const int pendingSignals = 5;

        (pendingSignals < NatsClient.MaxFlushPending).ShouldBeTrue();
    }

    [Fact]
    public void ShouldCoalesceFlush_false_when_at_max()
    {
        // Reaching the threshold forces a flush.
        var pendingSignals = NatsClient.MaxFlushPending;

        (pendingSignals < NatsClient.MaxFlushPending).ShouldBeFalse();
    }

    [Fact]
    public void ShouldCoalesceFlush_false_when_above_max()
    {
        // Past the threshold there is definitely no coalescing.
        var pendingSignals = NatsClient.MaxFlushPending + 5;

        (pendingSignals < NatsClient.MaxFlushPending).ShouldBeFalse();
    }

    [Fact]
    public void FlushCoalescing_constant_matches_go_reference()
    {
        // Go reference: server/client.go maxFlushPending = 10
        // Verify the constant is accessible and within a sane range.
        NatsClient.MaxFlushPending.ShouldBeGreaterThan(0);
        NatsClient.MaxFlushPending.ShouldBeLessThanOrEqualTo(100);
    }
}
|
||||
236
tests/NATS.Server.Tests/ImplicitDiscoveryTests.cs
Normal file
236
tests/NATS.Server.Tests/ImplicitDiscoveryTests.cs
Normal file
@@ -0,0 +1,236 @@
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server;
|
||||
using NATS.Server.Gateways;
|
||||
using NATS.Server.Protocol;
|
||||
using NATS.Server.Routes;
|
||||
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
// Go reference: server/route.go processImplicitRoute, server/gateway.go processImplicitGateway
|
||||
|
||||
/// <summary>
/// Tests for implicit route and gateway discovery via INFO gossip.
/// Go reference: server/route.go processImplicitRoute, server/gateway.go processImplicitGateway.
/// IMPROVEMENT: the 6-field ServerInfo boilerplate, previously duplicated in six tests,
/// is extracted into a single factory helper; behavior of every test is unchanged.
/// </summary>
public class ImplicitDiscoveryTests
{
    // Builds the boilerplate ServerInfo used by the route-discovery tests;
    // only the server id and connect_urls vary between cases.
    private static ServerInfo MakeServerInfo(string serverId, string[]? connectUrls) => new()
    {
        ServerId = serverId,
        ServerName = serverId,
        Version = "0.1.0",
        Host = "0.0.0.0",
        Port = 4222,
        ConnectUrls = connectUrls,
    };

    [Fact]
    public void ProcessImplicitRoute_discovers_new_peer()
    {
        // Go reference: server/route.go processImplicitRoute
        var mgr = RouteManagerTestHelper.Create();

        mgr.ProcessImplicitRoute(MakeServerInfo("server-2", ["nats://10.0.0.2:6222", "nats://10.0.0.3:6222"]));

        mgr.DiscoveredRoutes.ShouldContain("nats://10.0.0.2:6222");
        mgr.DiscoveredRoutes.ShouldContain("nats://10.0.0.3:6222");
    }

    [Fact]
    public void ProcessImplicitRoute_skips_known_peers()
    {
        // Go reference: server/route.go processImplicitRoute — skip already-known
        var mgr = RouteManagerTestHelper.Create();
        mgr.AddKnownRoute("nats://10.0.0.2:6222");

        mgr.ProcessImplicitRoute(MakeServerInfo("server-2", ["nats://10.0.0.2:6222", "nats://10.0.0.3:6222"]));

        mgr.DiscoveredRoutes.Count.ShouldBe(1); // only 10.0.0.3 is new
        mgr.DiscoveredRoutes.ShouldContain("nats://10.0.0.3:6222");
    }

    [Fact]
    public void ProcessImplicitRoute_with_null_urls_is_noop()
    {
        // Go reference: server/route.go processImplicitRoute — nil ConnectUrls guard
        var mgr = RouteManagerTestHelper.Create();

        mgr.ProcessImplicitRoute(MakeServerInfo("server-2", null));

        mgr.DiscoveredRoutes.Count.ShouldBe(0);
    }

    [Fact]
    public void ProcessImplicitRoute_with_empty_urls_is_noop()
    {
        // Go reference: server/route.go processImplicitRoute — empty ConnectUrls guard
        var mgr = RouteManagerTestHelper.Create();

        mgr.ProcessImplicitRoute(MakeServerInfo("server-2", []));

        mgr.DiscoveredRoutes.Count.ShouldBe(0);
    }

    [Fact]
    public void ProcessImplicitGateway_discovers_new_gateway()
    {
        // Go reference: server/gateway.go processImplicitGateway
        var mgr = GatewayManagerTestHelper.Create();
        var gwInfo = new GatewayInfo
        {
            Name = "cluster-B",
            Urls = ["nats://10.0.1.1:7222"],
        };

        mgr.ProcessImplicitGateway(gwInfo);

        mgr.DiscoveredGateways.ShouldContain("cluster-B");
    }

    [Fact]
    public void ProcessImplicitGateway_with_null_throws()
    {
        // Go reference: server/gateway.go processImplicitGateway — null guard
        var mgr = GatewayManagerTestHelper.Create();

        Should.Throw<ArgumentNullException>(() => mgr.ProcessImplicitGateway(null!));
    }

    [Fact]
    public void ProcessImplicitGateway_deduplicates_same_cluster()
    {
        // Go reference: server/gateway.go processImplicitGateway — idempotent discovery
        var mgr = GatewayManagerTestHelper.Create();
        var gwInfo = new GatewayInfo { Name = "cluster-B", Urls = ["nats://10.0.1.1:7222"] };

        mgr.ProcessImplicitGateway(gwInfo);
        mgr.ProcessImplicitGateway(gwInfo);

        mgr.DiscoveredGateways.Count.ShouldBe(1);
    }

    [Fact]
    public void ForwardNewRouteInfo_invokes_event()
    {
        // Go reference: server/route.go forwardNewRouteInfoToKnownServers
        var mgr = RouteManagerTestHelper.Create();
        var forwarded = new List<string>();
        mgr.OnForwardInfo += urls => forwarded.AddRange(urls);

        mgr.ForwardNewRouteInfoToKnownServers("nats://10.0.0.5:6222");

        forwarded.ShouldContain("nats://10.0.0.5:6222");
    }

    [Fact]
    public void ForwardNewRouteInfo_with_no_handler_does_not_throw()
    {
        // Go reference: server/route.go forwardNewRouteInfoToKnownServers — no subscribers
        var mgr = RouteManagerTestHelper.Create();

        Should.NotThrow(() => mgr.ForwardNewRouteInfoToKnownServers("nats://10.0.0.5:6222"));
    }

    [Fact]
    public void AddKnownRoute_prevents_later_discovery()
    {
        // Go reference: server/route.go processImplicitRoute — pre-seeded known routes
        var mgr = RouteManagerTestHelper.Create();
        mgr.AddKnownRoute("nats://10.0.0.9:6222");

        mgr.ProcessImplicitRoute(MakeServerInfo("server-3", ["nats://10.0.0.9:6222"]));

        mgr.DiscoveredRoutes.Count.ShouldBe(0);
    }

    [Fact]
    public void ConnectUrls_is_serialized_when_set()
    {
        // Go reference: server/route.go INFO message includes connect_urls
        var info = MakeServerInfo("s1", ["nats://10.0.0.1:4222"]);

        var json = System.Text.Json.JsonSerializer.Serialize(info);
        json.ShouldContain("connect_urls");
        json.ShouldContain("nats://10.0.0.1:4222");
    }

    [Fact]
    public void ConnectUrls_is_omitted_when_null()
    {
        // Go reference: server/route.go INFO omits connect_urls when empty
        var info = MakeServerInfo("s1", null);

        var json = System.Text.Json.JsonSerializer.Serialize(info);
        json.ShouldNotContain("connect_urls");
    }
}
|
||||
|
||||
/// <summary>Builds a RouteManager wired with inert sinks for unit tests.</summary>
public static class RouteManagerTestHelper
{
    public static RouteManager Create()
    {
        var clusterOptions = new ClusterOptions { Name = "test-cluster", Host = "127.0.0.1", Port = 0 };
        return new RouteManager(
            clusterOptions,
            new ServerStats(),
            "server-1",
            _ => { },
            _ => { },
            NullLogger<RouteManager>.Instance);
    }
}
|
||||
|
||||
/// <summary>Builds a GatewayManager wired with inert sinks for unit tests.</summary>
public static class GatewayManagerTestHelper
{
    public static GatewayManager Create()
    {
        var gatewayOptions = new GatewayOptions { Name = "cluster-A", Host = "127.0.0.1", Port = 0 };
        return new GatewayManager(
            gatewayOptions,
            new ServerStats(),
            "server-1",
            _ => { },
            _ => { },
            NullLogger<GatewayManager>.Instance);
    }
}
|
||||
@@ -0,0 +1,169 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for validated stream/consumer assignment processing.
/// Go reference: jetstream_cluster.go:4541-5925.
/// </summary>
public class JetStreamAssignmentProcessingTests
{
    [Fact]
    public void ProcessStreamAssignment_validates_config()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        var accepted = metaGroup.ProcessStreamAssignment(
            NewStream("valid-stream", "rg-1", """{"subjects":["test.>"]}"""));

        accepted.ShouldBeTrue();
        metaGroup.StreamCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessStreamAssignment_rejects_empty_name()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessStreamAssignment(NewStream("", "rg-1")).ShouldBeFalse();
        metaGroup.StreamCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_applies_config_change()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(NewStream("updatable", "rg-u", """{"subjects":["old.>"]}"""));

        var replacement = NewStream("updatable", "rg-u", """{"subjects":["new.>"]}""");
        metaGroup.ProcessUpdateStreamAssignment(replacement).ShouldBeTrue();

        metaGroup.GetStreamAssignment("updatable")!.ConfigJson.ShouldContain("new.>");
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_returns_false_for_nonexistent()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessUpdateStreamAssignment(NewStream("ghost", "rg-g")).ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerAssignment_requires_existing_stream()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        var orphan = NewConsumer("nonexistent-stream", "orphan-consumer", "rg-c", "n1", "n2", "n3");

        metaGroup.ProcessConsumerAssignment(orphan).ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerAssignment_succeeds_with_existing_stream()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(NewStream("s1", "rg-s1"));

        var consumer = NewConsumer("s1", "c1", "rg-c1", "n1", "n2", "n3");

        metaGroup.ProcessConsumerAssignment(consumer).ShouldBeTrue();
        metaGroup.ConsumerCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessStreamRemoval_cascades_to_consumers()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(NewStream("cascade", "rg-cas"));
        metaGroup.ProcessConsumerAssignment(NewConsumer("cascade", "c1", "rg-c1", "n1", "n2", "n3"));

        metaGroup.ProcessStreamRemoval("cascade").ShouldBeTrue();

        // Removing the stream must also drop its consumers.
        metaGroup.StreamCount.ShouldBe(0);
        metaGroup.ConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessStreamRemoval_returns_false_for_nonexistent()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessStreamRemoval("nope").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_returns_false_for_nonexistent_stream()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.ProcessConsumerRemoval("ghost", "c1").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_returns_false_for_nonexistent_consumer()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(NewStream("s1", "rg-s1"));

        metaGroup.ProcessConsumerRemoval("s1", "nope").ShouldBeFalse();
    }

    [Fact]
    public void ProcessConsumerRemoval_succeeds()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(NewStream("s1", "rg-s1"));
        metaGroup.ProcessConsumerAssignment(NewConsumer("s1", "c1", "rg-c1", "n1", "n2"));

        metaGroup.ProcessConsumerRemoval("s1", "c1").ShouldBeTrue();
        metaGroup.ConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessUpdateStreamAssignment_preserves_consumers()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.ProcessStreamAssignment(NewStream("s1", "rg-s1", """{"subjects":["old"]}"""));
        metaGroup.ProcessConsumerAssignment(NewConsumer("s1", "c1", "rg-c1", "n1", "n2"));

        metaGroup.ProcessUpdateStreamAssignment(NewStream("s1", "rg-s1", """{"subjects":["new"]}""")).ShouldBeTrue();

        // A config update must not wipe existing consumer assignments.
        metaGroup.ConsumerCount.ShouldBe(1);
        metaGroup.GetConsumerAssignment("s1", "c1").ShouldNotBeNull();
    }

    // Builds a stream assignment with the standard three-node peer set.
    // StreamName is `required`, so it must always be supplied.
    private static StreamAssignment NewStream(string streamName, string groupName, string configJson = "{}")
        => new()
        {
            StreamName = streamName,
            Group = new RaftGroup { Name = groupName, Peers = ["n1", "n2", "n3"] },
            ConfigJson = configJson,
        };

    // Builds a consumer assignment for an explicit peer set.
    private static ConsumerAssignment NewConsumer(string streamName, string consumerName, string groupName, params string[] peers)
        => new()
        {
            ConsumerName = consumerName,
            StreamName = streamName,
            Group = new RaftGroup { Name = groupName, Peers = [.. peers] },
        };
}
|
||||
@@ -0,0 +1,344 @@
|
||||
using System.Threading.Channels;
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for JetStreamClusterMonitor — background meta entry processing.
/// Go reference: jetstream_cluster.go:1455-1825 (monitorCluster).
/// </summary>
public class JetStreamClusterMonitorTests
{
    // Hard 5-second ceiling per test so a hung monitor cannot stall the run.
    private static CancellationTokenSource TestTimeout() =>
        new(TimeSpan.FromSeconds(5));

    // Standard fixture: a 3-node meta group fed from an unbounded entry channel.
    private static (JetStreamMetaGroup Meta, Channel<RaftLogEntry> Entries, JetStreamClusterMonitor Monitor) CreateFixture()
    {
        var meta = new JetStreamMetaGroup(3);
        var entries = Channel.CreateUnbounded<RaftLogEntry>();
        return (meta, entries, new JetStreamClusterMonitor(meta, entries.Reader));
    }

    [Fact]
    public async Task Monitor_processes_stream_assignment_entry()
    {
        // Go reference: jetstream_cluster.go monitorCluster assignStream op
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        var payload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "test-stream",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["test.>"]}""",
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, payload));
        await monitor.WaitForProcessedAsync(1, timeout.Token);

        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("test-stream").ShouldNotBeNull();

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_processes_consumer_assignment_entry()
    {
        // Go reference: jetstream_cluster.go monitorCluster assignConsumer op
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        var streamPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "s1",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["x.>"]}""",
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, streamPayload));

        var consumerPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(2, 1, consumerPayload));
        await monitor.WaitForProcessedAsync(2, timeout.Token);

        meta.ConsumerCount.ShouldBe(1);

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_processes_stream_removal()
    {
        // Go reference: jetstream_cluster.go monitorCluster removeStream op
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        var assignPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "to-remove",
            Peers = new[] { "n1", "n2", "n3" },
            Config = """{"subjects":["rm.>"]}""",
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, assignPayload));

        var removePayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "removeStream",
            StreamName = "to-remove",
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(2, 1, removePayload));
        await monitor.WaitForProcessedAsync(2, timeout.Token);

        meta.StreamCount.ShouldBe(0);

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_applies_meta_snapshot()
    {
        // Go reference: jetstream_cluster.go monitorCluster snapshot op — replaces all state
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        var assignments = new Dictionary<string, StreamAssignment>
        {
            ["snap-stream"] = new StreamAssignment
            {
                StreamName = "snap-stream",
                Group = new RaftGroup { Name = "rg-snap", Peers = ["n1", "n2", "n3"] },
            },
        };
        var encodedSnapshot = Convert.ToBase64String(MetaSnapshotCodec.Encode(assignments));

        var snapshotPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "snapshot",
            Data = encodedSnapshot,
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, snapshotPayload));
        await monitor.WaitForProcessedAsync(1, timeout.Token);

        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("snap-stream").ShouldNotBeNull();

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_processes_consumer_removal()
    {
        // Go reference: jetstream_cluster.go monitorCluster removeConsumer op
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        var streamPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "s1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, streamPayload));

        var consumerPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(2, 1, consumerPayload));
        await monitor.WaitForProcessedAsync(2, timeout.Token);

        meta.ConsumerCount.ShouldBe(1);

        var removePayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "removeConsumer",
            StreamName = "s1",
            ConsumerName = "c1",
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(3, 1, removePayload));
        await monitor.WaitForProcessedAsync(3, timeout.Token);

        meta.ConsumerCount.ShouldBe(0);

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_skips_malformed_entries()
    {
        // Go reference: jetstream_cluster.go monitorCluster — malformed entries must not abort the loop
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, "not-json"));

        var assignPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-bad",
            Peers = new[] { "n1", "n2", "n3" },
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(2, 1, assignPayload));
        await monitor.WaitForProcessedAsync(2, timeout.Token);

        // The valid entry following the garbage one must still be applied.
        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("after-bad").ShouldNotBeNull();

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_stops_on_cancellation()
    {
        // Go reference: jetstream_cluster.go monitorCluster shuts down cleanly when stop channel closes
        var (_, _, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        timeout.Cancel();
        await runTask; // Should complete without throwing
    }

    [Fact]
    public async Task Monitor_ignores_entry_with_no_op_field()
    {
        // Entries missing the "Op" property are silently ignored (forward-compat).
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, """{"NotOp":"whatever"}"""));

        var assignPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-no-op",
            Peers = new[] { "n1" },
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(2, 1, assignPayload));
        await monitor.WaitForProcessedAsync(2, timeout.Token);

        meta.StreamCount.ShouldBe(1);

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_ignores_unknown_op()
    {
        // Unknown op names are silently ignored — forward compatibility.
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, """{"Op":"futureFoo","Data":"xyz"}"""));

        var assignPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "after-unknown-op",
            Peers = new[] { "n1" },
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(2, 1, assignPayload));
        await monitor.WaitForProcessedAsync(2, timeout.Token);

        meta.StreamCount.ShouldBe(1);

        timeout.Cancel();
        await runTask;
    }

    [Fact]
    public async Task Monitor_snapshot_replaces_existing_state()
    {
        // Go reference: jetstream_cluster.go — snapshot apply wipes old assignments
        var (meta, entries, monitor) = CreateFixture();

        using var timeout = TestTimeout();
        var runTask = monitor.StartAsync(timeout.Token);

        var assignPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "assignStream",
            StreamName = "old-stream",
            Peers = new[] { "n1" },
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(1, 1, assignPayload));
        await monitor.WaitForProcessedAsync(1, timeout.Token);

        meta.StreamCount.ShouldBe(1);

        var replacementAssignments = new Dictionary<string, StreamAssignment>
        {
            ["new-stream"] = new StreamAssignment
            {
                StreamName = "new-stream",
                Group = new RaftGroup { Name = "rg-new", Peers = ["n1", "n2", "n3"] },
            },
        };
        var encodedSnapshot = Convert.ToBase64String(MetaSnapshotCodec.Encode(replacementAssignments));
        var snapshotPayload = System.Text.Json.JsonSerializer.Serialize(new
        {
            Op = "snapshot",
            Data = encodedSnapshot,
        });
        await entries.Writer.WriteAsync(new RaftLogEntry(2, 1, snapshotPayload));
        await monitor.WaitForProcessedAsync(2, timeout.Token);

        // Snapshot apply is a full replacement, not a merge.
        meta.StreamCount.ShouldBe(1);
        meta.GetStreamAssignment("old-stream").ShouldBeNull();
        meta.GetStreamAssignment("new-stream").ShouldNotBeNull();

        timeout.Cancel();
        await runTask;
    }
}
|
||||
@@ -0,0 +1,146 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for inflight stream/consumer proposal tracking on the meta group.
/// </summary>
public class JetStreamInflightTrackingTests
{
    // Builds a stream assignment with the standard three-node peer set.
    private static StreamAssignment NewStream(string streamName, string groupName)
        => new()
        {
            StreamName = streamName,
            Group = new RaftGroup { Name = groupName, Peers = ["n1", "n2", "n3"] },
        };

    [Fact]
    public void TrackInflightStreamProposal_increments_ops()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.TrackInflightStreamProposal("ACC", NewStream("inflight-1", "rg-inf"));

        metaGroup.InflightStreamCount.ShouldBe(1);
        metaGroup.IsStreamInflight("ACC", "inflight-1").ShouldBeTrue();
    }

    [Fact]
    public void RemoveInflightStreamProposal_clears_when_zero()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.TrackInflightStreamProposal("ACC", NewStream("inflight-2", "rg-inf2"));

        metaGroup.RemoveInflightStreamProposal("ACC", "inflight-2");

        metaGroup.IsStreamInflight("ACC", "inflight-2").ShouldBeFalse();
    }

    [Fact]
    public void Duplicate_proposal_increments_ops_count()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        var assignment = NewStream("dup-stream", "rg-dup");

        metaGroup.TrackInflightStreamProposal("ACC", assignment);
        metaGroup.TrackInflightStreamProposal("ACC", assignment);
        metaGroup.InflightStreamCount.ShouldBe(1); // still one unique stream

        // Need two removes to fully clear
        metaGroup.RemoveInflightStreamProposal("ACC", "dup-stream");
        metaGroup.IsStreamInflight("ACC", "dup-stream").ShouldBeTrue(); // ops > 0
        metaGroup.RemoveInflightStreamProposal("ACC", "dup-stream");
        metaGroup.IsStreamInflight("ACC", "dup-stream").ShouldBeFalse();
    }

    [Fact]
    public void IsStreamInflight_returns_false_for_unknown_account()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.IsStreamInflight("UNKNOWN", "no-stream").ShouldBeFalse();
    }

    [Fact]
    public void TrackInflightConsumerProposal_tracks_by_account()
    {
        var metaGroup = new JetStreamMetaGroup(3);

        metaGroup.TrackInflightConsumerProposal("ACC", "stream1", "consumer1");

        metaGroup.InflightConsumerCount.ShouldBe(1);
        metaGroup.IsConsumerInflight("ACC", "stream1", "consumer1").ShouldBeTrue();
    }

    [Fact]
    public void RemoveInflightConsumerProposal_clears_when_zero()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.TrackInflightConsumerProposal("ACC", "stream1", "consumer1");

        metaGroup.RemoveInflightConsumerProposal("ACC", "stream1", "consumer1");

        metaGroup.IsConsumerInflight("ACC", "stream1", "consumer1").ShouldBeFalse();
    }

    [Fact]
    public void ClearAllInflight_removes_everything()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.TrackInflightStreamProposal("ACC1", NewStream("s1", "rg"));
        metaGroup.TrackInflightConsumerProposal("ACC2", "s2", "c1");

        metaGroup.ClearAllInflight();

        metaGroup.InflightStreamCount.ShouldBe(0);
        metaGroup.InflightConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void StepDown_clears_inflight()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.TrackInflightStreamProposal("ACC", NewStream("s1", "rg"));

        metaGroup.StepDown();

        metaGroup.InflightStreamCount.ShouldBe(0);
    }

    [Fact]
    public void Multiple_accounts_tracked_independently()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        // Same stream name, different accounts — must be tracked separately.
        metaGroup.TrackInflightStreamProposal("ACC1", NewStream("s1", "rg1"));
        metaGroup.TrackInflightStreamProposal("ACC2", NewStream("s1", "rg2"));

        metaGroup.InflightStreamCount.ShouldBe(2); // one per account
        metaGroup.IsStreamInflight("ACC1", "s1").ShouldBeTrue();
        metaGroup.IsStreamInflight("ACC2", "s1").ShouldBeTrue();

        metaGroup.RemoveInflightStreamProposal("ACC1", "s1");
        metaGroup.IsStreamInflight("ACC1", "s1").ShouldBeFalse();
        metaGroup.IsStreamInflight("ACC2", "s1").ShouldBeTrue(); // still tracked
    }
}
|
||||
@@ -0,0 +1,97 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for leader-change handling on the meta group: event firing,
/// inflight cleanup on step-down, and IsLeader state transitions.
/// </summary>
public class JetStreamLeadershipTests
{
    // Builds a minimal stream assignment with the standard three-node peer set.
    private static StreamAssignment NewStream(string streamName, string groupName)
        => new()
        {
            StreamName = streamName,
            Group = new RaftGroup { Name = groupName, Peers = ["n1", "n2", "n3"] },
        };

    [Fact]
    public void ProcessLeaderChange_clears_inflight_on_step_down()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.TrackInflightStreamProposal("ACC", NewStream("s1", "rg"));

        metaGroup.ProcessLeaderChange(isLeader: false);

        metaGroup.InflightStreamCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessLeaderChange_fires_event_on_become_leader()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        var fired = false;
        metaGroup.OnLeaderChange += _ => fired = true;

        metaGroup.ProcessLeaderChange(isLeader: true);

        fired.ShouldBeTrue();
    }

    [Fact]
    public void ProcessLeaderChange_fires_event_on_step_down()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        bool? observed = null;
        metaGroup.OnLeaderChange += isLeader => observed = isLeader;

        metaGroup.ProcessLeaderChange(isLeader: false);

        observed.ShouldNotBeNull();
        observed.Value.ShouldBeFalse();
    }

    [Fact]
    public void StepDown_triggers_leader_change_event()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        bool? observed = null;
        metaGroup.OnLeaderChange += isLeader => observed = isLeader;

        metaGroup.StepDown();

        observed.ShouldNotBeNull();
        observed.Value.ShouldBeFalse();
    }

    [Fact]
    public void StepDown_clears_inflight_via_process_leader_change()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.TrackInflightStreamProposal("ACC", NewStream("s1", "rg"));
        metaGroup.TrackInflightConsumerProposal("ACC", "s1", "c1");

        metaGroup.StepDown();

        metaGroup.InflightStreamCount.ShouldBe(0);
        metaGroup.InflightConsumerCount.ShouldBe(0);
    }

    [Fact]
    public void BecomeLeader_makes_IsLeader_true()
    {
        var metaGroup = new JetStreamMetaGroup(3);
        metaGroup.StepDown(); // move leader away from self
        metaGroup.IsLeader().ShouldBeFalse();

        metaGroup.BecomeLeader();

        metaGroup.IsLeader().ShouldBeTrue();
    }

    [Fact]
    public void OnLeaderChange_not_fired_when_no_subscribers()
    {
        // Should not throw when no handlers attached
        var metaGroup = new JetStreamMetaGroup(3);

        Should.NotThrow(() => metaGroup.ProcessLeaderChange(isLeader: true));
        Should.NotThrow(() => metaGroup.ProcessLeaderChange(isLeader: false));
    }
}
|
||||
@@ -0,0 +1,205 @@
|
||||
using NATS.Server.JetStream.Cluster;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Cluster;
|
||||
|
||||
/// <summary>
/// Tests for MetaSnapshotCodec: encode/decode round-trip, S2 compression, versioning.
/// Go reference: jetstream_cluster.go:2075-2145.
/// </summary>
public class MetaSnapshotCodecTests
{
    [Fact]
    public void Encode_decode_round_trips()
    {
        // Go reference: jetstream_cluster.go encodeMetaSnapshot/decodeMetaSnapshot round-trip
        var streamA = new StreamAssignment
        {
            StreamName = "stream-A",
            Group = new RaftGroup { Name = "rg-a", Peers = ["n1", "n2", "n3"] },
            ConfigJson = """{"subjects":["foo.>"]}""",
        };
        var streamB = new StreamAssignment
        {
            StreamName = "stream-B",
            Group = new RaftGroup { Name = "rg-b", Peers = ["n1", "n2"] },
            ConfigJson = """{"subjects":["bar.>"]}""",
        };
        // Attach a consumer to stream-B so consumers survive the round-trip too.
        streamB.Consumers["con-1"] = new ConsumerAssignment
        {
            ConsumerName = "con-1",
            StreamName = "stream-B",
            Group = new RaftGroup { Name = "rg-c1", Peers = ["n1", "n2"] },
        };

        var original = new Dictionary<string, StreamAssignment>
        {
            ["stream-A"] = streamA,
            ["stream-B"] = streamB,
        };

        var payload = MetaSnapshotCodec.Encode(original);
        payload.ShouldNotBeEmpty();

        var restored = MetaSnapshotCodec.Decode(payload);
        restored.Count.ShouldBe(2);
        restored["stream-A"].StreamName.ShouldBe("stream-A");
        restored["stream-A"].Group.Peers.Count.ShouldBe(3);
        restored["stream-B"].Consumers.Count.ShouldBe(1);
        restored["stream-B"].Consumers["con-1"].ConsumerName.ShouldBe("con-1");
    }

    [Fact]
    public void Encoded_snapshot_is_compressed()
    {
        // Go reference: jetstream_cluster.go S2 compression of meta snapshots
        var assignments = new Dictionary<string, StreamAssignment>();
        for (var index = 0; index < 100; index++)
        {
            assignments[$"stream-{index}"] = new StreamAssignment
            {
                StreamName = $"stream-{index}",
                Group = new RaftGroup { Name = $"rg-{index}", Peers = ["n1", "n2", "n3"] },
                ConfigJson = """{"subjects":["test.>"]}""",
            };
        }

        var compressed = MetaSnapshotCodec.Encode(assignments);
        var rawJson = System.Text.Json.JsonSerializer.SerializeToUtf8Bytes(assignments);

        // S2 compressed + 2-byte version header should be smaller than raw JSON
        compressed.Length.ShouldBeLessThan(rawJson.Length);
    }

    [Fact]
    public void Empty_snapshot_round_trips()
    {
        // Go reference: jetstream_cluster.go decodeMetaSnapshot handles empty map
        var payload = MetaSnapshotCodec.Encode(new Dictionary<string, StreamAssignment>());

        MetaSnapshotCodec.Decode(payload).ShouldBeEmpty();
    }

    [Fact]
    public void Versioned_format_rejects_unknown_version()
    {
        // Go reference: jetstream_cluster.go version check in decodeMetaSnapshot
        byte[] unknownVersion = [0xFF, 0xFF, 0, 0]; // version 65535

        Should.Throw<InvalidOperationException>(() => MetaSnapshotCodec.Decode(unknownVersion));
    }

    [Fact]
    public void Decode_rejects_too_short_input()
    {
        // Go reference: jetstream_cluster.go guard against truncated snapshot
        Should.Throw<InvalidOperationException>(() => MetaSnapshotCodec.Decode([0x01]));
    }

    [Fact]
    public void Encoded_snapshot_begins_with_version_one_header()
    {
        // Go reference: jetstream_cluster.go:2075 — versioned header allows future format evolution
        var assignments = new Dictionary<string, StreamAssignment>
        {
            ["s1"] = new StreamAssignment
            {
                StreamName = "s1",
                Group = new RaftGroup { Name = "g1", Peers = ["n1"] },
            },
        };

        var payload = MetaSnapshotCodec.Encode(assignments);

        // Little-endian version 1: bytes [0x01, 0x00]
        payload[0].ShouldBe((byte)0x01);
        payload[1].ShouldBe((byte)0x00);
    }

    [Fact]
    public void Round_trip_preserves_all_stream_assignment_fields()
    {
        // Go reference: jetstream_cluster.go streamAssignment struct fields preserved across snapshot
        var createdAt = new DateTime(2025, 6, 15, 12, 0, 0, DateTimeKind.Utc);
        var original = new StreamAssignment
        {
            StreamName = "my-stream",
            Group = new RaftGroup
            {
                Name = "rg-main",
                Peers = ["peer-a", "peer-b", "peer-c"],
                StorageType = "memory",
                Cluster = "cluster-east",
                Preferred = "peer-a",
            },
            Created = createdAt,
            ConfigJson = """{"subjects":["events.>"],"storage":"memory"}""",
            SyncSubject = "$JS.SYNC.my-stream",
            Responded = true,
            Recovering = false,
            Reassigning = true,
        };
        var assignments = new Dictionary<string, StreamAssignment> { ["my-stream"] = original };

        var restored = MetaSnapshotCodec.Decode(MetaSnapshotCodec.Encode(assignments))["my-stream"];

        restored.StreamName.ShouldBe("my-stream");
        restored.Group.Name.ShouldBe("rg-main");
        restored.Group.Peers.ShouldBe(["peer-a", "peer-b", "peer-c"]);
        restored.Group.StorageType.ShouldBe("memory");
        restored.Group.Cluster.ShouldBe("cluster-east");
        restored.Group.Preferred.ShouldBe("peer-a");
        restored.Created.ShouldBe(createdAt);
        restored.ConfigJson.ShouldBe("""{"subjects":["events.>"],"storage":"memory"}""");
        restored.SyncSubject.ShouldBe("$JS.SYNC.my-stream");
        restored.Responded.ShouldBeTrue();
        restored.Recovering.ShouldBeFalse();
        restored.Reassigning.ShouldBeTrue();
    }

    [Fact]
    public void Round_trip_preserves_multiple_consumers_per_stream()
    {
        // Go reference: jetstream_cluster.go consumerAssignment map restored in snapshot
        var stream = new StreamAssignment
        {
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-mc", Peers = ["n1", "n2", "n3"] },
        };
        stream.Consumers["consumer-alpha"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-alpha",
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-alpha", Peers = ["n1"] },
            ConfigJson = """{"deliver_subject":"out.alpha"}""",
            Responded = true,
        };
        stream.Consumers["consumer-beta"] = new ConsumerAssignment
        {
            ConsumerName = "consumer-beta",
            StreamName = "multi-consumer-stream",
            Group = new RaftGroup { Name = "rg-beta", Peers = ["n2", "n3"] },
            Recovering = true,
        };

        var assignments = new Dictionary<string, StreamAssignment> { ["multi-consumer-stream"] = stream };
        var restored = MetaSnapshotCodec.Decode(MetaSnapshotCodec.Encode(assignments))["multi-consumer-stream"];

        restored.Consumers.Count.ShouldBe(2);

        var alpha = restored.Consumers["consumer-alpha"];
        alpha.ConsumerName.ShouldBe("consumer-alpha");
        alpha.StreamName.ShouldBe("multi-consumer-stream");
        alpha.Group.Name.ShouldBe("rg-alpha");
        alpha.ConfigJson.ShouldBe("""{"deliver_subject":"out.alpha"}""");
        alpha.Responded.ShouldBeTrue();

        var beta = restored.Consumers["consumer-beta"];
        beta.ConsumerName.ShouldBe("consumer-beta");
        beta.Group.Peers.Count.ShouldBe(2);
        beta.Recovering.ShouldBeTrue();
    }
}
|
||||
@@ -0,0 +1,118 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for enhanced AckProcessor with RedeliveryTracker integration.
/// Go reference: consumer.go:4854 (processInboundAcks).
/// </summary>
public class AckProcessorEnhancedTests
{
    /// <summary>Builds a processor over a fresh tracker (maxDeliveries = 5).</summary>
    private static AckProcessor NewProcessor(int ackWaitMs = 30000)
        => new(new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: ackWaitMs));

    [Fact]
    public void ProcessAck_removes_from_pending()
    {
        var processor = NewProcessor();

        processor.Register(1, "deliver.subj");
        processor.PendingCount.ShouldBe(1);

        processor.ProcessAck(1);
        processor.PendingCount.ShouldBe(0);
    }

    [Fact]
    public void ProcessNak_schedules_redelivery()
    {
        var processor = NewProcessor();

        processor.Register(1, "deliver.subj");
        processor.ProcessNak(1, delayMs: 500);

        // A NAK'd message stays pending until it is actually redelivered.
        processor.PendingCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessTerm_removes_permanently()
    {
        var processor = NewProcessor();

        processor.Register(1, "deliver.subj");
        processor.ProcessTerm(1);

        processor.PendingCount.ShouldBe(0);
        processor.TerminatedCount.ShouldBe(1);
    }

    [Fact]
    public void ProcessProgress_resets_deadline_to_full_ack_wait()
    {
        // Go: consumer.go — processAckProgress (+WPI): resets deadline to UtcNow + ackWait.
        // The invariant is checked without wall-clock delays: after ProcessProgress the
        // deadline lies between (before + ackWait) and (after + ackWait + epsilon).
        const int ackWaitMs = 1000;
        var processor = NewProcessor(ackWaitMs);

        processor.Register(1, "deliver.subj");

        var beforeCall = DateTimeOffset.UtcNow;
        processor.ProcessProgress(1);
        var afterCall = DateTimeOffset.UtcNow;

        var deadline = processor.GetDeadline(1);

        deadline.ShouldBeGreaterThanOrEqualTo(beforeCall.AddMilliseconds(ackWaitMs));
        deadline.ShouldBeLessThanOrEqualTo(afterCall.AddMilliseconds(ackWaitMs + 50));
    }

    [Fact]
    public void MaxAckPending_blocks_new_registrations()
    {
        var tracker = new RedeliveryTracker(maxDeliveries: 5, ackWaitMs: 30000);
        var processor = new AckProcessor(tracker, maxAckPending: 2);

        processor.Register(1, "d.1");
        processor.Register(2, "d.2");

        processor.CanRegister().ShouldBeFalse();
    }

    [Fact]
    public void CanRegister_true_when_unlimited()
    {
        // maxAckPending = 0 means unlimited registrations.
        var processor = NewProcessor();

        processor.Register(1, "d.1");
        processor.CanRegister().ShouldBeTrue();
    }

    [Fact]
    public void ParseAckType_identifies_all_types()
    {
        AckProcessor.ParseAckType("+ACK"u8).ShouldBe(AckType.Ack);
        AckProcessor.ParseAckType("-NAK"u8).ShouldBe(AckType.Nak);
        AckProcessor.ParseAckType("+TERM"u8).ShouldBe(AckType.Term);
        AckProcessor.ParseAckType("+WPI"u8).ShouldBe(AckType.Progress);
    }

    [Fact]
    public void ParseAckType_returns_unknown_for_invalid()
    {
        AckProcessor.ParseAckType("GARBAGE"u8).ShouldBe(AckType.Unknown);
        AckProcessor.ParseAckType(""u8).ShouldBe(AckType.Unknown);
    }

    [Fact]
    public void GetDeadline_returns_min_for_unknown_sequence()
    {
        var processor = NewProcessor(ackWaitMs: 1000);

        // A sequence that was never registered has no deadline.
        processor.GetDeadline(999).ShouldBe(DateTimeOffset.MinValue);
    }
}
|
||||
@@ -0,0 +1,103 @@
|
||||
using NATS.Server.JetStream;
|
||||
using NATS.Server.JetStream.Models;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for consumer pause/resume with auto-resume timer.
/// Go reference: consumer.go (pause/resume).
/// </summary>
public class ConsumerPauseResumeTests
{
    private static ConsumerManager CreateManager() => new();

    /// <summary>Registers a durable consumer so pause/resume calls have a target.</summary>
    private static void CreateConsumer(ConsumerManager mgr, string stream, string name)
    {
        mgr.CreateOrUpdate(stream, new ConsumerConfig { DurableName = name });
    }

    [Fact]
    public void Pause_with_deadline_sets_paused()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "test-consumer");

        var until = DateTime.UtcNow.AddSeconds(5);
        mgr.Pause("test-stream", "test-consumer", until);

        mgr.IsPaused("test-stream", "test-consumer").ShouldBeTrue();
        mgr.GetPauseUntil("test-stream", "test-consumer").ShouldBe(until);
    }

    [Fact]
    public void Resume_clears_pause()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "test-consumer");

        mgr.Pause("test-stream", "test-consumer", DateTime.UtcNow.AddSeconds(5));
        mgr.Resume("test-stream", "test-consumer");

        mgr.IsPaused("test-stream", "test-consumer").ShouldBeFalse();
        mgr.GetPauseUntil("test-stream", "test-consumer").ShouldBeNull();
    }

    [Fact]
    public async Task Pause_auto_resumes_after_deadline()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "test-consumer");

        // Use a semaphore to synchronize on the actual timer callback rather than a blind delay.
        // The handler is guarded because the timer callback can race the end of the test:
        // a duplicate notification would throw SemaphoreFullException (maxCount is 1), and a
        // late firing after `using` disposes the semaphore would throw ObjectDisposedException.
        // Either exception, unhandled on the timer thread, would tear down the test process
        // instead of failing just this test.
        using var resumed = new SemaphoreSlim(0, 1);
        mgr.OnAutoResumed += (_, _) =>
        {
            try
            {
                resumed.Release();
            }
            catch (SemaphoreFullException)
            {
                // Already signalled by an earlier notification — nothing to do.
            }
            catch (ObjectDisposedException)
            {
                // Timer fired after the test finished and disposed the semaphore.
            }
        };

        mgr.Pause("test-stream", "test-consumer", DateTime.UtcNow.AddMilliseconds(100));

        var signalled = await resumed.WaitAsync(TimeSpan.FromSeconds(5));
        signalled.ShouldBeTrue("auto-resume timer did not fire within 5 seconds");

        mgr.IsPaused("test-stream", "test-consumer").ShouldBeFalse();
    }

    [Fact]
    public void IsPaused_returns_false_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.IsPaused("unknown", "unknown").ShouldBeFalse();
    }

    [Fact]
    public void GetPauseUntil_returns_null_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.GetPauseUntil("unknown", "unknown").ShouldBeNull();
    }

    [Fact]
    public void Resume_returns_false_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.Resume("unknown", "unknown").ShouldBeFalse();
    }

    [Fact]
    public void Pause_returns_false_for_unknown_consumer()
    {
        var mgr = CreateManager();
        mgr.Pause("unknown", "unknown", DateTime.UtcNow.AddSeconds(5)).ShouldBeFalse();
    }

    [Fact]
    public void IsPaused_auto_resumes_expired_deadline()
    {
        var mgr = CreateManager();
        CreateConsumer(mgr, "test-stream", "c1");

        // Pause with a deadline already in the past.
        mgr.Pause("test-stream", "c1", DateTime.UtcNow.AddMilliseconds(-100));

        // IsPaused must detect the expired deadline and auto-resume lazily.
        mgr.IsPaused("test-stream", "c1").ShouldBeFalse();
    }
}
|
||||
@@ -0,0 +1,84 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for priority group pin ID management.
/// Go reference: consumer.go (setPinnedTimer, assignNewPinId).
/// </summary>
public class PriorityGroupPinningTests
{
    /// <summary>Builds a manager with a single registered consumer in the given group.</summary>
    private static PriorityGroupManager NewManagerWith(string group, string consumer)
    {
        var mgr = new PriorityGroupManager();
        mgr.Register(group, consumer, priority: 0);
        return mgr;
    }

    [Fact]
    public void AssignPinId_generates_unique_ids()
    {
        var mgr = NewManagerWith("group-1", "consumer-a");

        var first = mgr.AssignPinId("group-1", "consumer-a");
        var second = mgr.AssignPinId("group-1", "consumer-a");

        first.ShouldNotBeNullOrEmpty();
        second.ShouldNotBeNullOrEmpty();
        // Every assignment mints a fresh pin.
        first.ShouldNotBe(second);
    }

    [Fact]
    public void ValidatePinId_accepts_current()
    {
        var mgr = NewManagerWith("group-1", "consumer-a");

        var current = mgr.AssignPinId("group-1", "consumer-a");

        mgr.ValidatePinId("group-1", current).ShouldBeTrue();
    }

    [Fact]
    public void ValidatePinId_rejects_expired()
    {
        var mgr = NewManagerWith("group-1", "consumer-a");

        var stale = mgr.AssignPinId("group-1", "consumer-a");
        var current = mgr.AssignPinId("group-1", "consumer-a"); // replaces the first pin

        mgr.ValidatePinId("group-1", stale).ShouldBeFalse();
        mgr.ValidatePinId("group-1", current).ShouldBeTrue();
    }

    [Fact]
    public void UnassignPinId_clears()
    {
        var mgr = NewManagerWith("group-1", "consumer-a");
        var pin = mgr.AssignPinId("group-1", "consumer-a");

        mgr.UnassignPinId("group-1");

        mgr.ValidatePinId("group-1", pin).ShouldBeFalse();
    }

    [Fact]
    public void ValidatePinId_returns_false_for_unknown_group()
    {
        new PriorityGroupManager().ValidatePinId("unknown", "any-pin").ShouldBeFalse();
    }

    [Fact]
    public void UnassignPinId_noop_for_unknown_group()
    {
        var mgr = new PriorityGroupManager();

        // Unassigning a group that was never registered must be a silent no-op.
        Should.NotThrow(() => mgr.UnassignPinId("unknown"));
    }

    [Fact]
    public void PinId_is_22_chars()
    {
        var mgr = NewManagerWith("g1", "c1");

        mgr.AssignPinId("g1", "c1").Length.ShouldBe(22);
    }
}
|
||||
@@ -0,0 +1,113 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for the new PriorityQueue-based RedeliveryTracker features.
/// Go reference: consumer.go (rdq redelivery queue).
/// </summary>
public class RedeliveryTrackerPriorityQueueTests
{
    /// <summary>Builds a tracker; the backoff schedule is only supplied when given.</summary>
    private static RedeliveryTracker NewTracker(int maxDeliveries = 5, int ackWaitMs = 1000, long[]? backoffMs = null)
        => backoffMs is null
            ? new RedeliveryTracker(maxDeliveries: maxDeliveries, ackWaitMs: ackWaitMs)
            : new RedeliveryTracker(maxDeliveries: maxDeliveries, ackWaitMs: ackWaitMs, backoffMs: backoffMs);

    [Fact]
    public void Schedule_and_get_due_returns_expired()
    {
        var tracker = NewTracker();

        // One deadline already in the past, one far in the future.
        tracker.Schedule(1, DateTimeOffset.UtcNow.AddMilliseconds(-100));
        tracker.Schedule(2, DateTimeOffset.UtcNow.AddSeconds(60));

        var due = tracker.GetDue(DateTimeOffset.UtcNow).ToList();

        due.Count.ShouldBe(1);
        due.Single().ShouldBe(1UL);
    }

    [Fact]
    public void Acknowledge_removes_from_queue()
    {
        var tracker = NewTracker();
        tracker.Schedule(1, DateTimeOffset.UtcNow.AddMilliseconds(-100));

        tracker.Acknowledge(1);

        tracker.GetDue(DateTimeOffset.UtcNow).ToList().ShouldBeEmpty();
    }

    [Fact]
    public void IsMaxDeliveries_returns_true_at_threshold()
    {
        var tracker = NewTracker(maxDeliveries: 3);

        // Two deliveries: still under the limit of 3.
        tracker.IncrementDeliveryCount(1);
        tracker.IncrementDeliveryCount(1);
        tracker.IsMaxDeliveries(1).ShouldBeFalse();

        // The third delivery reaches the threshold.
        tracker.IncrementDeliveryCount(1);
        tracker.IsMaxDeliveries(1).ShouldBeTrue();
    }

    [Fact]
    public void Backoff_schedule_uses_delivery_count()
    {
        var tracker = NewTracker(maxDeliveries: 10, backoffMs: new long[] { 100, 500, 2000 });

        tracker.GetBackoffDelay(deliveryCount: 1).ShouldBe(100L);  // first redelivery
        tracker.GetBackoffDelay(deliveryCount: 2).ShouldBe(500L);  // second
        tracker.GetBackoffDelay(deliveryCount: 4).ShouldBe(2000L); // past the schedule: last value sticks
    }

    [Fact]
    public void GetDue_returns_in_deadline_order()
    {
        var tracker = NewTracker();
        var now = DateTimeOffset.UtcNow;

        // Deliberately scheduled out of order.
        tracker.Schedule(3, now.AddMilliseconds(-300));
        tracker.Schedule(1, now.AddMilliseconds(-100));
        tracker.Schedule(2, now.AddMilliseconds(-200));

        var due = tracker.GetDue(now).ToList();

        // Earliest deadline first: 3, then 2, then 1.
        due.Count.ShouldBe(3);
        due[0].ShouldBe(3UL);
        due[1].ShouldBe(2UL);
        due[2].ShouldBe(1UL);
    }

    [Fact]
    public void GetBackoffDelay_with_no_backoff_returns_ackWait()
    {
        var tracker = NewTracker(ackWaitMs: 2000);

        tracker.GetBackoffDelay(1).ShouldBe(2000L);
        tracker.GetBackoffDelay(5).ShouldBe(2000L);
    }

    [Fact]
    public void IncrementDeliveryCount_for_untracked_seq_starts_at_one()
    {
        var tracker = NewTracker();

        tracker.IncrementDeliveryCount(42);

        // The first increment yields count = 1, well below maxDeliveries = 5.
        tracker.IsMaxDeliveries(42).ShouldBeFalse();
    }

    [Fact]
    public void Acknowledge_also_clears_delivery_count()
    {
        var tracker = NewTracker(maxDeliveries: 3);
        tracker.IncrementDeliveryCount(1);
        tracker.IncrementDeliveryCount(1);

        tracker.Acknowledge(1);

        // Acknowledging wipes the delivery count along with the schedule entry.
        tracker.IsMaxDeliveries(1).ShouldBeFalse();
    }
}
|
||||
@@ -0,0 +1,116 @@
|
||||
using NATS.Server.JetStream.Consumers;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Consumers;
|
||||
|
||||
/// <summary>
/// Tests for WaitingRequestQueue FIFO queue with expiry and batch/byte tracking.
/// Go reference: consumer.go processNextMsgRequest.
/// </summary>
public class WaitingRequestQueueTests
{
    /// <summary>Builds a pull request that expires one minute from now unless overridden.</summary>
    private static PullRequest NewRequest(
        string replyTo,
        int batch = 10,
        long maxBytes = 0,
        DateTimeOffset? expires = null,
        bool noWait = false)
        => new(replyTo,
               Batch: batch,
               MaxBytes: maxBytes,
               Expires: expires ?? DateTimeOffset.UtcNow.AddMinutes(1),
               NoWait: noWait);

    [Fact]
    public void Enqueue_and_dequeue_fifo()
    {
        var queue = new WaitingRequestQueue();
        queue.Enqueue(NewRequest("reply.1", batch: 10));
        queue.Enqueue(NewRequest("reply.2", batch: 5));

        queue.Count.ShouldBe(2);

        // FIFO: the first request enqueued comes out first.
        var first = queue.TryDequeue();
        first.ShouldNotBeNull();
        first.ReplyTo.ShouldBe("reply.1");
    }

    [Fact]
    public void TryDequeue_returns_null_when_empty()
    {
        var queue = new WaitingRequestQueue();

        queue.TryDequeue().ShouldBeNull();
        queue.IsEmpty.ShouldBeTrue();
    }

    [Fact]
    public void Expired_requests_are_removed()
    {
        var queue = new WaitingRequestQueue();
        queue.Enqueue(NewRequest("expired", expires: DateTimeOffset.UtcNow.AddMilliseconds(-100)));
        queue.Enqueue(NewRequest("valid"));

        queue.RemoveExpired(DateTimeOffset.UtcNow);

        queue.Count.ShouldBe(1);
        queue.TryDequeue()!.ReplyTo.ShouldBe("valid");
    }

    [Fact]
    public void NoWait_request_returns_immediately_when_empty()
    {
        var queue = new WaitingRequestQueue();
        queue.Enqueue(NewRequest("nowait", noWait: true));

        var req = queue.TryDequeue();

        req.ShouldNotBeNull();
        req.NoWait.ShouldBeTrue();
    }

    [Fact]
    public void MaxBytes_tracks_accumulation()
    {
        var queue = new WaitingRequestQueue();
        queue.Enqueue(NewRequest("mb", batch: 100, maxBytes: 1024));

        var dequeued = queue.TryDequeue()!;
        dequeued.MaxBytes.ShouldBe(1024L);
        dequeued.RemainingBytes.ShouldBe(1024L);

        // Consuming part of the budget leaves the remainder available.
        dequeued.ConsumeBytes(256);
        dequeued.RemainingBytes.ShouldBe(768L);
        dequeued.IsExhausted.ShouldBeFalse();

        // Overshooting the remaining budget exhausts the request.
        dequeued.ConsumeBytes(800);
        dequeued.IsExhausted.ShouldBeTrue();
    }

    [Fact]
    public void Batch_decrements_on_delivery()
    {
        var queue = new WaitingRequestQueue();
        queue.Enqueue(NewRequest("batch", batch: 3));

        var dequeued = queue.TryDequeue()!;
        dequeued.RemainingBatch.ShouldBe(3);

        dequeued.ConsumeBatch();
        dequeued.RemainingBatch.ShouldBe(2);

        // Draining the remaining batch exhausts the request.
        dequeued.ConsumeBatch();
        dequeued.ConsumeBatch();
        dequeued.IsExhausted.ShouldBeTrue();
    }

    [Fact]
    public void RemoveExpired_handles_all_expired()
    {
        var queue = new WaitingRequestQueue();
        queue.Enqueue(NewRequest("a", batch: 1, expires: DateTimeOffset.UtcNow.AddMilliseconds(-100)));
        queue.Enqueue(NewRequest("b", batch: 1, expires: DateTimeOffset.UtcNow.AddMilliseconds(-50)));

        queue.RemoveExpired(DateTimeOffset.UtcNow);

        queue.Count.ShouldBe(0);
        queue.IsEmpty.ShouldBeTrue();
    }

    [Fact]
    public void PinId_is_stored()
    {
        var queue = new WaitingRequestQueue();
        queue.Enqueue(new PullRequest("pin", Batch: 1, MaxBytes: 0, Expires: DateTimeOffset.UtcNow.AddMinutes(1), NoWait: false, PinId: "pin-123"));

        queue.TryDequeue()!.PinId.ShouldBe("pin-123");
    }
}
|
||||
91
tests/NATS.Server.Tests/JetStream/InterestRetentionTests.cs
Normal file
91
tests/NATS.Server.Tests/JetStream/InterestRetentionTests.cs
Normal file
@@ -0,0 +1,91 @@
|
||||
using NATS.Server.JetStream;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream;
|
||||
|
||||
/// <summary>
/// Tests for InterestRetentionPolicy per-consumer ack tracking.
/// Go reference: stream.go checkInterestState/noInterest.
/// </summary>
public class InterestRetentionTests
{
    /// <summary>Builds a policy with the given (consumer, filter) registrations applied.</summary>
    private static InterestRetentionPolicy NewPolicy(params (string Consumer, string Filter)[] interests)
    {
        var policy = new InterestRetentionPolicy();
        foreach (var (consumer, filter) in interests)
            policy.RegisterInterest(consumer, filter);
        return policy;
    }

    [Fact]
    public void ShouldRetain_true_when_consumers_have_not_acked()
    {
        var policy = NewPolicy(("consumer-A", "orders.>"), ("consumer-B", "orders.>"));

        policy.ShouldRetain(1, "orders.new").ShouldBeTrue();
    }

    [Fact]
    public void ShouldRetain_false_when_all_consumers_acked()
    {
        var policy = NewPolicy(("consumer-A", "orders.>"), ("consumer-B", "orders.>"));

        // One ack out of two interested consumers is not enough.
        policy.AcknowledgeDelivery("consumer-A", 1);
        policy.ShouldRetain(1, "orders.new").ShouldBeTrue();

        // Once every interested consumer has acked, the message may be dropped.
        policy.AcknowledgeDelivery("consumer-B", 1);
        policy.ShouldRetain(1, "orders.new").ShouldBeFalse();
    }

    [Fact]
    public void ShouldRetain_ignores_consumers_without_interest()
    {
        // consumer-B only watches billing.> and therefore does not count for orders.
        var policy = NewPolicy(("consumer-A", "orders.>"), ("consumer-B", "billing.>"));

        policy.AcknowledgeDelivery("consumer-A", 1);

        policy.ShouldRetain(1, "orders.new").ShouldBeFalse();
    }

    [Fact]
    public void UnregisterInterest_removes_consumer()
    {
        var policy = NewPolicy(("consumer-A", "x.>"), ("consumer-B", "x.>"));

        policy.UnregisterInterest("consumer-B");

        // With B gone, A's ack alone releases the message.
        policy.AcknowledgeDelivery("consumer-A", 1);
        policy.ShouldRetain(1, "x.y").ShouldBeFalse();
    }

    [Fact]
    public void ShouldRetain_false_when_no_consumers_registered()
    {
        new InterestRetentionPolicy().ShouldRetain(1, "any.subject").ShouldBeFalse();
    }

    [Fact]
    public void Multiple_sequences_tracked_independently()
    {
        var policy = NewPolicy(("c1", "x.>"));

        policy.AcknowledgeDelivery("c1", 1);

        policy.ShouldRetain(1, "x.y").ShouldBeFalse();
        policy.ShouldRetain(2, "x.y").ShouldBeTrue(); // sequence 2 was never acked
    }

    [Fact]
    public void ConsumerCount_tracks_registrations()
    {
        var policy = new InterestRetentionPolicy();
        policy.ConsumerCount.ShouldBe(0);

        policy.RegisterInterest("c1", "x.>");
        policy.RegisterInterest("c2", "y.>");
        policy.ConsumerCount.ShouldBe(2);

        policy.UnregisterInterest("c1");
        policy.ConsumerCount.ShouldBe(1);
    }
}
|
||||
@@ -0,0 +1,243 @@
|
||||
// Reference: golang/nats-server/server/filestore.go:5783-5842
|
||||
// Tests for Task 4: Crash Recovery Enhancement — FlushAllPending and WriteStreamState.
|
||||
// Go parity:
|
||||
// TestFileStoreSyncIntervals → FlushAllPending_flushes_active_block
|
||||
// TestFileStoreWriteFullStateBasics → FlushAllPending_writes_stream_state_file
|
||||
// TestFileStoreTtlWheelExpiry (recovery variant) → Recovery_rebuilds_ttl_and_expires_old
|
||||
// TestFileStoreBitRot (block tail truncation variant) → Recovery_handles_truncated_block
|
||||
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for <see cref="FileStore.FlushAllPending"/> and stream state checkpoint writes.
|
||||
/// Verifies that buffered block data is flushed to disk, that an atomic stream.state
|
||||
/// checkpoint file is written, that TTL recovery works across a restart, and that
|
||||
/// recovery is graceful when the tail of a block file has been truncated (simulating
|
||||
/// a crash mid-write).
|
||||
/// Reference: golang/nats-server/server/filestore.go:5783 (flushPendingWritesUnlocked).
|
||||
/// </summary>
|
||||
public sealed class FileStoreCrashRecoveryTests : IDisposable
|
||||
{
|
||||
private readonly string _root;
|
||||
|
||||
public FileStoreCrashRecoveryTests()
{
    // Unique temp root per test-class instance so parallel runs never collide.
    var rootName = $"nats-js-crash-{Guid.NewGuid():N}";
    _root = Path.Combine(Path.GetTempPath(), rootName);
    Directory.CreateDirectory(_root);
}
|
||||
|
||||
public void Dispose()
{
    // Best-effort cleanup: a file still held open (e.g. when a failing test did not
    // dispose its store, which keeps the file locked on Windows) must not turn the
    // real assertion failure into an opaque teardown crash.
    try
    {
        if (Directory.Exists(_root))
            Directory.Delete(_root, recursive: true);
    }
    catch (IOException)
    {
        // Leave the temp directory behind; the OS temp cleanup will reclaim it.
    }
    catch (UnauthorizedAccessException)
    {
        // Same as above — do not mask the test outcome.
    }
}
|
||||
|
||||
/// <summary>Creates (and opens) a FileStore rooted at <c>_root/subDir</c>.</summary>
private FileStore CreateStore(string subDir, FileStoreOptions? opts = null)
{
    var directory = Path.Combine(_root, subDir);
    Directory.CreateDirectory(directory);

    var options = opts ?? new FileStoreOptions();
    options.Directory = directory;
    return new FileStore(options);
}
|
||||
|
||||
/// <summary>Path of the store directory for <paramref name="subDir"/> (no creation).</summary>
private string StoreDir(string subDir)
{
    return Path.Combine(_root, subDir);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FlushAllPending — block flush
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreSyncIntervals (filestore_test.go) — verifies that pending writes
// are flushed to disk and the .blk file is non-empty after FlushAllPending.
[Fact]
public void FlushAllPending_flushes_active_block()
{
    const string sub = "flush-block";
    var dir = StoreDir(sub);

    using var store = CreateStore(sub);
    store.StoreMsg("events.a", null, "payload-for-flush"u8.ToArray(), 0L);

    store.FlushAllPending();

    // The active block must now exist on disk with its record bytes written out.
    var blockFiles = Directory.GetFiles(dir, "*.blk");
    blockFiles.Length.ShouldBeGreaterThanOrEqualTo(1);
    blockFiles.All(f => new FileInfo(f).Length > 0).ShouldBeTrue(
        "every .blk file should contain at least the record bytes after a flush");
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// FlushAllPending — stream.state checkpoint
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreWriteFullStateBasics (filestore_test.go:5461) — verifies that
// WriteStreamState creates a valid, atomic stream.state checkpoint file.
[Fact]
public void FlushAllPending_writes_stream_state_file()
{
    const string sub = "state-file";
    var dir = StoreDir(sub);

    using var store = CreateStore(sub);
    store.StoreMsg("orders.new", null, "order-1"u8.ToArray(), 0L);
    store.StoreMsg("orders.new", null, "order-2"u8.ToArray(), 0L);
    store.StoreMsg("events.a", null, "event-1"u8.ToArray(), 0L);

    store.FlushAllPending();

    // The checkpoint must exist and the temp file must be renamed away (atomic write).
    var statePath = Path.Combine(dir, "stream.state");
    File.Exists(statePath).ShouldBeTrue("stream.state checkpoint must exist after FlushAllPending");
    File.Exists(statePath + ".tmp").ShouldBeFalse("stream.state.tmp must be renamed away after atomic write");

    // The checkpoint is JSON carrying the sequence range and message count.
    using var doc = JsonDocument.Parse(File.ReadAllText(statePath));
    var root = doc.RootElement;

    root.TryGetProperty("FirstSeq", out var firstSeq).ShouldBeTrue("stream.state must contain FirstSeq");
    root.TryGetProperty("LastSeq", out var lastSeq).ShouldBeTrue("stream.state must contain LastSeq");
    root.TryGetProperty("Messages", out var messages).ShouldBeTrue("stream.state must contain Messages");

    firstSeq.GetUInt64().ShouldBe(1UL);
    lastSeq.GetUInt64().ShouldBe(3UL);
    messages.GetUInt64().ShouldBe(3UL);
}
|
||||
|
||||
// Go: FlushAllPending is idempotent — calling it twice must not throw and must
// overwrite the previous state file with the latest state.
[Fact]
public void FlushAllPending_is_idempotent()
{
    const string sub = "flush-idempotent";
    var dir = StoreDir(sub);

    using var store = CreateStore(sub);

    store.StoreMsg("foo", null, "msg-1"u8.ToArray(), 0L);
    store.FlushAllPending();

    store.StoreMsg("foo", null, "msg-2"u8.ToArray(), 0L);
    store.FlushAllPending();

    // The checkpoint must reflect the second flush: two messages, sequences 1..2.
    var statePath = Path.Combine(dir, "stream.state");
    File.Exists(statePath).ShouldBeTrue();

    using var doc = JsonDocument.Parse(File.ReadAllText(statePath));
    doc.RootElement.GetProperty("Messages").GetUInt64().ShouldBe(2UL);
    doc.RootElement.GetProperty("LastSeq").GetUInt64().ShouldBe(2UL);
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// TTL wheel rebuild across restart
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreRecoveryReregistersTtls (filestore_test.go) — verifies that
// messages whose timestamps pre-date the MaxAgeMs cutoff are pruned during recovery.
// No Task.Delay is needed: messages are written directly to a MsgBlock with a
// timestamp 1 hour in the past, so they are already expired when FileStore opens.
[Fact]
public async Task Recovery_rebuilds_ttl_and_expires_old()
{
    const string sub = "ttl-recovery";
    var dir = StoreDir(sub);
    Directory.CreateDirectory(dir);

    // Backdate the messages by one hour (converted to Unix nanoseconds) so they are
    // already past the 60-second MaxAgeMs cutoff the moment the store opens for recovery.
    var backdatedNs = new DateTimeOffset(DateTime.UtcNow.AddHours(-1))
        .ToUnixTimeMilliseconds() * 1_000_000L;

    using (var block = MsgBlock.Create(0, dir, maxBytes: 64 * 1024, firstSequence: 1))
    {
        block.WriteAt(1, "expire.me", ReadOnlyMemory<byte>.Empty, "short-lived"u8.ToArray(), backdatedNs);
        block.WriteAt(2, "expire.me", ReadOnlyMemory<byte>.Empty, "short-lived-2"u8.ToArray(), backdatedNs);
        block.Flush();
    }

    // Opening with MaxAgeMs = 60s prunes both backdated messages during recovery.
    await using var recovered = CreateStore(sub, new FileStoreOptions { MaxAgeMs = 60_000 });

    var stateAfter = await recovered.GetStateAsync(default);
    stateAfter.Messages.ShouldBe(0UL, "expired messages must be pruned during recovery");
    stateAfter.FirstSeq.ShouldBe(0UL, "first sequence must be 0 when the store is empty after full expiry");
    stateAfter.LastSeq.ShouldBe(0UL, "last sequence must be 0 when the store is empty after full expiry");
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Truncated block recovery
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: TestFileStoreErrPartialLoad (filestore_test.go) — verifies that recovery
|
||||
// handles a block whose tail has been truncated (simulating a crash mid-write).
|
||||
// The earlier records in the block must still be recoverable; the truncated tail
|
||||
// must be silently skipped.
|
||||
[Fact]
|
||||
public async Task Recovery_handles_truncated_block()
|
||||
{
|
||||
// Arrange: write a few messages and flush so the .blk file has valid data.
|
||||
const string sub = "truncated-block";
|
||||
var dir = StoreDir(sub);
|
||||
|
||||
await using (var store = CreateStore(sub, new FileStoreOptions { BlockSizeBytes = 4096 }))
|
||||
{
|
||||
for (var i = 0; i < 5; i++)
|
||||
store.StoreMsg("events", null, Encoding.UTF8.GetBytes($"msg-{i}"), 0L);
|
||||
|
||||
// Flush to ensure data is on disk before we close.
|
||||
store.FlushAllPending();
|
||||
}
|
||||
|
||||
// Simulate a crash mid-write by truncating the .blk file by a few bytes at
|
||||
// the tail. This leaves all but the last record in a valid state.
|
||||
var blkFile = Directory.GetFiles(dir, "*.blk").OrderBy(f => f).First();
|
||||
var originalLength = new FileInfo(blkFile).Length;
|
||||
originalLength.ShouldBeGreaterThan(4, "block file must have content before truncation");
|
||||
|
||||
// Remove the last 4 bytes — simulates a torn write at the file tail.
|
||||
using (var fs = new FileStream(blkFile, FileMode.Open, FileAccess.Write))
|
||||
{
|
||||
var newLength = Math.Max(0, originalLength - 4);
|
||||
fs.SetLength(newLength);
|
||||
}
|
||||
|
||||
// Act: re-open — recovery must not throw and must load what it can.
|
||||
// The key assertion is that recovery does not throw; it may lose the last
|
||||
// partial record but must preserve earlier complete records.
|
||||
Exception? thrown = null;
|
||||
FileStore? recovered = null;
|
||||
try
|
||||
{
|
||||
recovered = CreateStore(sub, new FileStoreOptions { BlockSizeBytes = 4096 });
|
||||
var state = await recovered.GetStateAsync(default);
|
||||
|
||||
// At least some messages must survive (the truncation only hit the tail).
|
||||
state.Messages.ShouldBeGreaterThanOrEqualTo(0UL,
|
||||
"recovery from a truncated block must not throw and must expose the surviving messages");
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
thrown = ex;
|
||||
}
|
||||
finally
|
||||
{
|
||||
recovered?.Dispose();
|
||||
}
|
||||
|
||||
// InvalidDataException is reserved for integrity failures (wrong encryption key);
|
||||
// a tail truncation must be silently skipped during recovery.
|
||||
thrown.ShouldBeNull("recovery from a truncated block must not propagate exceptions");
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,294 @@
|
||||
// Reference: golang/nats-server/server/filestore.go
|
||||
// Tests in this file:
|
||||
// StoreRawMsg_stores_at_specified_sequence — IStreamStore.StoreRawMsg preserves caller seq/ts
|
||||
// LoadPrevMsg_returns_message_before_seq — IStreamStore.LoadPrevMsg backward scan
|
||||
// Type_returns_file — IStreamStore.Type() returns StorageType.File
|
||||
// Stop_prevents_further_writes — IStreamStore.Stop() sets _stopped flag
|
||||
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using StorageType = NATS.Server.JetStream.Models.StorageType;
|
||||
|
||||
namespace NATS.Server.Tests.JetStream.Storage;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for IStreamStore methods added to FileStore in Batch 1:
|
||||
/// StoreRawMsg, LoadPrevMsg, Type, and Stop.
|
||||
/// </summary>
|
||||
public sealed class FileStoreStreamStoreTests : IDisposable
|
||||
{
|
||||
private readonly string _root;
|
||||
|
||||
public FileStoreStreamStoreTests()
|
||||
{
|
||||
_root = Path.Combine(Path.GetTempPath(), $"nats-js-sstest-{Guid.NewGuid():N}");
|
||||
Directory.CreateDirectory(_root);
|
||||
}
|
||||
|
||||
public void Dispose()
|
||||
{
|
||||
// Best-effort cleanup of temp directory. If it fails (e.g. open handles on CI),
|
||||
// the OS will clean it up on the next reboot. Letting it throw would suppress
|
||||
// the real test failure so we absorb IO errors explicitly.
|
||||
if (!Directory.Exists(_root))
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
Directory.Delete(_root, recursive: true);
|
||||
}
|
||||
catch (IOException ex)
|
||||
{
|
||||
// Open file handles (common on Windows CI) — log and continue.
|
||||
Console.Error.WriteLine($"[FileStoreStreamStoreTests] Dispose: {ex.Message}");
|
||||
}
|
||||
catch (UnauthorizedAccessException ex)
|
||||
{
|
||||
// Read-only files left by the test — log and continue.
|
||||
Console.Error.WriteLine($"[FileStoreStreamStoreTests] Dispose: {ex.Message}");
|
||||
}
|
||||
}
|
||||
|
||||
private FileStore CreateStore(string subDir, FileStoreOptions? opts = null)
|
||||
{
|
||||
var dir = Path.Combine(_root, subDir);
|
||||
Directory.CreateDirectory(dir);
|
||||
var o = opts ?? new FileStoreOptions();
|
||||
o.Directory = dir;
|
||||
return new FileStore(o);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// StoreRawMsg
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: filestore.go storeRawMsg — caller specifies seq and ts; store must not
|
||||
// auto-increment, and _last must be updated to Math.Max(_last, seq).
|
||||
[Fact]
|
||||
public void StoreRawMsg_stores_at_specified_sequence()
|
||||
{
|
||||
using var store = CreateStore("raw-seq");
|
||||
IStreamStore ss = store;
|
||||
|
||||
var subject = "events.raw";
|
||||
var data = "hello raw"u8.ToArray();
|
||||
// Use a specific Unix nanosecond timestamp.
|
||||
var tsNs = new DateTimeOffset(2024, 6, 1, 12, 0, 0, TimeSpan.Zero).ToUnixTimeMilliseconds() * 1_000_000L;
|
||||
const ulong targetSeq = 42UL;
|
||||
|
||||
ss.StoreRawMsg(subject, null, data, targetSeq, tsNs, 0, false);
|
||||
|
||||
// Verify by loading the message back via LoadMsg.
|
||||
var sm = ss.LoadMsg(targetSeq, null);
|
||||
sm.Subject.ShouldBe(subject);
|
||||
sm.Sequence.ShouldBe(targetSeq);
|
||||
sm.Timestamp.ShouldBe(tsNs);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void StoreRawMsg_updates_last_watermark()
|
||||
{
|
||||
using var store = CreateStore("raw-wm");
|
||||
IStreamStore ss = store;
|
||||
|
||||
// Store a message at seq 100 — _last should become 100.
|
||||
ss.StoreRawMsg("foo", null, "x"u8.ToArray(), 100UL, 1_000_000L, 0, false);
|
||||
|
||||
var state = new StreamState();
|
||||
ss.FastState(ref state);
|
||||
state.LastSeq.ShouldBe(100UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void StoreRawMsg_does_not_decrement_last_for_lower_seq()
|
||||
{
|
||||
using var store = CreateStore("raw-order");
|
||||
IStreamStore ss = store;
|
||||
|
||||
// Write seq 50 first, then seq 30 (out-of-order replication scenario).
|
||||
ss.StoreRawMsg("foo", null, "x"u8.ToArray(), 50UL, 1_000_000L, 0, false);
|
||||
ss.StoreRawMsg("bar", null, "y"u8.ToArray(), 30UL, 2_000_000L, 0, false);
|
||||
|
||||
var state = new StreamState();
|
||||
ss.FastState(ref state);
|
||||
// _last should remain 50, not go down to 30.
|
||||
state.LastSeq.ShouldBe(50UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void StoreRawMsg_preserves_caller_timestamp()
|
||||
{
|
||||
using var store = CreateStore("raw-ts");
|
||||
IStreamStore ss = store;
|
||||
|
||||
var subject = "ts.test";
|
||||
var data = "payload"u8.ToArray();
|
||||
// A deterministic Unix nanosecond timestamp.
|
||||
var tsNs = 1_717_238_400_000_000_000L; // 2024-06-01 00:00:00 UTC in ns
|
||||
|
||||
ss.StoreRawMsg(subject, null, data, 7UL, tsNs, 0, false);
|
||||
|
||||
var sm = ss.LoadMsg(7UL, null);
|
||||
sm.Timestamp.ShouldBe(tsNs);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void StoreRawMsg_throws_after_stop()
|
||||
{
|
||||
using var store = CreateStore("raw-stop");
|
||||
IStreamStore ss = store;
|
||||
|
||||
ss.Stop();
|
||||
|
||||
Should.Throw<ObjectDisposedException>(() =>
|
||||
ss.StoreRawMsg("foo", null, "x"u8.ToArray(), 1UL, 1_000_000L, 0, false));
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// LoadPrevMsg
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: filestore.go LoadPrevMsg — walks backward from start-1 to first.
|
||||
[Fact]
|
||||
public void LoadPrevMsg_returns_message_before_seq()
|
||||
{
|
||||
using var store = CreateStore("prev-basic");
|
||||
IStreamStore ss = store;
|
||||
|
||||
// Write 3 messages at seqs 1, 2, 3.
|
||||
ss.StoreMsg("a", null, "msg1"u8.ToArray(), 0);
|
||||
ss.StoreMsg("b", null, "msg2"u8.ToArray(), 0);
|
||||
ss.StoreMsg("c", null, "msg3"u8.ToArray(), 0);
|
||||
|
||||
// LoadPrevMsg(3) should return seq 2.
|
||||
var sm = ss.LoadPrevMsg(3UL, null);
|
||||
sm.Sequence.ShouldBe(2UL);
|
||||
sm.Subject.ShouldBe("b");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void LoadPrevMsg_skips_deleted_message()
|
||||
{
|
||||
using var store = CreateStore("prev-skip");
|
||||
IStreamStore ss = store;
|
||||
|
||||
ss.StoreMsg("a", null, "msg1"u8.ToArray(), 0); // seq 1
|
||||
ss.StoreMsg("b", null, "msg2"u8.ToArray(), 0); // seq 2
|
||||
ss.StoreMsg("c", null, "msg3"u8.ToArray(), 0); // seq 3
|
||||
|
||||
// Delete seq 2 — LoadPrevMsg(3) must skip it and return seq 1.
|
||||
ss.RemoveMsg(2UL);
|
||||
|
||||
var sm = ss.LoadPrevMsg(3UL, null);
|
||||
sm.Sequence.ShouldBe(1UL);
|
||||
sm.Subject.ShouldBe("a");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void LoadPrevMsg_throws_when_no_message_before_seq()
|
||||
{
|
||||
using var store = CreateStore("prev-none");
|
||||
IStreamStore ss = store;
|
||||
|
||||
ss.StoreMsg("a", null, "msg1"u8.ToArray(), 0); // seq 1
|
||||
|
||||
// LoadPrevMsg(1) — nothing before seq 1.
|
||||
Should.Throw<KeyNotFoundException>(() => ss.LoadPrevMsg(1UL, null));
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void LoadPrevMsg_reuses_provided_container()
|
||||
{
|
||||
using var store = CreateStore("prev-reuse");
|
||||
IStreamStore ss = store;
|
||||
|
||||
ss.StoreMsg("x", null, "d1"u8.ToArray(), 0); // seq 1
|
||||
ss.StoreMsg("y", null, "d2"u8.ToArray(), 0); // seq 2
|
||||
|
||||
var container = new StoreMsg();
|
||||
var result = ss.LoadPrevMsg(2UL, container);
|
||||
|
||||
// Should return the same object reference.
|
||||
result.ShouldBeSameAs(container);
|
||||
container.Sequence.ShouldBe(1UL);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Type
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: filestore.go fileStore.Type — returns StorageType.File.
|
||||
[Fact]
|
||||
public void Type_returns_file()
|
||||
{
|
||||
using var store = CreateStore("type");
|
||||
IStreamStore ss = store;
|
||||
|
||||
ss.Type().ShouldBe(StorageType.File);
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Stop
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
// Go: filestore.go fileStore.Stop — flushes and marks as stopped.
|
||||
[Fact]
|
||||
public void Stop_prevents_further_writes_via_StoreMsg()
|
||||
{
|
||||
using var store = CreateStore("stop-storemsg");
|
||||
IStreamStore ss = store;
|
||||
|
||||
ss.StoreMsg("ok", null, "before"u8.ToArray(), 0);
|
||||
|
||||
ss.Stop();
|
||||
|
||||
Should.Throw<ObjectDisposedException>(() =>
|
||||
ss.StoreMsg("fail", null, "after"u8.ToArray(), 0));
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Stop_prevents_further_writes_via_AppendAsync()
|
||||
{
|
||||
using var store = CreateStore("stop-append");
|
||||
|
||||
await store.AppendAsync("ok", "before"u8.ToArray(), CancellationToken.None);
|
||||
|
||||
((IStreamStore)store).Stop();
|
||||
|
||||
await Should.ThrowAsync<ObjectDisposedException>(() =>
|
||||
store.AppendAsync("fail", "after"u8.ToArray(), CancellationToken.None).AsTask());
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Stop_is_idempotent()
|
||||
{
|
||||
using var store = CreateStore("stop-idem");
|
||||
IStreamStore ss = store;
|
||||
|
||||
ss.Stop();
|
||||
|
||||
// Second Stop() must not throw.
|
||||
var ex = Record.Exception(() => ss.Stop());
|
||||
ex.ShouldBeNull();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Stop_preserves_messages_on_disk()
|
||||
{
|
||||
var dir = Path.Combine(_root, "stop-persist");
|
||||
Directory.CreateDirectory(dir);
|
||||
|
||||
FileStore CreateWithDir() => new FileStore(new FileStoreOptions { Directory = dir });
|
||||
|
||||
// Write a message, stop the store.
|
||||
using (var store = CreateWithDir())
|
||||
{
|
||||
((IStreamStore)store).StoreMsg("saved", null, "payload"u8.ToArray(), 0);
|
||||
((IStreamStore)store).Stop();
|
||||
}
|
||||
|
||||
// Re-open and verify the message survived.
|
||||
using var recovered = CreateWithDir();
|
||||
var sm = ((IStreamStore)recovered).LoadMsg(1UL, null);
|
||||
sm.Subject.ShouldBe("saved");
|
||||
}
|
||||
}
|
||||
110
tests/NATS.Server.Tests/MirrorSourceRetryTests.cs
Normal file
110
tests/NATS.Server.Tests/MirrorSourceRetryTests.cs
Normal file
@@ -0,0 +1,110 @@
|
||||
using NSubstitute;
|
||||
using NATS.Server.JetStream.MirrorSource;
|
||||
using NATS.Server.JetStream.Models;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
// Go reference: server/stream.go:3478-3505 (calculateRetryBackoff),
|
||||
// server/stream.go:3125-3400 (setupMirrorConsumer retry logic)
|
||||
|
||||
public class MirrorSourceRetryTests
|
||||
{
|
||||
[Fact]
|
||||
public void Mirror_retry_uses_exponential_backoff()
|
||||
{
|
||||
// Go reference: server/stream.go:3478-3505 calculateRetryBackoff
|
||||
var mirror = MirrorCoordinatorTestHelper.Create();
|
||||
|
||||
mirror.RecordFailure();
|
||||
var delay1 = mirror.GetRetryDelay();
|
||||
delay1.ShouldBeGreaterThanOrEqualTo(TimeSpan.FromMilliseconds(250));
|
||||
|
||||
mirror.RecordFailure();
|
||||
var delay2 = mirror.GetRetryDelay();
|
||||
delay2.ShouldBeGreaterThan(delay1);
|
||||
|
||||
// Cap at max
|
||||
for (int i = 0; i < 20; i++) mirror.RecordFailure();
|
||||
var delayMax = mirror.GetRetryDelay();
|
||||
delayMax.ShouldBeLessThanOrEqualTo(TimeSpan.FromSeconds(30));
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Mirror_success_resets_backoff()
|
||||
{
|
||||
// Go reference: server/stream.go setupMirrorConsumer — success resets retry
|
||||
var mirror = MirrorCoordinatorTestHelper.Create();
|
||||
|
||||
for (int i = 0; i < 5; i++) mirror.RecordFailure();
|
||||
mirror.RecordSuccess();
|
||||
|
||||
var delay = mirror.GetRetryDelay();
|
||||
delay.ShouldBe(TimeSpan.FromMilliseconds(250));
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Mirror_tracks_sequence_gap()
|
||||
{
|
||||
// Go reference: server/stream.go:2863-3014 processInboundMirrorMsg — gap detection
|
||||
var mirror = MirrorCoordinatorTestHelper.Create();
|
||||
|
||||
mirror.RecordSourceSeq(1);
|
||||
mirror.RecordSourceSeq(2);
|
||||
mirror.RecordSourceSeq(5); // gap: 3, 4 missing
|
||||
|
||||
mirror.HasGap.ShouldBeTrue();
|
||||
mirror.GapStart.ShouldBe(3UL);
|
||||
mirror.GapEnd.ShouldBe(4UL);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Mirror_tracks_error_state()
|
||||
{
|
||||
// Go reference: server/stream.go mirror error state tracking
|
||||
var mirror = MirrorCoordinatorTestHelper.Create();
|
||||
|
||||
mirror.SetError("connection refused");
|
||||
mirror.HasError.ShouldBeTrue();
|
||||
mirror.ErrorMessage.ShouldBe("connection refused");
|
||||
|
||||
mirror.ClearError();
|
||||
mirror.HasError.ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Source_dedup_window_prunes_expired()
|
||||
{
|
||||
// Go reference: server/stream.go duplicate window pruning
|
||||
var source = SourceCoordinatorTestHelper.Create();
|
||||
|
||||
source.RecordMsgId("msg-1");
|
||||
source.RecordMsgId("msg-2");
|
||||
|
||||
source.IsDuplicate("msg-1").ShouldBeTrue();
|
||||
source.IsDuplicate("msg-3").ShouldBeFalse();
|
||||
|
||||
// Simulate time passing beyond dedup window
|
||||
source.PruneDedupWindow(DateTimeOffset.UtcNow.AddMinutes(5));
|
||||
source.IsDuplicate("msg-1").ShouldBeFalse();
|
||||
}
|
||||
}
|
||||
|
||||
public static class MirrorCoordinatorTestHelper
|
||||
{
|
||||
public static MirrorCoordinator Create()
|
||||
{
|
||||
var store = Substitute.For<IStreamStore>();
|
||||
return new MirrorCoordinator(store);
|
||||
}
|
||||
}
|
||||
|
||||
public static class SourceCoordinatorTestHelper
|
||||
{
|
||||
public static SourceCoordinator Create()
|
||||
{
|
||||
var store = Substitute.For<IStreamStore>();
|
||||
var config = new StreamSourceConfig { Name = "test-source", DuplicateWindowMs = 60_000 };
|
||||
return new SourceCoordinator(store, config);
|
||||
}
|
||||
}
|
||||
92
tests/NATS.Server.Tests/MqttPersistenceTests.cs
Normal file
92
tests/NATS.Server.Tests/MqttPersistenceTests.cs
Normal file
@@ -0,0 +1,92 @@
|
||||
using NSubstitute;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using NATS.Server.Mqtt;
|
||||
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
// Go reference: server/mqtt.go ($MQTT_msgs, $MQTT_sess, $MQTT_rmsgs JetStream streams)
|
||||
|
||||
public class MqttPersistenceTests
|
||||
{
|
||||
[Fact]
|
||||
public async Task Session_persists_across_restart()
|
||||
{
|
||||
// Go reference: server/mqtt.go mqttStoreSession — session survives restart
|
||||
var store = MqttSessionStoreTestHelper.CreateWithJetStream();
|
||||
|
||||
await store.ConnectAsync("client-1", cleanSession: false);
|
||||
store.AddSubscription("client-1", "topic/test", qos: 1);
|
||||
await store.SaveSessionAsync("client-1");
|
||||
|
||||
// Simulate restart — new store backed by the same IStreamStore
|
||||
var recovered = MqttSessionStoreTestHelper.CreateWithJetStream(store.BackingStore!);
|
||||
await recovered.ConnectAsync("client-1", cleanSession: false);
|
||||
|
||||
var subs = recovered.GetSubscriptions("client-1");
|
||||
subs.ShouldContainKey("topic/test");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Clean_session_deletes_existing()
|
||||
{
|
||||
// Go reference: server/mqtt.go cleanSession=true deletes saved state
|
||||
var store = MqttSessionStoreTestHelper.CreateWithJetStream();
|
||||
|
||||
await store.ConnectAsync("client-2", cleanSession: false);
|
||||
store.AddSubscription("client-2", "persist/me", qos: 1);
|
||||
await store.SaveSessionAsync("client-2");
|
||||
|
||||
// Reconnect with clean session
|
||||
await store.ConnectAsync("client-2", cleanSession: true);
|
||||
|
||||
var subs = store.GetSubscriptions("client-2");
|
||||
subs.ShouldBeEmpty();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Retained_message_survives_restart()
|
||||
{
|
||||
// Go reference: server/mqtt.go retained message persistence via JetStream
|
||||
var retained = MqttRetainedStoreTestHelper.CreateWithJetStream();
|
||||
|
||||
await retained.SetRetainedAsync("sensors/temp", "72.5"u8.ToArray());
|
||||
|
||||
// Simulate restart
|
||||
var recovered = MqttRetainedStoreTestHelper.CreateWithJetStream(retained.BackingStore!);
|
||||
var msg = await recovered.GetRetainedAsync("sensors/temp");
|
||||
|
||||
msg.ShouldNotBeNull();
|
||||
System.Text.Encoding.UTF8.GetString(msg).ShouldBe("72.5");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task Retained_message_cleared_with_empty_payload()
|
||||
{
|
||||
// Go reference: server/mqtt.go empty payload clears retained
|
||||
var retained = MqttRetainedStoreTestHelper.CreateWithJetStream();
|
||||
|
||||
await retained.SetRetainedAsync("sensors/temp", "72.5"u8.ToArray());
|
||||
await retained.SetRetainedAsync("sensors/temp", ReadOnlyMemory<byte>.Empty); // clear
|
||||
|
||||
var msg = await retained.GetRetainedAsync("sensors/temp");
|
||||
msg.ShouldBeNull();
|
||||
}
|
||||
}
|
||||
|
||||
public static class MqttSessionStoreTestHelper
|
||||
{
|
||||
public static MqttSessionStore CreateWithJetStream(IStreamStore? backingStore = null)
|
||||
{
|
||||
var store = backingStore ?? new MemStore();
|
||||
return new MqttSessionStore(store);
|
||||
}
|
||||
}
|
||||
|
||||
public static class MqttRetainedStoreTestHelper
|
||||
{
|
||||
public static MqttRetainedStore CreateWithJetStream(IStreamStore? backingStore = null)
|
||||
{
|
||||
var store = backingStore ?? new MemStore();
|
||||
return new MqttRetainedStore(store);
|
||||
}
|
||||
}
|
||||
285
tests/NATS.Server.Tests/Raft/RaftJointConsensusTests.cs
Normal file
285
tests/NATS.Server.Tests/Raft/RaftJointConsensusTests.cs
Normal file
@@ -0,0 +1,285 @@
|
||||
using NATS.Server.Raft;
|
||||
|
||||
namespace NATS.Server.Tests.Raft;
|
||||
|
||||
/// <summary>
|
||||
/// Tests for joint consensus membership changes per Raft paper Section 4.
|
||||
/// During a joint configuration transition a quorum requires majority from BOTH
|
||||
/// the old configuration (Cold) and the new configuration (Cnew).
|
||||
/// Go reference: raft.go joint consensus / two-phase membership transitions.
|
||||
/// </summary>
|
||||
public class RaftJointConsensusTests
|
||||
{
|
||||
// -- Helpers (self-contained, no shared TestHelpers class) --
|
||||
|
||||
private static (RaftNode[] nodes, InMemoryRaftTransport transport) CreateCluster(int size)
|
||||
{
|
||||
var transport = new InMemoryRaftTransport();
|
||||
var nodes = Enumerable.Range(1, size)
|
||||
.Select(i => new RaftNode($"n{i}", transport))
|
||||
.ToArray();
|
||||
foreach (var node in nodes)
|
||||
{
|
||||
transport.Register(node);
|
||||
node.ConfigureCluster(nodes);
|
||||
}
|
||||
return (nodes, transport);
|
||||
}
|
||||
|
||||
private static RaftNode ElectLeader(RaftNode[] nodes)
|
||||
{
|
||||
var candidate = nodes[0];
|
||||
candidate.StartElection(nodes.Length);
|
||||
foreach (var voter in nodes.Skip(1))
|
||||
candidate.ReceiveVote(voter.GrantVote(candidate.Term, candidate.Id), nodes.Length);
|
||||
return candidate;
|
||||
}
|
||||
|
||||
// -- BeginJointConsensus / InJointConsensus / JointNewMembers --
|
||||
|
||||
[Fact]
|
||||
public void BeginJointConsensus_sets_InJointConsensus_flag()
|
||||
{
|
||||
// Go reference: raft.go Section 4 — begin joint config
|
||||
var node = new RaftNode("n1");
|
||||
node.AddMember("n2");
|
||||
node.AddMember("n3");
|
||||
|
||||
node.InJointConsensus.ShouldBeFalse();
|
||||
|
||||
node.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
|
||||
node.InJointConsensus.ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void BeginJointConsensus_exposes_JointNewMembers()
|
||||
{
|
||||
// Go reference: raft.go Section 4 — Cnew accessible during joint phase
|
||||
var node = new RaftNode("n1");
|
||||
node.AddMember("n2");
|
||||
node.AddMember("n3");
|
||||
|
||||
node.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
|
||||
node.JointNewMembers.ShouldNotBeNull();
|
||||
node.JointNewMembers!.ShouldContain("n4");
|
||||
node.JointNewMembers.Count.ShouldBe(4);
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void BeginJointConsensus_adds_new_members_to_active_set()
|
||||
{
|
||||
// During joint consensus the active member set is the union of Cold and Cnew
|
||||
// so that entries are replicated to all participating nodes.
|
||||
// Go reference: raft.go Section 4 — joint config is union of Cold and Cnew.
|
||||
var node = new RaftNode("n1");
|
||||
node.AddMember("n2");
|
||||
node.AddMember("n3");
|
||||
|
||||
node.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
|
||||
node.Members.ShouldContain("n4");
|
||||
}
|
||||
|
||||
// -- CommitJointConsensus --
|
||||
|
||||
[Fact]
|
||||
public void CommitJointConsensus_clears_InJointConsensus_flag()
|
||||
{
|
||||
// Go reference: raft.go joint consensus commit finalizes Cnew
|
||||
var node = new RaftNode("n1");
|
||||
node.AddMember("n2");
|
||||
node.AddMember("n3");
|
||||
|
||||
node.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
node.CommitJointConsensus();
|
||||
|
||||
node.InJointConsensus.ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void CommitJointConsensus_finalizes_new_configuration_when_adding_peer()
|
||||
{
|
||||
// After commit, Members should exactly equal Cnew.
|
||||
// Go reference: raft.go joint consensus commit.
|
||||
var node = new RaftNode("n1");
|
||||
node.AddMember("n2");
|
||||
node.AddMember("n3");
|
||||
|
||||
node.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
node.CommitJointConsensus();
|
||||
|
||||
node.Members.Count.ShouldBe(4);
|
||||
node.Members.ShouldContain("n1");
|
||||
node.Members.ShouldContain("n2");
|
||||
node.Members.ShouldContain("n3");
|
||||
node.Members.ShouldContain("n4");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void CommitJointConsensus_removes_old_only_members_when_removing_peer()
|
||||
{
|
||||
// Removing a peer: Cold={n1,n2,n3}, Cnew={n1,n2}.
|
||||
// After commit, n3 must be gone.
|
||||
// Go reference: raft.go joint consensus commit removes Cold-only members.
|
||||
var node = new RaftNode("n1");
|
||||
node.AddMember("n2");
|
||||
node.AddMember("n3");
|
||||
|
||||
node.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2"]);
|
||||
node.CommitJointConsensus();
|
||||
|
||||
node.Members.Count.ShouldBe(2);
|
||||
node.Members.ShouldContain("n1");
|
||||
node.Members.ShouldContain("n2");
|
||||
node.Members.ShouldNotContain("n3");
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void CommitJointConsensus_is_idempotent_when_not_in_joint()
|
||||
{
|
||||
// Calling commit when not in joint consensus must be a no-op.
|
||||
var node = new RaftNode("n1");
|
||||
node.AddMember("n2");
|
||||
|
||||
node.CommitJointConsensus(); // should not throw
|
||||
|
||||
node.Members.Count.ShouldBe(2);
|
||||
node.InJointConsensus.ShouldBeFalse();
|
||||
}
|
||||
|
||||
// -- CalculateJointQuorum --
|
||||
|
||||
[Fact]
|
||||
public void CalculateJointQuorum_returns_false_when_not_in_joint_consensus()
|
||||
{
|
||||
// Outside joint consensus the method has no defined result and returns false.
|
||||
// Go reference: raft.go Section 4.
|
||||
var node = new RaftNode("n1");
|
||||
|
||||
node.CalculateJointQuorum(["n1"], ["n1"]).ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Joint_quorum_requires_majority_from_both_old_and_new_configurations()
|
||||
{
|
||||
// Cold={n1,n2,n3} (size 3, quorum=2), Cnew={n1,n2,n3,n4} (size 4, quorum=3).
|
||||
// 2/3 old AND 3/4 new — both majorities satisfied.
|
||||
// Go reference: raft.go Section 4 — joint config quorum calculation.
|
||||
var (nodes, _) = CreateCluster(3);
|
||||
var leader = ElectLeader(nodes);
|
||||
|
||||
leader.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
|
||||
leader.CalculateJointQuorum(["n1", "n2"], ["n1", "n2", "n3"]).ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Joint_quorum_fails_when_old_majority_not_met()
|
||||
{
|
||||
// Cold={n1,n2,n3} (quorum=2): only 1 old voter — fails old quorum.
|
||||
// Cnew={n1,n2,n3,n4} (quorum=3): 2 new voters — also fails new quorum.
|
||||
// Go reference: raft.go Section 4 — must satisfy BOTH majorities.
|
||||
var (nodes, _) = CreateCluster(3);
|
||||
var leader = ElectLeader(nodes);
|
||||
|
||||
leader.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
|
||||
leader.CalculateJointQuorum(["n1"], ["n1", "n2"]).ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Joint_quorum_fails_when_new_majority_not_met()
|
||||
{
|
||||
// Cold={n1,n2,n3} (quorum=2): 2 old voters — passes old quorum.
|
||||
// Cnew={n1,n2,n3,n4} (quorum=3): only 2 new voters — fails new quorum.
|
||||
// Go reference: raft.go Section 4 — both are required.
|
||||
var (nodes, _) = CreateCluster(3);
|
||||
var leader = ElectLeader(nodes);
|
||||
|
||||
leader.BeginJointConsensus(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]);
|
||||
|
||||
leader.CalculateJointQuorum(["n1", "n2"], ["n1", "n2"]).ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Joint_quorum_exact_majority_boundary_old_config()
|
||||
{
|
||||
// Cold={n1,n2,n3,n4,n5} (size 5, quorum=3).
|
||||
// Exactly 3 old voters meets old quorum boundary.
|
||||
var node = new RaftNode("n1");
|
||||
foreach (var m in new[] { "n2", "n3", "n4", "n5" })
|
||||
node.AddMember(m);
|
||||
|
||||
node.BeginJointConsensus(
|
||||
["n1", "n2", "n3", "n4", "n5"],
|
||||
["n1", "n2", "n3", "n4", "n5", "n6"]);
|
||||
|
||||
// 3/5 old (exact quorum=3) and 4/6 new (quorum=4) — both satisfied
|
||||
node.CalculateJointQuorum(["n1", "n2", "n3"], ["n1", "n2", "n3", "n4"]).ShouldBeTrue();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public void Joint_quorum_just_below_boundary_old_config_fails()
|
||||
{
|
||||
// Cold={n1,n2,n3,n4,n5} (size 5, quorum=3): 2 voters fails.
|
||||
var node = new RaftNode("n1");
|
||||
foreach (var m in new[] { "n2", "n3", "n4", "n5" })
|
||||
node.AddMember(m);
|
||||
|
||||
node.BeginJointConsensus(
|
||||
["n1", "n2", "n3", "n4", "n5"],
|
||||
["n1", "n2", "n3", "n4", "n5", "n6"]);
|
||||
|
||||
// 2/5 old < quorum=3 — must fail
|
||||
node.CalculateJointQuorum(["n1", "n2"], ["n1", "n2", "n3", "n4"]).ShouldBeFalse();
|
||||
}
|
||||
|
||||
// -- Integration: existing ProposeAddPeerAsync/ProposeRemovePeerAsync unchanged --
|
||||
|
||||
[Fact]
|
||||
public async Task ProposeAddPeerAsync_still_works_after_joint_consensus_fields_added()
|
||||
{
|
||||
// Verify that adding joint consensus fields does not break the existing
|
||||
// single-phase ProposeAddPeerAsync behaviour.
|
||||
// Go reference: raft.go:961-990 (proposeAddPeer).
|
||||
var (nodes, _) = CreateCluster(3);
|
||||
var leader = ElectLeader(nodes);
|
||||
|
||||
await leader.ProposeAddPeerAsync("n4", default);
|
||||
|
||||
leader.Members.ShouldContain("n4");
|
||||
leader.Members.Count.ShouldBe(4);
|
||||
leader.MembershipChangeInProgress.ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task ProposeRemovePeerAsync_still_works_after_joint_consensus_fields_added()
|
||||
{
|
||||
// Verify that adding joint consensus fields does not break the existing
|
||||
// single-phase ProposeRemovePeerAsync behaviour.
|
||||
// Go reference: raft.go:992-1019 (proposeRemovePeer).
|
||||
var (nodes, _) = CreateCluster(3);
|
||||
var leader = ElectLeader(nodes);
|
||||
|
||||
await leader.ProposeRemovePeerAsync("n3", default);
|
||||
|
||||
leader.Members.ShouldNotContain("n3");
|
||||
leader.Members.Count.ShouldBe(2);
|
||||
leader.MembershipChangeInProgress.ShouldBeFalse();
|
||||
}
|
||||
|
||||
[Fact]
|
||||
public async Task MembershipChangeInProgress_is_false_after_completed_add()
|
||||
{
|
||||
// The single-change invariant must still hold: flag is cleared after completion.
|
||||
// Go reference: raft.go:961-1019 single-change invariant.
|
||||
var (nodes, _) = CreateCluster(3);
|
||||
var leader = ElectLeader(nodes);
|
||||
|
||||
await leader.ProposeAddPeerAsync("n4", default);
|
||||
|
||||
leader.MembershipChangeInProgress.ShouldBeFalse();
|
||||
}
|
||||
}
|
||||
147
tests/NATS.Server.Tests/Raft/RaftWalTests.cs
Normal file
147
tests/NATS.Server.Tests/Raft/RaftWalTests.cs
Normal file
@@ -0,0 +1,147 @@
|
||||
using NATS.Server.Raft;
|
||||
|
||||
// Go reference: server/raft.go (WAL binary format, compaction, CRC integrity)
|
||||
|
||||
namespace NATS.Server.Tests.Raft;
|
||||
|
||||
// Verifies the Raft write-ahead log: append/recover round-trips, compaction,
// crash-truncation tolerance, CRC integrity, and node metadata persistence.
// Go reference: server/raft.go (WAL binary format, compaction, CRC integrity)
public class RaftWalTests : IDisposable
{
    // Unique temp directory per test-class instance; removed in Dispose so
    // parallel test runs never collide on disk.
    private readonly string _root;

    public RaftWalTests()
    {
        _root = Path.Combine(Path.GetTempPath(), $"nats-wal-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_root);
    }

    public void Dispose()
    {
        if (Directory.Exists(_root))
            Directory.Delete(_root, recursive: true);
    }

    // Go reference: server/raft.go WAL append + recover
    [Fact]
    public async Task Wal_persists_and_recovers_entries()
    {
        var walPath = Path.Combine(_root, "raft.wal");

        // Write entries, then close the writer so the file is fully released
        // before recovery reads it.
        {
            using var wal = new RaftWal(walPath);
            await wal.AppendAsync(new RaftLogEntry(1, 1, "cmd-1"));
            await wal.AppendAsync(new RaftLogEntry(2, 1, "cmd-2"));
            await wal.AppendAsync(new RaftLogEntry(3, 2, "cmd-3"));
            await wal.SyncAsync();
        }

        // Recover and verify index/term/command survive the round-trip.
        using var recovered = RaftWal.Load(walPath);
        var entries = recovered.Entries;
        entries.Count.ShouldBe(3);
        entries[0].Index.ShouldBe(1);
        entries[0].Term.ShouldBe(1);
        entries[0].Command.ShouldBe("cmd-1");
        entries[2].Index.ShouldBe(3);
        entries[2].Term.ShouldBe(2);
    }

    // Go reference: server/raft.go compactLog
    [Fact]
    public async Task Wal_compact_removes_old_entries()
    {
        var walPath = Path.Combine(_root, "compact.wal");

        // Close the writing handle before Load: every other test in this class
        // closes the writer first, and re-opening a file that still has an
        // open write handle fails on platforms that lock open files.
        {
            using var wal = new RaftWal(walPath);
            for (int i = 1; i <= 10; i++)
                await wal.AppendAsync(new RaftLogEntry(i, 1, $"cmd-{i}"));
            await wal.SyncAsync();

            await wal.CompactAsync(5); // remove entries 1-5
        }

        using var recovered = RaftWal.Load(walPath);
        recovered.Entries.Count.ShouldBe(5);
        recovered.Entries.First().Index.ShouldBe(6);
    }

    // Go reference: server/raft.go WAL crash-truncation tolerance
    [Fact]
    public async Task Wal_handles_truncated_file()
    {
        var walPath = Path.Combine(_root, "truncated.wal");

        {
            using var wal = new RaftWal(walPath);
            await wal.AppendAsync(new RaftLogEntry(1, 1, "good-entry"));
            await wal.AppendAsync(new RaftLogEntry(2, 1, "will-be-truncated"));
            await wal.SyncAsync();
        }

        // Truncate the last few bytes to simulate a crash mid-write.
        using (var fs = File.OpenWrite(walPath))
            fs.SetLength(fs.Length - 3);

        // Load must keep the intact first record and drop the torn tail.
        using var recovered = RaftWal.Load(walPath);
        recovered.Entries.Count.ShouldBe(1);
        recovered.Entries.First().Command.ShouldBe("good-entry");
    }

    // Go reference: server/raft.go storeMeta (term + votedFor persistence)
    [Fact]
    public async Task RaftNode_persists_term_and_vote()
    {
        var dir = Path.Combine(_root, "node-persist");
        Directory.CreateDirectory(dir);

        // Persist term/vote, dispose, then recover through a fresh node.
        {
            using var node = new RaftNode("n1", persistDirectory: dir);
            node.TermState.CurrentTerm = 5;
            node.TermState.VotedFor = "n2";
            await node.PersistAsync(default);
        }

        using var recovered = new RaftNode("n1", persistDirectory: dir);
        await recovered.LoadPersistedStateAsync(default);
        recovered.Term.ShouldBe(5);
        recovered.TermState.VotedFor.ShouldBe("n2");
    }

    // Go reference: server/raft.go WAL empty file edge case
    [Fact]
    public async Task Wal_empty_file_loads_no_entries()
    {
        var walPath = Path.Combine(_root, "empty.wal");

        // Create the file with no records at all.
        {
            using var wal = new RaftWal(walPath);
            await wal.SyncAsync();
        }

        using var recovered = RaftWal.Load(walPath);
        recovered.Entries.Count.ShouldBe(0);
    }

    // Go reference: server/raft.go WAL CRC integrity check
    [Fact]
    public async Task Wal_crc_validates_record_integrity()
    {
        var walPath = Path.Combine(_root, "crc.wal");

        {
            using var wal = new RaftWal(walPath);
            await wal.AppendAsync(new RaftLogEntry(1, 1, "valid"));
            await wal.AppendAsync(new RaftLogEntry(2, 1, "also-valid"));
            await wal.SyncAsync();
        }

        // Corrupt one byte in the tail of the file (inside the second record).
        var bytes = File.ReadAllBytes(walPath);
        bytes[^5] ^= 0xFF;
        File.WriteAllBytes(walPath, bytes);

        // Load should recover exactly the first record, stopping at the corrupt second.
        using var recovered = RaftWal.Load(walPath);
        recovered.Entries.Count.ShouldBe(1);
        recovered.Entries.First().Command.ShouldBe("valid");
    }
}
|
||||
61
tests/NATS.Server.Tests/SignalHandlerTests.cs
Normal file
61
tests/NATS.Server.Tests/SignalHandlerTests.cs
Normal file
@@ -0,0 +1,61 @@
|
||||
using NATS.Server.Configuration;
|
||||
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
// Go reference: server/signal_unix.go (handleSignals), server/reload.go (Reload)
|
||||
|
||||
// Verifies signal-handler registration plus in-memory config-reload validation.
// Go reference: server/signal_unix.go (handleSignals), server/reload.go (Reload)
public class SignalHandlerTests
{
    [Fact]
    public void SignalHandler_registers_without_throwing()
    {
        // Go reference: server/signal_unix.go — registration should succeed
        Should.NotThrow(() => SignalHandler.Register(() => { }));
        SignalHandler.IsRegistered.ShouldBeTrue();
        SignalHandler.Unregister();
        SignalHandler.IsRegistered.ShouldBeFalse();
    }

    [Fact]
    public async Task ConfigReloader_ReloadFromOptionsAsync_applies_reloadable_changes()
    {
        // Go reference: server/reload.go — reloadable options (e.g., MaxPayload) pass validation
        var original = new NatsOptions { Port = 4222, MaxPayload = 1024 };
        var updated = new NatsOptions { Port = 4222, MaxPayload = 2048 }; // MaxPayload is reloadable

        var result = await ConfigReloader.ReloadFromOptionsAsync(original, updated);
        result.Success.ShouldBeTrue();
        result.RejectedChanges.ShouldBeEmpty();
    }

    [Fact]
    public async Task ConfigReloader_rejects_non_reloadable_changes()
    {
        // Go reference: server/reload.go — Port change is NOT reloadable
        var original = new NatsOptions { Port = 4222 };
        var updated = new NatsOptions { Port = 5555 }; // port change is NOT reloadable

        var result = await ConfigReloader.ReloadFromOptionsAsync(original, updated);
        // A rejected change must also fail the reload as a whole: Success is
        // defined as "no rejected changes and no validation errors".
        result.Success.ShouldBeFalse();
        result.RejectedChanges.ShouldContain(c => c.Contains("Port"));
    }

    [Fact]
    public async Task ConfigReloader_identical_options_succeeds()
    {
        // Go reference: server/reload.go — no changes = success
        var original = new NatsOptions { Port = 4222 };
        var updated = new NatsOptions { Port = 4222 };

        var result = await ConfigReloader.ReloadFromOptionsAsync(original, updated);
        result.Success.ShouldBeTrue();
        result.RejectedChanges.ShouldBeEmpty();
    }

    [Fact]
    public void SignalHandler_unregister_is_idempotent()
    {
        // Calling Unregister when not registered should not throw.
        Should.NotThrow(() => SignalHandler.Unregister());
        Should.NotThrow(() => SignalHandler.Unregister());
    }
}
|
||||
92
tests/NATS.Server.Tests/StallGateTests.cs
Normal file
92
tests/NATS.Server.Tests/StallGateTests.cs
Normal file
@@ -0,0 +1,92 @@
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
// Go reference: server/client.go (stc channel, stall gate backpressure)
|
||||
|
||||
// Verifies the client stall gate (producer backpressure): activation threshold,
// blocking/release behavior, wait timeout, and idempotent release.
// Go reference: server/client.go (stc channel, stall gate backpressure)
public class StallGateTests
{
    [Fact]
    public void Stall_gate_activates_at_threshold()
    {
        // Go reference: server/client.go stalledRoute — stalls at 75% capacity
        var gate = new NatsClient.StallGate(maxPending: 1000);

        gate.IsStalled.ShouldBeFalse();

        gate.UpdatePending(750); // 75% = threshold
        gate.IsStalled.ShouldBeTrue();

        gate.UpdatePending(500); // below threshold — releases
        gate.IsStalled.ShouldBeFalse();
    }

    [Fact]
    public async Task Stall_gate_blocks_producer()
    {
        // Go reference: server/client.go stc channel blocks sends
        var gate = new NatsClient.StallGate(maxPending: 100);
        gate.UpdatePending(80); // stalled — 80% > 75%

        // Signal when the producer is about to enter WaitAsync so the
        // "still blocked" assertion only runs once it is actually waiting.
        var entered = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously);
        var producer = Task.Run(async () =>
        {
            entered.SetResult();
            // Return the wait outcome so the test distinguishes a real release
            // (true) from a 5s timeout (false). The previous version set a
            // flag either way, so a timed-out wait silently passed the test.
            return await gate.WaitAsync(TimeSpan.FromSeconds(5));
        });

        await entered.Task;
        producer.IsCompleted.ShouldBeFalse(); // still blocked

        gate.UpdatePending(50); // below threshold — releases

        (await producer).ShouldBeTrue(); // released, not timed out
    }

    [Fact]
    public async Task Stall_gate_timeout_returns_false()
    {
        // Go reference: server/client.go stc timeout → close as slow consumer
        var gate = new NatsClient.StallGate(maxPending: 100);
        gate.UpdatePending(80); // stalled

        var result = await gate.WaitAsync(TimeSpan.FromMilliseconds(50));
        result.ShouldBeFalse(); // timed out, not released
    }

    [Fact]
    public void Stall_gate_not_stalled_below_threshold()
    {
        // Go reference: server/client.go — no stall when below threshold
        var gate = new NatsClient.StallGate(maxPending: 1000);

        gate.UpdatePending(100); // well below 75%
        gate.IsStalled.ShouldBeFalse();

        gate.UpdatePending(749); // just below 75%
        gate.IsStalled.ShouldBeFalse();
    }

    [Fact]
    public async Task Stall_gate_wait_when_not_stalled_returns_immediately()
    {
        // Go reference: server/client.go — no stall, immediate return
        var gate = new NatsClient.StallGate(maxPending: 1000);

        var result = await gate.WaitAsync(TimeSpan.FromSeconds(1));
        result.ShouldBeTrue(); // immediately released — not stalled
    }

    [Fact]
    public void Stall_gate_release_is_idempotent()
    {
        // Release when not stalled should not throw.
        var gate = new NatsClient.StallGate(maxPending: 100);

        Should.NotThrow(() => gate.Release());
        Should.NotThrow(() => gate.Release());
    }
}
|
||||
61
tests/NATS.Server.Tests/WriteTimeoutTests.cs
Normal file
61
tests/NATS.Server.Tests/WriteTimeoutTests.cs
Normal file
@@ -0,0 +1,61 @@
|
||||
namespace NATS.Server.Tests;
|
||||
|
||||
// Go reference: server/client.go (write timeout handling, per-kind policies)
|
||||
|
||||
// Verifies write-timeout policy defaults per client kind and the partial-flush
// bookkeeping of FlushResult.
// Go reference: server/client.go (write timeout handling, per-kind policies)
public class WriteTimeoutTests
{
    [Fact]
    public void WriteTimeoutPolicy_defaults_by_kind()
    {
        // Go reference: server/client.go — CLIENT closes, others use TCP flush
        var close = NatsClient.WriteTimeoutPolicy.Close;
        var tcpFlush = NatsClient.WriteTimeoutPolicy.TcpFlush;

        NatsClient.GetWriteTimeoutPolicy(ClientKind.Client).ShouldBe(close);
        NatsClient.GetWriteTimeoutPolicy(ClientKind.Router).ShouldBe(tcpFlush);
        NatsClient.GetWriteTimeoutPolicy(ClientKind.Gateway).ShouldBe(tcpFlush);
        NatsClient.GetWriteTimeoutPolicy(ClientKind.Leaf).ShouldBe(tcpFlush);
    }

    [Fact]
    public void PartialFlushResult_tracks_bytes()
    {
        // Go reference: server/client.go — partial write tracking
        var half = new NatsClient.FlushResult(BytesAttempted: 1024, BytesWritten: 512);

        half.IsPartial.ShouldBeTrue();
        half.BytesRemaining.ShouldBe(512L);
    }

    [Fact]
    public void PartialFlushResult_complete_is_not_partial()
    {
        // Go reference: server/client.go — complete write
        var full = new NatsClient.FlushResult(BytesAttempted: 1024, BytesWritten: 1024);

        full.IsPartial.ShouldBeFalse();
        full.BytesRemaining.ShouldBe(0L);
    }

    [Fact]
    public void WriteTimeoutPolicy_system_kind_defaults_to_close()
    {
        // Go reference: server/client.go — system/internal kinds default to Close
        var close = NatsClient.WriteTimeoutPolicy.Close;

        NatsClient.GetWriteTimeoutPolicy(ClientKind.System).ShouldBe(close);
        NatsClient.GetWriteTimeoutPolicy(ClientKind.JetStream).ShouldBe(close);
        NatsClient.GetWriteTimeoutPolicy(ClientKind.Account).ShouldBe(close);
    }

    [Fact]
    public void FlushResult_zero_bytes_is_partial_when_attempted_nonzero()
    {
        // Edge case: nothing written but something attempted.
        var stalled = new NatsClient.FlushResult(BytesAttempted: 100, BytesWritten: 0);

        stalled.IsPartial.ShouldBeTrue();
        stalled.BytesRemaining.ShouldBe(100L);
    }

    [Fact]
    public void FlushResult_zero_zero_is_not_partial()
    {
        // Edge case: nothing attempted, nothing written.
        var idle = new NatsClient.FlushResult(BytesAttempted: 0, BytesWritten: 0);

        idle.IsPartial.ShouldBeFalse();
        idle.BytesRemaining.ShouldBe(0L);
    }
}
|
||||
Reference in New Issue
Block a user