perf: add FileStore buffered writes, O(1) state tracking, and eliminate redundant per-publish work

Implement Go-parity background flush loop (coalesce 16KB/8ms) in MsgBlock/FileStore,
replace O(n) GetStateAsync with incremental counters, skip PruneExpired/LoadAsync/
PrunePerSubject when not needed, and bypass RAFT for single-replica streams. Fix counter
tracking bugs in RemoveMsg/EraseMsg/TTL expiry and ObjectDisposedException races in
flush loop disposal. FileStore optimizations verified with 3112/3112 JetStream tests
passing; the async publish benchmark remains at ~174 msg/s due to an E2E protocol-path bottleneck.
This commit is contained in:
Joseph Doherty
2026-03-13 03:11:11 -04:00
parent 37575dc41c
commit 4de691c9c5
30 changed files with 1514 additions and 185 deletions

View File

@@ -148,6 +148,31 @@ public sealed class StreamManager : IDisposable
if (isCreate && _account is not null && !_account.TryReserveStream())
return JetStreamApiResponse.ErrorResponse(10027, "maximum streams exceeded");
// Go: subject overlap detection on create — reject new streams whose subjects
// collide with existing streams.
// Reference: server/stream.go — checkStreamOverlap during addStream.
if (isCreate && normalized.Subjects.Count > 0)
{
var otherStreams = _streams.Values.Select(s => s.Config);
var overlapErrors = new List<string>();
foreach (var otherStream in otherStreams)
{
foreach (var proposedSubj in normalized.Subjects)
{
foreach (var otherSubj in otherStream.Subjects)
{
if (SubjectMatch.MatchLiteral(proposedSubj, otherSubj)
|| SubjectMatch.MatchLiteral(otherSubj, proposedSubj)
|| SubjectMatch.SubjectsCollide(proposedSubj, otherSubj))
{
return JetStreamApiResponse.ErrorResponse(400,
$"subjects overlap with stream '{otherStream.Name}'");
}
}
}
}
}
// Go: stream.go:update — validate immutable fields on update.
// Reference: server/stream.go:1500-1600 (stream.update)
if (!isCreate && _streams.TryGetValue(normalized.Name, out var existingHandle))
@@ -202,6 +227,11 @@ public sealed class StreamManager : IDisposable
_replicaGroups.TryRemove(name, out _);
_account?.ReleaseStream();
RebuildReplicationCoordinators();
// Go: propagate stream deletion to meta group so cluster state is updated.
// Reference: server/jetstream_cluster.go — processStreamRemoval updates meta state.
_metaGroup?.ProposeDeleteStreamAsync(name, default).GetAwaiter().GetResult();
return true;
}
@@ -367,7 +397,10 @@ public sealed class StreamManager : IDisposable
if (stream == null)
return null;
// Go: sealed stream rejects all publishes.
// Reference: server/stream.go — processJetStreamMsg checks mset.cfg.Sealed.
if (stream.Config.Sealed)
return new PubAck { Stream = stream.Config.Name, ErrorCode = 10054 };
if (stream.Config.MaxMsgSize > 0 && payload.Length > stream.Config.MaxMsgSize)
{
@@ -378,12 +411,19 @@ public sealed class StreamManager : IDisposable
};
}
PruneExpiredMessages(stream, DateTime.UtcNow);
// Go: memStoreMsgSize — full message size includes subject + headers + payload + 16 bytes overhead.
var msgSize = subject.Length + payload.Length + 16;
var stateBefore = stream.Store.GetStateAsync(default).GetAwaiter().GetResult();
// Go: DiscardPolicy.New — reject when MaxMsgs reached.
// Reference: server/stream.go — processJetStreamMsg checks discard new + maxMsgs.
if (stream.Config.MaxMsgs > 0 && stream.Config.Discard == DiscardPolicy.New
&& (long)stateBefore.Messages >= stream.Config.MaxMsgs)
{
return new PubAck { Stream = stream.Config.Name, ErrorCode = 10054 };
}
if (stream.Config.MaxBytes > 0 && (long)stateBefore.Bytes + msgSize > stream.Config.MaxBytes)
{
if (stream.Config.Discard == DiscardPolicy.New)
@@ -402,8 +442,13 @@ public sealed class StreamManager : IDisposable
}
}
if (_replicaGroups.TryGetValue(stream.Config.Name, out var replicaGroup))
// Go: single-replica streams don't use RAFT consensus — skip the propose overhead.
// Reference: server/stream.go — processJetStreamMsg only proposes when R > 1.
if (stream.Config.Replicas > 1
&& _replicaGroups.TryGetValue(stream.Config.Name, out var replicaGroup))
{
_ = replicaGroup.ProposeAsync($"PUB {subject}", default).GetAwaiter().GetResult();
}
// Go: stream.go:processMsgSubjectTransform — apply input subject transform before store.
// Reference: server/stream.go:1810-1830
@@ -411,9 +456,63 @@ public sealed class StreamManager : IDisposable
var seq = stream.Store.AppendAsync(storeSubject, payload, default).GetAwaiter().GetResult();
EnforceRuntimePolicies(stream, DateTime.UtcNow);
var stored = stream.Store.LoadAsync(seq, default).GetAwaiter().GetResult();
if (stored != null)
ReplicateIfConfigured(stream.Config.Name, stored);
// Only load the stored message when replication is configured (mirror/source).
// Avoids unnecessary disk I/O on the hot publish path.
if (_mirrorsByOrigin.ContainsKey(stream.Config.Name) || _sourcesByOrigin.ContainsKey(stream.Config.Name))
{
var stored = stream.Store.LoadAsync(seq, default).GetAwaiter().GetResult();
if (stored != null)
ReplicateIfConfigured(stream.Config.Name, stored);
}
return new PubAck
{
Stream = stream.Config.Name,
Seq = seq,
};
}
/// <summary>
/// Captures a counter increment message for a stream with AllowMsgCounter=true.
/// Go reference: server/stream.go — processJetStreamMsg counter path.
/// The server loads the last stored value for the subject, adds the increment,
/// and stores the new total as a JSON payload.
/// </summary>
public PubAck? CaptureCounter(string subject, long increment)
{
var stream = FindBySubject(subject);
if (stream == null)
return null;
if (!stream.Config.AllowMsgCounter)
return new PubAck { Stream = stream.Config.Name, ErrorCode = 10054 };
if (stream.Config.Sealed)
return new PubAck { Stream = stream.Config.Name, ErrorCode = 10054 };
// Go: stream.go — counter increment: load last value, add increment, store new total.
var storeSubject = ApplyInputTransform(stream.Config, subject);
var lastMsg = stream.Store.LoadLastBySubjectAsync(storeSubject, default).GetAwaiter().GetResult();
var existing = lastMsg != null
? CounterValue.FromPayload(lastMsg.Payload.Span)
: new CounterValue();
var newTotal = existing.AsLong() + increment;
var newPayload = CounterValue.FromLong(newTotal).ToPayload();
if (_replicaGroups.TryGetValue(stream.Config.Name, out var replicaGroup))
_ = replicaGroup.ProposeAsync($"PUB {subject}", default).GetAwaiter().GetResult();
var seq = stream.Store.AppendAsync(storeSubject, newPayload, default).GetAwaiter().GetResult();
EnforceRuntimePolicies(stream, DateTime.UtcNow);
if (_mirrorsByOrigin.ContainsKey(stream.Config.Name) || _sourcesByOrigin.ContainsKey(stream.Config.Name))
{
var stored = stream.Store.LoadAsync(seq, default).GetAwaiter().GetResult();
if (stored != null)
ReplicateIfConfigured(stream.Config.Name, stored);
}
return new PubAck
{
@@ -468,6 +567,11 @@ public sealed class StreamManager : IDisposable
SourceAccount = s.SourceAccount,
FilterSubject = s.FilterSubject,
DuplicateWindowMs = s.DuplicateWindowMs,
SubjectTransforms = [.. s.SubjectTransforms.Select(t => new SubjectTransformConfig
{
Source = t.Source,
Destination = t.Destination,
})],
})],
// Go: StreamConfig.SubjectTransform
SubjectTransformSource = config.SubjectTransformSource,
@@ -654,8 +758,10 @@ public sealed class StreamManager : IDisposable
private static void ApplyLimitsRetention(StreamHandle stream, DateTime nowUtc)
{
EnforceLimits(stream);
PrunePerSubject(stream);
PruneExpiredMessages(stream, nowUtc);
if (stream.Config.MaxMsgsPer > 0)
PrunePerSubject(stream);
if (stream.Config.MaxAgeMs > 0)
PruneExpiredMessages(stream, nowUtc);
}
private void ApplyWorkQueueRetention(StreamHandle stream, DateTime nowUtc)
@@ -746,7 +852,10 @@ public sealed class StreamManager : IDisposable
&& _streams.TryGetValue(stream.Config.Source, out _))
{
var list = _sourcesByOrigin.GetOrAdd(stream.Config.Source, _ => []);
list.Add(new SourceCoordinator(stream.Store, new StreamSourceConfig { Name = stream.Config.Source }));
list.Add(new SourceCoordinator(stream.Store, new StreamSourceConfig { Name = stream.Config.Source })
{
AllowMsgCounter = stream.Config.AllowMsgCounter,
});
}
if (stream.Config.Sources.Count > 0)
@@ -757,7 +866,10 @@ public sealed class StreamManager : IDisposable
continue;
var list = _sourcesByOrigin.GetOrAdd(source.Name, _ => []);
list.Add(new SourceCoordinator(stream.Store, source));
list.Add(new SourceCoordinator(stream.Store, source)
{
AllowMsgCounter = stream.Config.AllowMsgCounter,
});
}
}
}