perf: eliminate per-message allocations in pub/sub hot path and coalesce outbound writes
Pub/sub 1:1 (16B) improved from 0.18x to 0.50x, fan-out from 0.18x to 0.44x, and JetStream durable fetch from 0.13x to 0.64x vs Go. Key changes: replace .ToArray() copy in SendMessage with pooled buffer handoff, batch multiple small writes into single WriteAsync via 64KB coalesce buffer in write loop, and remove profiling Stopwatch instrumentation from ProcessMessage/StreamManager hot paths.
This commit is contained in:
@@ -0,0 +1,260 @@
|
||||
using System.Diagnostics;
|
||||
using Microsoft.Extensions.Logging.Abstractions;
|
||||
using NATS.Client.Core;
|
||||
using NATS.Client.JetStream;
|
||||
using NATS.Client.JetStream.Models;
|
||||
using NATS.Server.Configuration;
|
||||
using NATS.Server.JetStream.Publish;
|
||||
using NATS.Server.JetStream.Storage;
|
||||
using Xunit.Abstractions;
|
||||
using ServerStreamConfig = NATS.Server.JetStream.Models.StreamConfig;
|
||||
using ServerStorageType = NATS.Server.JetStream.Models.StorageType;
|
||||
using ServerRetentionPolicy = NATS.Server.JetStream.Models.RetentionPolicy;
|
||||
|
||||
namespace NATS.Server.JetStream.Tests.JetStream;
|
||||
|
||||
/// <summary>
|
||||
/// Profiling test for the JetStream publish hot path.
|
||||
/// </summary>
|
||||
/// <summary>
/// Profiling tests for the JetStream publish hot path, layered so each test
/// isolates one slice of the stack: FileStore append only, server-side
/// capture (file vs. memory store), and a full end-to-end round trip
/// through an in-process server with a real client connection.
/// All tests are tagged Category=Profile so they can be filtered out of
/// normal CI runs.
/// </summary>
public class JetStreamPublishProfileTest(ITestOutputHelper output)
{
    /// <summary>
    /// FileStore.AppendAsync only — isolates the store layer without StreamManager overhead.
    /// </summary>
    [Fact]
    [Trait("Category", "Profile")]
    public async Task Profile_FileStore_AppendAsync_Only()
    {
        const int messageCount = 10_000;
        const int payloadSize = 128;
        const string subject = "bench.append.fs";
        var payload = new byte[payloadSize];

        var storeDir = Path.Combine(Path.GetTempPath(), "nats-profile-append-" + Guid.NewGuid().ToString("N")[..8]);
        Directory.CreateDirectory(storeDir);

        try
        {
            // using declaration guarantees disposal even if the measurement
            // throws, so the finally-block directory cleanup can succeed.
            using var store = new FileStore(new FileStoreOptions { Directory = storeDir, BlockSizeBytes = 256 * 1024 });

            // Warmup (JIT, block allocation) before the timed loop.
            // Each append is awaited: discarding the returned task (as the
            // original did) skews the timing to task initiation only and
            // leaves any append exception unobserved.
            for (var i = 0; i < 1_000; i++)
                await store.AppendAsync(subject, payload, default);

            var sw = Stopwatch.StartNew();
            for (var i = 0; i < messageCount; i++)
                await store.AppendAsync(subject, payload, default);
            sw.Stop();

            var msgPerSec = messageCount / sw.Elapsed.TotalSeconds;
            output.WriteLine("=== FileStore AppendAsync Only ===");
            output.WriteLine($"Messages: {messageCount:N0}, Duration: {sw.Elapsed.TotalMilliseconds:F1} ms");
            output.WriteLine($"Throughput: {msgPerSec:N0} msg/s");
            output.WriteLine($"GC Gen0: {GC.CollectionCount(0)}, Gen1: {GC.CollectionCount(1)}, Gen2: {GC.CollectionCount(2)}");
        }
        finally
        {
            try { Directory.Delete(storeDir, recursive: true); }
            catch { /* best-effort */ }
        }
    }

    /// <summary>
    /// Server-side only: calls StreamManager.Capture directly (no network).
    /// Isolates FileStore vs MemStore performance.
    /// </summary>
    [Fact]
    [Trait("Category", "Profile")]
    public void Profile_FileStore_Publish_ServerSide()
    {
        const int messageCount = 5_000;
        const int payloadSize = 128;
        const string subject = "bench.profile.fs";
        var payload = new byte[payloadSize];

        var storeDir = Path.Combine(Path.GetTempPath(), "nats-profile-fs-" + Guid.NewGuid().ToString("N")[..8]);
        Directory.CreateDirectory(storeDir);

        try
        {
            // using declaration: dispose even when setup/measurement throws.
            using var streamManager = new StreamManager(storeDir: storeDir);
            var publisher = new JetStreamPublisher(streamManager);

            var config = new ServerStreamConfig
            {
                Name = "PROFILE_FS",
                Subjects = [subject],
                Storage = ServerStorageType.File,
                Retention = ServerRetentionPolicy.Limits,
                MaxMsgs = 10_000_000,
            };
            streamManager.CreateOrUpdate(config);

            // Warmup before the timed loop.
            for (var i = 0; i < 500; i++)
                publisher.TryCapture(subject, payload, out _);

            // Discard profiler counters accumulated during warmup.
            JetStreamProfiler.DumpAndReset();

            var sw = Stopwatch.StartNew();
            for (var i = 0; i < messageCount; i++)
                publisher.TryCapture(subject, payload, out _);
            sw.Stop();

            var msgPerSec = messageCount / sw.Elapsed.TotalSeconds;
            output.WriteLine("=== FileStore Server-Side Profile ===");
            output.WriteLine($"Messages: {messageCount:N0}, Duration: {sw.Elapsed.TotalMilliseconds:F1} ms");
            output.WriteLine($"Throughput: {msgPerSec:N0} msg/s");
            output.WriteLine("");
            output.WriteLine(JetStreamProfiler.DumpAndReset());
        }
        finally
        {
            try { Directory.Delete(storeDir, recursive: true); }
            catch { /* best-effort */ }
        }
    }

    /// <summary>
    /// Server-side only: MemStore baseline for comparison.
    /// </summary>
    [Fact]
    [Trait("Category", "Profile")]
    public void Profile_MemStore_Publish_ServerSide()
    {
        const int messageCount = 5_000;
        const int payloadSize = 128;
        const string subject = "bench.profile.mem";
        var payload = new byte[payloadSize];

        // using declaration: dispose even when setup/measurement throws.
        using var streamManager = new StreamManager();
        var publisher = new JetStreamPublisher(streamManager);

        var config = new ServerStreamConfig
        {
            Name = "PROFILE_MEM",
            Subjects = [subject],
            Storage = ServerStorageType.Memory,
            Retention = ServerRetentionPolicy.Limits,
            MaxMsgs = 10_000_000,
        };
        streamManager.CreateOrUpdate(config);

        // Warmup before the timed loop.
        for (var i = 0; i < 500; i++)
            publisher.TryCapture(subject, payload, out _);

        // Discard profiler counters accumulated during warmup.
        JetStreamProfiler.DumpAndReset();

        var sw = Stopwatch.StartNew();
        for (var i = 0; i < messageCount; i++)
            publisher.TryCapture(subject, payload, out _);
        sw.Stop();

        var msgPerSec = messageCount / sw.Elapsed.TotalSeconds;
        output.WriteLine("=== MemStore Server-Side Profile ===");
        output.WriteLine($"Messages: {messageCount:N0}, Duration: {sw.Elapsed.TotalMilliseconds:F1} ms");
        output.WriteLine($"Throughput: {msgPerSec:N0} msg/s");
        output.WriteLine("");
        output.WriteLine(JetStreamProfiler.DumpAndReset());
    }

    /// <summary>
    /// E2E: in-process NatsServer with real NatsConnection client.
    /// Measures full publish path including network, protocol parsing, ack serialization.
    /// </summary>
    [Fact]
    [Trait("Category", "Profile")]
    public async Task Profile_FileStore_Publish_E2E()
    {
        const int messageCount = 5_000;
        const int payloadSize = 128;
        const int batchSize = 100;
        const string subject = "bench.profile.e2e";
        var payload = new byte[payloadSize];

        var storeDir = Path.Combine(Path.GetTempPath(), "nats-profile-e2e-" + Guid.NewGuid().ToString("N")[..8]);
        Directory.CreateDirectory(storeDir);

        try
        {
            var options = new NatsOptions
            {
                Host = "127.0.0.1",
                Port = 0, // Ephemeral port
                JetStream = new JetStreamOptions
                {
                    StoreDir = storeDir,
                    MaxMemoryStore = 256 * 1024 * 1024,
                    MaxFileStore = 1L * 1024 * 1024 * 1024,
                },
            };

            using var server = new NATS.Server.NatsServer(options, NullLoggerFactory.Instance);

            // Deliberate fire-and-forget: the server runs in the background
            // until ShutdownAsync below; readiness is awaited explicitly.
            _ = server.StartAsync(CancellationToken.None);
            await server.WaitForReadyAsync();

            var port = server.Port;

            await using var nats = new NatsConnection(new NatsOpts { Url = $"nats://127.0.0.1:{port}" });
            await nats.ConnectAsync();
            var js = new NatsJSContext(nats);

            // Unique stream name per run, truncated to keep it a valid length.
            var streamName = $"PROFILE_E2E_{Guid.NewGuid():N}"[..24];
            await js.CreateStreamAsync(new StreamConfig(streamName, [subject])
            {
                Storage = StreamConfigStorage.File,
                Retention = StreamConfigRetention.Limits,
                MaxMsgs = 10_000_000,
            });

            // Warmup before the timed loop.
            for (var i = 0; i < 500; i++)
                await js.PublishAsync(subject, payload);

            // Discard profiler counters accumulated during warmup.
            JetStreamProfiler.DumpAndReset();

            // Measurement — fire-and-gather in batches (same as benchmark):
            // keep up to batchSize publishes in flight, then drain them.
            // Each ValueTask is awaited exactly once.
            var sw = Stopwatch.StartNew();
            var tasks = new List<ValueTask<PubAckResponse>>(batchSize);

            for (var i = 0; i < messageCount; i++)
            {
                tasks.Add(js.PublishAsync(subject, payload));

                if (tasks.Count >= batchSize)
                {
                    foreach (var t in tasks)
                        await t;
                    tasks.Clear();
                }
            }

            // Drain the final partial batch.
            foreach (var t in tasks)
                await t;

            sw.Stop();

            var msgPerSec = messageCount / sw.Elapsed.TotalSeconds;
            output.WriteLine("=== FileStore E2E Profile (in-process server) ===");
            output.WriteLine($"Messages: {messageCount:N0}, Duration: {sw.Elapsed.TotalMilliseconds:F1} ms");
            output.WriteLine($"Throughput: {msgPerSec:N0} msg/s");
            output.WriteLine("");
            output.WriteLine(JetStreamProfiler.DumpAndReset());

            await js.DeleteStreamAsync(streamName);
            await server.ShutdownAsync();
        }
        finally
        {
            try { Directory.Delete(storeDir, recursive: true); }
            catch { /* best-effort */ }
        }
    }
}
|
||||
Reference in New Issue
Block a user