feat: add benchmark test project for Go vs .NET server comparison
Side-by-side performance benchmarks using NATS.Client.Core against both servers on ephemeral ports. Includes core pub/sub, request/reply latency, and JetStream throughput tests with comparison output and benchmarks_comparison.md results. Also fixes timestamp flakiness in StoreInterfaceTests by using explicit timestamps.
This commit is contained in:
@@ -0,0 +1,93 @@
|
||||
using System.Diagnostics;
|
||||
using NATS.Client.Core;
|
||||
using NATS.Client.JetStream;
|
||||
using NATS.Client.JetStream.Models;
|
||||
using NATS.Server.Benchmark.Tests.Harness;
|
||||
using NATS.Server.Benchmark.Tests.Infrastructure;
|
||||
using Xunit.Abstractions;
|
||||
|
||||
namespace NATS.Server.Benchmark.Tests.JetStream;
|
||||
|
||||
/// <summary>
/// JetStream async-publish throughput benchmark comparing the Go and .NET
/// NATS servers. Messages are published with acks gathered in batches,
/// mimicking the classic "async publish" pattern.
/// </summary>
[Collection("Benchmark-JetStream")]
public class AsyncPublishTests(JetStreamServerPairFixture fixture, ITestOutputHelper output)
{
    /// <summary>
    /// Publishes 5,000 messages of 128 bytes to a file-backed stream on the
    /// .NET server — and on the Go server when available — then writes a
    /// comparison (or single-server) result to the test output.
    /// </summary>
    [Fact]
    [Trait("Category", "Benchmark")]
    public async Task JSAsyncPublish_128B_FileStore()
    {
        const int payloadSize = 128;
        const int messageCount = 5_000;
        const int batchSize = 100;

        var dotnetResult = await RunAsyncPublish("JS Async Publish (128B File)", "DotNet", payloadSize, messageCount, batchSize, fixture.CreateDotNetClient);

        if (fixture.GoAvailable)
        {
            var goResult = await RunAsyncPublish("JS Async Publish (128B File)", "Go", payloadSize, messageCount, batchSize, fixture.CreateGoClient);
            BenchmarkResultWriter.WriteComparison(output, goResult, dotnetResult);
        }
        else
        {
            BenchmarkResultWriter.WriteSingle(output, dotnetResult);
        }
    }

    /// <summary>
    /// Runs one async-publish benchmark pass against a single server:
    /// creates a uniquely named file-backed stream, warms up with 500
    /// publishes, then times <paramref name="messageCount"/> publishes whose
    /// acks are awaited in batches of <paramref name="batchSize"/>.
    /// </summary>
    /// <param name="name">Benchmark display name used in the result output.</param>
    /// <param name="serverType">"Go" or "DotNet"; folded into stream/subject names.</param>
    /// <param name="payloadSize">Message payload size in bytes.</param>
    /// <param name="messageCount">Number of timed publishes.</param>
    /// <param name="batchSize">Number of in-flight acks gathered per batch.</param>
    /// <param name="createClient">Factory producing a connection to the target server.</param>
    /// <returns>The timed <see cref="BenchmarkResult"/> for this server.</returns>
    private static async Task<BenchmarkResult> RunAsyncPublish(string name, string serverType, int payloadSize, int messageCount, int batchSize, Func<NatsConnection> createClient)
    {
        var payload = new byte[payloadSize];
        // Unique stream name per run so repeated/parallel tests never collide;
        // truncated to 30 chars to keep the name within sane server limits.
        var streamName = $"BENCH_ASYNC_{serverType.ToUpperInvariant()}_{Guid.NewGuid():N}"[..30];
        var subject = $"bench.js.async.{serverType.ToLowerInvariant()}";

        await using var nats = createClient();
        await nats.ConnectAsync();
        var js = new NatsJSContext(nats);

        await js.CreateStreamAsync(new StreamConfig(streamName, [subject])
        {
            Storage = StreamConfigStorage.File,
            Retention = StreamConfigRetention.Limits,
            MaxMsgs = 10_000_000,
        });

        try
        {
            // Warmup: let the connection, stream, and JIT settle before timing.
            for (var i = 0; i < 500; i++)
            {
                var warmupAck = await js.PublishAsync(subject, payload);
                warmupAck.EnsureSuccess();
            }

            // Measurement — fire-and-gather in batches. Every ack is validated
            // so a rejected publish cannot be silently counted as throughput.
            var sw = Stopwatch.StartNew();
            var pending = new List<ValueTask<PubAckResponse>>(batchSize);

            for (var i = 0; i < messageCount; i++)
            {
                pending.Add(js.PublishAsync(subject, payload));

                if (pending.Count >= batchSize)
                {
                    await DrainAsync(pending);
                }
            }

            // Flush the final partial batch, if any.
            await DrainAsync(pending);

            sw.Stop();

            return new BenchmarkResult
            {
                Name = name,
                ServerType = serverType,
                TotalMessages = messageCount,
                TotalBytes = (long)messageCount * payloadSize,
                Duration = sw.Elapsed,
            };
        }
        finally
        {
            // Always remove the benchmark stream so runs don't leak server state.
            await js.DeleteStreamAsync(streamName);
        }

        // Awaits every pending ack (each ValueTask exactly once), validates it,
        // and clears the list for reuse by the next batch.
        static async Task DrainAsync(List<ValueTask<PubAckResponse>> pending)
        {
            foreach (var ack in pending)
            {
                (await ack).EnsureSuccess();
            }

            pending.Clear();
        }
    }
}
|
||||
Reference in New Issue
Block a user