feat: add benchmark test project for Go vs .NET server comparison

Side-by-side performance benchmarks using NATS.Client.Core against both
servers on ephemeral ports. Includes core pub/sub, request/reply latency,
and JetStream throughput tests with comparison output and
benchmarks_comparison.md results. Also fixes timestamp flakiness in
StoreInterfaceTests by using explicit timestamps.
This commit is contained in:
Joseph Doherty
2026-03-13 01:23:31 -04:00
parent e9c86c51c3
commit 37575dc41c
28 changed files with 2264 additions and 12 deletions

View File

@@ -0,0 +1,106 @@
using System.Diagnostics;
using NATS.Client.Core;
using NATS.Server.Benchmark.Tests.Harness;
using NATS.Server.Benchmark.Tests.Infrastructure;
using Xunit.Abstractions;
namespace NATS.Server.Benchmark.Tests.RequestReply;
[Collection("Benchmark-Core")]
public class MultiClientLatencyTests(CoreServerPairFixture fixture, ITestOutputHelper output)
{
    /// <summary>
    /// Benchmarks request/reply latency with 10 concurrent requesting clients
    /// against 2 queue-group responders using a 16-byte payload, on the .NET
    /// server and (when available) the Go server, then writes a comparison.
    /// </summary>
    [Fact]
    [Trait("Category", "Benchmark")]
    public async Task RequestReply_10Clients2Services_16B()
    {
        const int payloadSize = 16;
        const int requestsPerClient = 1_000;
        const int clientCount = 10;
        const int serviceCount = 2;
        var dotnetResult = await RunMultiLatency("Request-Reply 10Cx2S (16B)", "DotNet", payloadSize, requestsPerClient, clientCount, serviceCount, fixture.CreateDotNetClient);
        if (fixture.GoAvailable)
        {
            var goResult = await RunMultiLatency("Request-Reply 10Cx2S (16B)", "Go", payloadSize, requestsPerClient, clientCount, serviceCount, fixture.CreateGoClient);
            BenchmarkResultWriter.WriteComparison(output, goResult, dotnetResult);
        }
        else
        {
            BenchmarkResultWriter.WriteSingle(output, dotnetResult);
        }
    }

    /// <summary>
    /// Runs <paramref name="clientCount"/> concurrent clients, each issuing
    /// <paramref name="requestsPerClient"/> request/reply round-trips against
    /// <paramref name="serviceCount"/> responders sharing one queue group.
    /// </summary>
    /// <param name="name">Benchmark display name for the result.</param>
    /// <param name="serverType">"DotNet" or "Go"; also salts the queue group.</param>
    /// <param name="payloadSize">Request/reply payload size in bytes.</param>
    /// <param name="requestsPerClient">Sequential requests per client.</param>
    /// <param name="clientCount">Number of concurrent requesting clients.</param>
    /// <param name="serviceCount">Number of queue-group responders.</param>
    /// <param name="createClient">Factory for connections to the target server.</param>
    /// <returns>Throughput and latency percentiles for the run.</returns>
    private static async Task<BenchmarkResult> RunMultiLatency(
        string name, string serverType, int payloadSize, int requestsPerClient,
        int clientCount, int serviceCount, Func<NatsConnection> createClient)
    {
        var payload = new byte[payloadSize];
        const string subject = "bench.reqrep.multi";
        // Per-server queue group so the Go and .NET runs never share responders.
        var queueGroup = $"bench-svc-{serverType.ToLowerInvariant()}";

        var serviceClients = new NatsConnection[serviceCount];
        var serviceSubs = new INatsSub<byte[]>[serviceCount];
        var serviceTasks = new Task[serviceCount];
        try
        {
            // Start service responders on a queue group.
            for (var i = 0; i < serviceCount; i++)
            {
                serviceClients[i] = createClient();
                await serviceClients[i].ConnectAsync();
                serviceSubs[i] = await serviceClients[i].SubscribeCoreAsync<byte[]>(subject, queueGroup: queueGroup);
                var client = serviceClients[i];
                var sub = serviceSubs[i];
                serviceTasks[i] = Task.Run(async () =>
                {
                    // Echo the fixed payload to each requester; the loop ends
                    // when the subscription is unsubscribed and its channel completes.
                    await foreach (var msg in sub.Msgs.ReadAllAsync())
                    {
                        if (msg.ReplyTo is not null)
                            await client.PublishAsync(msg.ReplyTo, payload);
                    }
                });
            }

            // Brief settle so the server registers the subscriptions before load.
            await Task.Delay(50);

            // Run concurrent clients; duration includes per-client connect,
            // matching the original measurement window.
            var totalMessages = requestsPerClient * clientCount;
            var tracker = new LatencyTracker(totalMessages);
            var sw = Stopwatch.StartNew();
            var clientTasks = new Task[clientCount];
            for (var c = 0; c < clientCount; c++)
            {
                clientTasks[c] = Task.Run(async () =>
                {
                    await using var client = createClient();
                    await client.ConnectAsync();
                    for (var i = 0; i < requestsPerClient; i++)
                    {
                        var start = Stopwatch.GetTimestamp();
                        await client.RequestAsync<byte[], byte[]>(subject, payload);
                        tracker.Record(Stopwatch.GetTimestamp() - start);
                    }
                });
            }
            await Task.WhenAll(clientTasks);
            sw.Stop();

            return new BenchmarkResult
            {
                Name = name,
                ServerType = serverType,
                TotalMessages = totalMessages,
                TotalBytes = (long)totalMessages * payloadSize,
                Duration = sw.Elapsed,
                Latencies = tracker.ComputePercentiles(),
            };
        }
        finally
        {
            // Cleanup runs even when a client task throws, so service
            // connections never leak. Null checks cover partial setup failure.
            // Unsubscribe first so each responder's Msgs channel completes.
            foreach (var sub in serviceSubs)
            {
                if (sub is not null)
                    await sub.UnsubscribeAsync();
            }

            // BUG FIX: these tasks were previously fire-and-forget, so a
            // responder exception went unobserved and the benchmark could pass
            // with broken responders. Await them (after unsubscribe, so they
            // can complete) and before disposing the connections they publish on.
            foreach (var task in serviceTasks)
            {
                if (task is not null)
                    await task;
            }

            foreach (var client in serviceClients)
            {
                if (client is not null)
                    await client.DisposeAsync();
            }
        }
    }
}