Files
Joseph Doherty 1d4b87e5f9 docs: refresh benchmark comparison with increased message counts
Increase message counts across all 14 benchmark test files to reduce
run-to-run variance (e.g. PubSub 16B: 10K→50K, FanOut: 10K→15K,
SinglePub: 100K→500K, JS tests: 5K→25K). Rewrite benchmarks_comparison.md
with fresh numbers from two-batch runs. Key changes: multi 4x4 reached
parity (1.01x), fan-out improved to 0.84x, TLS pub/sub shows 4.70x .NET
advantage, previous small-count anomalies corrected.
2026-03-13 17:52:03 -04:00

107 lines
3.8 KiB
C#

using System.Diagnostics;
using NATS.Client.Core;
using NATS.Server.Benchmark.Tests.Harness;
using NATS.Server.Benchmark.Tests.Infrastructure;
using Xunit.Abstractions;
namespace NATS.Server.Benchmark.Tests.RequestReply;
[Collection("Benchmark-Core")]
public class MultiClientLatencyTests(CoreServerPairFixture fixture, ITestOutputHelper output)
{
    /// <summary>
    /// Benchmarks request/reply latency under multi-client contention:
    /// 10 concurrent requesters against 2 queue-group responders with a
    /// 16-byte payload, run against the .NET server and — when the fixture
    /// reports it available — the Go server for side-by-side comparison.
    /// </summary>
    [Fact]
    [Trait("Category", "Benchmark")]
    public async Task RequestReply_10Clients2Services_16B()
    {
        const int payloadSize = 16;
        const int requestsPerClient = 5_000;
        const int clientCount = 10;
        const int serviceCount = 2;

        var dotnetResult = await RunMultiLatency("Request-Reply 10Cx2S (16B)", "DotNet", payloadSize, requestsPerClient, clientCount, serviceCount, fixture.CreateDotNetClient);

        if (fixture.GoAvailable)
        {
            var goResult = await RunMultiLatency("Request-Reply 10Cx2S (16B)", "Go", payloadSize, requestsPerClient, clientCount, serviceCount, fixture.CreateGoClient);
            BenchmarkResultWriter.WriteComparison(output, goResult, dotnetResult);
        }
        else
        {
            BenchmarkResultWriter.WriteSingle(output, dotnetResult);
        }
    }

    /// <summary>
    /// Runs one multi-client request/reply benchmark pass: starts
    /// <paramref name="serviceCount"/> responders on a shared queue group that
    /// reply to every request, then drives <paramref name="clientCount"/>
    /// concurrent clients each issuing <paramref name="requestsPerClient"/>
    /// sequential requests while recording per-request latency.
    /// </summary>
    /// <param name="name">Display name for the result row.</param>
    /// <param name="serverType">Server label ("DotNet"/"Go"); also keys the queue-group name.</param>
    /// <param name="payloadSize">Request/reply payload size in bytes.</param>
    /// <param name="requestsPerClient">Sequential requests issued by each client.</param>
    /// <param name="clientCount">Number of concurrent requester clients.</param>
    /// <param name="serviceCount">Number of queue-group responders.</param>
    /// <param name="createClient">Factory producing a connection to the server under test.</param>
    /// <returns>Aggregate throughput and latency percentiles for the pass.</returns>
    private static async Task<BenchmarkResult> RunMultiLatency(
        string name, string serverType, int payloadSize, int requestsPerClient,
        int clientCount, int serviceCount, Func<NatsConnection> createClient)
    {
        var payload = new byte[payloadSize];
        const string subject = "bench.reqrep.multi";
        // Per-server-type queue group so DotNet and Go passes never share responders.
        var queueGroup = $"bench-svc-{serverType.ToLowerInvariant()}";

        // Start service responders on a queue group.
        var serviceClients = new NatsConnection[serviceCount];
        var serviceSubs = new INatsSub<byte[]>[serviceCount];
        var serviceTasks = new Task[serviceCount];
        for (var i = 0; i < serviceCount; i++)
        {
            serviceClients[i] = createClient();
            await serviceClients[i].ConnectAsync();
            serviceSubs[i] = await serviceClients[i].SubscribeCoreAsync<byte[]>(subject, queueGroup: queueGroup);
            // Capture per-iteration locals so each responder task closes over
            // its own client/sub rather than the loop variables.
            var client = serviceClients[i];
            var sub = serviceSubs[i];
            serviceTasks[i] = Task.Run(async () =>
            {
                await foreach (var msg in sub.Msgs.ReadAllAsync())
                {
                    if (msg.ReplyTo is not null)
                        await client.PublishAsync(msg.ReplyTo, payload);
                }
            });
        }

        // Brief settle so the server has registered the queue-group interest
        // before the first request — TODO: confirm whether a flush/ping on the
        // service connections could replace this fixed delay.
        await Task.Delay(50);

        // Run concurrent clients.
        var totalMessages = requestsPerClient * clientCount;
        var tracker = new LatencyTracker(totalMessages);
        var sw = Stopwatch.StartNew();
        try
        {
            var clientTasks = new Task[clientCount];
            for (var c = 0; c < clientCount; c++)
            {
                clientTasks[c] = Task.Run(async () =>
                {
                    await using var client = createClient();
                    await client.ConnectAsync();
                    for (var i = 0; i < requestsPerClient; i++)
                    {
                        var start = Stopwatch.GetTimestamp();
                        await client.RequestAsync<byte[], byte[]>(subject, payload);
                        tracker.Record(Stopwatch.GetTimestamp() - start);
                    }
                });
            }

            await Task.WhenAll(clientTasks);
            sw.Stop();
        }
        finally
        {
            // Cleanup runs even when a client task faults (e.g. request
            // timeout), so responder connections never leak across tests.
            foreach (var sub in serviceSubs)
                await sub.UnsubscribeAsync();

            // Unsubscribing completes each subscription's message channel,
            // letting the responder loops exit. Await them BEFORE disposing
            // the connections: otherwise disposal races an in-flight reply
            // publish, and any responder exception is never observed.
            await Task.WhenAll(serviceTasks);

            foreach (var client in serviceClients)
                await client.DisposeAsync();
        }

        return new BenchmarkResult
        {
            Name = name,
            ServerType = serverType,
            TotalMessages = totalMessages,
            TotalBytes = (long)totalMessages * payloadSize,
            Duration = sw.Elapsed,
            Latencies = tracker.ComputePercentiles(),
        };
    }
}