Increase message counts across all 14 benchmark test files to reduce run-to-run variance (e.g., PubSub 16B: 10K→50K, FanOut: 10K→15K, SinglePub: 100K→500K, JS tests: 5K→25K). Rewrite benchmarks_comparison.md with fresh numbers from two-batch runs. Key results: multi 4x4 reached parity (1.01x), fan-out improved to 0.84x, TLS pub/sub shows a 4.70x .NET advantage, and the anomalies from the previous small-count runs are corrected.
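The rationale for the larger counts: each run carries a fixed amount of per-run noise (connects, JIT warm-up, subscription propagation), and that noise shrinks relative to steady-state work as the message count grows. A minimal, self-contained sketch of the effect — the 50 ms overhead and ~1M msg/s rate below are illustrative assumptions, not measurements from these benchmarks:

using System;

// Illustrative sketch, not repo code: fixed per-run noise is amortized as the
// message count grows, so the measured rate converges on the steady-state rate
// and timing jitter matters less.
const double fixedOverheadMs = 50;         // assumed per-run noise (connect, JIT, flush)
const double steadyStateMsgsPerMs = 1_000; // assumed true rate (~1M msg/s)

foreach (var count in new[] { 10_000, 50_000, 500_000 })
{
    var elapsedMs = fixedOverheadMs + count / steadyStateMsgsPerMs;
    var measuredRate = count / elapsedMs; // observed rate including noise
    Console.WriteLine($"{count,7:N0} msgs: {measuredRate:F0} msgs/ms " +
                      $"({measuredRate / steadyStateMsgsPerMs:P0} of steady state)");
}

Under these assumed numbers, a 10K-message run observes only ~17% of the steady-state rate (overhead dominates), while a 500K run observes ~91% — which is why the small-count runs were anomaly-prone.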
100 lines · 3.5 KiB · C#
using NATS.Client.Core;
using NATS.Server.Benchmark.Tests.Harness;
using NATS.Server.Benchmark.Tests.Infrastructure;
using Xunit.Abstractions;

namespace NATS.Server.Benchmark.Tests.CorePubSub;

[Collection("Benchmark-Core")]
public class FanOutTests(CoreServerPairFixture fixture, ITestOutputHelper output)
{
    [Fact]
    [Trait("Category", "Benchmark")]
    public async Task FanOut1To4_128B()
    {
        const int payloadSize = 128;
        const int messageCount = 15_000;
        const int subscriberCount = 4;

        var dotnetResult = await RunFanOut("Fan-Out 1:4 (128B)", "DotNet", payloadSize, messageCount, subscriberCount, fixture.CreateDotNetClient);

        if (fixture.GoAvailable)
        {
            var goResult = await RunFanOut("Fan-Out 1:4 (128B)", "Go", payloadSize, messageCount, subscriberCount, fixture.CreateGoClient);
            BenchmarkResultWriter.WriteComparison(output, goResult, dotnetResult);
        }
        else
        {
            BenchmarkResultWriter.WriteSingle(output, dotnetResult);
        }
    }

    private static async Task<BenchmarkResult> RunFanOut(string name, string serverType, int payloadSize, int messageCount, int subscriberCount, Func<NatsConnection> createClient)
    {
        var payload = new byte[payloadSize];

        // Unique subject per run so back-to-back Go/.NET runs never see each other's traffic.
        var subject = $"bench.fanout.{serverType.ToLowerInvariant()}.{Guid.NewGuid():N}";

        await using var pubClient = createClient();
        await pubClient.ConnectAsync();

        var subClients = new NatsConnection[subscriberCount];
        var subs = new INatsSub<byte[]>[subscriberCount];
        var subTasks = new Task[subscriberCount];

        // Every subscriber receives every message, so the fan-out total is count x subscribers.
        var totalExpected = messageCount * subscriberCount;
        var totalReceived = 0;
        var tcs = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously);

        for (var i = 0; i < subscriberCount; i++)
        {
            subClients[i] = createClient();
            await subClients[i].ConnectAsync();
            subs[i] = await subClients[i].SubscribeCoreAsync<byte[]>(subject);
        }

        // Ping acts as a flush barrier: the server has processed each SUB once the ping returns.
        foreach (var client in subClients)
            await client.PingAsync();
        await pubClient.PingAsync();

        // Start reading only after all subscriptions are confirmed.
        for (var i = 0; i < subscriberCount; i++)
        {
            var sub = subs[i];
            subTasks[i] = Task.Run(async () =>
            {
                await foreach (var _ in sub.Msgs.ReadAllAsync())
                {
                    // Shared counter across all subscribers; the reader that sees the
                    // final message completes the benchmark.
                    if (Interlocked.Increment(ref totalReceived) >= totalExpected)
                    {
                        tcs.TrySetResult();
                        return;
                    }
                }
            });
        }

        var sw = System.Diagnostics.Stopwatch.StartNew();
        for (var i = 0; i < messageCount; i++)
            await pubClient.PublishAsync(subject, payload);
        await pubClient.PingAsync(); // flush the publish buffer

        // Guard against a hung run; WaitAsync throws OperationCanceledException on timeout.
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(180));
        await tcs.Task.WaitAsync(cts.Token);
        sw.Stop();

        foreach (var sub in subs)
            await sub.UnsubscribeAsync();

        // Unsubscribing completes each Msgs channel; let the reader tasks drain
        // and exit before their connections are torn down.
        await Task.WhenAll(subTasks);

        foreach (var client in subClients)
            await client.DisposeAsync();

        return new BenchmarkResult
        {
            Name = name,
            ServerType = serverType,
            TotalMessages = totalExpected,
            TotalBytes = (long)totalExpected * payloadSize,
            Duration = sw.Elapsed,
        };
    }
}