Increase message counts across all 14 benchmark test files to reduce run-to-run variance (e.g. PubSub 16B: 10K→50K, FanOut: 10K→15K, SinglePub: 100K→500K, JS tests: 5K→25K). Rewrite benchmarks_comparison.md with fresh numbers from two-batch runs. Key changes: multi 4x4 reached parity (1.01x), fan-out improved to 0.84x, TLS pub/sub shows 4.70x .NET advantage, previous small-count anomalies corrected.
94 lines
3.0 KiB
C#
using System.Diagnostics;
using NATS.Client.Core;
using NATS.Client.JetStream;
using NATS.Client.JetStream.Models;
using NATS.Server.Benchmark.Tests.Harness;
using NATS.Server.Benchmark.Tests.Infrastructure;
using Xunit.Abstractions;
namespace NATS.Server.Benchmark.Tests.JetStream;
[Collection("Benchmark-JetStream")]
public class AsyncPublishTests(JetStreamServerPairFixture fixture, ITestOutputHelper output)
{
    /// <summary>
    /// Benchmarks JetStream async publish throughput (128-byte payloads, file-backed
    /// stream) against the .NET server, and against the Go server when one is available.
    /// </summary>
    [Fact]
    [Trait("Category", "Benchmark")]
    public async Task JSAsyncPublish_128B_FileStore()
    {
        const int payloadSize = 128;
        const int messageCount = 25_000;
        const int batchSize = 100;

        var dotnetResult = await RunAsyncPublish("JS Async Publish (128B File)", "DotNet", payloadSize, messageCount, batchSize, fixture.CreateDotNetClient);

        // Without a Go server there is nothing to compare against — report the single run.
        if (!fixture.GoAvailable)
        {
            BenchmarkResultWriter.WriteSingle(output, dotnetResult);
            return;
        }

        var goResult = await RunAsyncPublish("JS Async Publish (128B File)", "Go", payloadSize, messageCount, batchSize, fixture.CreateGoClient);
        BenchmarkResultWriter.WriteComparison(output, goResult, dotnetResult);
    }

    /// <summary>
    /// Publishes <paramref name="messageCount"/> messages of <paramref name="payloadSize"/>
    /// bytes to a throw-away file-backed stream, gathering publish acks in batches of
    /// <paramref name="batchSize"/>, and times the publish phase with a stopwatch.
    /// </summary>
    /// <param name="name">Human-readable benchmark name carried into the result.</param>
    /// <param name="serverType">Server label ("DotNet"/"Go"); also used to derive unique stream/subject names.</param>
    /// <param name="payloadSize">Size of each published message in bytes.</param>
    /// <param name="messageCount">Number of timed messages to publish.</param>
    /// <param name="batchSize">How many publishes are left in flight before awaiting their acks.</param>
    /// <param name="createClient">Factory producing a connection to the target server.</param>
    /// <returns>A <see cref="BenchmarkResult"/> with the measured duration and totals.</returns>
    private static async Task<BenchmarkResult> RunAsyncPublish(string name, string serverType, int payloadSize, int messageCount, int batchSize, Func<NatsConnection> createClient)
    {
        var payload = new byte[payloadSize];
        // Unique per run so parallel/repeated runs never collide; truncated to 30 chars.
        var streamName = $"BENCH_ASYNC_{serverType.ToUpperInvariant()}_{Guid.NewGuid():N}"[..30];
        var subject = $"bench.js.async.{serverType.ToLowerInvariant()}";

        await using var nats = createClient();
        await nats.ConnectAsync();
        var js = new NatsJSContext(nats);

        await js.CreateStreamAsync(new StreamConfig(streamName, [subject])
        {
            Storage = StreamConfigStorage.File,
            Retention = StreamConfigRetention.Limits,
            MaxMsgs = 10_000_000,
        });

        try
        {
            // Warmup: 500 untimed publishes to settle connection/stream state.
            for (var warmup = 0; warmup < 500; warmup++)
                await js.PublishAsync(subject, payload);

            // Timed phase: fire publishes, awaiting acks whenever a batch fills up.
            var stopwatch = Stopwatch.StartNew();
            var inFlight = new List<ValueTask<PubAckResponse>>(batchSize);

            // Await every pending ack exactly once (ValueTasks must not be awaited twice).
            async Task DrainAsync()
            {
                foreach (var pending in inFlight)
                    await pending;
                inFlight.Clear();
            }

            for (var sent = 0; sent < messageCount; sent++)
            {
                inFlight.Add(js.PublishAsync(subject, payload));

                if (inFlight.Count >= batchSize)
                    await DrainAsync();
            }

            // Gather whatever is left from the final partial batch.
            await DrainAsync();

            stopwatch.Stop();

            return new BenchmarkResult
            {
                Name = name,
                ServerType = serverType,
                TotalMessages = messageCount,
                TotalBytes = (long)messageCount * payloadSize,
                Duration = stopwatch.Elapsed,
            };
        }
        finally
        {
            // Always remove the throw-away stream, even if the measurement failed.
            await js.DeleteStreamAsync(streamName);
        }
    }
}