test(batch58): port 55 JetStream misc integration tests
This commit is contained in:
@@ -0,0 +1,594 @@
|
||||
// Copyright 2025 The NATS Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Ported from: golang/nats-server/server/jetstream_batching_test.go
|
||||
// These tests exercise the JetStream atomic batch publish protocol via NATS headers.
|
||||
// Tests that require direct access to Go server internals (mset.batches, clMu, etc.)
|
||||
// are marked with [Fact(Skip = ...)] because those internal structures are not accessible
|
||||
// over the NATS protocol from an external client.
|
||||
|
||||
using System.Text.Json.Nodes;
|
||||
using NATS.Client.Core;
|
||||
using Shouldly;
|
||||
|
||||
namespace ZB.MOM.NatsNet.Server.IntegrationTests.JetStream;
|
||||
|
||||
/// <summary>
|
||||
/// Integration tests ported from jetstream_batching_test.go (26 tests).
|
||||
/// Tests the JetStream atomic batch publish protocol using NATS headers.
|
||||
/// </summary>
|
||||
/// <remarks>
|
||||
/// The Go source tests fall into two categories:
|
||||
/// (a) Tests exercising the server via NATS protocol (Nats-Batch-* headers): ported directly.
|
||||
/// (b) Tests accessing Go server internals (mset.batches, clMu, checkMsgHeadersPreClusteredProposal):
|
||||
/// skipped because those internal structures are not reachable from a .NET NATS client.
|
||||
/// </remarks>
|
||||
[Trait("Category", "Integration")]
|
||||
public class JetStreamBatchingIntegrationTests : IAsyncLifetime
|
||||
{
|
||||
private NatsConnection? _nats;
|
||||
private Exception? _initFailure;
|
||||
|
||||
public async Task InitializeAsync()
|
||||
{
|
||||
try
|
||||
{
|
||||
_nats = new NatsConnection(new NatsOpts { Url = "nats://localhost:4222" });
|
||||
await _nats.ConnectAsync();
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_initFailure = ex;
|
||||
}
|
||||
}
|
||||
|
||||
public async Task DisposeAsync()
|
||||
{
|
||||
if (_nats is not null)
|
||||
await _nats.DisposeAsync();
|
||||
}
|
||||
|
||||
private bool ServerUnavailable() => _initFailure != null;
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Helpers
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
private async Task CreateStreamAsync(string name, string[] subjects, bool allowAtomicPublish = false, string storage = "file", string retention = "limits")
|
||||
{
|
||||
var cfg = new JsonObject
|
||||
{
|
||||
["name"] = name,
|
||||
["subjects"] = new JsonArray(subjects.Select(s => JsonValue.Create(s)).ToArray<JsonNode?>()),
|
||||
["storage"] = storage,
|
||||
["retention"] = retention,
|
||||
["allow_atomic_publish"] = allowAtomicPublish,
|
||||
};
|
||||
var payload = System.Text.Encoding.UTF8.GetBytes(cfg.ToJsonString());
|
||||
// NatsMsg<byte[]> is a struct — just await; a response being returned confirms the call succeeded.
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{name}", payload);
|
||||
}
|
||||
|
||||
private async Task UpdateStreamAsync(string name, string[] subjects, bool allowAtomicPublish = false, string storage = "file", string retention = "limits")
|
||||
{
|
||||
var cfg = new JsonObject
|
||||
{
|
||||
["name"] = name,
|
||||
["subjects"] = new JsonArray(subjects.Select(s => JsonValue.Create(s)).ToArray<JsonNode?>()),
|
||||
["storage"] = storage,
|
||||
["retention"] = retention,
|
||||
["allow_atomic_publish"] = allowAtomicPublish,
|
||||
};
|
||||
var payload = System.Text.Encoding.UTF8.GetBytes(cfg.ToJsonString());
|
||||
// NatsMsg<byte[]> is a struct — just await; a response being returned confirms the call succeeded.
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.UPDATE.{name}", payload);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublish
|
||||
// Tests basic atomic batch publish flow: disabled, enabled, missing seq error.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AtomicBatchPublish_ShouldSucceed()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"BATCHTEST_{Guid.NewGuid():N}";
|
||||
await CreateStreamAsync(streamName, [$"bat.{streamName}.*"], allowAtomicPublish: false);
|
||||
|
||||
// Publish with atomic publish disabled — expect error in pub ack.
|
||||
var hdrs = new NatsHeaders { ["Nats-Batch-Id"] = "uuid" };
|
||||
var inbox = _nats!.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(inbox);
|
||||
await _nats.PublishAsync($"bat.{streamName}.0", Array.Empty<byte>(), headers: hdrs, replyTo: inbox);
|
||||
|
||||
JsonObject? ack = null;
|
||||
using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var reply in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 })
|
||||
ack = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
break;
|
||||
}
|
||||
}
|
||||
ack.ShouldNotBeNull("Expected a pub ack response");
|
||||
ack["error"].ShouldNotBeNull("Expected error field when atomic publish is disabled");
|
||||
|
||||
// Enable atomic publish.
|
||||
await UpdateStreamAsync(streamName, [$"bat.{streamName}.*"], allowAtomicPublish: true);
|
||||
|
||||
// Publish without batch sequence — expect missing seq error.
|
||||
var inbox2 = _nats.NewInbox();
|
||||
var sub2 = await _nats.SubscribeCoreAsync<byte[]>(inbox2);
|
||||
var hdrs2 = new NatsHeaders { ["Nats-Batch-Id"] = "uuid" };
|
||||
await _nats.PublishAsync($"bat.{streamName}.0", Array.Empty<byte>(), headers: hdrs2, replyTo: inbox2);
|
||||
JsonObject? ack2 = null;
|
||||
using (var cts2 = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var reply in sub2.Msgs.ReadAllAsync(cts2.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 })
|
||||
ack2 = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
break;
|
||||
}
|
||||
}
|
||||
ack2.ShouldNotBeNull();
|
||||
ack2["error"].ShouldNotBeNull("Expected error for missing sequence header");
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishEmptyAck
|
||||
// Non-commit messages return empty ack (flow control). Commit returns full pub ack.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AtomicBatchPublishEmptyAck_ShouldReturnEmptyForNonCommit()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"BATCHEA_{Guid.NewGuid():N}";
|
||||
await CreateStreamAsync(streamName, [$"ea.{streamName}.*"], allowAtomicPublish: true);
|
||||
|
||||
var batchId = "uuid-ea";
|
||||
const int batchSize = 5;
|
||||
|
||||
for (int seq = 1; seq <= batchSize; seq++)
|
||||
{
|
||||
var subject = $"ea.{streamName}.{seq}";
|
||||
var data = System.Text.Encoding.UTF8.GetBytes(subject);
|
||||
bool isCommit = seq == batchSize;
|
||||
|
||||
var hdrs = new NatsHeaders
|
||||
{
|
||||
["Nats-Batch-Id"] = batchId,
|
||||
["Nats-Batch-Sequence"] = seq.ToString(),
|
||||
};
|
||||
if (isCommit)
|
||||
hdrs["Nats-Batch-Commit"] = "1";
|
||||
|
||||
var inbox = _nats!.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(inbox);
|
||||
await _nats.PublishAsync(subject, data, headers: hdrs, replyTo: inbox);
|
||||
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(3));
|
||||
await foreach (var reply in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if (!isCommit)
|
||||
{
|
||||
(reply.Data is null || reply.Data.Length == 0).ShouldBeTrue(
|
||||
"Expected empty ack for non-commit message");
|
||||
}
|
||||
else
|
||||
{
|
||||
reply.Data.ShouldNotBeNull();
|
||||
reply.Data.Length.ShouldBeGreaterThan(0, "Expected full pub ack for commit message");
|
||||
var ack = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
ack.ShouldNotBeNull();
|
||||
ack["error"].ShouldBeNull("Commit should not return error");
|
||||
((int?)ack["batch_size"]).ShouldBe(batchSize);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishCommitEob
|
||||
// EOB commit excludes the EOB message itself; batchSize should equal seq count - 1.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AtomicBatchPublishCommitEob_ShouldExcludeEobMessage()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"BATCHEOB_{Guid.NewGuid():N}";
|
||||
await CreateStreamAsync(streamName, [$"eob.{streamName}"], allowAtomicPublish: true);
|
||||
|
||||
var batchId = "uuid-eob";
|
||||
var subject = $"eob.{streamName}";
|
||||
|
||||
// Seq 1 and 2: publish without commit, consume empty ack each time.
|
||||
for (int seq = 1; seq <= 2; seq++)
|
||||
{
|
||||
var hdrs = new NatsHeaders
|
||||
{
|
||||
["Nats-Batch-Id"] = batchId,
|
||||
["Nats-Batch-Sequence"] = seq.ToString(),
|
||||
};
|
||||
var inbox = _nats!.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(inbox);
|
||||
await _nats.PublishAsync(subject, Array.Empty<byte>(), headers: hdrs, replyTo: inbox);
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(3));
|
||||
await foreach (var _ in sub.Msgs.ReadAllAsync(cts.Token)) break;
|
||||
}
|
||||
|
||||
// Seq 3: publish with "eob" commit — this message itself is NOT stored.
|
||||
var hdrs3 = new NatsHeaders
|
||||
{
|
||||
["Nats-Batch-Id"] = batchId,
|
||||
["Nats-Batch-Sequence"] = "3",
|
||||
["Nats-Batch-Commit"] = "eob",
|
||||
};
|
||||
var inbox3 = _nats!.NewInbox();
|
||||
var sub3 = await _nats.SubscribeCoreAsync<byte[]>(inbox3);
|
||||
await _nats.PublishAsync(subject, Array.Empty<byte>(), headers: hdrs3, replyTo: inbox3);
|
||||
|
||||
JsonObject? ack = null;
|
||||
using (var cts3 = new CancellationTokenSource(TimeSpan.FromSeconds(5)))
|
||||
{
|
||||
await foreach (var reply in sub3.Msgs.ReadAllAsync(cts3.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 })
|
||||
ack = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ack.ShouldNotBeNull("Expected pub ack from EOB commit");
|
||||
ack["error"].ShouldBeNull("EOB commit should not return error");
|
||||
((int?)ack["batch_size"]).ShouldBe(2);
|
||||
ack["batch_id"]?.GetValue<string>().ShouldBe(batchId);
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishLimits
|
||||
// Batch ID length limit: max 64 chars. IDs longer than 64 are rejected.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AtomicBatchPublishLimits_BatchIdTooLong_ShouldError()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"BATCHLIM_{Guid.NewGuid():N}";
|
||||
await CreateStreamAsync(streamName, [$"lim.{streamName}"], allowAtomicPublish: true);
|
||||
|
||||
// 64-char batch ID should succeed.
|
||||
var validId = new string('A', 64);
|
||||
var hdrsOk = new NatsHeaders
|
||||
{
|
||||
["Nats-Batch-Id"] = validId,
|
||||
["Nats-Batch-Sequence"] = "1",
|
||||
["Nats-Batch-Commit"] = "1",
|
||||
};
|
||||
var inboxOk = _nats!.NewInbox();
|
||||
var subOk = await _nats.SubscribeCoreAsync<byte[]>(inboxOk);
|
||||
await _nats.PublishAsync($"lim.{streamName}", Array.Empty<byte>(), headers: hdrsOk, replyTo: inboxOk);
|
||||
JsonObject? ackOk = null;
|
||||
using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var reply in subOk.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 })
|
||||
ackOk = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
break;
|
||||
}
|
||||
}
|
||||
ackOk.ShouldNotBeNull("Expected pub ack for 64-char batch ID");
|
||||
|
||||
// 65-char batch ID should be rejected.
|
||||
var longId = new string('A', 65);
|
||||
var hdrsLong = new NatsHeaders
|
||||
{
|
||||
["Nats-Batch-Id"] = longId,
|
||||
["Nats-Batch-Sequence"] = "1",
|
||||
["Nats-Batch-Commit"] = "1",
|
||||
};
|
||||
var inboxLong = _nats.NewInbox();
|
||||
var subLong = await _nats.SubscribeCoreAsync<byte[]>(inboxLong);
|
||||
await _nats.PublishAsync($"lim.{streamName}", Array.Empty<byte>(), headers: hdrsLong, replyTo: inboxLong);
|
||||
JsonObject? ackLong = null;
|
||||
using (var cts2 = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var reply in subLong.Msgs.ReadAllAsync(cts2.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 })
|
||||
ackLong = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
break;
|
||||
}
|
||||
}
|
||||
ackLong.ShouldNotBeNull();
|
||||
ackLong["error"].ShouldNotBeNull("65-char batch ID should be rejected by the server");
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishDedupeNotAllowed
|
||||
// Pre-existing dedup IDs must not be allowed in a batch.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AtomicBatchPublishDedupeNotAllowed_PreExistingIdShouldError()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"BATCHDD_{Guid.NewGuid():N}";
|
||||
await CreateStreamAsync(streamName, [$"dd.{streamName}"], allowAtomicPublish: true);
|
||||
|
||||
// Publish a pre-existing message with dedup ID.
|
||||
var hdrsPre = new NatsHeaders { ["Nats-Msg-Id"] = "pre-existing" };
|
||||
var inboxPre = _nats!.NewInbox();
|
||||
var subPre = await _nats.SubscribeCoreAsync<byte[]>(inboxPre);
|
||||
await _nats.PublishAsync($"dd.{streamName}", Array.Empty<byte>(), headers: hdrsPre, replyTo: inboxPre);
|
||||
using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var _ in subPre.Msgs.ReadAllAsync(cts.Token)) break;
|
||||
}
|
||||
|
||||
// Publish a batch that includes the same dedup ID — should fail.
|
||||
var hdrsDup = new NatsHeaders
|
||||
{
|
||||
["Nats-Msg-Id"] = "pre-existing",
|
||||
["Nats-Batch-Id"] = "uuid",
|
||||
["Nats-Batch-Sequence"] = "1",
|
||||
["Nats-Batch-Commit"] = "1",
|
||||
};
|
||||
var inboxDup = _nats.NewInbox();
|
||||
var subDup = await _nats.SubscribeCoreAsync<byte[]>(inboxDup);
|
||||
await _nats.PublishAsync($"dd.{streamName}", Array.Empty<byte>(), headers: hdrsDup, replyTo: inboxDup);
|
||||
JsonObject? ackDup = null;
|
||||
using (var cts2 = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var reply in subDup.Msgs.ReadAllAsync(cts2.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 })
|
||||
ackDup = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
break;
|
||||
}
|
||||
}
|
||||
ackDup.ShouldNotBeNull();
|
||||
ackDup["error"].ShouldNotBeNull("Duplicate message ID in batch should return error");
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishSourceAndMirror
|
||||
// Requires cluster setup and direct stream inspection. Skipped.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact(Skip = "Requires a running 3-node JetStream cluster with AllowAtomicPublish + mirror support")]
|
||||
public Task AtomicBatchPublishSourceAndMirror_BatchHeadersRemovedInMirror()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishCleanup (4 sub-tests)
|
||||
// All require direct access to Go server internals. Skipped.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.batches, mset.batchApply) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishCleanup_Disable_ShouldCleanupBatchState()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.batches, JetStreamStepdownStream) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishCleanup_StepDown_ShouldCleanupBatchState()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.delete, mset.batchApply) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishCleanup_Delete_ShouldCleanupBatchState()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.batches, batchStagedDiff) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishCleanup_Commit_ShouldCleanupBatchState()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishConfigOpts
|
||||
// Requires server config file creation. Skipped.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact(Skip = "Requires direct server configuration (RunServerWithConfig, opts.JetStreamLimits) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishConfigOpts_DefaultsAndOverrides_ShouldApply()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishDenyHeaders
|
||||
// Unsupported headers in a batch (e.g. Nats-Expected-Last-Msg-Id) should error.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact]
|
||||
public async Task AtomicBatchPublishDenyHeaders_UnsupportedHeader_ShouldError()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"BATCHDH_{Guid.NewGuid():N}";
|
||||
await CreateStreamAsync(streamName, [$"dh.{streamName}"], allowAtomicPublish: true);
|
||||
|
||||
// Seq 1: publish with Nats-Expected-Last-Msg-Id (unsupported in batches).
|
||||
var hdrs1 = new NatsHeaders
|
||||
{
|
||||
["Nats-Batch-Id"] = "uuid",
|
||||
["Nats-Batch-Sequence"] = "1",
|
||||
["Nats-Expected-Last-Msg-Id"] = "msgId",
|
||||
};
|
||||
var inbox1 = _nats!.NewInbox();
|
||||
var sub1 = await _nats.SubscribeCoreAsync<byte[]>(inbox1);
|
||||
await _nats.PublishAsync($"dh.{streamName}", Array.Empty<byte>(), headers: hdrs1, replyTo: inbox1);
|
||||
using (var cts1 = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var _ in sub1.Msgs.ReadAllAsync(cts1.Token)) break;
|
||||
}
|
||||
|
||||
// Seq 2: commit with "eob" — server should surface unsupported header error.
|
||||
var hdrs2 = new NatsHeaders
|
||||
{
|
||||
["Nats-Batch-Id"] = "uuid",
|
||||
["Nats-Batch-Sequence"] = "2",
|
||||
["Nats-Batch-Commit"] = "eob",
|
||||
};
|
||||
var inbox2 = _nats.NewInbox();
|
||||
var sub2 = await _nats.SubscribeCoreAsync<byte[]>(inbox2);
|
||||
await _nats.PublishAsync($"dh.{streamName}", Array.Empty<byte>(), headers: hdrs2, replyTo: inbox2);
|
||||
JsonObject? ack = null;
|
||||
using (var cts2 = new CancellationTokenSource(TimeSpan.FromSeconds(3)))
|
||||
{
|
||||
await foreach (var reply in sub2.Msgs.ReadAllAsync(cts2.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 })
|
||||
ack = JsonNode.Parse(reply.Data)?.AsObject();
|
||||
break;
|
||||
}
|
||||
}
|
||||
ack.ShouldNotBeNull("Expected pub ack from EOB commit with unsupported header");
|
||||
ack["error"].ShouldNotBeNull("Expected error for unsupported batch header (Nats-Expected-Last-Msg-Id)");
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishStageAndCommit (26 sub-tests)
|
||||
// All require direct Go server internals (mset.clMu, checkMsgHeadersPreClusteredProposal,
|
||||
// batchStagedDiff). Skipped.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.clMu, checkMsgHeadersPreClusteredProposal, batchStagedDiff) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DedupeDistinct_ShouldSucceed() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.storeMsgId, checkMsgHeadersPreClusteredProposal) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_Dedupe_ShouldDetectDuplicate() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.clMu, batchStagedDiff) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DedupeStaged_ShouldDetectInBatchDuplicate() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.clusteredCounterTotal, checkMsgHeadersPreClusteredProposal) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_CounterSingle_ShouldAccumulate() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.clusteredCounterTotal) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_CounterMultiple_ShouldAccumulate() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.clusteredCounterTotal pre-init) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_CounterPreInit_ShouldAddToExisting() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (checkMsgHeadersPreClusteredProposal with schedule headers) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesDisabled_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (errMsgTTLDisabled path in batch staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesTtlDisabled_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (NewJSMessageSchedulesTTLInvalidError in staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesTtlInvalid_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (NewJSMessageSchedulesPatternInvalidError in staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesInvalidSchedule_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (NewJSMessageSchedulesTargetInvalidError in staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesTargetMismatch_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (schedule target literal check in staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesTargetMustBeLiteral_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (schedule target uniqueness check in staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesTargetMustBeUnique_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (rollup check in schedule staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedulesRollupDisabled_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (full schedule staging pipeline) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_MsgSchedules_ShouldCommitSuccessfully() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.inflight, DiscardNew policy in staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DiscardNew_ShouldTrackInflight() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.inflight with ErrMaxMsgs) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DiscardNewMaxMsgs_ShouldEnforceLimit() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.inflight with ErrMaxBytes) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DiscardNewMaxBytes_ShouldEnforceLimit() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.inflight with DiscardNewPerSubject) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DiscardNewMaxMsgsPerSubj_ShouldEnforceLimit() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.inflight duplicate per-subject tracking) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DiscardNewMaxMsgsPerSubjDuplicate_ShouldEnforceLimit() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.inflight pre-init with DiscardNewPerSubject) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DiscardNewMaxMsgsPerSubjInflight_ShouldEnforceLimit() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.store pre-existing + DiscardNewPerSubject) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_DiscardNewMaxMsgsPerSubjPreExisting_ShouldEnforceLimit() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (JSExpectedLastSeq in batch staging pre-clustered proposal) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectLastSeq_ShouldSucceed() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (last seq check not allowed after first message in batch) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectLastSeqNotFirst_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (last seq mismatch on first batch message) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectLastSeqInvalidFirst_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (last seq mismatch for subsequent batch messages) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectLastSeqInvalid_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.expectedPerSubjectSequence, expectedPerSubjectInProcess) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectPerSubjSimple_ShouldTrackSequences() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (in-batch per-subject sequence tracking) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectPerSubjRedundantInBatch_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (JSExpectedLastSubjSeqSubj per-batch tracking) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectPerSubjDupeInChange_ShouldSucceed() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (expectedPerSubjectInProcess once set for subject) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectPerSubjNotFirst_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.expectedPerSubjectInProcess pre-init) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectPerSubjInProcess_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.inflight pre-init + per-subject sequence check) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_ExpectPerSubjInflight_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (rollup deny purge check in batch staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_RollupDenyPurge_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (rollup value validation in batch staging) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_RollupInvalid_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (rollup all allowed as first item in batch) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_RollupAllFirst_ShouldSucceed() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (rollup all not allowed after first item) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_RollupAllNotFirst_ShouldError() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (rollup sub with unique subjects) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_RollupSubUnique_ShouldSucceed() => Task.CompletedTask;
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (rollup sub overlap check per batch) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishStageAndCommit_RollupSubOverlap_ShouldError() => Task.CompletedTask;
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// TestJetStreamAtomicBatchPublishHighLevelRollback
|
||||
// Requires direct access to Go server internals. Skipped.
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
[Fact(Skip = "Requires Go server internals (mset.ddarr, mset.ddmap, mset.inflight, expectedPerSubjectSequence) — not accessible via NATS protocol")]
|
||||
public Task AtomicBatchPublishHighLevelRollback_OnError_ShouldClearStagingState()
|
||||
=> Task.CompletedTask;
|
||||
}
|
||||
@@ -0,0 +1,982 @@
|
||||
// Copyright 2025 The NATS Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Ported from:
|
||||
// golang/nats-server/server/jetstream_benchmark_test.go (11 Benchmark* functions)
|
||||
// golang/nats-server/server/jetstream_jwt_test.go (9 tests)
|
||||
// golang/nats-server/server/jetstream_versioning_test.go (2 tests)
|
||||
// golang/nats-server/server/jetstream_meta_benchmark_test.go (2 Benchmark* functions)
|
||||
// golang/nats-server/server/jetstream_cluster_long_test.go (4 tests)
|
||||
// golang/nats-server/server/jetstream_sourcing_scaling_test.go (1 test)
|
||||
//
|
||||
// Porting notes:
|
||||
// - Go Benchmark* functions are ported as correctness-focused integration tests with a
|
||||
// fixed small N to verify the code path works correctly (not to measure performance).
|
||||
// - Tests that require Go-internal server structures (mset, opts, JWT infrastructure,
|
||||
// internal Raft types) are marked [Fact(Skip = ...)].
|
||||
// - Long-running tests (build tag: include_js_long_tests) are skipped.
|
||||
// - TestStreamSourcingScalingSourcingManyBenchmark has explicit t.Skip() in Go source.
|
||||
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Nodes;
|
||||
using NATS.Client.Core;
|
||||
using Shouldly;
|
||||
|
||||
namespace ZB.MOM.NatsNet.Server.IntegrationTests.JetStream;
|
||||
|
||||
/// <summary>
|
||||
/// Integration tests ported from JetStream benchmark, JWT, versioning, meta-benchmark,
|
||||
/// cluster-long, and sourcing-scaling Go test files (29 tests total).
|
||||
/// </summary>
|
||||
[Trait("Category", "Integration")]
|
||||
public class JetStreamMiscTests : IAsyncLifetime
|
||||
{
|
||||
private NatsConnection? _nats;
|
||||
private Exception? _initFailure;
|
||||
|
||||
public async Task InitializeAsync()
|
||||
{
|
||||
try
|
||||
{
|
||||
_nats = new NatsConnection(new NatsOpts { Url = "nats://localhost:4222" });
|
||||
await _nats.ConnectAsync();
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
_initFailure = ex;
|
||||
}
|
||||
}
|
||||
|
||||
public async Task DisposeAsync()
|
||||
{
|
||||
if (_nats is not null)
|
||||
await _nats.DisposeAsync();
|
||||
}
|
||||
|
||||
private bool ServerUnavailable() => _initFailure != null;
|
||||
|
||||
// =========================================================================
|
||||
// jetstream_benchmark_test.go — 11 Benchmark* functions
|
||||
// Ported as correctness integration tests with small fixed N.
|
||||
// =========================================================================
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamConsume (sync push consumer variant).
|
||||
/// Verifies that a stream can be created, published to, and messages pulled via
|
||||
/// a durable consumer without errors.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamConsume_SyncPushConsumer_ShouldConsumeAllMessages()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 100;
|
||||
var streamName = $"BENCH_CONSUME_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.consume.{streamName}";
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var message = new byte[10];
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
{
|
||||
message[0] = (byte)(i % 256);
|
||||
await _nats.PublishAsync(subject, (byte[])message.Clone());
|
||||
}
|
||||
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["durable_name"] = "sync-consumer",
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "explicit",
|
||||
},
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.sync-consumer", consumerPayload);
|
||||
|
||||
var pullPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject { ["batch"] = messageCount, ["expires"] = 2_000_000_000 }.ToJsonString());
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync($"$JS.API.CONSUMER.MSG.NEXT.{streamName}.sync-consumer", pullPayload, replyTo: replyInbox);
|
||||
|
||||
int received = 0;
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
|
||||
await foreach (var msg in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if ((string?)msg.Headers?["Status"] == "404") break;
|
||||
if (msg.Headers?["Status"] is not null) continue;
|
||||
received++;
|
||||
if (received >= messageCount) break;
|
||||
}
|
||||
|
||||
received.ShouldBe(messageCount, $"Expected {messageCount} messages but received {received}");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamConsume (async push consumer variant).
|
||||
/// Verifies correctness of async push consumer delivery.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamConsume_AsyncPushConsumer_ShouldDeliverAllMessages()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 50;
|
||||
var streamName = $"BENCH_ASYNC_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.async.{streamName}";
|
||||
var deliverSubject = _nats!.NewInbox();
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var message = new byte[10];
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
await _nats.PublishAsync(subject, message);
|
||||
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["durable_name"] = "async-consumer",
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "explicit",
|
||||
["deliver_subject"] = deliverSubject,
|
||||
},
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.async-consumer", consumerPayload);
|
||||
|
||||
int received = 0;
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(deliverSubject);
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
|
||||
await foreach (var msg in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
received++;
|
||||
if (received >= messageCount) break;
|
||||
}
|
||||
|
||||
received.ShouldBe(messageCount);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamConsumeFilteredContiguous (single filter variant).
|
||||
/// Verifies correctness of a filtered pull consumer — fixes regression from PR #7015.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamConsumeFilteredContiguous_SingleFilter_ShouldConsumeAllMessages()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 20;
|
||||
var streamName = $"BENCH_FILT_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.filt.{streamName}";
|
||||
var payload = new byte[1024];
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
await _nats.PublishAsync(subject, payload);
|
||||
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["name"] = "test-consumer",
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "none",
|
||||
["filter_subject"] = subject,
|
||||
},
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.test-consumer", consumerPayload);
|
||||
|
||||
var pullPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject { ["batch"] = messageCount, ["expires"] = 3_000_000_000 }.ToJsonString());
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync($"$JS.API.CONSUMER.MSG.NEXT.{streamName}.test-consumer", pullPayload, replyTo: replyInbox);
|
||||
|
||||
int received = 0;
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
|
||||
await foreach (var msg in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if ((string?)msg.Headers?["Status"] == "404") break;
|
||||
if (msg.Headers?["Status"] is not null) continue;
|
||||
received++;
|
||||
if (received >= messageCount) break;
|
||||
}
|
||||
|
||||
received.ShouldBe(messageCount);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamConsumeFilteredContiguous (two-filter variant).
|
||||
/// Verifies a filtered pull consumer with two subject filters.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamConsumeFilteredContiguous_TwoFilters_ShouldConsumeAllMessages()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 20;
|
||||
var streamName = $"BENCH_FILT2_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.filt2.{streamName}";
|
||||
var payload = new byte[1024];
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
await _nats.PublishAsync(subject, payload);
|
||||
|
||||
// Two filters: exact subject + an alternate that won't match (mirrors Go benchmark pattern).
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["name"] = "test-consumer-2f",
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "none",
|
||||
["filter_subjects"] = new JsonArray(
|
||||
JsonValue.Create(subject),
|
||||
JsonValue.Create($"{subject}.bar")),
|
||||
},
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.test-consumer-2f", consumerPayload);
|
||||
|
||||
var pullPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject { ["batch"] = messageCount, ["expires"] = 3_000_000_000 }.ToJsonString());
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync($"$JS.API.CONSUMER.MSG.NEXT.{streamName}.test-consumer-2f", pullPayload, replyTo: replyInbox);
|
||||
|
||||
int received = 0;
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
|
||||
await foreach (var msg in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if ((string?)msg.Headers?["Status"] == "404") break;
|
||||
if (msg.Headers?["Status"] is not null) continue;
|
||||
received++;
|
||||
if (received >= messageCount) break;
|
||||
}
|
||||
|
||||
received.ShouldBe(messageCount);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamConsumeWithFilters.
|
||||
/// Verifies that a consumer with domain-specific subject filter delivers correct messages.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamConsumeWithFilters_DomainFilteredConsumer_ShouldDeliverCorrectly()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"BENCH_WF_{Guid.NewGuid():N}";
|
||||
var subjectPrefix = $"bench.wf.{streamName}";
|
||||
const int domainsCount = 5;
|
||||
const int subjectsPerDomain = 2;
|
||||
var payload = new byte[32];
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create($"{subjectPrefix}.>")),
|
||||
["storage"] = "memory",
|
||||
["max_msgs_per_subject"] = 1,
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var domains = new List<string>();
|
||||
for (int d = 1; d <= domainsCount; d++)
|
||||
{
|
||||
var domain = $"domain{d:D4}";
|
||||
domains.Add(domain);
|
||||
for (int s = 1; s <= subjectsPerDomain; s++)
|
||||
await _nats.PublishAsync($"{subjectPrefix}.{domain}.{s}", payload);
|
||||
}
|
||||
|
||||
// Consumer filtered to the first domain only.
|
||||
var filterDomain = domains[0];
|
||||
var filterSubject = $"{subjectPrefix}.{filterDomain}.>";
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["name"] = "wf-consumer",
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "none",
|
||||
["filter_subject"] = filterSubject,
|
||||
},
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.wf-consumer", consumerPayload);
|
||||
|
||||
var pullPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject { ["batch"] = subjectsPerDomain * 2, ["expires"] = 3_000_000_000 }.ToJsonString());
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync($"$JS.API.CONSUMER.MSG.NEXT.{streamName}.wf-consumer", pullPayload, replyTo: replyInbox);
|
||||
|
||||
int received = 0;
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
await foreach (var msg in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if ((string?)msg.Headers?["Status"] == "404") break;
|
||||
if (msg.Headers?["Status"] is not null) continue;
|
||||
received++;
|
||||
if (received >= subjectsPerDomain) break;
|
||||
}
|
||||
|
||||
received.ShouldBe(subjectsPerDomain);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamPublish (sync publisher variant).
|
||||
/// Verifies that JetStream publish with sync acknowledgement works correctly.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamPublish_SyncPublisher_ShouldPublishSuccessfully()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 50;
|
||||
var streamName = $"BENCH_PUB_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.pub.{streamName}";
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var message = new byte[10];
|
||||
int published = 0;
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
{
|
||||
message[0] = (byte)(i % 256);
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync(subject, (byte[])message.Clone(), replyTo: replyInbox);
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(2));
|
||||
await foreach (var reply in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 }) published++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
published.ShouldBe(messageCount, $"Expected {messageCount} pub acks but got {published}");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamPublish (async publisher variant).
|
||||
/// Verifies that async JetStream publish produces valid pub acks.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamPublish_AsyncPublisher_ShouldPublishSuccessfully()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 50;
|
||||
var streamName = $"BENCH_APUB_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.apub.{streamName}";
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var tasks = Enumerable.Range(0, messageCount).Select(i => Task.Run(async () =>
|
||||
{
|
||||
var data = new byte[] { (byte)(i % 256) };
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync(subject, data, replyTo: replyInbox);
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
await foreach (var reply in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
return reply.Data?.Length > 0;
|
||||
return false;
|
||||
}));
|
||||
|
||||
var results = await Task.WhenAll(tasks);
|
||||
results.Count(r => r).ShouldBe(messageCount);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamPublish (multi-subject variant).
|
||||
/// Verifies publishing to multiple subjects on a single stream.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamPublish_MultiSubject_ShouldPublishToAllSubjects()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 30;
|
||||
var streamName = $"BENCH_MSUB_{Guid.NewGuid():N}";
|
||||
var subjects = new[]
|
||||
{
|
||||
$"bench.ms.{streamName}.a",
|
||||
$"bench.ms.{streamName}.b",
|
||||
$"bench.ms.{streamName}.c",
|
||||
};
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(subjects.Select(s => JsonValue.Create(s)).ToArray<JsonNode?>()),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var rng = new Random(12345);
|
||||
var message = new byte[32];
|
||||
int published = 0;
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
{
|
||||
rng.NextBytes(message);
|
||||
var subj = subjects[rng.Next(subjects.Length)];
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync(subj, (byte[])message.Clone(), replyTo: replyInbox);
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(2));
|
||||
await foreach (var reply in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if (reply.Data is { Length: > 0 }) published++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
published.ShouldBe(messageCount);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamPublish (cluster/R3 variant).
|
||||
/// Skipped: targets single-server CI environment.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires a running 3-node JetStream cluster — targets single-server mode in CI")]
|
||||
public Task JetStreamPublish_ClusteredR3_ShouldPublishSuccessfully() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamConsume (pull durable variant).
|
||||
/// Verifies that a durable pull consumer can consume all messages.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamConsume_PullDurableConsumer_ShouldConsumeAllMessages()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 50;
|
||||
var streamName = $"BENCH_PULL_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.pull.{streamName}";
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var message = new byte[10];
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
await _nats.PublishAsync(subject, message);
|
||||
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["durable_name"] = "pull-durable",
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "explicit",
|
||||
},
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.pull-durable", consumerPayload);
|
||||
|
||||
// Fetch in batches (mirrors Go PullConsumer with fetchMaxMessages = 1000).
|
||||
const int fetchSize = 25;
|
||||
int received = 0;
|
||||
while (received < messageCount)
|
||||
{
|
||||
var pullPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject { ["batch"] = fetchSize, ["expires"] = 2_000_000_000 }.ToJsonString());
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync($"$JS.API.CONSUMER.MSG.NEXT.{streamName}.pull-durable", pullPayload, replyTo: replyInbox);
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
|
||||
await foreach (var msg in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if ((string?)msg.Headers?["Status"] == "404") break;
|
||||
if (msg.Headers?["Status"] is not null) continue;
|
||||
received++;
|
||||
if (received >= messageCount) break;
|
||||
}
|
||||
}
|
||||
|
||||
received.ShouldBe(messageCount);
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamConsume (ephemeral pull consumer variant).
|
||||
/// Verifies that an ephemeral pull consumer can consume all messages.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamConsume_PullEphemeralConsumer_ShouldConsumeAllMessages()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int messageCount = 50;
|
||||
var streamName = $"BENCH_EPH_{Guid.NewGuid():N}";
|
||||
var subject = $"bench.eph.{streamName}";
|
||||
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create(subject)),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
var message = new byte[10];
|
||||
for (int i = 0; i < messageCount; i++)
|
||||
await _nats.PublishAsync(subject, message);
|
||||
|
||||
// Create ephemeral consumer.
|
||||
var ephName = $"eph_{Guid.NewGuid():N}";
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["name"] = ephName,
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "explicit",
|
||||
},
|
||||
}.ToJsonString());
|
||||
await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.{ephName}", consumerPayload);
|
||||
|
||||
var pullPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject { ["batch"] = messageCount, ["expires"] = 3_000_000_000 }.ToJsonString());
|
||||
var replyInbox = _nats.NewInbox();
|
||||
var sub = await _nats.SubscribeCoreAsync<byte[]>(replyInbox);
|
||||
await _nats.PublishAsync($"$JS.API.CONSUMER.MSG.NEXT.{streamName}.{ephName}", pullPayload, replyTo: replyInbox);
|
||||
|
||||
int received = 0;
|
||||
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10));
|
||||
await foreach (var msg in sub.Msgs.ReadAllAsync(cts.Token))
|
||||
{
|
||||
if ((string?)msg.Headers?["Status"] == "404") break;
|
||||
if (msg.Headers?["Status"] is not null) continue;
|
||||
received++;
|
||||
if (received >= messageCount) break;
|
||||
}
|
||||
|
||||
received.ShouldBe(messageCount);
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// jetstream_jwt_test.go — 9 tests
|
||||
// All require JWT operator/resolver infrastructure (nkeys, ojwt) not available
|
||||
// in the .NET integration test environment.
|
||||
// =========================================================================
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTLimits.
|
||||
/// Tests JetStream account limits applied, updated, and enforced from JWT claims.
|
||||
/// Skipped: requires nkeys/JWT infrastructure.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT operator/resolver infrastructure (nkeys, jwt.NewAccountClaims, ojwt) — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtLimits_AccountLimitsShouldBeAppliedFromJwt() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTDisallowBearer.
|
||||
/// Tests that bearer tokens are rejected when DisallowBearer is set on the account JWT.
|
||||
/// Skipped: requires nkeys/JWT infrastructure.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT infrastructure (nkeys, jwt.NewUserClaims with BearerToken, resolver_preload) — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtDisallowBearer_BearerTokenShouldBeRejected() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTMove (tiered R3 variant).
|
||||
/// Tests moving a stream between clusters using JWT placement tags.
|
||||
/// Skipped: requires JWT super-cluster setup.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT super-cluster with placement tags (createJetStreamSuperClusterWithTemplateAndModHook) — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtMove_TieredR3_ShouldMoveStreamBetweenClusters() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTMove (tiered R1 variant).
|
||||
/// Skipped: requires JWT super-cluster setup.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT super-cluster with placement tags (createJetStreamSuperClusterWithTemplateAndModHook) — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtMove_TieredR1_ShouldMoveStreamBetweenClusters() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTMove (non-tiered R3 variant).
|
||||
/// Skipped: requires JWT super-cluster setup.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT super-cluster with non-tiered limits (createJetStreamSuperClusterWithTemplateAndModHook) — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtMove_NonTieredR3_ShouldMoveStreamBetweenClusters() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTMove (non-tiered R1 variant).
|
||||
/// Skipped: requires JWT super-cluster setup.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT super-cluster with non-tiered limits (createJetStreamSuperClusterWithTemplateAndModHook) — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtMove_NonTieredR1_ShouldMoveStreamBetweenClusters() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTClusteredTiers.
|
||||
/// Tests R1/R3 tiered JetStream limits from JWT in a clustered setup.
|
||||
/// Skipped: requires JWT resolver with tiered limits.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT tiered limits (JetStreamTieredLimits) with cluster config — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtClusteredTiers_TieredLimitsShouldBeEnforced() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamJWTClusteredTiersChange.
|
||||
/// Tests that changing tiered limits in JWT is applied in a running cluster.
|
||||
/// Skipped: requires JWT live-update with cluster.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Requires JWT live update of tiered limits in a cluster — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwtClusteredTiersChange_UpdatedLimitsShouldBeApplied() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Covers remaining JWT test cases in jetstream_jwt_test.go.
|
||||
/// Skipped: all JWT tests require Go JWT infrastructure.
|
||||
/// </summary>
|
||||
[Fact(Skip = "All JWT tests require nkeys/JWT operator infrastructure — not available in .NET integration test environment")]
|
||||
public Task JetStreamJwt_AllRemainingCases_RequireJwtInfrastructure() => Task.CompletedTask;
|
||||
|
||||
// =========================================================================
|
||||
// jetstream_versioning_test.go — 2 tests
|
||||
// Most test internal Go functions directly; TestJetStreamMetadataMutations is
|
||||
// testable via NATS protocol and is ported as an active test.
|
||||
// =========================================================================
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestGetAndSupportsRequiredApiLevel and TestJetStreamSetStaticStreamMetadata
|
||||
/// and related Go-internal metadata helper functions (setStaticStreamMetadata,
|
||||
/// setDynamicStreamMetadata, copyStreamMetadata, etc.).
|
||||
/// Skipped: these test Go package-level functions not exposed via NATS protocol.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Tests Go-internal functions (getRequiredApiLevel, setStaticStreamMetadata, etc.) — not accessible via NATS protocol")]
|
||||
public Task JetStreamVersioning_InternalMetadataFunctions_ShouldBehaveCorrectly() => Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestJetStreamMetadataMutations.
|
||||
/// Tests that stream/consumer metadata is preserved across create, update, and info operations.
|
||||
/// This is directly testable via the JetStream API.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamMetadataMutations_MetadataShouldPersistAcrossOperations()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"META_{Guid.NewGuid():N}";
|
||||
|
||||
// Create stream — verify no error.
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create($"meta.{streamName}")),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
var createRespMsg = await _nats!.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
// NatsMsg<byte[]> is a struct; assert on .Data instead.
|
||||
createRespMsg.Data.ShouldNotBeNull();
|
||||
var createResp = JsonDocument.Parse(createRespMsg.Data);
|
||||
createResp.RootElement.TryGetProperty("error", out _).ShouldBeFalse("Stream create should succeed");
|
||||
|
||||
// Stream info — config should be accessible.
|
||||
var infoRespMsg = await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.STREAM.INFO.{streamName}", null);
|
||||
infoRespMsg.Data.ShouldNotBeNull();
|
||||
var infoResp = JsonDocument.Parse(infoRespMsg.Data);
|
||||
infoResp.RootElement.TryGetProperty("error", out _).ShouldBeFalse("Stream info should succeed");
|
||||
infoResp.RootElement.TryGetProperty("config", out _).ShouldBeTrue("Stream info should include config");
|
||||
|
||||
// Update stream — metadata from creation should be preserved.
|
||||
var updatePayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create($"meta.{streamName}")),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
var updateRespMsg = await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.STREAM.UPDATE.{streamName}", updatePayload);
|
||||
updateRespMsg.Data.ShouldNotBeNull();
|
||||
var updateResp = JsonDocument.Parse(updateRespMsg.Data);
|
||||
updateResp.RootElement.TryGetProperty("error", out _).ShouldBeFalse("Stream update should succeed");
|
||||
|
||||
// Add consumer.
|
||||
var consumerName = "meta-consumer";
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["name"] = consumerName,
|
||||
["durable_name"] = consumerName,
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "explicit",
|
||||
},
|
||||
}.ToJsonString());
|
||||
var consumerRespMsg = await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.{consumerName}", consumerPayload);
|
||||
consumerRespMsg.Data.ShouldNotBeNull();
|
||||
var consumerResp = JsonDocument.Parse(consumerRespMsg.Data);
|
||||
consumerResp.RootElement.TryGetProperty("error", out _).ShouldBeFalse("Consumer create should succeed");
|
||||
|
||||
// Consumer info.
|
||||
var ciRespMsg = await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.INFO.{streamName}.{consumerName}", null);
|
||||
ciRespMsg.Data.ShouldNotBeNull();
|
||||
var ciResp = JsonDocument.Parse(ciRespMsg.Data);
|
||||
ciResp.RootElement.TryGetProperty("error", out _).ShouldBeFalse("Consumer info should succeed");
|
||||
ciResp.RootElement.TryGetProperty("config", out _).ShouldBeTrue("Consumer info should include config");
|
||||
|
||||
// Update consumer.
|
||||
var updateConsumerRespMsg = await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.{consumerName}", consumerPayload);
|
||||
updateConsumerRespMsg.Data.ShouldNotBeNull();
|
||||
var updateConsumerResp = JsonDocument.Parse(updateConsumerRespMsg.Data);
|
||||
updateConsumerResp.RootElement.TryGetProperty("error", out _).ShouldBeFalse("Consumer update should succeed");
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// jetstream_meta_benchmark_test.go — 2 Benchmark* functions
|
||||
// Ported as correctness tests with small fixed N.
|
||||
// =========================================================================
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamCreate.
|
||||
/// Verifies streams, KV buckets, and object stores can be created concurrently.
|
||||
/// Runs with small fixed N for correctness, not performance.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamCreate_ConcurrentStreamCreation_ShouldSucceedWithoutErrors()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
const int concurrency = 3;
|
||||
const int opsPerClient = 5;
|
||||
int totalErrors = 0;
|
||||
|
||||
var tasks = Enumerable.Range(0, concurrency).Select(clientId => Task.Run(async () =>
|
||||
{
|
||||
int errors = 0;
|
||||
for (int op = 0; op < opsPerClient; op++)
|
||||
{
|
||||
var streamName = $"META_CREATE_{clientId}_{op}_{Guid.NewGuid():N}";
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create($"create.{streamName}")),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
try
|
||||
{
|
||||
var resp = await _nats!.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
if (resp.Data is null) errors++;
|
||||
}
|
||||
catch
|
||||
{
|
||||
errors++;
|
||||
}
|
||||
}
|
||||
Interlocked.Add(ref totalErrors, errors);
|
||||
}));
|
||||
|
||||
await Task.WhenAll(tasks);
|
||||
totalErrors.ShouldBe(0, $"Expected no errors creating streams concurrently but got {totalErrors}");
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Ported from BenchmarkJetStreamCreateConsumers.
|
||||
/// Verifies ephemeral and durable consumers can be created concurrently on a stream.
|
||||
/// </summary>
|
||||
[Fact]
|
||||
public async Task JetStreamCreateConsumers_ConcurrentConsumerCreation_ShouldSucceedWithoutErrors()
|
||||
{
|
||||
if (ServerUnavailable()) return;
|
||||
|
||||
var streamName = $"META_CONS_{Guid.NewGuid():N}";
|
||||
var createPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["name"] = streamName,
|
||||
["subjects"] = new JsonArray(JsonValue.Create($"cons.{streamName}")),
|
||||
["storage"] = "memory",
|
||||
}.ToJsonString());
|
||||
await _nats!.RequestAsync<byte[], byte[]>($"$JS.API.STREAM.CREATE.{streamName}", createPayload);
|
||||
|
||||
const int concurrency = 3;
|
||||
const int opsPerClient = 3;
|
||||
int totalErrors = 0;
|
||||
|
||||
var tasks = Enumerable.Range(0, concurrency).Select(clientId => Task.Run(async () =>
|
||||
{
|
||||
int errors = 0;
|
||||
for (int op = 0; op < opsPerClient; op++)
|
||||
{
|
||||
var consumerName = $"C_{clientId}_{op}_{Guid.NewGuid():N}";
|
||||
var consumerPayload = System.Text.Encoding.UTF8.GetBytes(
|
||||
new JsonObject
|
||||
{
|
||||
["stream_name"] = streamName,
|
||||
["config"] = new JsonObject
|
||||
{
|
||||
["name"] = consumerName,
|
||||
["durable_name"] = consumerName,
|
||||
["deliver_policy"] = "all",
|
||||
["ack_policy"] = "explicit",
|
||||
},
|
||||
}.ToJsonString());
|
||||
try
|
||||
{
|
||||
var resp = await _nats.RequestAsync<byte[], byte[]>(
|
||||
$"$JS.API.CONSUMER.CREATE.{streamName}.{consumerName}", consumerPayload);
|
||||
if (resp.Data is null) errors++;
|
||||
}
|
||||
catch
|
||||
{
|
||||
errors++;
|
||||
}
|
||||
}
|
||||
Interlocked.Add(ref totalErrors, errors);
|
||||
}));
|
||||
|
||||
await Task.WhenAll(tasks);
|
||||
totalErrors.ShouldBe(0, $"Expected no errors creating consumers concurrently but got {totalErrors}");
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// jetstream_cluster_long_test.go — 4 tests
|
||||
// Build tag: include_js_long_tests (skipped by default in Go CI).
|
||||
// All require Go-internal cluster/Raft access or run for many minutes.
|
||||
// =========================================================================
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestLongKVPutWithServerRestarts.
|
||||
/// Long-running test: writes to KV while randomly restarting cluster nodes for 3 minutes.
|
||||
/// Skipped: build tag include_js_long_tests; requires Go-internal cluster management.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Long-running stability test (3 minutes, build tag: include_js_long_tests). Requires Go-internal cluster management — not runnable from .NET integration tests")]
|
||||
public Task LongKvPutWithServerRestarts_ShouldContinueSuccessfullyUnderNodeRestarts()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestLongNRGChainOfBlocks.
|
||||
/// Long-running Raft chain-of-blocks test with random stop/start of nodes (10 minutes).
|
||||
/// Skipped: requires Go-internal Raft infrastructure (createRaftGroup, RCOBStateMachine).
|
||||
/// </summary>
|
||||
[Fact(Skip = "Long-running Raft stability test (10 minutes, build tag: include_js_long_tests). Requires Go-internal Raft infrastructure — not accessible via NATS protocol")]
|
||||
public Task LongNrgChainOfBlocks_ShouldConvergeCorrectlyUnderFaults()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestLongClusterWorkQueueMessagesNotSkipped.
|
||||
/// Verifies 500,000 work-queue messages are delivered with multiple consumers and random delays.
|
||||
/// Skipped: 500k messages is impractical for standard CI; requires Go-internal cluster setup.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Long-running work queue delivery test (500,000 messages, build tag: include_js_long_tests) — impractical for standard CI integration tests")]
|
||||
public Task LongClusterWorkQueueMessagesNotSkipped_AllMessagesShouldBeDelivered()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestLongClusterJetStreamKeyValueSync.
|
||||
/// Long-running KV consistency test across a cluster with concurrent readers/writers
|
||||
/// and lame-duck server mode.
|
||||
/// Skipped: requires Go-internal server options (s.optsMu, LameDuckDuration, mset.store).
|
||||
/// </summary>
|
||||
[Fact(Skip = "Long-running KV consistency test (build tag: include_js_long_tests). Requires Go-internal server options (s.optsMu, LameDuckDuration, mset.store.LoadMsg) — not accessible via NATS protocol")]
|
||||
public Task LongClusterJetStreamKeyValueSync_KvStoreShouldBeConsistentAcrossCluster()
|
||||
=> Task.CompletedTask;
|
||||
|
||||
// =========================================================================
|
||||
// jetstream_sourcing_scaling_test.go — 1 test
|
||||
// Has explicit t.Skip() in the Go source. Connects to hardcoded cluster at
|
||||
// 127.0.0.1:4222/5222/6222 to benchmark sourcing 1000 streams with 10k msgs each.
|
||||
// =========================================================================
|
||||
|
||||
/// <summary>
|
||||
/// Ported from TestStreamSourcingScalingSourcingManyBenchmark.
|
||||
/// The Go source has an explicit t.Skip() at the top.
|
||||
/// Requires a pre-configured 3-node local cluster on hardcoded ports.
|
||||
/// </summary>
|
||||
[Fact(Skip = "Explicitly skipped in Go source (t.Skip()). Requires a pre-configured 3-node local cluster (ports 4222/5222/6222) — not suitable for automated integration tests")]
|
||||
public Task StreamSourcingScalingManyBenchmark_ShouldScaleWithManySources()
|
||||
=> Task.CompletedTask;
|
||||
}
|
||||
Reference in New Issue
Block a user