- git mv JetStreamApiFixture, JetStreamClusterFixture, LeafFixture, Parity utilities, and TestData from NATS.Server.Tests to NATS.Server.TestUtilities - Update namespaces to NATS.Server.TestUtilities (and .Parity sub-ns) - Make fixture classes public for cross-project access - Add PollHelper to replace Task.Delay polling with SemaphoreSlim waits - Refactor all fixture polling loops to use PollHelper - Add 'using NATS.Server.TestUtilities;' to ~75 consuming test files - Rename local fixture duplicates (MetaGroupTestFixture, LeafProtocolTestFixture) to avoid shadowing shared fixtures - Remove TestData entry from NATS.Server.Tests.csproj (moved to TestUtilities)
703 lines
25 KiB
C#
703 lines
25 KiB
C#
using System.Net;
|
|
using System.Net.Sockets;
|
|
using System.Text;
|
|
using Microsoft.Extensions.Logging.Abstractions;
|
|
using NATS.Client.Core;
|
|
using NATS.Server.Auth;
|
|
using NATS.Server.Configuration;
|
|
using NATS.Server.LeafNodes;
|
|
using NATS.Server.Subscriptions;
|
|
using NATS.Server.TestUtilities;
|
|
|
|
namespace NATS.Server.Tests.LeafNodes;
|
|
|
|
/// <summary>
|
|
/// Advanced leaf node behavior tests: daisy chains, account scoping, concurrency,
|
|
/// multiple hub connections, and edge cases.
|
|
/// Reference: golang/nats-server/server/leafnode_test.go
|
|
/// </summary>
|
|
public class LeafNodeAdvancedTests
|
|
{
|
|
// Go: TestLeafNodeInterestPropagationDaisychain server/leafnode_test.go:3953
[Fact]
public async Task Daisy_chain_A_to_B_to_C_establishes_leaf_connections()
{
    // A (hub) <- B (spoke/hub) <- C (spoke)
    // Verify the three-server daisy chain topology connects correctly.
    // Servers are torn down in a finally block so a failed assertion cannot
    // leak listeners into subsequent tests.
    var aOptions = new NatsOptions
    {
        Host = "127.0.0.1",
        Port = 0,
        LeafNode = new LeafNodeOptions { Host = "127.0.0.1", Port = 0 },
    };
    var serverA = new NatsServer(aOptions, NullLoggerFactory.Instance);
    var aCts = new CancellationTokenSource();
    NatsServer? serverB = null;
    CancellationTokenSource? bCts = null;
    NatsServer? serverC = null;
    CancellationTokenSource? cCts = null;

    try
    {
        _ = serverA.StartAsync(aCts.Token);
        await serverA.WaitForReadyAsync();

        var bOptions = new NatsOptions
        {
            Host = "127.0.0.1",
            Port = 0,
            LeafNode = new LeafNodeOptions
            {
                Host = "127.0.0.1",
                Port = 0,
                Remotes = [serverA.LeafListen!],
            },
        };
        serverB = new NatsServer(bOptions, NullLoggerFactory.Instance);
        bCts = new CancellationTokenSource();
        _ = serverB.StartAsync(bCts.Token);
        await serverB.WaitForReadyAsync();

        var cOptions = new NatsOptions
        {
            Host = "127.0.0.1",
            Port = 0,
            LeafNode = new LeafNodeOptions
            {
                Host = "127.0.0.1",
                Port = 0,
                Remotes = [serverB.LeafListen!],
            },
        };
        serverC = new NatsServer(cOptions, NullLoggerFactory.Instance);
        cCts = new CancellationTokenSource();
        _ = serverC.StartAsync(cCts.Token);
        await serverC.WaitForReadyAsync();

        // Wait for leaf connections. Stats.Leafs is a long mutated by server
        // threads, so poll it with Interlocked.Read throughout — the original
        // mixed plain reads with Interlocked reads in the same condition,
        // which risks torn/stale reads (notably on 32-bit runtimes).
        using var waitTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        while (!waitTimeout.IsCancellationRequested
               && (Interlocked.Read(ref serverA.Stats.Leafs) == 0
                   || Interlocked.Read(ref serverB.Stats.Leafs) < 2
                   || Interlocked.Read(ref serverC.Stats.Leafs) == 0))
            await Task.Delay(50, waitTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

        Interlocked.Read(ref serverA.Stats.Leafs).ShouldBe(1);
        Interlocked.Read(ref serverB.Stats.Leafs).ShouldBeGreaterThanOrEqualTo(2);
        Interlocked.Read(ref serverC.Stats.Leafs).ShouldBe(1);

        // Verify each server has a unique ID.
        serverA.ServerId.ShouldNotBe(serverB.ServerId);
        serverB.ServerId.ShouldNotBe(serverC.ServerId);
        serverA.ServerId.ShouldNotBe(serverC.ServerId);
    }
    finally
    {
        // Tear down leaf-most first so upstream servers see clean disconnects.
        if (cCts is not null)
            await cCts.CancelAsync();
        if (bCts is not null)
            await bCts.CancelAsync();
        await aCts.CancelAsync();
        serverC?.Dispose();
        serverB?.Dispose();
        serverA.Dispose();
        cCts?.Dispose();
        bCts?.Dispose();
        aCts.Dispose();
    }
}
|
|
|
|
// Go: TestLeafNodeDupeDeliveryQueueSubAndPlainSub server/leafnode_test.go:9634
[Fact]
public async Task Queue_sub_and_plain_sub_both_receive_from_hub()
{
    // A plain subscription and a queue subscription on the same subject are
    // independent consumers: a single hub publish must reach both.
    await using var fixture = await LeafFixture.StartAsync();

    await using var spokeClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Spoke.Port}",
    });
    await spokeClient.ConnectAsync();

    await using var hubClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Hub.Port}",
    });
    await hubClient.ConnectAsync();

    // One plain consumer and one queue-group consumer on the spoke.
    await using var plainConsumer = await spokeClient.SubscribeCoreAsync<string>("mixed.test");
    await using var queueConsumer = await spokeClient.SubscribeCoreAsync<string>("mixed.test", queueGroup: "q1");
    await spokeClient.PingAsync();
    await fixture.WaitForRemoteInterestOnHubAsync("mixed.test");

    await hubClient.PublishAsync("mixed.test", "to-both");

    // Each subscription must deliver the published payload.
    using var plainDeadline = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    (await plainConsumer.Msgs.ReadAsync(plainDeadline.Token)).Data.ShouldBe("to-both");

    using var queueDeadline = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    (await queueConsumer.Msgs.ReadAsync(queueDeadline.Token)).Data.ShouldBe("to-both");
}
|
|
|
|
// Go: TestLeafNodeAccountNotFound server/leafnode_test.go:352
[Fact]
public async Task Account_scoped_messages_do_not_cross_accounts()
{
    // Two accounts share a hub/spoke pair; a message published into account A
    // must reach A's subscriber on the spoke and never leak into account B.
    // Servers are torn down in a finally block so a failed assertion cannot
    // leak listeners into subsequent tests.
    var users = new User[]
    {
        new() { Username = "user_a", Password = "pass", Account = "ACCT_A" },
        new() { Username = "user_b", Password = "pass", Account = "ACCT_B" },
    };

    var hubOptions = new NatsOptions
    {
        Host = "127.0.0.1",
        Port = 0,
        Users = users,
        LeafNode = new LeafNodeOptions { Host = "127.0.0.1", Port = 0 },
    };

    var hub = new NatsServer(hubOptions, NullLoggerFactory.Instance);
    var hubCts = new CancellationTokenSource();
    NatsServer? spoke = null;
    CancellationTokenSource? spokeCts = null;

    try
    {
        _ = hub.StartAsync(hubCts.Token);
        await hub.WaitForReadyAsync();

        var spokeOptions = new NatsOptions
        {
            Host = "127.0.0.1",
            Port = 0,
            Users = users,
            LeafNode = new LeafNodeOptions
            {
                Host = "127.0.0.1",
                Port = 0,
                Remotes = [hub.LeafListen!],
            },
        };

        spoke = new NatsServer(spokeOptions, NullLoggerFactory.Instance);
        spokeCts = new CancellationTokenSource();
        _ = spoke.StartAsync(spokeCts.Token);
        await spoke.WaitForReadyAsync();

        // Stats.Leafs is a long mutated by server threads; poll it with
        // Interlocked.Read (the original used plain reads here, unlike the
        // Interlocked reads used elsewhere in this file).
        using var waitTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        while (!waitTimeout.IsCancellationRequested
               && (Interlocked.Read(ref hub.Stats.Leafs) == 0 || Interlocked.Read(ref spoke.Stats.Leafs) == 0))
            await Task.Delay(50, waitTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

        // Subscribe with account A on spoke.
        await using var connA = new NatsConnection(new NatsOpts
        {
            Url = $"nats://user_a:pass@127.0.0.1:{spoke.Port}",
        });
        await connA.ConnectAsync();
        await using var subA = await connA.SubscribeCoreAsync<string>("acct.test");

        // Subscribe with account B on spoke.
        await using var connB = new NatsConnection(new NatsOpts
        {
            Url = $"nats://user_b:pass@127.0.0.1:{spoke.Port}",
        });
        await connB.ConnectAsync();
        await using var subB = await connB.SubscribeCoreAsync<string>("acct.test");

        await connA.PingAsync();
        await connB.PingAsync();

        // Wait for account A interest to propagate to the hub.
        using var interestTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        while (!interestTimeout.IsCancellationRequested && !hub.HasRemoteInterest("ACCT_A", "acct.test"))
            await Task.Delay(50, interestTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

        // Publish from account A on hub.
        await using var pubA = new NatsConnection(new NatsOpts
        {
            Url = $"nats://user_a:pass@127.0.0.1:{hub.Port}",
        });
        await pubA.ConnectAsync();
        await pubA.PublishAsync("acct.test", "for-A-only");

        // Account A subscriber should receive.
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        var msgA = await subA.Msgs.ReadAsync(cts.Token);
        msgA.Data.ShouldBe("for-A-only");

        // Account B subscriber should NOT receive.
        using var leakCts = new CancellationTokenSource(TimeSpan.FromMilliseconds(500));
        await Should.ThrowAsync<OperationCanceledException>(async () =>
            await subB.Msgs.ReadAsync(leakCts.Token));
    }
    finally
    {
        if (spokeCts is not null)
            await spokeCts.CancelAsync();
        await hubCts.CancelAsync();
        spoke?.Dispose();
        hub.Dispose();
        spokeCts?.Dispose();
        hubCts.Dispose();
    }
}
|
|
|
|
// Go: TestLeafNodePermissionsConcurrentAccess server/leafnode_test.go:1389
[Fact]
public async Task Concurrent_subscribe_unsubscribe_does_not_corrupt_interest_state()
{
    // Ten clients concurrently subscribe, hold briefly, then unsubscribe;
    // afterwards no remote interest may remain on the hub.
    await using var fixture = await LeafFixture.StartAsync();

    var tasks = new List<Task>();
    for (var i = 0; i < 10; i++)
    {
        var index = i;
        tasks.Add(Task.Run(async () =>
        {
            await using var conn = new NatsConnection(new NatsOpts
            {
                Url = $"nats://127.0.0.1:{fixture.Spoke.Port}",
            });
            await conn.ConnectAsync();

            var sub = await conn.SubscribeCoreAsync<string>($"concurrent.{index}");
            await conn.PingAsync();
            await Task.Delay(50);
            await sub.DisposeAsync();
            await conn.PingAsync();
        }));
    }

    await Task.WhenAll(tasks);

    // Interest removal propagates asynchronously, so poll with a deadline
    // instead of the original fixed Task.Delay(200), which was a flake
    // waiting to happen under load.
    bool AnyInterestRemains()
    {
        for (var i = 0; i < 10; i++)
            if (fixture.Hub.HasRemoteInterest($"concurrent.{i}"))
                return true;
        return false;
    }

    using var settleTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    while (!settleTimeout.IsCancellationRequested && AnyInterestRemains())
        await Task.Delay(50, settleTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

    // After all subs are unsubscribed, interest should be gone.
    for (var i = 0; i < 10; i++)
        fixture.Hub.HasRemoteInterest($"concurrent.{i}").ShouldBeFalse();
}
|
|
|
|
// Go: TestLeafNodePubAllowedPruning server/leafnode_test.go:1452
[Fact]
public async Task Hub_publishes_rapidly_and_leaf_receives_all()
{
    // Burst 50 messages from the hub and confirm the spoke-side subscriber
    // drains every one of them within the deadline.
    await using var fixture = await LeafFixture.StartAsync();

    await using var spokeClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Spoke.Port}",
    });
    await spokeClient.ConnectAsync();

    await using var hubClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Hub.Port}",
    });
    await hubClient.ConnectAsync();

    await using var inbox = await spokeClient.SubscribeCoreAsync<string>("rapid.test");
    await spokeClient.PingAsync();
    await fixture.WaitForRemoteInterestOnHubAsync("rapid.test");

    const int total = 50;
    for (var n = 0; n < total; n++)
        await hubClient.PublishAsync("rapid.test", $"r-{n}");

    // Drain exactly `total` messages; the shared deadline covers the burst.
    using var deadline = new CancellationTokenSource(TimeSpan.FromSeconds(10));
    var drained = 0;
    for (; drained < total; drained++)
        await inbox.Msgs.ReadAsync(deadline.Token);

    drained.ShouldBe(total);
}
|
|
|
|
// Go: TestLeafNodeSameLocalAccountToMultipleHubs server/leafnode_test.go:8983
[Fact]
public async Task Leaf_with_multiple_subscribers_on_same_subject_all_receive()
{
    // Three separate spoke connections subscribe to the same subject; one hub
    // publish must fan out to every one of them.
    await using var fixture = await LeafFixture.StartAsync();

    await using var hubClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Hub.Port}",
    });
    await hubClient.ConnectAsync();

    var spokeClients = new List<NatsConnection>();
    var subscriptions = new List<INatsSub<string>>();

    try
    {
        for (var n = 0; n < 3; n++)
        {
            var client = new NatsConnection(new NatsOpts
            {
                Url = $"nats://127.0.0.1:{fixture.Spoke.Port}",
            });
            await client.ConnectAsync();
            spokeClients.Add(client);

            subscriptions.Add(await client.SubscribeCoreAsync<string>("multi.sub.test"));
            await client.PingAsync();
        }

        await fixture.WaitForRemoteInterestOnHubAsync("multi.sub.test");

        await hubClient.PublishAsync("multi.sub.test", "fan-out");

        // Every subscriber sees the same payload.
        foreach (var subscription in subscriptions)
        {
            using var deadline = new CancellationTokenSource(TimeSpan.FromSeconds(5));
            (await subscription.Msgs.ReadAsync(deadline.Token)).Data.ShouldBe("fan-out");
        }
    }
    finally
    {
        // Dispose subscriptions before their owning connections.
        foreach (var subscription in subscriptions)
            await subscription.DisposeAsync();
        foreach (var client in spokeClients)
            await client.DisposeAsync();
    }
}
|
|
|
|
// Go: TestLeafNodeHubWithGateways server/leafnode_test.go:1584
[Fact]
public async Task Server_info_shows_correct_leaf_connection_count()
{
    // The hub's leaf counter starts at zero, goes to one when a spoke
    // connects, and returns to zero after the spoke shuts down. Cleanup is
    // in a finally block so a failed assertion cannot leak listeners.
    var hubOptions = new NatsOptions
    {
        Host = "127.0.0.1",
        Port = 0,
        LeafNode = new LeafNodeOptions { Host = "127.0.0.1", Port = 0 },
    };

    var hub = new NatsServer(hubOptions, NullLoggerFactory.Instance);
    var hubCts = new CancellationTokenSource();
    NatsServer? spoke = null;
    CancellationTokenSource? spokeCts = null;

    try
    {
        _ = hub.StartAsync(hubCts.Token);
        await hub.WaitForReadyAsync();

        Interlocked.Read(ref hub.Stats.Leafs).ShouldBe(0);

        var spokeOptions = new NatsOptions
        {
            Host = "127.0.0.1",
            Port = 0,
            LeafNode = new LeafNodeOptions
            {
                Host = "127.0.0.1",
                Port = 0,
                Remotes = [hub.LeafListen!],
            },
        };

        spoke = new NatsServer(spokeOptions, NullLoggerFactory.Instance);
        spokeCts = new CancellationTokenSource();
        _ = spoke.StartAsync(spokeCts.Token);
        await spoke.WaitForReadyAsync();

        // Stats.Leafs is a long mutated by server threads; poll it with
        // Interlocked.Read (the original's loop condition used a plain read).
        using var waitTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        while (!waitTimeout.IsCancellationRequested && Interlocked.Read(ref hub.Stats.Leafs) == 0)
            await Task.Delay(50, waitTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

        Interlocked.Read(ref hub.Stats.Leafs).ShouldBe(1);

        // Deliberate mid-test shutdown; null the local so the finally block
        // does not dispose the spoke twice.
        await spokeCts.CancelAsync();
        spoke.Dispose();
        spoke = null;

        // After spoke disconnects, wait for count to drop.
        using var disconnTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        while (!disconnTimeout.IsCancellationRequested && Interlocked.Read(ref hub.Stats.Leafs) > 0)
            await Task.Delay(50, disconnTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

        Interlocked.Read(ref hub.Stats.Leafs).ShouldBe(0);
    }
    finally
    {
        if (spoke is not null && spokeCts is not null)
            await spokeCts.CancelAsync();
        await hubCts.CancelAsync();
        spoke?.Dispose();
        hub.Dispose();
        spokeCts?.Dispose();
        hubCts.Dispose();
    }
}
|
|
|
|
// Go: TestLeafNodeOriginClusterInfo server/leafnode_test.go:1942
[Fact]
public async Task Server_id_is_unique_between_hub_and_spoke()
{
    // Hub and spoke must each advertise a non-empty, distinct server ID.
    await using var fixture = await LeafFixture.StartAsync();

    var hubId = fixture.Hub.ServerId;
    var spokeId = fixture.Spoke.ServerId;

    hubId.ShouldNotBeNullOrEmpty();
    spokeId.ShouldNotBeNullOrEmpty();
    hubId.ShouldNotBe(spokeId);
}
|
|
|
|
// Go: TestLeafNodeNoDuplicateWithinCluster server/leafnode_test.go:2286
[Fact]
public async Task LeafListen_returns_correct_endpoint()
{
    // After binding an ephemeral leaf port, LeafListen must expose a
    // "127.0.0.1:<port>" endpoint with a real (> 0) port number. The server
    // is torn down in a finally block so a failed assertion cannot leak the
    // listener into subsequent tests.
    var hubOptions = new NatsOptions
    {
        Host = "127.0.0.1",
        Port = 0,
        LeafNode = new LeafNodeOptions { Host = "127.0.0.1", Port = 0 },
    };

    var hub = new NatsServer(hubOptions, NullLoggerFactory.Instance);
    var hubCts = new CancellationTokenSource();
    try
    {
        _ = hub.StartAsync(hubCts.Token);
        await hub.WaitForReadyAsync();

        hub.LeafListen.ShouldNotBeNull();
        hub.LeafListen.ShouldStartWith("127.0.0.1:");

        // Endpoint must be exactly "host:port" with a parseable port.
        var parts = hub.LeafListen.Split(':');
        parts.Length.ShouldBe(2);
        int.TryParse(parts[1], out var port).ShouldBeTrue();
        port.ShouldBeGreaterThan(0);
    }
    finally
    {
        await hubCts.CancelAsync();
        hub.Dispose();
        hubCts.Dispose();
    }
}
|
|
|
|
// Go: TestLeafNodeQueueGroupDistribution server/leafnode_test.go:4021
[Fact]
public async Task Queue_group_interest_from_two_spokes_both_propagate_to_hub()
{
    // Queue subscriptions on two different spokes must both surface as
    // remote interest on the shared hub.
    await using var fixture = await TwoSpokeFixture.StartAsync();

    await using var firstSpokeClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Spoke1.Port}",
    });
    await firstSpokeClient.ConnectAsync();

    await using var secondSpokeClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Spoke2.Port}",
    });
    await secondSpokeClient.ConnectAsync();

    // One member of the "workers" queue group on each spoke.
    await using var firstWorker = await firstSpokeClient.SubscribeCoreAsync<string>("dist.test", queueGroup: "workers");
    await using var secondWorker = await secondSpokeClient.SubscribeCoreAsync<string>("dist.test", queueGroup: "workers");
    await firstSpokeClient.PingAsync();
    await secondSpokeClient.PingAsync();

    // Poll until interest shows up on the hub or the deadline expires.
    using var deadline = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    while (!deadline.IsCancellationRequested && !fixture.Hub.HasRemoteInterest("dist.test"))
        await Task.Delay(50, deadline.Token).ContinueWith(_ => { }, TaskScheduler.Default);

    // Hub should have remote interest from at least one spoke.
    fixture.Hub.HasRemoteInterest("dist.test").ShouldBeTrue();

    // Both spokes should track their own leaf connection.
    Interlocked.Read(ref fixture.Spoke1.Stats.Leafs).ShouldBeGreaterThan(0);
    Interlocked.Read(ref fixture.Spoke2.Stats.Leafs).ShouldBeGreaterThan(0);

    // Hub should have both leaf connections.
    Interlocked.Read(ref fixture.Hub.Stats.Leafs).ShouldBeGreaterThanOrEqualTo(2);
}
|
|
|
|
// Go: TestLeafNodeConfigureWriteDeadline server/leafnode_test.go:10802
[Fact]
public void LeafNodeOptions_defaults_to_empty_remotes_list()
{
    // A freshly constructed options object listens on all interfaces with an
    // ephemeral port and has no remotes configured.
    var defaults = new LeafNodeOptions();

    defaults.Remotes.ShouldNotBeNull();
    defaults.Remotes.Count.ShouldBe(0);
    defaults.Host.ShouldBe("0.0.0.0");
    defaults.Port.ShouldBe(0);
}
|
|
|
|
// Go: TestLeafNodeValidateAuthOptions server/leafnode_test.go:583
[Fact]
public void NatsOptions_with_no_leaf_config_has_null_leaf()
{
    // Leaf-node support is opt-in: default options carry no leaf config.
    new NatsOptions().LeafNode.ShouldBeNull();
}
|
|
|
|
// Go: TestLeafNodeAccountNotFound server/leafnode_test.go:352
[Fact]
public void NatsOptions_leaf_node_can_be_configured()
{
    // Explicitly configured leaf settings round-trip through NatsOptions.
    var leaf = new LeafNodeOptions
    {
        Host = "127.0.0.1",
        Port = 5222,
        Remotes = ["127.0.0.1:6222"],
    };
    var options = new NatsOptions { LeafNode = leaf };

    options.LeafNode.ShouldNotBeNull();
    options.LeafNode.Host.ShouldBe("127.0.0.1");
    options.LeafNode.Port.ShouldBe(5222);
    options.LeafNode.Remotes.Count.ShouldBe(1);
}
|
|
|
|
// Go: TestLeafNodePermissionWithLiteralSubjectAndQueueInterest server/leafnode_test.go:9935
[Fact]
public async Task Multiple_wildcard_subs_on_leaf_all_receive_matching_messages()
{
    // Two overlapping wildcard subscriptions ("multi.*.test" and "multi.>")
    // both match one published subject; each must get its own delivery.
    await using var fixture = await LeafFixture.StartAsync();

    await using var spokeClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Spoke.Port}",
    });
    await spokeClient.ConnectAsync();

    await using var hubClient = new NatsConnection(new NatsOpts
    {
        Url = $"nats://127.0.0.1:{fixture.Hub.Port}",
    });
    await hubClient.ConnectAsync();

    await using var starSub = await spokeClient.SubscribeCoreAsync<string>("multi.*.test");
    await using var fullWildcardSub = await spokeClient.SubscribeCoreAsync<string>("multi.>");
    await spokeClient.PingAsync();
    await fixture.WaitForRemoteInterestOnHubAsync("multi.xyz.test");

    await hubClient.PublishAsync("multi.xyz.test", "match-both");

    using var firstDeadline = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    (await starSub.Msgs.ReadAsync(firstDeadline.Token)).Data.ShouldBe("match-both");

    using var secondDeadline = new CancellationTokenSource(TimeSpan.FromSeconds(5));
    (await fullWildcardSub.Msgs.ReadAsync(secondDeadline.Token)).Data.ShouldBe("match-both");
}
|
|
|
|
// Go: TestLeafNodeExportPermissionsNotForSpecialSubs server/leafnode_test.go:1484
[Fact]
public async Task Leaf_node_hub_client_count_is_correct_with_multiple_clients()
{
    // Five direct hub clients must all be reflected in the hub's client count.
    await using var fixture = await LeafFixture.StartAsync();

    var hubClients = new List<NatsConnection>();
    try
    {
        for (var n = 0; n < 5; n++)
        {
            var client = new NatsConnection(new NatsOpts
            {
                Url = $"nats://127.0.0.1:{fixture.Hub.Port}",
            });
            await client.ConnectAsync();
            hubClients.Add(client);
        }

        fixture.Hub.ClientCount.ShouldBeGreaterThanOrEqualTo(5);
    }
    finally
    {
        foreach (var client in hubClients)
            await client.DisposeAsync();
    }
}
|
|
|
|
// Go: TestLeafNodeInterestPropagationDaisychain server/leafnode_test.go:3953
[Fact]
public async Task Leaf_server_port_is_nonzero_after_ephemeral_bind()
{
    // Binding with Port = 0 asks the OS for ephemeral ports; once ready, the
    // server must report the real client port and a leaf listen endpoint.
    var options = new NatsOptions
    {
        Host = "127.0.0.1",
        Port = 0,
        LeafNode = new LeafNodeOptions { Host = "127.0.0.1", Port = 0 },
    };

    var server = new NatsServer(options, NullLoggerFactory.Instance);
    var cts = new CancellationTokenSource();
    try
    {
        _ = server.StartAsync(cts.Token);
        await server.WaitForReadyAsync();

        server.Port.ShouldBeGreaterThan(0);
        server.LeafListen.ShouldNotBeNull();
    }
    finally
    {
        // Stop and dispose even when an assertion throws, so the listener
        // does not leak into later tests.
        await cts.CancelAsync();
        server.Dispose();
        cts.Dispose();
    }
}
|
|
|
|
// Go: TestLeafNodeRoutedSubKeyDifferentBetweenLeafSubAndRoutedSub server/leafnode_test.go:5602
[Fact]
public async Task Spoke_shutdown_reduces_hub_leaf_count()
{
    // After the spoke connects, the hub counts one leaf; shutting the spoke
    // down must bring the count back to zero. Cleanup is in a finally block
    // so a failed assertion cannot leak listeners into subsequent tests.
    var hubOptions = new NatsOptions
    {
        Host = "127.0.0.1",
        Port = 0,
        LeafNode = new LeafNodeOptions { Host = "127.0.0.1", Port = 0 },
    };

    var hub = new NatsServer(hubOptions, NullLoggerFactory.Instance);
    var hubCts = new CancellationTokenSource();
    NatsServer? spoke = null;
    CancellationTokenSource? spokeCts = null;

    try
    {
        _ = hub.StartAsync(hubCts.Token);
        await hub.WaitForReadyAsync();

        var spokeOptions = new NatsOptions
        {
            Host = "127.0.0.1",
            Port = 0,
            LeafNode = new LeafNodeOptions
            {
                Host = "127.0.0.1",
                Port = 0,
                Remotes = [hub.LeafListen!],
            },
        };

        spoke = new NatsServer(spokeOptions, NullLoggerFactory.Instance);
        spokeCts = new CancellationTokenSource();
        _ = spoke.StartAsync(spokeCts.Token);
        await spoke.WaitForReadyAsync();

        // Stats.Leafs is a long mutated by server threads; poll it with
        // Interlocked.Read (the original's loop condition used a plain read).
        using var waitTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        while (!waitTimeout.IsCancellationRequested && Interlocked.Read(ref hub.Stats.Leafs) == 0)
            await Task.Delay(50, waitTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

        Interlocked.Read(ref hub.Stats.Leafs).ShouldBe(1);

        // Shut down spoke deliberately; null the local so the finally block
        // does not dispose it twice.
        await spokeCts.CancelAsync();
        spoke.Dispose();
        spoke = null;

        using var disconnTimeout = new CancellationTokenSource(TimeSpan.FromSeconds(5));
        while (!disconnTimeout.IsCancellationRequested && Interlocked.Read(ref hub.Stats.Leafs) > 0)
            await Task.Delay(50, disconnTimeout.Token).ContinueWith(_ => { }, TaskScheduler.Default);

        Interlocked.Read(ref hub.Stats.Leafs).ShouldBe(0);
    }
    finally
    {
        if (spoke is not null && spokeCts is not null)
            await spokeCts.CancelAsync();
        await hubCts.CancelAsync();
        spoke?.Dispose();
        hub.Dispose();
        spokeCts?.Dispose();
        hubCts.Dispose();
    }
}
|
|
|
|
// Go: TestLeafNodeHubWithGateways server/leafnode_test.go:1584
[Fact]
public void LeafHubSpokeMapper_maps_accounts_in_both_directions()
{
    // The mapper translates hub accounts to spoke accounts on the way out,
    // reverses the translation on the way in, and leaves subjects untouched.
    var accountMap = new Dictionary<string, string>
    {
        ["HUB_ACCT"] = "SPOKE_ACCT",
        ["SYS"] = "SPOKE_SYS",
    };
    var mapper = new LeafHubSpokeMapper(accountMap);

    var outbound = mapper.Map("HUB_ACCT", "foo.bar", LeafMapDirection.Outbound);
    outbound.Account.ShouldBe("SPOKE_ACCT");
    outbound.Subject.ShouldBe("foo.bar");

    var inbound = mapper.Map("SPOKE_ACCT", "foo.bar", LeafMapDirection.Inbound);
    inbound.Account.ShouldBe("HUB_ACCT");

    var system = mapper.Map("SYS", "sys.event", LeafMapDirection.Outbound);
    system.Account.ShouldBe("SPOKE_SYS");
}
|
|
|
|
// Go: TestLeafNodeHubWithGateways server/leafnode_test.go:1584
[Fact]
public void LeafHubSpokeMapper_returns_original_for_unmapped_account()
{
    // Accounts absent from the map pass through unchanged.
    var mapper = new LeafHubSpokeMapper(new Dictionary<string, string> { ["KNOWN"] = "MAPPED" });

    var passthrough = mapper.Map("UNKNOWN", "test", LeafMapDirection.Outbound);
    passthrough.Account.ShouldBe("UNKNOWN");
    passthrough.Subject.ShouldBe("test");
}
|
|
}
|