Initial import of the CBDDC codebase with docs and tests. Adds a .NET-focused .gitignore to keep generated artifacts out of source control.
Some checks failed
CI / verify (push) Has been cancelled

This commit is contained in:
Joseph Doherty
2026-02-20 13:03:21 -05:00
commit 08bfc17218
218 changed files with 33910 additions and 0 deletions

View File

@@ -0,0 +1,162 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Network.Leadership;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class BullyLeaderElectionServiceTests
{
    // Upper bound for condition polling; generous to absorb CI scheduler jitter.
    private static readonly TimeSpan PollTimeout = TimeSpan.FromSeconds(5);

    private static IDiscoveryService CreateDiscovery(IList<PeerNode> peers)
    {
        var discovery = Substitute.For<IDiscoveryService>();
        // Re-evaluate on every call so tests can mutate the peer list mid-test.
        discovery.GetActivePeers().Returns(_ => peers);
        return discovery;
    }

    private static IPeerNodeConfigurationProvider CreateConfig(string nodeId)
    {
        var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
        configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = nodeId });
        return configProvider;
    }

    /// <summary>
    /// Polls until <paramref name="condition"/> becomes true or <see cref="PollTimeout"/> elapses.
    /// Replaces the previous fixed <c>Task.Delay(200)</c> sleeps, which raced the 100 ms election
    /// interval and made the tests flaky on slow CI machines. The assertions that follow each wait
    /// still fail the test if the condition was never reached.
    /// </summary>
    /// <param name="condition">The condition to wait for.</param>
    private static async Task WaitForAsync(Func<bool> condition)
    {
        var deadline = DateTime.UtcNow + PollTimeout;
        while (!condition() && DateTime.UtcNow < deadline)
        {
            await Task.Delay(25);
        }
    }

    /// <summary>
    /// Verifies that a single node elects itself as leader.
    /// </summary>
    [Fact]
    public async Task SingleNode_ShouldBecomeLeader()
    {
        var electionService = new BullyLeaderElectionService(
            CreateDiscovery(new List<PeerNode>()),
            CreateConfig("node-A"),
            electionInterval: TimeSpan.FromMilliseconds(100));
        LeadershipChangedEventArgs? lastEvent = null;
        electionService.LeadershipChanged += (_, e) => lastEvent = e;

        await electionService.Start();
        await WaitForAsync(() => electionService.IsCloudGateway);

        electionService.IsCloudGateway.ShouldBeTrue();
        electionService.CurrentGatewayNodeId.ShouldBe("node-A");
        lastEvent.ShouldNotBeNull();
        lastEvent!.IsLocalNodeGateway.ShouldBeTrue();
        lastEvent.CurrentGatewayNodeId.ShouldBe("node-A");

        await electionService.Stop();
    }

    /// <summary>
    /// Verifies that the smallest node ID is elected as leader among LAN peers.
    /// </summary>
    [Fact]
    public async Task MultipleNodes_SmallestNodeIdShouldBeLeader()
    {
        var peers = new List<PeerNode>
        {
            new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
            new("node-C", "192.168.1.3:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
        };
        var electionService = new BullyLeaderElectionService(
            CreateDiscovery(peers),
            CreateConfig("node-A"),
            electionInterval: TimeSpan.FromMilliseconds(100));

        await electionService.Start();
        // "node-A" sorts before the LAN peers, so the local node should win.
        await WaitForAsync(() => electionService.IsCloudGateway);

        electionService.IsCloudGateway.ShouldBeTrue();
        electionService.CurrentGatewayNodeId.ShouldBe("node-A");

        await electionService.Stop();
    }

    /// <summary>
    /// Verifies that the local node is not elected when it is not the smallest node ID.
    /// </summary>
    [Fact]
    public async Task LocalNodeNotSmallest_ShouldNotBeLeader()
    {
        var peers = new List<PeerNode>
        {
            new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
            new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
        };
        var electionService = new BullyLeaderElectionService(
            CreateDiscovery(peers),
            CreateConfig("node-C"),
            electionInterval: TimeSpan.FromMilliseconds(100));

        await electionService.Start();
        await WaitForAsync(() => electionService.CurrentGatewayNodeId == "node-A");

        electionService.IsCloudGateway.ShouldBeFalse();
        electionService.CurrentGatewayNodeId.ShouldBe("node-A");

        await electionService.Stop();
    }

    /// <summary>
    /// Verifies that leadership is re-elected when the current leader fails.
    /// </summary>
    [Fact]
    public async Task LeaderFailure_ShouldReelect()
    {
        var peers = new List<PeerNode>
        {
            new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
        };
        var electionService = new BullyLeaderElectionService(
            CreateDiscovery(peers),
            CreateConfig("node-B"),
            electionInterval: TimeSpan.FromMilliseconds(100));
        var leadershipChanges = new List<LeadershipChangedEventArgs>();
        electionService.LeadershipChanged += (_, e) => leadershipChanges.Add(e);

        await electionService.Start();
        await WaitForAsync(() => electionService.CurrentGatewayNodeId == "node-A");
        electionService.CurrentGatewayNodeId.ShouldBe("node-A");

        // Simulate leader failure: node-A disappears from discovery.
        peers.Clear();
        await WaitForAsync(() => electionService.CurrentGatewayNodeId == "node-B");

        electionService.IsCloudGateway.ShouldBeTrue();
        electionService.CurrentGatewayNodeId.ShouldBe("node-B");
        leadershipChanges.ShouldNotBeEmpty();
        leadershipChanges.Last().IsLocalNodeGateway.ShouldBeTrue();
        leadershipChanges.Last().CurrentGatewayNodeId.ShouldBe("node-B");

        await electionService.Stop();
    }

    /// <summary>
    /// Verifies that cloud peers are excluded from LAN gateway election.
    /// </summary>
    [Fact]
    public async Task CloudPeersExcludedFromElection()
    {
        var peers = new List<PeerNode>
        {
            new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
            // "cloud-node-Z" would sort after the LAN nodes anyway, but the point of the
            // test is that its PeerType.CloudRemote keeps it out of the LAN election entirely.
            new("cloud-node-Z", "cloud.example.com:9000", DateTimeOffset.UtcNow, PeerType.CloudRemote)
        };
        var electionService = new BullyLeaderElectionService(
            CreateDiscovery(peers),
            CreateConfig("node-B"),
            electionInterval: TimeSpan.FromMilliseconds(100));

        await electionService.Start();
        await WaitForAsync(() => electionService.CurrentGatewayNodeId == "node-A");

        electionService.CurrentGatewayNodeId.ShouldBe("node-A");

        await electionService.Stop();
    }
}

View File

@@ -0,0 +1,90 @@
using System.IO;
using System.Net.Sockets;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class ConnectionTests
{
    /// <summary>
    /// Verifies that the server rejects new clients when the configured connection limit is reached.
    /// </summary>
    [Fact]
    public async Task Server_Should_Reject_Clients_When_Limit_Reached()
    {
        // Arrange: stub an oplog store whose queries all report empty/initial state.
        var oplog = Substitute.For<IOplogStore>();
        oplog.GetLatestTimestampAsync(Arg.Any<CancellationToken>())
            .Returns(new HlcTimestamp(0, 0, "node"));
        oplog.GetVectorClockAsync(Arg.Any<CancellationToken>())
            .Returns(new VectorClock());
        oplog.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
            .Returns(Array.Empty<OplogEntry>());
        oplog.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
            .Returns(Array.Empty<OplogEntry>());

        var config = Substitute.For<IPeerNodeConfigurationProvider>();
        config.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = "server-node",
            AuthToken = "auth-token",
            TcpPort = 0 // let the OS assign a free port
        });

        var documents = Substitute.For<IDocumentStore>();
        documents.InterestedCollection.Returns(["Users", "TodoLists"]);

        var auth = Substitute.For<IAuthenticator>();
        auth.ValidateAsync(Arg.Any<string>(), Arg.Any<string>()).Returns(true);

        var handshake = Substitute.For<IPeerHandshakeService>();
        handshake.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
            .Returns((CipherState?)null);

        var server = new TcpSyncServer(
            oplog,
            documents,
            Substitute.For<ISnapshotService>(),
            config,
            NullLogger<TcpSyncServer>.Instance,
            auth,
            handshake);
        server.MaxConnections = 2; // third concurrent client must be turned away

        await server.Start();
        var port = server.ListeningPort ?? throw new Exception("Server not started");

        using var client1 = new TcpClient();
        using var client2 = new TcpClient();
        using var client3 = new TcpClient();
        try
        {
            // Act: fill both connection slots, then attempt one over the limit.
            await client1.ConnectAsync("127.0.0.1", port);
            await client2.ConnectAsync("127.0.0.1", port);
            await Task.Delay(100);
            await client3.ConnectAsync("127.0.0.1", port);

            // Assert: the server closes client 3 right away, so its stream reads EOF (0 bytes).
            var rejectedStream = client3.GetStream();
            var scratch = new byte[10];
            var bytesRead = await rejectedStream.ReadAsync(scratch, 0, 10);
            bytesRead.ShouldBe(0, "Server should close connection immediately for client 3");
            client1.Connected.ShouldBeTrue();
            client2.Connected.ShouldBeTrue();
        }
        finally
        {
            await server.Stop();
        }
    }
}

View File

@@ -0,0 +1,50 @@
using System.Security.Cryptography;
using ZB.MOM.WW.CBDDC.Network.Security;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class CryptoHelperTests
{
    /// <summary>
    /// Verifies that encrypted data can be decrypted back to the original payload.
    /// </summary>
    [Fact]
    public void EncryptDecrypt_ShouldPreserveData()
    {
        // Arrange: a random 256-bit key and a payload containing edge-case byte values.
        byte[] key = new byte[32];
        RandomNumberGenerator.Fill(key);
        byte[] payload = { 1, 2, 3, 4, 5, 255, 0, 10 };

        // Act: round-trip the payload through the helper.
        var (ciphertext, iv, tag) = CryptoHelper.Encrypt(payload, key);
        byte[] roundTripped = CryptoHelper.Decrypt(ciphertext, iv, tag, key);

        // Assert
        roundTripped.ShouldBe(payload);
    }

    /// <summary>
    /// Verifies that decryption fails when ciphertext is tampered with.
    /// </summary>
    [Fact]
    public void Decrypt_ShouldFail_IfTampered()
    {
        // Arrange
        byte[] key = new byte[32];
        RandomNumberGenerator.Fill(key);
        var (ciphertext, iv, tag) = CryptoHelper.Encrypt(new byte[] { 1, 2, 3 }, key);

        // Flip every bit of the first ciphertext byte so the authentication check fails.
        ciphertext[0] ^= 0xFF;

        // Act & Assert
        Should.Throw<CryptographicException>(() => CryptoHelper.Decrypt(ciphertext, iv, tag, key));
    }
}

View File

@@ -0,0 +1,2 @@
global using NSubstitute;
global using Shouldly;

View File

@@ -0,0 +1,75 @@
using System.IO;
using System.Net.Sockets;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class HandshakeRegressionTests
{
    /// <summary>
    /// Verifies that the server invokes the handshake service when a client connects.
    /// </summary>
    [Fact]
    public async Task Server_Should_Call_HandshakeService_On_Client_Connection()
    {
        // Arrange: oplog store stubbed to an empty/initial state.
        var oplog = Substitute.For<IOplogStore>();
        oplog.GetLatestTimestampAsync(Arg.Any<CancellationToken>())
            .Returns(new HlcTimestamp(0, 0, "node"));
        oplog.GetVectorClockAsync(Arg.Any<CancellationToken>())
            .Returns(new VectorClock());
        oplog.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
            .Returns(Array.Empty<OplogEntry>());
        oplog.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
            .Returns(Array.Empty<OplogEntry>());

        var config = Substitute.For<IPeerNodeConfigurationProvider>();
        config.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = "server-node",
            AuthToken = "auth-token",
            TcpPort = 0 // OS-assigned port
        });

        var documents = Substitute.For<IDocumentStore>();
        documents.InterestedCollection.Returns(["Users"]);

        var auth = Substitute.For<IAuthenticator>();
        auth.ValidateAsync(Arg.Any<string>(), Arg.Any<string>()).Returns(true);

        var handshake = Substitute.For<IPeerHandshakeService>();
        handshake.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
            .Returns((CipherState?)null);

        var server = new TcpSyncServer(
            oplog,
            documents,
            Substitute.For<ISnapshotService>(),
            config,
            NullLogger<TcpSyncServer>.Instance,
            auth,
            handshake);

        await server.Start();
        var port = server.ListeningPort ?? throw new Exception("Server did not start or report port");

        // Act: connect once, give the server time to accept and hand off, then disconnect.
        using (var client = new TcpClient())
        {
            await client.ConnectAsync("127.0.0.1", port);
            await Task.Delay(500);
        }
        await server.Stop();

        // Assert: exactly one handshake, in the responder role, with the server's own node id.
        await handshake.Received(1)
            .HandshakeAsync(Arg.Any<Stream>(), false, "server-node", Arg.Any<CancellationToken>());
    }
}

View File

@@ -0,0 +1,178 @@
using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
using Google.Protobuf;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class ProtocolTests
{
    private readonly ProtocolHandler _handler;

    /// <summary>
    /// Initializes a new instance of the <see cref="ProtocolTests"/> class.
    /// </summary>
    public ProtocolTests()
    {
        _handler = new ProtocolHandler(NullLogger<ProtocolHandler>.Instance);
    }

    /// <summary>
    /// Verifies a plain message can be written and read without transformation.
    /// </summary>
    [Fact]
    public async Task RoundTrip_ShouldWork_WithPlainMessage() // was "ShouldWorks" — typo fixed for consistency with sibling tests
    {
        // Arrange
        var stream = new MemoryStream();
        var message = new HandshakeRequest { NodeId = "node-1", AuthToken = "token" };
        // Act
        await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, null);
        stream.Position = 0; // Reset for reading
        var (type, payload) = await _handler.ReadMessageAsync(stream, null);
        // Assert
        type.ShouldBe(MessageType.HandshakeReq);
        var decoded = HandshakeRequest.Parser.ParseFrom(payload);
        decoded.NodeId.ShouldBe("node-1");
        decoded.AuthToken.ShouldBe("token");
    }

    /// <summary>
    /// Verifies a compressed message can be written and read successfully.
    /// </summary>
    [Fact]
    public async Task RoundTrip_ShouldWork_WithCompression()
    {
        // Arrange
        var stream = new MemoryStream();
        // Create a large message to trigger compression logic (threshold is small but let's be safe)
        var largeData = string.Join("", Enumerable.Repeat("ABCDEF0123456789", 100));
        var message = new HandshakeRequest { NodeId = largeData, AuthToken = "token" };
        // Act
        await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, null);
        stream.Position = 0;
        var (type, payload) = await _handler.ReadMessageAsync(stream, null);
        // Assert
        type.ShouldBe(MessageType.HandshakeReq);
        var decoded = HandshakeRequest.Parser.ParseFrom(payload);
        decoded.NodeId.ShouldBe(largeData);
    }

    /// <summary>
    /// Verifies an encrypted message can be written and read successfully.
    /// </summary>
    [Fact]
    public async Task RoundTrip_ShouldWork_WithEncryption()
    {
        // Arrange
        var stream = new MemoryStream();
        var message = new HandshakeRequest { NodeId = "secure-node", AuthToken = "secure-token" };
        // Build a CipherState with an arbitrary key. Random.Shared (not `new Random()`)
        // avoids allocating a fresh PRNG per test; this key is test-only, not a real secret.
        var key = new byte[32]; // 256-bit key
        Random.Shared.NextBytes(key);
        var cipherState = new CipherState(key, key); // Encrypt and Decrypt with same key for loopback
        // Act
        await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, cipherState);
        stream.Position = 0;
        var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState);
        // Assert
        type.ShouldBe(MessageType.HandshakeReq);
        var decoded = HandshakeRequest.Parser.ParseFrom(payload);
        decoded.NodeId.ShouldBe("secure-node");
    }

    /// <summary>
    /// Verifies a message can be round-tripped when both compression and encryption are enabled.
    /// </summary>
    [Fact]
    public async Task RoundTrip_ShouldWork_WithEncryption_And_Compression()
    {
        // Arrange
        var stream = new MemoryStream();
        var largeData = string.Join("", Enumerable.Repeat("SECURECOMPRESSION", 100));
        var message = new HandshakeRequest { NodeId = largeData };
        var key = new byte[32];
        Random.Shared.NextBytes(key);
        var cipherState = new CipherState(key, key);
        // Act: Compress THEN Encrypt. ReadMessageAsync abstracts the secure envelope away,
        // so the assertion below only checks the logical round trip.
        await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, cipherState);
        stream.Position = 0;
        var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState);
        // Assert
        type.ShouldBe(MessageType.HandshakeReq);
        var decoded = HandshakeRequest.Parser.ParseFrom(payload);
        decoded.NodeId.ShouldBe(largeData);
    }

    /// <summary>
    /// Verifies that message reads succeed when bytes arrive in small fragments.
    /// </summary>
    [Fact]
    public async Task ReadMessage_ShouldHandle_Fragmentation()
    {
        // Arrange
        var fullStream = new MemoryStream();
        var message = new HandshakeRequest { NodeId = "fragmented" };
        await _handler.SendMessageAsync(fullStream, MessageType.HandshakeReq, message, false, null);
        byte[] completeBytes = fullStream.ToArray();
        var fragmentedStream = new FragmentedMemoryStream(completeBytes, chunkSize: 2); // Read 2 bytes at a time
        // Act
        var (type, payload) = await _handler.ReadMessageAsync(fragmentedStream, null);
        // Assert
        type.ShouldBe(MessageType.HandshakeReq);
        var decoded = HandshakeRequest.Parser.ParseFrom(payload);
        decoded.NodeId.ShouldBe("fragmented");
    }

    // Helper stream that never returns more than _chunkSize bytes per read,
    // simulating TCP fragmentation for the reader under test.
    private class FragmentedMemoryStream : MemoryStream
    {
        private readonly int _chunkSize;

        /// <summary>
        /// Initializes a new instance of the <see cref="FragmentedMemoryStream"/> class.
        /// </summary>
        /// <param name="buffer">The backing stream buffer.</param>
        /// <param name="chunkSize">The maximum bytes returned per read.</param>
        public FragmentedMemoryStream(byte[] buffer, int chunkSize) : base(buffer)
        {
            _chunkSize = chunkSize;
        }

        /// <inheritdoc />
        public override Task<int> ReadAsync(byte[] buffer, int offset, int count, System.Threading.CancellationToken cancellationToken)
            // Force read to be max _chunkSize, even if more is requested.
            => base.ReadAsync(buffer, offset, Math.Min(count, _chunkSize), cancellationToken);

        /// <inheritdoc />
        /// <remarks>
        /// Clamp the Memory-based overload too: on modern runtimes Stream.ReadAsync and
        /// ReadExactlyAsync route through this overload rather than the byte[] one, so
        /// without this override the fragmentation simulation would silently not apply.
        /// </remarks>
        public override ValueTask<int> ReadAsync(Memory<byte> buffer, System.Threading.CancellationToken cancellationToken = default)
            => base.ReadAsync(buffer.Slice(0, Math.Min(buffer.Length, _chunkSize)), cancellationToken);
    }
}
}

View File

@@ -0,0 +1,177 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class SecureHandshakeTests
{
    /// <summary>
    /// Verifies handshake negotiation succeeds between initiator and responder services.
    /// </summary>
    [Fact]
    public async Task Handshake_Should_Succeed_Between_Two_Services()
    {
        // Arrange: two one-way pipes cross-wired into two duplex endpoints.
        // Client writes to clientStream, server reads from clientStream;
        // server writes to serverStream, client reads from serverStream.
        var clientStream = new PipeStream();
        var serverStream = new PipeStream();
        var clientSocket = new DuplexStream(serverStream, clientStream); // Read from server, Write to client
        var serverSocket = new DuplexStream(clientStream, serverStream); // Read from client, Write to server
        var clientService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
        var serverService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
        var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));

        // Act: run both sides of the handshake concurrently.
        var clientTask = clientService.HandshakeAsync(clientSocket, isInitiator: true, myNodeId: "client", token: cts.Token);
        var serverTask = serverService.HandshakeAsync(serverSocket, isInitiator: false, myNodeId: "server", token: cts.Token);
        await Task.WhenAll(clientTask, serverTask);

        // Assert
        var clientState = clientTask.Result;
        var serverState = serverTask.Result;
        clientState.ShouldNotBeNull();
        serverState.ShouldNotBeNull();
        // Each side's encrypt key must equal the other side's decrypt key (symmetric session keys).
        clientState!.EncryptKey.ShouldBe(serverState!.DecryptKey);
        clientState.DecryptKey.ShouldBe(serverState.EncryptKey);
    }

    // Minimal in-memory one-way pipe: writes append to a shared buffer, reads drain it.
    // A semaphore signals data availability so async reads can block until bytes exist.
    class PipeStream : Stream
    {
        private readonly MemoryStream _buffer = new MemoryStream();
        private readonly SemaphoreSlim _readSemaphore = new SemaphoreSlim(0);
        private readonly object _lock = new object();

        /// <inheritdoc />
        public override bool CanRead => true;
        /// <inheritdoc />
        public override bool CanSeek => false;
        /// <inheritdoc />
        public override bool CanWrite => true;
        /// <inheritdoc />
        public override long Length => _buffer.Length;
        /// <inheritdoc />
        public override long Position { get => _buffer.Position; set => throw new NotSupportedException(); }

        /// <inheritdoc />
        public override void Flush() { }

        /// <inheritdoc />
        public override int Read(byte[] buffer, int offset, int count) => throw new NotImplementedException("Use Async");

        /// <inheritdoc />
        public override async Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
        {
            // Each Write releases the semaphore once, but a single read may drain several
            // writes' worth of data, leaving stale semaphore counts behind. Previously a
            // later read could then pass the wait, see an empty buffer, and return 0 —
            // a spurious EOF. Loop until data is actually present instead.
            while (true)
            {
                await _readSemaphore.WaitAsync(cancellationToken);
                lock (_lock)
                {
                    if (_buffer.Length == 0)
                    {
                        continue; // stale signal — wait for the next write
                    }
                    _buffer.Position = 0;
                    int read = _buffer.Read(buffer, offset, count);
                    // Compact buffer (inefficient but works for unit tests)
                    byte[] remaining = _buffer.ToArray().Skip(read).ToArray();
                    _buffer.SetLength(0);
                    _buffer.Write(remaining, 0, remaining.Length);
                    if (_buffer.Length > 0) _readSemaphore.Release(); // Signal if data remains
                    return read;
                }
            }
        }

        /// <inheritdoc />
        public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
        /// <inheritdoc />
        public override void SetLength(long value) => throw new NotSupportedException();

        /// <inheritdoc />
        public override void Write(byte[] buffer, int offset, int count)
        {
            lock (_lock)
            {
                long pos = _buffer.Position;
                _buffer.Seek(0, SeekOrigin.End);
                _buffer.Write(buffer, offset, count);
                _buffer.Position = pos;
            }
            _readSemaphore.Release();
        }
    }

    // Composes two one-way streams into a single bidirectional endpoint.
    class DuplexStream : Stream
    {
        private readonly Stream _readSource;
        private readonly Stream _writeTarget;

        /// <summary>
        /// Initializes a new instance of the <see cref="DuplexStream"/> class.
        /// </summary>
        /// <param name="readSource">The underlying stream used for read operations.</param>
        /// <param name="writeTarget">The underlying stream used for write operations.</param>
        public DuplexStream(Stream readSource, Stream writeTarget)
        {
            _readSource = readSource;
            _writeTarget = writeTarget;
        }

        /// <inheritdoc />
        public override bool CanRead => true;
        /// <inheritdoc />
        public override bool CanSeek => false;
        /// <inheritdoc />
        public override bool CanWrite => true;
        /// <inheritdoc />
        public override long Length => 0;
        /// <inheritdoc />
        public override long Position { get => 0; set { } }

        /// <inheritdoc />
        public override void Flush() => _writeTarget.Flush();
        /// <inheritdoc />
        public override int Read(byte[] buffer, int offset, int count) => _readSource.Read(buffer, offset, count);
        /// <inheritdoc />
        public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
            => _readSource.ReadAsync(buffer, offset, count, cancellationToken);
        /// <inheritdoc />
        public override void Write(byte[] buffer, int offset, int count) => _writeTarget.Write(buffer, offset, count);
        /// <inheritdoc />
        public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
            => _writeTarget.WriteAsync(buffer, offset, count, cancellationToken);
        /// <inheritdoc />
        public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
        /// <inheritdoc />
        public override void SetLength(long value) => throw new NotSupportedException();
    }
}
}

View File

@@ -0,0 +1,287 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class SnapshotReconnectRegressionTests
{
// Subclass to expose private method
private class TestableSyncOrchestrator : SyncOrchestrator
{
/// <summary>
/// Initializes a new instance of the <see cref="TestableSyncOrchestrator"/> class.
/// </summary>
/// <param name="discovery">The discovery service.</param>
/// <param name="oplogStore">The oplog store.</param>
/// <param name="documentStore">The document store.</param>
/// <param name="snapshotMetadataStore">The snapshot metadata store.</param>
/// <param name="snapshotService">The snapshot service.</param>
/// <param name="peerNodeConfigurationProvider">The peer node configuration provider.</param>
/// <param name="peerOplogConfirmationStore">The peer oplog confirmation store.</param>
public TestableSyncOrchestrator(
IDiscoveryService discovery,
IOplogStore oplogStore,
IDocumentStore documentStore,
ISnapshotMetadataStore snapshotMetadataStore,
ISnapshotService snapshotService,
IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
IPeerOplogConfirmationStore peerOplogConfirmationStore)
: base(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
peerNodeConfigurationProvider,
NullLoggerFactory.Instance,
peerOplogConfirmationStore)
{
}
/// <summary>
/// Invokes the inbound batch processing path through reflection for regression testing.
/// </summary>
/// <param name="client">The peer client.</param>
/// <param name="peerNodeId">The peer node identifier.</param>
/// <param name="changes">The incoming oplog changes.</param>
/// <param name="token">The cancellation token.</param>
public async Task<string> TestProcessInboundBatchAsync(
TcpPeerClient client,
string peerNodeId,
IList<OplogEntry> changes,
CancellationToken token)
{
// Reflection to invoke private method since it's private not protected
var method = typeof(SyncOrchestrator).GetMethod(
"ProcessInboundBatchAsync",
System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance);
if (method == null)
throw new InvalidOperationException("ProcessInboundBatchAsync method not found.");
try
{
var task = (Task)method.Invoke(this, new object[] { client, peerNodeId, changes, token })!;
await task.ConfigureAwait(false);
// Access .Result via reflection because generic type is private
var resultProp = task.GetType().GetProperty("Result");
var result = resultProp?.GetValue(task);
return result?.ToString() ?? "null";
}
catch (System.Reflection.TargetInvocationException ex)
{
if (ex.InnerException != null) throw ex.InnerException;
throw;
}
}
}
private static ISnapshotMetadataStore CreateSnapshotMetadataStore()
{
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
snapshotMetadataStore.GetSnapshotMetadataAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((SnapshotMetadata?)null);
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((string?)null);
snapshotMetadataStore.GetAllSnapshotMetadataAsync(Arg.Any<CancellationToken>())
.Returns(Array.Empty<SnapshotMetadata>());
return snapshotMetadataStore;
}
private static ISnapshotService CreateSnapshotService()
{
var snapshotService = Substitute.For<ISnapshotService>();
snapshotService.CreateSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.ReplaceDatabaseAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.MergeSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return snapshotService;
}
private static IDocumentStore CreateDocumentStore()
{
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(["Users", "TodoLists"]);
documentStore.GetDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((Document?)null);
documentStore.GetDocumentsByCollectionAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.GetDocumentsAsync(Arg.Any<List<(string Collection, string Key)>>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.PutDocumentAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.InsertBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.UpdateBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteBatchDocumentsAsync(Arg.Any<IEnumerable<string>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.MergeAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(ci => ci.ArgAt<Document>(0));
documentStore.DropAsync(Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
documentStore.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<Document>());
documentStore.ImportAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
documentStore.MergeAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
return documentStore;
}
private static IOplogStore CreateOplogStore(string? localHeadHash)
{
var oplogStore = Substitute.For<IOplogStore>();
oplogStore.GetLastEntryHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(localHeadHash);
oplogStore.ApplyBatchAsync(Arg.Any<IEnumerable<OplogEntry>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return oplogStore;
}
private static TcpPeerClient CreateSnapshotRequiredClient()
{
var logger = Substitute.For<ILogger<TcpPeerClient>>();
var client = Substitute.For<TcpPeerClient>(
"127.0.0.1:0",
logger,
(IPeerHandshakeService?)null,
(INetworkTelemetryService?)null);
client.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(_ => Task.FromException<List<OplogEntry>>(new SnapshotRequiredException()));
return client;
}
private static IDiscoveryService CreateDiscovery()
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
discovery.Start().Returns(Task.CompletedTask);
discovery.Stop().Returns(Task.CompletedTask);
return discovery;
}
private static IPeerNodeConfigurationProvider CreateConfig()
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "local" });
return configProvider;
}
private static IPeerOplogConfirmationStore CreatePeerOplogConfirmationStore()
{
var store = Substitute.For<IPeerOplogConfirmationStore>();
store.EnsurePeerRegisteredAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<PeerType>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.UpdateConfirmationAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.GetConfirmationsAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.GetConfirmationsForPeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<string>());
store.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.ImportAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.MergeAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
return store;
}
/// <summary>
/// Verifies that gap recovery is skipped when an inbound entry already matches the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldSkipGapRecovery_WhenEntryMatchesSnapshotBoundary()
{
    // Arrange: the oplog store's tail hash and the recorded snapshot boundary hash agree.
    var oplogStore = CreateOplogStore("snapshot-boundary-hash");
    var snapshotMetadataStore = CreateSnapshotMetadataStore();
    snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
        .Returns("snapshot-boundary-hash");
    var orchestrator = new TestableSyncOrchestrator(
        CreateDiscovery(),
        oplogStore,
        CreateDocumentStore(),
        snapshotMetadataStore,
        CreateSnapshotService(),
        CreateConfig(),
        CreatePeerOplogConfirmationStore());
    using var client = CreateSnapshotRequiredClient();

    // A single inbound entry whose PreviousHash chains directly onto the snapshot boundary.
    var inboundEntries = new List<OplogEntry>
    {
        new OplogEntry(
            "col", "key", OperationType.Put, null,
            new HlcTimestamp(100, 1, "remote-node"),
            "snapshot-boundary-hash")
    };

    // Act
    var outcome = await orchestrator.TestProcessInboundBatchAsync(client, "remote-node", inboundEntries, CancellationToken.None);

    // Assert: the batch succeeds without ever requesting a chain range from the peer.
    outcome.ShouldBe("Success");
    await client.DidNotReceive().GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that gap recovery is attempted when the inbound entry does not match the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldTryRecovery_WhenSnapshotMismatch()
{
    // Arrange: the inbound entry's PreviousHash matches neither the local tail nor the snapshot boundary.
    var oplogStore = CreateOplogStore("some-old-hash");
    var snapshotMetadataStore = CreateSnapshotMetadataStore();
    snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
        .Returns("snapshot-boundary-hash");
    var orchestrator = new TestableSyncOrchestrator(
        CreateDiscovery(),
        oplogStore,
        CreateDocumentStore(),
        snapshotMetadataStore,
        CreateSnapshotService(),
        CreateConfig(),
        CreatePeerOplogConfirmationStore());
    using var client = CreateSnapshotRequiredClient();

    var inboundEntries = new List<OplogEntry>
    {
        new OplogEntry(
            "col", "key", OperationType.Put, null,
            new HlcTimestamp(100, 1, "remote-node"),
            "different-hash") // deliberately disconnected from the known chain
    };

    // Act & Assert: gap recovery calls GetChainRangeAsync, the stubbed client throws
    // SnapshotRequiredException, and the orchestrator re-throws it so a full sync is triggered.
    await Should.ThrowAsync<SnapshotRequiredException>(async () =>
        await orchestrator.TestProcessInboundBatchAsync(client, "remote-node", inboundEntries, CancellationToken.None));
    await client.Received(1).GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
}
}

View File

@@ -0,0 +1,251 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
/// <summary>
/// Tests for <see cref="SyncOrchestrator"/> peer registration and per-peer oplog confirmation advancement.
/// </summary>
public class SyncOrchestratorConfirmationTests
{
/// <summary>
/// Verifies that merged peers are registered and the local node is skipped.
/// </summary>
[Fact]
public async Task EnsurePeersRegisteredAsync_ShouldRegisterMergedPeers_AndSkipLocalNode()
{
// Arrange: "peer-a" appears in both lists with different endpoints; "local" must be filtered out.
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
var now = DateTimeOffset.UtcNow;
var discoveredPeers = new List<PeerNode>
{
new("local", "127.0.0.1:9000", now, PeerType.LanDiscovered),
new("peer-a", "10.0.0.1:9000", now, PeerType.LanDiscovered)
};
var knownPeers = new List<PeerNode>
{
new("peer-a", "10.99.0.1:9000", now, PeerType.StaticRemote),
new("peer-b", "10.0.0.2:9010", now, PeerType.StaticRemote)
};
// Act: merge the two lists (local node excluded) and register every merged peer.
var mergedPeers = SyncOrchestrator.BuildMergedPeerList(discoveredPeers, knownPeers, "local");
mergedPeers.Count.ShouldBe(2);
await orchestrator.EnsurePeersRegisteredAsync(mergedPeers, "local", CancellationToken.None);
// Assert: peer-a is registered with the LAN-discovered endpoint (discovered wins over static),
// peer-b with its static endpoint, and the local node is never registered against itself.
await confirmationStore.Received(1).EnsurePeerRegisteredAsync(
"peer-a",
"10.0.0.1:9000",
PeerType.LanDiscovered,
Arg.Any<CancellationToken>());
await confirmationStore.Received(1).EnsurePeerRegisteredAsync(
"peer-b",
"10.0.0.2:9010",
PeerType.StaticRemote,
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().EnsurePeerRegisteredAsync(
"local",
Arg.Any<string>(),
Arg.Any<PeerType>(),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that a newly discovered node is auto-registered when peer lists are refreshed.
/// </summary>
[Fact]
public async Task EnsurePeersRegisteredAsync_WhenNewNodeJoins_ShouldAutoRegisterJoinedNode()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
var now = DateTimeOffset.UtcNow;
var knownPeers = new List<PeerNode>
{
new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote)
};
// First refresh: only the static peer is visible.
var firstDiscovered = new List<PeerNode>
{
new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote)
};
var firstMerged = SyncOrchestrator.BuildMergedPeerList(firstDiscovered, knownPeers, "local");
await orchestrator.EnsurePeersRegisteredAsync(firstMerged, "local", CancellationToken.None);
// Act: a second refresh sees a brand-new LAN peer alongside the known static peer.
var secondDiscovered = new List<PeerNode>
{
new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote),
new("peer-new", "10.0.0.25:9010", now, PeerType.LanDiscovered)
};
var secondMerged = SyncOrchestrator.BuildMergedPeerList(secondDiscovered, knownPeers, "local");
await orchestrator.EnsurePeersRegisteredAsync(secondMerged, "local", CancellationToken.None);
// Assert: the joining node was registered exactly once.
await confirmationStore.Received(1).EnsurePeerRegisteredAsync(
"peer-new",
"10.0.0.25:9010",
PeerType.LanDiscovered,
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that confirmations advance only for nodes where remote vector-clock entries are at or ahead.
/// </summary>
[Fact]
public async Task AdvanceConfirmationsFromVectorClockAsync_ShouldAdvanceOnlyForRemoteAtOrAhead()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
// Local view of each source node's latest timestamp.
var local = new VectorClock();
local.SetTimestamp("node-equal", new HlcTimestamp(100, 1, "node-equal"));
local.SetTimestamp("node-ahead", new HlcTimestamp(200, 0, "node-ahead"));
local.SetTimestamp("node-behind", new HlcTimestamp(300, 0, "node-behind"));
local.SetTimestamp("node-local-only", new HlcTimestamp(150, 0, "node-local-only"));
// Remote view: equal for node-equal, ahead for node-ahead, behind for node-behind,
// plus a node the local clock has never seen.
var remote = new VectorClock();
remote.SetTimestamp("node-equal", new HlcTimestamp(100, 1, "node-equal"));
remote.SetTimestamp("node-ahead", new HlcTimestamp(250, 0, "node-ahead"));
remote.SetTimestamp("node-behind", new HlcTimestamp(299, 9, "node-behind"));
remote.SetTimestamp("node-remote-only", new HlcTimestamp(900, 0, "node-remote-only"));
// Tail-hash lookups: a known hash for node-equal, none for node-ahead.
oplogStore.GetLastEntryHashAsync("node-equal", Arg.Any<CancellationToken>())
.Returns("hash-equal");
oplogStore.GetLastEntryHashAsync("node-ahead", Arg.Any<CancellationToken>())
.Returns((string?)null);
// Act
await orchestrator.AdvanceConfirmationsFromVectorClockAsync("peer-1", local, remote, CancellationToken.None);
// Assert: confirmations advance (at the LOCAL timestamp) only where the remote is at or ahead;
// a missing tail hash is recorded as the empty string.
await confirmationStore.Received(1).UpdateConfirmationAsync(
"peer-1",
"node-equal",
new HlcTimestamp(100, 1, "node-equal"),
"hash-equal",
Arg.Any<CancellationToken>());
await confirmationStore.Received(1).UpdateConfirmationAsync(
"peer-1",
"node-ahead",
new HlcTimestamp(200, 0, "node-ahead"),
string.Empty,
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
"peer-1",
"node-behind",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
"peer-1",
"node-local-only",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
"peer-1",
"node-remote-only",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that pushed-batch confirmation uses the maximum timestamp and its matching hash.
/// </summary>
[Fact]
public async Task AdvanceConfirmationForPushedBatchAsync_ShouldUseMaxTimestampAndHash()
{
// Arrange: three pushed entries, deliberately out of order — (120, 1) is the maximum.
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
var pushedChanges = new List<OplogEntry>
{
CreateEntry("source-1", 100, 0, "hash-100"),
CreateEntry("source-1", 120, 1, "hash-120"),
CreateEntry("source-1", 110, 5, "hash-110")
};
// Act
await orchestrator.AdvanceConfirmationForPushedBatchAsync("peer-1", "source-1", pushedChanges, CancellationToken.None);
// Assert: the confirmation uses the max entry's timestamp and the hash of that same entry.
await confirmationStore.Received(1).UpdateConfirmationAsync(
"peer-1",
"source-1",
new HlcTimestamp(120, 1, "source-1"),
"hash-120",
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that no confirmation update occurs when a pushed batch is empty.
/// </summary>
[Fact]
public async Task AdvanceConfirmationForPushedBatchAsync_ShouldSkipEmptyBatch()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
// Act
await orchestrator.AdvanceConfirmationForPushedBatchAsync(
"peer-1",
"source-1",
Array.Empty<OplogEntry>(),
CancellationToken.None);
// Assert
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Creates a <see cref="SyncOrchestrator"/> with inert substitute collaborators,
/// apart from the supplied oplog and confirmation stores. Local node id is "local".
/// </summary>
private static SyncOrchestrator CreateOrchestrator(IOplogStore oplogStore, IPeerOplogConfirmationStore confirmationStore)
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(Array.Empty<string>());
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
var snapshotService = Substitute.For<ISnapshotService>();
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "local" });
return new SyncOrchestrator(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
configProvider,
NullLoggerFactory.Instance,
confirmationStore);
}
/// <summary>
/// Creates a Put oplog entry in the "users" collection stamped with the given HLC values.
/// </summary>
private static OplogEntry CreateEntry(string nodeId, long wall, int logic, string hash)
{
return new OplogEntry(
"users",
$"{nodeId}-{wall}-{logic}",
OperationType.Put,
payload: null,
timestamp: new HlcTimestamp(wall, logic, nodeId),
previousHash: string.Empty,
hash: hash);
}
}

View File

@@ -0,0 +1,292 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
/// <summary>
/// Tests for the oplog prune-cutoff calculation and the maintenance pass that prunes
/// the oplog only once every tracked peer has an active confirmation.
/// </summary>
public class SyncOrchestratorMaintenancePruningTests
{
/// <summary>
/// Verifies that mixed peer confirmations produce the safest effective cutoff across peers and sources.
/// </summary>
[Fact]
public async Task CalculateEffectiveCutoffAsync_MixedPeerStates_ShouldUseSafestConfirmationAcrossPeers()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(500, 0, "node-local"));
vectorClock.SetTimestamp("node-secondary", new HlcTimestamp(450, 0, "node-secondary"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
// The tracked set includes a whitespace-only id; the calculator must tolerate it,
// otherwise that entry's missing confirmations would block the cutoff asserted below.
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(new[] { "peer-a", "peer-b", " " });
// peer-a: the inactive (500, 0) confirmation must be ignored, leaving (120, 1) as its lowest.
confirmationStore.GetConfirmationsForPeerAsync("peer-a", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-a", "node-local", wall: 300, logic: 0, isActive: true),
CreateConfirmation("peer-a", "node-secondary", wall: 120, logic: 1, isActive: true),
CreateConfirmation("peer-a", "node-secondary", wall: 500, logic: 0, isActive: false)
});
confirmationStore.GetConfirmationsForPeerAsync("peer-b", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-b", "node-local", wall: 250, logic: 0, isActive: true),
CreateConfirmation("peer-b", "node-secondary", wall: 180, logic: 0, isActive: true)
});
// Act
var decision = await calculator.CalculateEffectiveCutoffAsync(
new PeerNodeConfiguration
{
NodeId = "node-local",
OplogRetentionHours = 24
},
CancellationToken.None);
// Assert: the cutoff is the lowest active confirmation across all peers and sources —
// (120, 1) for node-secondary as confirmed by peer-a.
decision.HasCutoff.ShouldBeTrue();
decision.ConfirmationCutoff.HasValue.ShouldBeTrue();
decision.EffectiveCutoff.HasValue.ShouldBeTrue();
decision.ConfirmationCutoff.Value.PhysicalTime.ShouldBe(120);
decision.ConfirmationCutoff.Value.LogicalCounter.ShouldBe(1);
decision.ConfirmationCutoff.Value.NodeId.ShouldBe("node-secondary");
decision.EffectiveCutoff.Value.ShouldBe(decision.ConfirmationCutoff.Value);
}
/// <summary>
/// Verifies that removing a peer from tracking immediately restores pruning eligibility.
/// </summary>
[Fact]
public async Task CalculateEffectiveCutoffAsync_RemovingPeerFromTracking_ShouldImmediatelyRestoreEligibility()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(200, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
// Sequential returns: the first calculation sees both peers, the second only the active one.
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(
new[] { "peer-active", "peer-deprecated" },
new[] { "peer-active" });
confirmationStore.GetConfirmationsForPeerAsync("peer-active", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-active", "node-local", wall: 150, logic: 0, isActive: true)
});
// The deprecated peer has confirmed nothing, which blocks pruning while it is still tracked.
confirmationStore.GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
var configuration = new PeerNodeConfiguration
{
NodeId = "node-local",
OplogRetentionHours = 24
};
// Act & Assert: blocked while the unconfirmed peer is tracked...
var blockedDecision = await calculator.CalculateEffectiveCutoffAsync(configuration, CancellationToken.None);
blockedDecision.HasCutoff.ShouldBeFalse();
confirmationStore.ClearReceivedCalls();
// ...and eligible again as soon as that peer drops out of the tracked set.
var unblockedDecision = await calculator.CalculateEffectiveCutoffAsync(configuration, CancellationToken.None);
unblockedDecision.HasCutoff.ShouldBeTrue();
unblockedDecision.EffectiveCutoff.HasValue.ShouldBeTrue();
unblockedDecision.EffectiveCutoff.Value.PhysicalTime.ShouldBe(150);
unblockedDecision.EffectiveCutoff.Value.NodeId.ShouldBe("node-local");
await confirmationStore.Received(1).GetConfirmationsForPeerAsync("peer-active", Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that maintenance does not prune when peer confirmation is missing in a two-node topology.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldNotPruneBeforePeerConfirmation()
{
// Arrange: the single tracked peer has not confirmed anything yet.
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore, calculator);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(200, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(new[] { "node-peer" });
confirmationStore.GetConfirmationsForPeerAsync("node-peer", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
var config = new PeerNodeConfiguration
{
NodeId = "node-local",
MaintenanceIntervalMinutes = 1,
OplogRetentionHours = 24
};
// Act
await orchestrator.RunMaintenanceIfDueAsync(config, DateTime.UtcNow, CancellationToken.None);
// Assert: without a confirmation there is no safe cutoff, so nothing is pruned.
await oplogStore.DidNotReceive().PruneOplogAsync(Arg.Any<HlcTimestamp>(), Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that maintenance prunes after peer confirmation is available in a two-node topology.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldPruneAfterPeerConfirmation()
{
// Arrange: the peer has confirmed node-local up to (100, 0).
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore, calculator);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(200, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(new[] { "node-peer" });
confirmationStore.GetConfirmationsForPeerAsync("node-peer", Arg.Any<CancellationToken>())
.Returns(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "node-peer",
SourceNodeId = "node-local",
ConfirmedWall = 100,
ConfirmedLogic = 0,
ConfirmedHash = "hash-100",
IsActive = true
}
});
var config = new PeerNodeConfiguration
{
NodeId = "node-local",
MaintenanceIntervalMinutes = 1,
OplogRetentionHours = 24
};
// Act
await orchestrator.RunMaintenanceIfDueAsync(config, DateTime.UtcNow, CancellationToken.None);
// Assert: pruning runs exactly once, at the peer-confirmed timestamp.
await oplogStore.Received(1).PruneOplogAsync(
Arg.Is<HlcTimestamp>(timestamp =>
timestamp.PhysicalTime == 100 &&
timestamp.LogicalCounter == 0 &&
string.Equals(timestamp.NodeId, "node-local", StringComparison.Ordinal)),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that deprecated-node removal unblocks pruning on a subsequent maintenance run.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_DeprecatedNodeRemoval_ShouldUnblockPruning()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore, calculator);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(220, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
// Sequential returns: the first run still tracks the deprecated node, the second does not.
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(
new[] { "node-active", "node-deprecated" },
new[] { "node-active" });
confirmationStore.GetConfirmationsForPeerAsync("node-active", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("node-active", "node-local", wall: 100, logic: 0, isActive: true)
});
confirmationStore.GetConfirmationsForPeerAsync("node-deprecated", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
var config = new PeerNodeConfiguration
{
NodeId = "node-local",
MaintenanceIntervalMinutes = 1,
OplogRetentionHours = 24
};
var now = DateTime.UtcNow;
// Act & Assert: first run is blocked by the unconfirmed deprecated node.
await orchestrator.RunMaintenanceIfDueAsync(config, now, CancellationToken.None);
await oplogStore.DidNotReceive().PruneOplogAsync(Arg.Any<HlcTimestamp>(), Arg.Any<CancellationToken>());
// Second run is 2 minutes later (past the 1-minute maintenance interval) and prunes
// at the active peer's confirmed timestamp.
await orchestrator.RunMaintenanceIfDueAsync(config, now.AddMinutes(2), CancellationToken.None);
await oplogStore.Received(1).PruneOplogAsync(
Arg.Is<HlcTimestamp>(timestamp =>
timestamp.PhysicalTime == 100 &&
timestamp.LogicalCounter == 0 &&
string.Equals(timestamp.NodeId, "node-local", StringComparison.Ordinal)),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Creates a <see cref="SyncOrchestrator"/> with inert substitute collaborators, wiring in the
/// supplied stores and an explicit prune-cutoff calculator. Local node id is "node-local".
/// </summary>
private static SyncOrchestrator CreateOrchestrator(
IOplogStore oplogStore,
IPeerOplogConfirmationStore confirmationStore,
IOplogPruneCutoffCalculator cutoffCalculator)
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(Array.Empty<string>());
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
var snapshotService = Substitute.For<ISnapshotService>();
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "node-local" });
return new SyncOrchestrator(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
configProvider,
NullLoggerFactory.Instance,
confirmationStore,
telemetry: null,
oplogPruneCutoffCalculator: cutoffCalculator);
}
/// <summary>
/// Creates a confirmation record whose hash is derived from the wall/logic values.
/// </summary>
private static PeerOplogConfirmation CreateConfirmation(
string peerNodeId,
string sourceNodeId,
long wall,
int logic,
bool isActive)
{
return new PeerOplogConfirmation
{
PeerNodeId = peerNodeId,
SourceNodeId = sourceNodeId,
ConfirmedWall = wall,
ConfirmedLogic = logic,
ConfirmedHash = $"hash-{wall}-{logic}",
IsActive = isActive
};
}
}

View File

@@ -0,0 +1,108 @@
using System;
using System.IO;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class TelemetryTests : IDisposable
{
    private readonly string _tempFile;

    /// <summary>
    /// Initializes a new instance of the <see cref="TelemetryTests"/> class.
    /// </summary>
    public TelemetryTests() => _tempFile = Path.GetTempFileName();

    /// <summary>
    /// Cleans up temporary test artifacts created for telemetry persistence validation.
    /// </summary>
    public void Dispose()
    {
        if (File.Exists(_tempFile))
        {
            File.Delete(_tempFile);
        }
    }

    /// <summary>
    /// Verifies that telemetry metrics are recorded and persisted to disk.
    /// </summary>
    [Fact]
    public async Task Should_Record_And_Persist_Metrics()
    {
        // Arrange
        using var service = new NetworkTelemetryService(NullLogger<NetworkTelemetryService>.Instance, _tempFile);

        // Act: record two compression samples and one timed encryption span.
        service.RecordValue(MetricType.CompressionRatio, 0.5);
        service.RecordValue(MetricType.CompressionRatio, 0.7);
        using (var timer = service.StartMetric(MetricType.EncryptionTime))
        {
            await Task.Delay(10); // ensure the measured duration is > 0 ms
        }

        // Give the background channel time to drain, then force a flush to disk.
        await Task.Delay(500);
        service.ForcePersist();

        // Assert: the snapshot file exists and is non-empty.
        File.Exists(_tempFile).ShouldBeTrue();
        new FileInfo(_tempFile).Length.ShouldBeGreaterThan(0);

        using var stream = File.OpenRead(_tempFile);
        using var reader = new BinaryReader(stream);

        // Header: a one-byte format version followed by a unix-seconds timestamp close to "now".
        reader.ReadByte().ShouldBe((byte)1);
        long persistedAt = reader.ReadInt64();
        var unixNow = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
        persistedAt.ShouldBeInRange(unixNow - 5, unixNow + 5);

        // Body: one record per MetricType, each carrying four rolling-window averages.
        int metricTypeCount = Enum.GetValues(typeof(MetricType)).Length;
        bool sawCompressionAverage = false;
        bool sawEncryptionAverage = false;
        for (int typeIndex = 0; typeIndex < metricTypeCount; typeIndex++)
        {
            var metricType = (MetricType)reader.ReadInt32();
            for (int windowIndex = 0; windowIndex < 4; windowIndex++)
            {
                int windowSeconds = reader.ReadInt32(); // 60, 300, 600, 1800
                double average = reader.ReadDouble();

                if (metricType == MetricType.CompressionRatio && windowSeconds == 60)
                {
                    // Mean of the 0.5 and 0.7 samples recorded above.
                    average.ShouldBe(0.6, 0.001);
                    sawCompressionAverage = true;
                }

                if (metricType == MetricType.EncryptionTime && windowSeconds == 60)
                {
                    average.ShouldBeGreaterThan(0);
                    sawEncryptionAverage = true;
                }
            }
        }

        sawCompressionAverage.ShouldBeTrue();
        sawEncryptionAverage.ShouldBeTrue();
    }
}
}

View File

@@ -0,0 +1,264 @@
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
/// <summary>
/// Tests that vector-clock comparison selects the correct nodes to pull from and push to
/// during peer synchronization, including split-brain and node-join scenarios.
/// </summary>
public class VectorClockSyncTests
{
    /// <summary>
    /// Verifies sync pull selection includes only nodes where the remote clock is ahead.
    /// </summary>
    [Fact]
    public async Task VectorClockSync_ShouldPullOnlyNodesWithUpdates()
    {
        // Arrange
        var (localStore, localVectorClock, _) = CreatePeerStore();
        var (remoteStore, remoteVectorClock, remoteOplogEntries) = CreatePeerStore();

        // Local knows about node1 and node2.
        localVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
        localVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));

        // Remote has updates for node1 only.
        remoteVectorClock.SetTimestamp("node1", new HlcTimestamp(200, 5, "node1"));
        remoteVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));

        // Remote-side oplog entries originating from node1.
        remoteOplogEntries.Add(new OplogEntry(
            "users", "user1", OperationType.Put,
            ParseJson("{\"name\":\"Alice\"}"),
            new HlcTimestamp(150, 2, "node1"), "", "hash1"
        ));
        remoteOplogEntries.Add(new OplogEntry(
            "users", "user2", OperationType.Put,
            ParseJson("{\"name\":\"Bob\"}"),
            new HlcTimestamp(200, 5, "node1"), "hash1", "hash2"
        ));

        // Act
        var localVC = await localStore.GetVectorClockAsync(default);
        var remoteVC = remoteVectorClock;
        var nodesToPull = localVC.GetNodesWithUpdates(remoteVC).ToList();

        // Assert
        nodesToPull.Count.ShouldBe(1);
        nodesToPull.ShouldContain("node1");

        // Simulate the pull: both node1 entries newer than the local timestamp come back.
        foreach (var nodeId in nodesToPull)
        {
            var localTs = localVC.GetTimestamp(nodeId);
            var changes = await remoteStore.GetOplogForNodeAfterAsync(nodeId, localTs, default);
            changes.Count().ShouldBe(2);
        }
    }

    /// <summary>
    /// Verifies sync push selection includes only nodes where the local clock is ahead.
    /// </summary>
    [Fact]
    public async Task VectorClockSync_ShouldPushOnlyNodesWithLocalUpdates()
    {
        // Arrange
        var (localStore, localVectorClock, localOplogEntries) = CreatePeerStore();
        var (_, remoteVectorClock, _) = CreatePeerStore();

        // Local has updates for node1.
        localVectorClock.SetTimestamp("node1", new HlcTimestamp(200, 5, "node1"));
        localVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));

        // Remote is behind on node1.
        remoteVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
        remoteVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));

        // One local oplog entry for node1 newer than the remote's timestamp.
        localOplogEntries.Add(new OplogEntry(
            "users", "user1", OperationType.Put,
            ParseJson("{\"name\":\"Charlie\"}"),
            new HlcTimestamp(150, 2, "node1"), "", "hash1"
        ));

        // Act
        var localVC = localVectorClock;
        var remoteVC = remoteVectorClock;
        var nodesToPush = localVC.GetNodesToPush(remoteVC).ToList();

        // Assert
        nodesToPush.Count.ShouldBe(1);
        nodesToPush.ShouldContain("node1");

        // Simulate the push: exactly the one newer entry is selected.
        foreach (var nodeId in nodesToPush)
        {
            var remoteTs = remoteVC.GetTimestamp(nodeId);
            var changes = await localStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, default);
            changes.Count().ShouldBe(1);
        }
    }

    /// <summary>
    /// Verifies split-brain clocks result in bidirectional synchronization requirements.
    /// </summary>
    [Fact]
    public async Task VectorClockSync_SplitBrain_ShouldSyncBothDirections()
    {
        // Arrange - simulating split brain: each partition advanced different nodes.
        var (partition1Store, partition1VectorClock, partition1OplogEntries) = CreatePeerStore();
        var (partition2Store, partition2VectorClock, partition2OplogEntries) = CreatePeerStore();

        // Partition 1 has node1 and node2 updates.
        partition1VectorClock.SetTimestamp("node1", new HlcTimestamp(300, 5, "node1"));
        partition1VectorClock.SetTimestamp("node2", new HlcTimestamp(200, 3, "node2"));
        partition1VectorClock.SetTimestamp("node3", new HlcTimestamp(50, 1, "node3"));

        // Partition 2 has node3 updates.
        partition2VectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
        partition2VectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
        partition2VectorClock.SetTimestamp("node3", new HlcTimestamp(400, 8, "node3"));

        partition1OplogEntries.Add(new OplogEntry(
            "users", "user1", OperationType.Put,
            ParseJson("{\"name\":\"P1User\"}"),
            new HlcTimestamp(300, 5, "node1"), "", "hash_p1"
        ));
        partition2OplogEntries.Add(new OplogEntry(
            "users", "user2", OperationType.Put,
            ParseJson("{\"name\":\"P2User\"}"),
            new HlcTimestamp(400, 8, "node3"), "", "hash_p2"
        ));

        // Act
        var vc1 = partition1VectorClock;
        var vc2 = partition2VectorClock;
        var relation = vc1.CompareTo(vc2);
        var partition1NeedsToPull = vc1.GetNodesWithUpdates(vc2).ToList();
        var partition1NeedsToPush = vc1.GetNodesToPush(vc2).ToList();

        // Assert
        relation.ShouldBe(CausalityRelation.Concurrent);

        // Partition 1 needs to pull node3.
        partition1NeedsToPull.Count.ShouldBe(1);
        partition1NeedsToPull.ShouldContain("node3");

        // Partition 1 needs to push node1 and node2.
        partition1NeedsToPush.Count.ShouldBe(2);
        partition1NeedsToPush.ShouldContain("node1");
        partition1NeedsToPush.ShouldContain("node2");

        // Verify data can be synced in both directions.
        var changesToPull = await partition2Store.GetOplogForNodeAfterAsync("node3", vc1.GetTimestamp("node3"), default);
        changesToPull.Count().ShouldBe(1);
        var changesToPush = await partition1Store.GetOplogForNodeAfterAsync("node1", vc2.GetTimestamp("node1"), default);
        changesToPush.Count().ShouldBe(1);
    }

    /// <summary>
    /// Verifies no pull or push is required when vector clocks are equal.
    /// </summary>
    [Fact]
    public void VectorClockSync_EqualClocks_ShouldNotSync()
    {
        // Arrange
        var vc1 = new VectorClock();
        vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
        vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
        var vc2 = new VectorClock();
        vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
        vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));

        // Act
        var relation = vc1.CompareTo(vc2);
        var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList();
        var nodesToPush = vc1.GetNodesToPush(vc2).ToList();

        // Assert
        relation.ShouldBe(CausalityRelation.Equal);
        nodesToPull.ShouldBeEmpty();
        nodesToPush.ShouldBeEmpty();
    }

    /// <summary>
    /// Verifies a newly observed node is detected as a required pull source.
    /// </summary>
    [Fact]
    public async Task VectorClockSync_NewNodeJoins_ShouldBeDetected()
    {
        // Arrange - simulating a new node joining the cluster.
        var (_, existingNodeVectorClock, _) = CreatePeerStore();
        existingNodeVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
        existingNodeVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));

        var (newNodeStore, newNodeVectorClock, newNodeOplogEntries) = CreatePeerStore();
        newNodeVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
        newNodeVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
        newNodeVectorClock.SetTimestamp("node3", new HlcTimestamp(50, 1, "node3")); // New node
        newNodeOplogEntries.Add(new OplogEntry(
            "users", "user3", OperationType.Put,
            ParseJson("{\"name\":\"NewNode\"}"),
            new HlcTimestamp(50, 1, "node3"), "", "hash_new"
        ));

        // Act
        var existingVC = existingNodeVectorClock;
        var newNodeVC = newNodeVectorClock;
        var nodesToPull = existingVC.GetNodesWithUpdates(newNodeVC).ToList();

        // Assert
        nodesToPull.Count.ShouldBe(1);
        nodesToPull.ShouldContain("node3");
        var changes = await newNodeStore.GetOplogForNodeAfterAsync("node3", existingVC.GetTimestamp("node3"), default);
        changes.Count().ShouldBe(1);
    }

    /// <summary>
    /// Parses a JSON string into a <see cref="System.Text.Json.JsonElement"/> oplog payload.
    /// </summary>
    private static System.Text.Json.JsonElement ParseJson(string json) =>
        System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(json);

    /// <summary>
    /// Creates an <see cref="IOplogStore"/> substitute backed by an in-memory vector clock and
    /// oplog list; GetOplogForNodeAfterAsync filters by source node, timestamp, and optional
    /// collection names, returning the matches in timestamp order.
    /// </summary>
    private static (IOplogStore Store, VectorClock VectorClock, List<OplogEntry> OplogEntries) CreatePeerStore()
    {
        var vectorClock = new VectorClock();
        var oplogEntries = new List<OplogEntry>();
        var store = Substitute.For<IOplogStore>();
        store.GetVectorClockAsync(Arg.Any<CancellationToken>())
            .Returns(Task.FromResult(vectorClock));
        store.GetOplogForNodeAfterAsync(
                Arg.Any<string>(),
                Arg.Any<HlcTimestamp>(),
                Arg.Any<IEnumerable<string>?>(),
                Arg.Any<CancellationToken>())
            .Returns(callInfo =>
            {
                var nodeId = callInfo.ArgAt<string>(0);
                var since = callInfo.ArgAt<HlcTimestamp>(1);
                var collections = callInfo.ArgAt<IEnumerable<string>?>(2)?.ToList();
                IEnumerable<OplogEntry> query = oplogEntries
                    .Where(e => e.Timestamp.NodeId == nodeId && e.Timestamp.CompareTo(since) > 0);
                if (collections is { Count: > 0 })
                {
                    query = query.Where(e => collections.Contains(e.Collection));
                }
                return Task.FromResult<IEnumerable<OplogEntry>>(query.OrderBy(e => e.Timestamp).ToList());
            });
        return (store, vectorClock, oplogEntries);
    }
}

View File

@@ -0,0 +1,31 @@
<!-- Unit-test project for ZB.MOM.WW.CBDDC.Network (xUnit v3 + NSubstitute + Shouldly). -->
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Network.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Network.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Network.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<!-- Suppress xUnit analyzer rules xUnit1031 and xUnit1051 project-wide. -->
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<!-- Test assembly: never packed/published as a NuGet package. -->
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<!-- Test runtime, coverage collector, and assertion/mocking libraries. -->
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<!-- Make Xunit available in every file via implicit global usings. -->
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<!-- System under test. -->
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" />
</ItemGroup>
</Project>