Reformat/cleanup
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m10s

This commit is contained in:
Joseph Doherty
2026-02-21 07:53:53 -05:00
parent c6f6d9329a
commit 7ebc2cb567
160 changed files with 7258 additions and 7262 deletions

View File

@@ -1,14 +1,12 @@
using System.Reflection;
using System.Text.RegularExpressions;
using System.Xml.Linq;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class ArchitectureFitnessTests
{
/// <summary>
/// Verifies that the core assembly does not reference outer-layer assemblies.
/// Verifies that the core assembly does not reference outer-layer assemblies.
/// </summary>
[Fact]
public void CoreAssembly_ShouldNotReferenceOuterAssemblies()
@@ -25,71 +23,71 @@ public class ArchitectureFitnessTests
}
/// <summary>
/// Verifies that project references under src form an acyclic graph.
/// Verifies that project references under src form an acyclic graph.
/// </summary>
[Fact]
public void SourceProjectGraph_ShouldBeAcyclic()
{
var repoRoot = FindRepoRoot();
var srcRoot = Path.Combine(repoRoot, "src");
string repoRoot = FindRepoRoot();
string srcRoot = Path.Combine(repoRoot, "src");
var projectFiles = Directory
.EnumerateFiles(srcRoot, "*.csproj", SearchOption.AllDirectories)
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal))
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}",
StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}",
StringComparison.Ordinal))
.ToList();
var nodes = projectFiles.ToDictionary(
p => Path.GetFileNameWithoutExtension(p),
p => new HashSet<string>(StringComparer.Ordinal));
foreach (var projectFile in projectFiles)
foreach (string projectFile in projectFiles)
{
var projectName = Path.GetFileNameWithoutExtension(projectFile);
string projectName = Path.GetFileNameWithoutExtension(projectFile);
var doc = XDocument.Load(projectFile);
var refs = doc.Descendants("ProjectReference")
.Select(x => x.Attribute("Include")?.Value)
.Where(v => !string.IsNullOrWhiteSpace(v))
.Select(v => Path.GetFileNameWithoutExtension(v!.Replace('\\', '/')));
foreach (var reference in refs)
{
foreach (string reference in refs)
if (nodes.ContainsKey(reference))
{
nodes[projectName].Add(reference);
}
}
}
HasCycle(nodes).ShouldBeFalse();
}
/// <summary>
/// Verifies the allowed dependency graph between source projects.
/// Verifies the allowed dependency graph between source projects.
/// </summary>
[Fact]
public void SourceProjectReferences_ShouldMatchAllowedDependencyGraph()
{
var repoRoot = FindRepoRoot();
var srcRoot = Path.Combine(repoRoot, "src");
string repoRoot = FindRepoRoot();
string srcRoot = Path.Combine(repoRoot, "src");
var projectFiles = Directory
.EnumerateFiles(srcRoot, "*.csproj", SearchOption.AllDirectories)
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal))
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}",
StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}",
StringComparison.Ordinal))
.ToList();
var allowedDependencies = new Dictionary<string, HashSet<string>>(StringComparer.Ordinal)
{
["ZB.MOM.WW.CBDDC.Core"] = new HashSet<string>(StringComparer.Ordinal),
["ZB.MOM.WW.CBDDC.Network"] = new HashSet<string>(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" },
["ZB.MOM.WW.CBDDC.Persistence"] = new HashSet<string>(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" },
["ZB.MOM.WW.CBDDC.Hosting"] = new HashSet<string>(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Network" }
["ZB.MOM.WW.CBDDC.Core"] = new(StringComparer.Ordinal),
["ZB.MOM.WW.CBDDC.Network"] = new(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" },
["ZB.MOM.WW.CBDDC.Persistence"] = new(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" },
["ZB.MOM.WW.CBDDC.Hosting"] = new(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Network" }
};
foreach (var projectFile in projectFiles)
foreach (string projectFile in projectFiles)
{
var projectName = Path.GetFileNameWithoutExtension(projectFile);
string projectName = Path.GetFileNameWithoutExtension(projectFile);
allowedDependencies.ContainsKey(projectName)
.ShouldBeTrue($"Unexpected source project found: {projectName}");
@@ -105,18 +103,19 @@ public class ArchitectureFitnessTests
var missing = expected.Where(e => !references.Contains(e)).ToList();
extra.ShouldBeEmpty($"Project {projectName} has disallowed references: {string.Join(", ", extra)}");
missing.ShouldBeEmpty($"Project {projectName} is missing required references: {string.Join(", ", missing)}");
missing.ShouldBeEmpty(
$"Project {projectName} is missing required references: {string.Join(", ", missing)}");
}
}
/// <summary>
/// Verifies non-generic ILogger usage is restricted to explicit compatibility shims.
/// Verifies non-generic ILogger usage is restricted to explicit compatibility shims.
/// </summary>
[Fact]
public void SourceCode_ShouldRestrictNonGenericILoggerUsage()
{
var repoRoot = FindRepoRoot();
var srcRoot = Path.Combine(repoRoot, "src");
string repoRoot = FindRepoRoot();
string srcRoot = Path.Combine(repoRoot, "src");
var loggerPattern = new Regex(@"\bILogger\b(?!\s*<|\s*Factory\b)", RegexOptions.Compiled);
var allowedSnippets = new[]
@@ -130,45 +129,39 @@ public class ArchitectureFitnessTests
var violations = new List<string>();
var sourceFiles = Directory.EnumerateFiles(srcRoot, "*.cs", SearchOption.AllDirectories)
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal));
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}",
StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}",
StringComparison.Ordinal));
foreach (var file in sourceFiles)
foreach (string file in sourceFiles)
{
var lines = File.ReadAllLines(file);
string[] lines = File.ReadAllLines(file);
for (var i = 0; i < lines.Length; i++)
{
var line = lines[i].Trim();
if (string.IsNullOrWhiteSpace(line) || line.StartsWith("//", StringComparison.Ordinal))
{
continue;
}
string line = lines[i].Trim();
if (string.IsNullOrWhiteSpace(line) || line.StartsWith("//", StringComparison.Ordinal)) continue;
if (!loggerPattern.IsMatch(line))
{
continue;
}
if (!loggerPattern.IsMatch(line)) continue;
if (allowedSnippets.Any(line.Contains))
{
continue;
}
if (allowedSnippets.Any(line.Contains)) continue;
var relativePath = Path.GetRelativePath(repoRoot, file).Replace('\\', '/');
string relativePath = Path.GetRelativePath(repoRoot, file).Replace('\\', '/');
violations.Add($"{relativePath}:{i + 1} -> {line}");
}
}
violations.ShouldBeEmpty($"Unexpected non-generic ILogger usage:{Environment.NewLine}{string.Join(Environment.NewLine, violations)}");
violations.ShouldBeEmpty(
$"Unexpected non-generic ILogger usage:{Environment.NewLine}{string.Join(Environment.NewLine, violations)}");
}
/// <summary>
/// Verifies log boundaries push operation context for hosted/background entry points.
/// Verifies log boundaries push operation context for hosted/background entry points.
/// </summary>
[Fact]
public void BoundaryServices_ShouldPushOperationLogContext()
{
var repoRoot = FindRepoRoot();
string repoRoot = FindRepoRoot();
var boundaryFiles = new[]
{
"src/ZB.MOM.WW.CBDDC.Network/CBDDCNodeService.cs",
@@ -180,24 +173,24 @@ public class ArchitectureFitnessTests
"src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpSyncOrchestrator.cs"
};
foreach (var relativePath in boundaryFiles)
foreach (string relativePath in boundaryFiles)
{
var filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
string filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
File.Exists(filePath).ShouldBeTrue($"Missing expected boundary file: {relativePath}");
var contents = File.ReadAllText(filePath);
string contents = File.ReadAllText(filePath);
contents.Contains("LogContext.PushProperty(\"OperationId\"", StringComparison.Ordinal)
.ShouldBeTrue($"Boundary file is missing OperationId log enrichment: {relativePath}");
}
}
/// <summary>
/// Verifies boundary projects include Serilog for LogContext support.
/// Verifies boundary projects include Serilog for LogContext support.
/// </summary>
[Fact]
public void BoundaryProjects_ShouldReferenceSerilog()
{
var repoRoot = FindRepoRoot();
string repoRoot = FindRepoRoot();
var projects = new[]
{
"src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj",
@@ -205,12 +198,12 @@ public class ArchitectureFitnessTests
"samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj"
};
foreach (var relativePath in projects)
foreach (string relativePath in projects)
{
var filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
string filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
File.Exists(filePath).ShouldBeTrue($"Missing project file: {relativePath}");
var contents = File.ReadAllText(filePath);
string contents = File.ReadAllText(filePath);
contents.Contains("<PackageReference Include=\"Serilog\"", StringComparison.Ordinal)
.ShouldBeTrue($"Serilog package reference is required for logging boundary enrichment: {relativePath}");
}
@@ -218,13 +211,10 @@ public class ArchitectureFitnessTests
private static string FindRepoRoot()
{
var dir = AppContext.BaseDirectory;
string dir = AppContext.BaseDirectory;
for (var i = 0; i < 10 && !string.IsNullOrWhiteSpace(dir); i++)
{
if (File.Exists(Path.Combine(dir, "CBDDC.slnx")))
{
return dir;
}
if (File.Exists(Path.Combine(dir, "CBDDC.slnx"))) return dir;
dir = Directory.GetParent(dir)?.FullName ?? string.Empty;
}
@@ -239,24 +229,14 @@ public class ArchitectureFitnessTests
bool Dfs(string node)
{
if (visiting.Contains(node))
{
return true;
}
if (visiting.Contains(node)) return true;
if (!visited.Add(node))
{
return false;
}
if (!visited.Add(node)) return false;
visiting.Add(node);
foreach (var next in graph[node])
{
foreach (string next in graph[node])
if (Dfs(next))
{
return true;
}
}
visiting.Remove(node);
return false;
@@ -264,4 +244,4 @@ public class ArchitectureFitnessTests
return graph.Keys.Any(Dfs);
}
}
}

View File

@@ -1,2 +1,2 @@
global using NSubstitute;
global using Shouldly;
global using Shouldly;

View File

@@ -1,77 +1,73 @@
using System;
using System.Text.Json;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
using System.Globalization;
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Core.Tests
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class OplogEntryTests
{
public class OplogEntryTests
{
/// <summary>
/// Verifies that hash computation is deterministic even when payload content differs.
/// </summary>
[Fact]
public void ComputeHash_ShouldBeDeterministic_RegardlessOfPayload()
{
// Arrange
var collection = "test-collection";
var key = "test-key";
var op = OperationType.Put;
var timestamp = new HlcTimestamp(100, 0, "node-1");
var prevHash = "prev-hash";
/// <summary>
/// Verifies that hash computation is deterministic even when payload content differs.
/// </summary>
[Fact]
public void ComputeHash_ShouldBeDeterministic_RegardlessOfPayload()
{
// Arrange
var collection = "test-collection";
var key = "test-key";
var op = OperationType.Put;
var timestamp = new HlcTimestamp(100, 0, "node-1");
var prevHash = "prev-hash";
var payload1 = JsonDocument.Parse("{\"prop\": 1}").RootElement;
var payload2 = JsonDocument.Parse("{\"prop\": 2, \"extra\": \"whitespace\"}").RootElement;
var payload1 = JsonDocument.Parse("{\"prop\": 1}").RootElement;
var payload2 = JsonDocument.Parse("{\"prop\": 2, \"extra\": \"whitespace\"}").RootElement;
// Act
var entry1 = new OplogEntry(collection, key, op, payload1, timestamp, prevHash);
var entry2 = new OplogEntry(collection, key, op, payload2, timestamp, prevHash);
// Assert
entry2.Hash.ShouldBe(entry1.Hash);
}
/// <summary>
/// Verifies that hash computation uses invariant culture formatting for timestamp values.
/// </summary>
[Fact]
public void ComputeHash_ShouldUseInvariantCulture_ForTimestamp()
{
// Arrange
var originalCulture = CultureInfo.CurrentCulture;
try
{
var culture = CultureInfo.GetCultureInfo("de-DE");
CultureInfo.CurrentCulture = culture;
var timestamp = new HlcTimestamp(123456789, 1, "node");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
// Act
var entry1 = new OplogEntry(collection, key, op, payload1, timestamp, prevHash);
var entry2 = new OplogEntry(collection, key, op, payload2, timestamp, prevHash);
string hash = entry.ComputeHash();
// Assert
entry2.Hash.ShouldBe(entry1.Hash);
}
/// <summary>
/// Verifies that hash computation uses invariant culture formatting for timestamp values.
/// </summary>
[Fact]
public void ComputeHash_ShouldUseInvariantCulture_ForTimestamp()
{
// Arrange
var originalCulture = CultureInfo.CurrentCulture;
try
{
var culture = CultureInfo.GetCultureInfo("de-DE");
CultureInfo.CurrentCulture = culture;
var timestamp = new HlcTimestamp(123456789, 1, "node");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
// Act
var hash = entry.ComputeHash();
// Assert
CultureInfo.CurrentCulture = CultureInfo.InvariantCulture;
var expectedEntry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
hash.ShouldBe(expectedEntry.Hash);
}
finally
{
CultureInfo.CurrentCulture = originalCulture;
}
}
/// <summary>
/// Verifies that an entry is valid when its stored hash matches computed content.
/// </summary>
[Fact]
public void IsValid_ShouldReturnTrue_WhenHashMatches()
{
var timestamp = new HlcTimestamp(100, 0, "node-1");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
entry.IsValid().ShouldBeTrue();
CultureInfo.CurrentCulture = CultureInfo.InvariantCulture;
var expectedEntry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
hash.ShouldBe(expectedEntry.Hash);
}
finally
{
CultureInfo.CurrentCulture = originalCulture;
}
}
}
/// <summary>
/// Verifies that an entry is valid when its stored hash matches computed content.
/// </summary>
[Fact]
public void IsValid_ShouldReturnTrue_WhenHashMatches()
{
var timestamp = new HlcTimestamp(100, 0, "node-1");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
entry.IsValid().ShouldBeTrue();
}
}

View File

@@ -6,7 +6,8 @@ namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class PeerManagementServiceTests
{
/// <summary>
/// Verifies that removing peer tracking with remote removal enabled removes both tracking and remote peer configuration.
/// Verifies that removing peer tracking with remote removal enabled removes both tracking and remote peer
/// configuration.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_WhenRemoveRemoteConfigTrue_RemovesTrackingAndRemoteConfig()
@@ -16,14 +17,14 @@ public class PeerManagementServiceTests
var service = new PeerManagementService(configStore, confirmationStore);
var token = new CancellationTokenSource().Token;
await service.RemovePeerTrackingAsync("peer-1", removeRemoteConfig: true, token);
await service.RemovePeerTrackingAsync("peer-1", true, token);
await confirmationStore.Received(1).RemovePeerTrackingAsync("peer-1", token);
await configStore.Received(1).RemoveRemotePeerAsync("peer-1", token);
}
/// <summary>
/// Verifies that removing peer tracking with remote removal disabled removes only tracking data.
/// Verifies that removing peer tracking with remote removal disabled removes only tracking data.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_WhenRemoveRemoteConfigFalse_RemovesTrackingOnly()
@@ -32,14 +33,14 @@ public class PeerManagementServiceTests
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var service = new PeerManagementService(configStore, confirmationStore);
await service.RemovePeerTrackingAsync("peer-1", removeRemoteConfig: false);
await service.RemovePeerTrackingAsync("peer-1", false);
await confirmationStore.Received(1).RemovePeerTrackingAsync("peer-1", Arg.Any<CancellationToken>());
await configStore.DidNotReceive().RemoveRemotePeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that removing a remote peer delegates to tracking removal with remote configuration cleanup enabled.
/// Verifies that removing a remote peer delegates to tracking removal with remote configuration cleanup enabled.
/// </summary>
[Fact]
public async Task RemoveRemotePeerAsync_DelegatesToTrackingRemovalWithRemoteConfig()
@@ -56,7 +57,7 @@ public class PeerManagementServiceTests
}
/// <summary>
/// Verifies that removing peer tracking with an invalid node identifier throws an <see cref="ArgumentException"/>.
/// Verifies that removing peer tracking with an invalid node identifier throws an <see cref="ArgumentException" />.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_WhenNodeIdInvalid_ThrowsArgumentException()
@@ -65,9 +66,10 @@ public class PeerManagementServiceTests
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var service = new PeerManagementService(configStore, confirmationStore);
await Should.ThrowAsync<ArgumentException>(() => service.RemovePeerTrackingAsync(" ", removeRemoteConfig: true));
await Should.ThrowAsync<ArgumentException>(() => service.RemovePeerTrackingAsync(" ", true));
await confirmationStore.DidNotReceive().RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive()
.RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>());
await configStore.DidNotReceive().RemoveRemotePeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>());
}
}
}

View File

@@ -6,43 +6,43 @@ namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class PerformanceRegressionTests
{
private readonly RecursiveNodeMergeConflictResolver _resolver;
private readonly Dictionary<string, int> _limits;
private readonly RecursiveNodeMergeConflictResolver _resolver;
/// <summary>
/// Initializes a new instance of the <see cref="PerformanceRegressionTests"/> class.
/// Initializes a new instance of the <see cref="PerformanceRegressionTests" /> class.
/// </summary>
public PerformanceRegressionTests()
{
_resolver = new RecursiveNodeMergeConflictResolver();
// Load limits
var json = File.ReadAllText("benchmark_limits.json");
string json = File.ReadAllText("benchmark_limits.json");
_limits = JsonSerializer.Deserialize<Dictionary<string, int>>(json) ?? new Dictionary<string, int>();
}
private Document CreateDoc(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
string json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new Document("test", key, element, ts, false);
}
private OplogEntry CreateOp(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
string json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new OplogEntry("test", key, OperationType.Put, element, ts, string.Empty);
}
/// <summary>
/// Verifies simple recursive merge operations stay within configured performance limits.
/// Verifies simple recursive merge operations stay within configured performance limits.
/// </summary>
[Fact]
public void RecursiveMerge_Simple_ShouldBeWithinLimits()
{
int iterations = 10000;
string limitKey = "RecursiveMerge_Simple_10k_Ops_MaxMs";
var iterations = 10000;
var limitKey = "RecursiveMerge_Simple_10k_Ops_MaxMs";
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
@@ -50,72 +50,61 @@ public class PerformanceRegressionTests
var op = CreateOp("k1", new { name = "Bob", age = 31 }, ts2);
// Warmup
for (int i = 0; i < 100; i++) _resolver.Resolve(doc, op);
for (var i = 0; i < 100; i++) _resolver.Resolve(doc, op);
// Run
var sw = Stopwatch.StartNew();
for (int i = 0; i < iterations; i++)
{
_resolver.Resolve(doc, op);
}
for (var i = 0; i < iterations; i++) _resolver.Resolve(doc, op);
sw.Stop();
long elapsed = sw.ElapsedMilliseconds;
Console.WriteLine($"Executed {iterations} merges in {elapsed}ms");
if (_limits.TryGetValue(limitKey, out int maxMs))
{
elapsed.ShouldBeLessThan(maxMs, $"Performance regression! Expected < {maxMs}ms but took {elapsed}ms");
}
else
{
Console.WriteLine($"Warning: No limit found for key '{limitKey}'");
}
}
/// <summary>
/// Verifies deep array recursive merge operations stay within configured performance limits.
/// Verifies deep array recursive merge operations stay within configured performance limits.
/// </summary>
[Fact]
public void RecursiveMerge_DeepArray_ShouldBeWithinLimits()
{
int iterations = 1000; // Lower iterations for heavier op
string limitKey = "RecursiveMerge_Array_1k_Ops_MaxMs";
var iterations = 1000; // Lower iterations for heavier op
var limitKey = "RecursiveMerge_Array_1k_Ops_MaxMs";
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
var items = new List<object>();
for (int i = 0; i < 100; i++) items.Add(new { id = i.ToString(), val = i });
for (var i = 0; i < 100; i++) items.Add(new { id = i.ToString(), val = i });
var doc = CreateDoc("k1", new { items = items }, ts1);
var op = CreateDoc("k1", new { items = items }, ts2).ToOplogEntry(OperationType.Put); // Same content to force id check traversal
var doc = CreateDoc("k1", new { items }, ts1);
var op = CreateDoc("k1", new { items }, ts2)
.ToOplogEntry(OperationType.Put); // Same content to force id check traversal
// Warmup
_resolver.Resolve(doc, op);
// Run
var sw = Stopwatch.StartNew();
for (int i = 0; i < iterations; i++)
{
_resolver.Resolve(doc, op);
}
for (var i = 0; i < iterations; i++) _resolver.Resolve(doc, op);
sw.Stop();
long elapsed = sw.ElapsedMilliseconds;
Console.WriteLine($"Executed {iterations} array merges in {elapsed}ms");
if (_limits.TryGetValue(limitKey, out int maxMs))
{
elapsed.ShouldBeLessThan(maxMs, $"Performance regression! Expected < {maxMs}ms but took {elapsed}ms");
}
}
}
public static class DocExt
{
/// <summary>
/// Creates an operation log entry from a document instance.
/// Creates an operation log entry from a document instance.
/// </summary>
/// <param name="d">The source document.</param>
/// <param name="t">The operation type to apply to the created entry.</param>
@@ -124,4 +113,4 @@ public static class DocExt
{
return new OplogEntry(d.Collection, d.Key, t, d.Content, d.UpdatedAt, string.Empty);
}
}
}

View File

@@ -8,7 +8,7 @@ public class RecursiveNodeMergeConflictResolverTests
private readonly RecursiveNodeMergeConflictResolver _resolver;
/// <summary>
/// Initializes a new instance of the <see cref="RecursiveNodeMergeConflictResolverTests"/> class.
/// Initializes a new instance of the <see cref="RecursiveNodeMergeConflictResolverTests" /> class.
/// </summary>
public RecursiveNodeMergeConflictResolverTests()
{
@@ -17,20 +17,20 @@ public class RecursiveNodeMergeConflictResolverTests
private Document CreateDoc(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
string json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new Document("test", key, element, ts, false);
}
private OplogEntry CreateOp(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
string json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new OplogEntry("test", key, OperationType.Put, element, ts, string.Empty);
}
/// <summary>
/// Verifies that disjoint fields are merged into a single document.
/// Verifies that disjoint fields are merged into a single document.
/// </summary>
[Fact]
public void Resolve_ShouldMergeDisjointFields()
@@ -56,7 +56,7 @@ public class RecursiveNodeMergeConflictResolverTests
}
/// <summary>
/// Verifies that primitive collisions are resolved using the higher timestamp value.
/// Verifies that primitive collisions are resolved using the higher timestamp value.
/// </summary>
[Fact]
public void Resolve_ShouldPrioritizeHigherTimestamp_PrimitiveCollision()
@@ -81,7 +81,7 @@ public class RecursiveNodeMergeConflictResolverTests
}
/// <summary>
/// Verifies that nested object content is merged recursively.
/// Verifies that nested object content is merged recursively.
/// </summary>
[Fact]
public void Resolve_ShouldRecursivelyMergeObjects()
@@ -104,7 +104,7 @@ public class RecursiveNodeMergeConflictResolverTests
}
/// <summary>
/// Verifies that arrays containing object identifiers are merged by item identity.
/// Verifies that arrays containing object identifiers are merged by item identity.
/// </summary>
[Fact]
public void Resolve_ShouldMergeArraysById()
@@ -115,7 +115,8 @@ public class RecursiveNodeMergeConflictResolverTests
var doc = CreateDoc("k1", new
{
items = new[] {
items = new[]
{
new { id = "1", val = "A" },
new { id = "2", val = "B" }
}
@@ -123,9 +124,10 @@ public class RecursiveNodeMergeConflictResolverTests
var op = CreateOp("k1", new
{
items = new[] {
items = new[]
{
new { id = "1", val = "A-Updated" }, // Update
new { id = "3", val = "C" } // Insert
new { id = "3", val = "C" } // Insert
}
}, ts2);
@@ -133,14 +135,14 @@ public class RecursiveNodeMergeConflictResolverTests
var result = _resolver.Resolve(doc, op);
// Assert
Action<JsonElement> validate = (root) =>
Action<JsonElement> validate = root =>
{
var items = root.GetProperty("items");
items.GetArrayLength().ShouldBe(3);
// Order is not guaranteed, so find by id
// But simplified test checking content exists
var text = items.GetRawText();
string text = items.GetRawText();
text.ShouldContain("A-Updated");
text.ShouldContain("B");
text.ShouldContain("C");
@@ -150,7 +152,7 @@ public class RecursiveNodeMergeConflictResolverTests
}
/// <summary>
/// Verifies that primitive arrays fall back to last-write-wins behavior.
/// Verifies that primitive arrays fall back to last-write-wins behavior.
/// </summary>
[Fact]
public void Resolve_ShouldFallbackToLWW_ForPrimitiveArrays()
@@ -170,4 +172,4 @@ public class RecursiveNodeMergeConflictResolverTests
tags.GetArrayLength().ShouldBe(1);
tags[0].GetString().ShouldBe("c");
}
}
}

View File

@@ -1,17 +1,13 @@
using ZB.MOM.WW.CBDDC.Core;
using System.Linq;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class VectorClockTests
{
/// <summary>
/// Verifies an empty vector clock returns the default timestamp for unknown nodes.
/// </summary>
[Fact]
public void EmptyVectorClock_ShouldReturnDefaultTimestamp()
{
public class VectorClockTests
{
/// <summary>
/// Verifies an empty vector clock returns the default timestamp for unknown nodes.
/// </summary>
[Fact]
public void EmptyVectorClock_ShouldReturnDefaultTimestamp()
{
// Arrange
var vc = new VectorClock();
@@ -19,15 +15,15 @@ public class VectorClockTests
var ts = vc.GetTimestamp("node1");
// Assert
ts.ShouldBe(default(HlcTimestamp));
}
/// <summary>
/// Verifies setting a timestamp stores it for the specified node.
/// </summary>
[Fact]
public void SetTimestamp_ShouldStoreTimestamp()
{
ts.ShouldBe(default);
}
/// <summary>
/// Verifies setting a timestamp stores it for the specified node.
/// </summary>
[Fact]
public void SetTimestamp_ShouldStoreTimestamp()
{
// Arrange
var vc = new VectorClock();
var ts = new HlcTimestamp(100, 1, "node1");
@@ -36,15 +32,15 @@ public class VectorClockTests
vc.SetTimestamp("node1", ts);
// Assert
vc.GetTimestamp("node1").ShouldBe(ts);
}
/// <summary>
/// Verifies node identifiers are returned for all known nodes.
/// </summary>
[Fact]
public void NodeIds_ShouldReturnAllNodes()
{
vc.GetTimestamp("node1").ShouldBe(ts);
}
/// <summary>
/// Verifies node identifiers are returned for all known nodes.
/// </summary>
[Fact]
public void NodeIds_ShouldReturnAllNodes()
{
// Arrange
var vc = new VectorClock();
vc.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
@@ -56,15 +52,15 @@ public class VectorClockTests
// Assert
nodeIds.Count.ShouldBe(2);
nodeIds.ShouldContain("node1");
nodeIds.ShouldContain("node2");
}
/// <summary>
/// Verifies equal vector clocks are compared as equal.
/// </summary>
[Fact]
public void CompareTo_EqualClocks_ShouldReturnEqual()
{
nodeIds.ShouldContain("node2");
}
/// <summary>
/// Verifies equal vector clocks are compared as equal.
/// </summary>
[Fact]
public void CompareTo_EqualClocks_ShouldReturnEqual()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
@@ -78,15 +74,15 @@ public class VectorClockTests
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.Equal);
}
/// <summary>
/// Verifies a clock strictly ahead of another is reported as strictly ahead.
/// </summary>
[Fact]
public void CompareTo_StrictlyAhead_ShouldReturnStrictlyAhead()
{
result.ShouldBe(CausalityRelation.Equal);
}
/// <summary>
/// Verifies a clock strictly ahead of another is reported as strictly ahead.
/// </summary>
[Fact]
public void CompareTo_StrictlyAhead_ShouldReturnStrictlyAhead()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Ahead
@@ -100,15 +96,15 @@ public class VectorClockTests
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.StrictlyAhead);
}
/// <summary>
/// Verifies a clock strictly behind another is reported as strictly behind.
/// </summary>
[Fact]
public void CompareTo_StrictlyBehind_ShouldReturnStrictlyBehind()
{
result.ShouldBe(CausalityRelation.StrictlyAhead);
}
/// <summary>
/// Verifies a clock strictly behind another is reported as strictly behind.
/// </summary>
[Fact]
public void CompareTo_StrictlyBehind_ShouldReturnStrictlyBehind()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); // Behind
@@ -122,15 +118,15 @@ public class VectorClockTests
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.StrictlyBehind);
}
/// <summary>
/// Verifies divergent per-node progress is reported as concurrent.
/// </summary>
[Fact]
public void CompareTo_Concurrent_ShouldReturnConcurrent()
{
result.ShouldBe(CausalityRelation.StrictlyBehind);
}
/// <summary>
/// Verifies divergent per-node progress is reported as concurrent.
/// </summary>
[Fact]
public void CompareTo_Concurrent_ShouldReturnConcurrent()
{
// Arrange - Split brain scenario
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Node1 ahead
@@ -144,15 +140,15 @@ public class VectorClockTests
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.Concurrent);
}
/// <summary>
/// Verifies pull candidates include nodes where the other clock is ahead.
/// </summary>
[Fact]
public void GetNodesWithUpdates_ShouldReturnNodesWhereOtherIsAhead()
{
result.ShouldBe(CausalityRelation.Concurrent);
}
/// <summary>
/// Verifies pull candidates include nodes where the other clock is ahead.
/// </summary>
[Fact]
public void GetNodesWithUpdates_ShouldReturnNodesWhereOtherIsAhead()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
@@ -167,15 +163,15 @@ public class VectorClockTests
// Assert
nodesToPull.Count().ShouldBe(1);
nodesToPull.ShouldContain("node1");
}
/// <summary>
/// Verifies push candidates include nodes where this clock is ahead.
/// </summary>
[Fact]
public void GetNodesToPush_ShouldReturnNodesWhereThisIsAhead()
{
nodesToPull.ShouldContain("node1");
}
/// <summary>
/// Verifies push candidates include nodes where this clock is ahead.
/// </summary>
[Fact]
public void GetNodesToPush_ShouldReturnNodesWhereThisIsAhead()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Ahead
@@ -190,15 +186,15 @@ public class VectorClockTests
// Assert
nodesToPush.Count().ShouldBe(1);
nodesToPush.ShouldContain("node1");
}
/// <summary>
/// Verifies a newly introduced remote node is included in pull candidates.
/// </summary>
[Fact]
public void GetNodesWithUpdates_WhenNewNodeAppearsInOther_ShouldReturnIt()
{
nodesToPush.ShouldContain("node1");
}
/// <summary>
/// Verifies a newly introduced remote node is included in pull candidates.
/// </summary>
[Fact]
public void GetNodesWithUpdates_WhenNewNodeAppearsInOther_ShouldReturnIt()
{
// Arrange - Simulates a new node joining the cluster
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
@@ -212,15 +208,15 @@ public class VectorClockTests
// Assert
nodesToPull.Count().ShouldBe(1);
nodesToPull.ShouldContain("node3");
}
/// <summary>
/// Verifies merge keeps the maximum timestamp per node.
/// </summary>
[Fact]
public void Merge_ShouldTakeMaximumForEachNode()
{
nodesToPull.ShouldContain("node3");
}
/// <summary>
/// Verifies merge keeps the maximum timestamp per node.
/// </summary>
[Fact]
public void Merge_ShouldTakeMaximumForEachNode()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1"));
@@ -234,18 +230,18 @@ public class VectorClockTests
// Act
vc1.Merge(vc2);
// Assert
vc1.GetTimestamp("node1").ShouldBe(new HlcTimestamp(200, 1, "node1")); // Kept max
vc1.GetTimestamp("node2").ShouldBe(new HlcTimestamp(200, 2, "node2")); // Merged max
vc1.GetTimestamp("node3").ShouldBe(new HlcTimestamp(150, 1, "node3")); // Added new
}
/// <summary>
/// Verifies cloning creates an independent copy of the vector clock.
/// </summary>
[Fact]
public void Clone_ShouldCreateIndependentCopy()
{
// Assert
vc1.GetTimestamp("node1").ShouldBe(new HlcTimestamp(200, 1, "node1")); // Kept max
vc1.GetTimestamp("node2").ShouldBe(new HlcTimestamp(200, 2, "node2")); // Merged max
vc1.GetTimestamp("node3").ShouldBe(new HlcTimestamp(150, 1, "node3")); // Added new
}
/// <summary>
/// Verifies cloning creates an independent copy of the vector clock.
/// </summary>
[Fact]
public void Clone_ShouldCreateIndependentCopy()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
@@ -256,15 +252,15 @@ public class VectorClockTests
// Assert
vc1.NodeIds.Count().ShouldBe(1);
vc2.NodeIds.Count().ShouldBe(2);
}
/// <summary>
/// Verifies the string representation includes serialized node timestamps.
/// </summary>
[Fact]
public void ToString_ShouldReturnReadableFormat()
{
vc2.NodeIds.Count().ShouldBe(2);
}
/// <summary>
/// Verifies the string representation includes serialized node timestamps.
/// </summary>
[Fact]
public void ToString_ShouldReturnReadableFormat()
{
// Arrange
var vc = new VectorClock();
vc.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
@@ -275,15 +271,15 @@ public class VectorClockTests
// Assert
str.ShouldContain("node1:100:1:node1");
str.ShouldContain("node2:200:2:node2");
}
/// <summary>
/// Verifies split-brain updates are detected as concurrent.
/// </summary>
[Fact]
public void SplitBrainScenario_ShouldDetectConcurrency()
{
str.ShouldContain("node2:200:2:node2");
}
/// <summary>
/// Verifies split-brain updates are detected as concurrent.
/// </summary>
[Fact]
public void SplitBrainScenario_ShouldDetectConcurrency()
{
// Arrange - Simulating a network partition scenario
// Partition 1: node1 and node2 are alive
var vcPartition1 = new VectorClock();
@@ -310,4 +306,4 @@ public class VectorClockTests
partition1NeedsToPush.ShouldContain("node1");
partition1NeedsToPush.ShouldContain("node2");
}
}
}

View File

@@ -1,37 +1,37 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Core.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Core.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Core.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Core.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Core.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Core.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4"/>
<PackageReference Include="NSubstitute" Version="5.3.0"/>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1"/>
<PackageReference Include="Shouldly" Version="4.3.0"/>
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4"/>
<PackageReference Include="xunit.v3" Version="3.2.0"/>
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit"/>
</ItemGroup>
<ItemGroup>
<None Update="benchmark_limits.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
<ItemGroup>
<None Update="benchmark_limits.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
</ItemGroup>
</Project>
</Project>

View File

@@ -1,4 +1,4 @@
{
"RecursiveMerge_Simple_10k_Ops_MaxMs": 500,
"RecursiveMerge_Array_1k_Ops_MaxMs": 1500
"RecursiveMerge_Simple_10k_Ops_MaxMs": 500,
"RecursiveMerge_Array_1k_Ops_MaxMs": 1500
}

View File

@@ -1,6 +1,6 @@
using System.Collections.Concurrent;
using System.Net;
using System.Net.Sockets;
using System.Collections.Concurrent;
using System.Text.Json;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
@@ -16,24 +16,20 @@ namespace ZB.MOM.WW.CBDDC.E2E.Tests;
public class ClusterCrudSyncE2ETests
{
/// <summary>
/// Verifies two real peers replicate create, update, and delete operations in both directions.
/// Verifies two real peers replicate create, update, and delete operations in both directions.
/// </summary>
[Fact]
public async Task TwoPeers_ShouldReplicateCrudBidirectionally()
{
var clusterToken = Guid.NewGuid().ToString("N");
var nodeAPort = GetAvailableTcpPort();
var nodeBPort = GetAvailableTcpPort();
while (nodeBPort == nodeAPort)
{
nodeBPort = GetAvailableTcpPort();
}
int nodeAPort = GetAvailableTcpPort();
int nodeBPort = GetAvailableTcpPort();
while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort();
await using var nodeA = TestPeerNode.Create(
nodeId: "node-a",
tcpPort: nodeAPort,
authToken: clusterToken,
knownPeers:
"node-a",
nodeAPort,
clusterToken,
[
new KnownPeerConfiguration
{
@@ -44,10 +40,9 @@ public class ClusterCrudSyncE2ETests
]);
await using var nodeB = TestPeerNode.Create(
nodeId: "node-b",
tcpPort: nodeBPort,
authToken: clusterToken,
knownPeers:
"node-b",
nodeBPort,
clusterToken,
[
new KnownPeerConfiguration
{
@@ -75,9 +70,9 @@ public class ClusterCrudSyncE2ETests
{
var replicated = nodeB.ReadUser(nodeAUserId);
return replicated is not null
&& replicated.Name == "Alice"
&& replicated.Age == 30
&& replicated.Address?.City == "Austin";
&& replicated.Name == "Alice"
&& replicated.Age == 30
&& replicated.Address?.City == "Austin";
}, timeoutSeconds, "Node B did not receive create from node A.", () => BuildDiagnostics(nodeA, nodeB));
await AssertEventuallyAsync(
@@ -100,9 +95,9 @@ public class ClusterCrudSyncE2ETests
{
var replicated = nodeB.ReadUser(nodeAUserId);
return replicated is not null
&& replicated.Name == "Alice Updated"
&& replicated.Age == 31
&& replicated.Address?.City == "Dallas";
&& replicated.Name == "Alice Updated"
&& replicated.Age == 31
&& replicated.Address?.City == "Dallas";
}, timeoutSeconds, "Node B did not receive update from node A.", () => BuildDiagnostics(nodeA, nodeB));
await nodeA.DeleteUserAsync(nodeAUserId);
@@ -126,9 +121,9 @@ public class ClusterCrudSyncE2ETests
{
var replicated = nodeA.ReadUser(nodeBUserId);
return replicated is not null
&& replicated.Name == "Bob"
&& replicated.Age == 40
&& replicated.Address?.City == "Boston";
&& replicated.Name == "Bob"
&& replicated.Age == 40
&& replicated.Address?.City == "Boston";
}, timeoutSeconds, "Node A did not receive create from node B.", () => BuildDiagnostics(nodeA, nodeB));
await AssertEventuallyAsync(
@@ -151,9 +146,9 @@ public class ClusterCrudSyncE2ETests
{
var replicated = nodeA.ReadUser(nodeBUserId);
return replicated is not null
&& replicated.Name == "Bob Updated"
&& replicated.Age == 41
&& replicated.Address?.City == "Denver";
&& replicated.Name == "Bob Updated"
&& replicated.Age == 41
&& replicated.Address?.City == "Denver";
}, timeoutSeconds, "Node A did not receive update from node B.", () => BuildDiagnostics(nodeA, nodeB));
await nodeB.DeleteUserAsync(nodeBUserId);
@@ -175,36 +170,35 @@ public class ClusterCrudSyncE2ETests
var startedAt = DateTime.UtcNow;
while (DateTime.UtcNow - startedAt < timeout)
{
if (predicate())
{
return;
}
if (predicate()) return;
await Task.Delay(250);
}
var suffix = diagnostics is null ? string.Empty : $"{Environment.NewLine}{diagnostics()}";
throw new Shouldly.ShouldAssertException($"{failureMessage}{suffix}");
string suffix = diagnostics is null ? string.Empty : $"{Environment.NewLine}{diagnostics()}";
throw new ShouldAssertException($"{failureMessage}{suffix}");
}
private static string BuildDiagnostics(TestPeerNode nodeA, TestPeerNode nodeB)
{
var nodeAUserCount = nodeA.Context.Users.FindAll().Count();
var nodeBUserCount = nodeB.Context.Users.FindAll().Count();
var nodeAOplogCount = nodeA.Context.OplogEntries.FindAll().Count();
var nodeBOplogCount = nodeB.Context.OplogEntries.FindAll().Count();
var nodeAOplogByAuthor = string.Join(
int nodeAUserCount = nodeA.Context.Users.FindAll().Count();
int nodeBUserCount = nodeB.Context.Users.FindAll().Count();
int nodeAOplogCount = nodeA.Context.OplogEntries.FindAll().Count();
int nodeBOplogCount = nodeB.Context.OplogEntries.FindAll().Count();
string nodeAOplogByAuthor = string.Join(
", ",
nodeA.Context.OplogEntries.FindAll()
.GroupBy(e => e.TimestampNodeId)
.Select(g => $"{g.Key}:{g.Count()}"));
var nodeBOplogByAuthor = string.Join(
string nodeBOplogByAuthor = string.Join(
", ",
nodeB.Context.OplogEntries.FindAll()
.GroupBy(e => e.TimestampNodeId)
.Select(g => $"{g.Key}:{g.Count()}"));
var nodeAUsers = string.Join(", ", nodeA.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}"));
var nodeBUsers = string.Join(", ", nodeB.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}"));
string nodeAUsers = string.Join(", ",
nodeA.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}"));
string nodeBUsers = string.Join(", ",
nodeB.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}"));
return string.Join(
Environment.NewLine,
@@ -230,20 +224,15 @@ public class ClusterCrudSyncE2ETests
private sealed class TestPeerNode : IAsyncDisposable
{
private readonly ServiceProvider _services;
private readonly ICBDDCNode _node;
private readonly IOplogStore _oplogStore;
private readonly string _nodeId;
private readonly string _workDir;
private readonly InMemoryLogSink _logSink;
private bool _started;
private readonly ICBDDCNode _node;
private readonly string _nodeId;
private readonly IOplogStore _oplogStore;
private readonly ServiceProvider _services;
private readonly string _workDir;
private long _lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
private int _logicalCounter;
/// <summary>
/// Gets the LiteDB-backed context used by this test peer.
/// </summary>
public SampleDbContext Context { get; }
private bool _started;
private TestPeerNode(
ServiceProvider services,
@@ -264,82 +253,9 @@ public class ClusterCrudSyncE2ETests
}
/// <summary>
/// Creates a test peer node and wires all required services.
/// Gets the LiteDB-backed context used by this test peer.
/// </summary>
/// <param name="nodeId">The unique node identifier.</param>
/// <param name="tcpPort">The TCP port used by the node listener.</param>
/// <param name="authToken">The cluster authentication token.</param>
/// <param name="knownPeers">The known peers this node can connect to.</param>
/// <returns>A configured <see cref="TestPeerNode"/> instance.</returns>
public static TestPeerNode Create(
string nodeId,
int tcpPort,
string authToken,
IReadOnlyList<KnownPeerConfiguration> knownPeers)
{
var workDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}");
Directory.CreateDirectory(workDir);
var dbPath = Path.Combine(workDir, "node.blite");
var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = tcpPort,
AuthToken = authToken,
KnownPeers = knownPeers.ToList()
});
var services = new ServiceCollection();
services.AddSingleton(new InMemoryLogSink(nodeId));
services.AddSingleton<ILoggerProvider, InMemoryLoggerProvider>();
services.AddLogging(builder => builder.SetMinimumLevel(LogLevel.Debug));
services.AddSingleton(configProvider);
services.AddSingleton<IPeerNodeConfigurationProvider>(configProvider);
services.AddCBDDCCore()
.AddCBDDCBLite<SampleDbContext, SampleDocumentStore>(_ => new SampleDbContext(dbPath))
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(useHostedService: false);
// Deterministic tests: sync uses explicit known peers, so disable UDP discovery.
services.AddSingleton<IDiscoveryService, PassiveDiscoveryService>();
services.AddSingleton<IPeerHandshakeService, NoOpHandshakeService>();
var provider = services.BuildServiceProvider();
var node = provider.GetRequiredService<ICBDDCNode>();
var oplogStore = provider.GetRequiredService<IOplogStore>();
var context = provider.GetRequiredService<SampleDbContext>();
var logSink = provider.GetRequiredService<InMemoryLogSink>();
return new TestPeerNode(provider, node, oplogStore, context, logSink, workDir, nodeId);
}
/// <summary>
/// Starts the underlying node when it has not been started yet.
/// </summary>
/// <returns>A task that represents the asynchronous operation.</returns>
public async Task StartAsync()
{
if (_started)
{
return;
}
await _node.Start();
_started = true;
}
/// <summary>
/// Stops the underlying node when it is currently running.
/// </summary>
/// <returns>A task that represents the asynchronous operation.</returns>
public async Task StopAsync()
{
if (!_started)
{
return;
}
await _node.Stop();
_started = false;
}
public SampleDbContext Context { get; }
/// <inheritdoc />
public async ValueTask DisposeAsync()
@@ -357,17 +273,89 @@ public class ClusterCrudSyncE2ETests
}
/// <summary>
/// Reads a user document by identifier.
/// Creates a test peer node and wires all required services.
/// </summary>
/// <param name="nodeId">The unique node identifier.</param>
/// <param name="tcpPort">The TCP port used by the node listener.</param>
/// <param name="authToken">The cluster authentication token.</param>
/// <param name="knownPeers">The known peers this node can connect to.</param>
/// <returns>A configured <see cref="TestPeerNode" /> instance.</returns>
public static TestPeerNode Create(
string nodeId,
int tcpPort,
string authToken,
IReadOnlyList<KnownPeerConfiguration> knownPeers)
{
string workDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}");
Directory.CreateDirectory(workDir);
string dbPath = Path.Combine(workDir, "node.blite");
var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = tcpPort,
AuthToken = authToken,
KnownPeers = knownPeers.ToList()
});
var services = new ServiceCollection();
services.AddSingleton(new InMemoryLogSink(nodeId));
services.AddSingleton<ILoggerProvider, InMemoryLoggerProvider>();
services.AddLogging(builder => builder.SetMinimumLevel(LogLevel.Debug));
services.AddSingleton(configProvider);
services.AddSingleton<IPeerNodeConfigurationProvider>(configProvider);
services.AddCBDDCCore()
.AddCBDDCBLite<SampleDbContext, SampleDocumentStore>(_ => new SampleDbContext(dbPath))
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
// Deterministic tests: sync uses explicit known peers, so disable UDP discovery.
services.AddSingleton<IDiscoveryService, PassiveDiscoveryService>();
services.AddSingleton<IPeerHandshakeService, NoOpHandshakeService>();
var provider = services.BuildServiceProvider();
var node = provider.GetRequiredService<ICBDDCNode>();
var oplogStore = provider.GetRequiredService<IOplogStore>();
var context = provider.GetRequiredService<SampleDbContext>();
var logSink = provider.GetRequiredService<InMemoryLogSink>();
return new TestPeerNode(provider, node, oplogStore, context, logSink, workDir, nodeId);
}
/// <summary>
/// Starts the underlying node when it has not been started yet.
/// </summary>
/// <returns>A task that represents the asynchronous operation.</returns>
public async Task StartAsync()
{
if (_started) return;
await _node.Start();
_started = true;
}
/// <summary>
/// Stops the underlying node when it is currently running.
/// </summary>
/// <returns>A task that represents the asynchronous operation.</returns>
public async Task StopAsync()
{
if (!_started) return;
await _node.Stop();
_started = false;
}
/// <summary>
/// Reads a user document by identifier.
/// </summary>
/// <param name="userId">The identifier of the user to read.</param>
/// <returns>The matching user when found; otherwise <see langword="null"/>.</returns>
/// <returns>The matching user when found; otherwise <see langword="null" />.</returns>
public User? ReadUser(string userId)
{
return Context.Users.Find(u => u.Id == userId).FirstOrDefault();
}
/// <summary>
/// Inserts or updates a user and persists the matching oplog entry.
/// Inserts or updates a user and persists the matching oplog entry.
/// </summary>
/// <param name="user">The user payload to upsert.</param>
/// <returns>A task that represents the asynchronous operation.</returns>
@@ -381,20 +369,16 @@ public class ClusterCrudSyncE2ETests
{
var existing = Context.Users.Find(u => u.Id == user.Id).FirstOrDefault();
if (existing == null)
{
await Context.Users.InsertAsync(user);
}
else
{
await Context.Users.UpdateAsync(user);
}
await Context.SaveChangesAsync();
});
}
/// <summary>
/// Deletes a user and persists the matching oplog entry.
/// Deletes a user and persists the matching oplog entry.
/// </summary>
/// <param name="userId">The identifier of the user to delete.</param>
/// <returns>A task that represents the asynchronous operation.</returns>
@@ -403,7 +387,7 @@ public class ClusterCrudSyncE2ETests
await PersistUserMutationWithOplogFallbackAsync(
userId,
OperationType.Delete,
payload: null,
null,
async () =>
{
await Context.Users.DeleteAsync(userId);
@@ -412,7 +396,7 @@ public class ClusterCrudSyncE2ETests
}
/// <summary>
/// Gets recent in-memory logs captured for this node.
/// Gets recent in-memory logs captured for this node.
/// </summary>
/// <param name="max">The maximum number of log entries to return.</param>
/// <returns>A newline-delimited string of recent log entries.</returns>
@@ -427,29 +411,26 @@ public class ClusterCrudSyncE2ETests
JsonElement? payload,
Func<Task> mutation)
{
var oplogCountBefore = Context.OplogEntries.FindAll().Count();
int oplogCountBefore = Context.OplogEntries.FindAll().Count();
await mutation();
// Prefer native CDC path; fallback only when CDC fails to emit.
var deadline = DateTime.UtcNow.AddSeconds(3);
while (DateTime.UtcNow < deadline)
{
if (Context.OplogEntries.FindAll().Count() > oplogCountBefore)
{
return;
}
if (Context.OplogEntries.FindAll().Count() > oplogCountBefore) return;
await Task.Delay(50);
}
var previousHash = await _oplogStore.GetLastEntryHashAsync(_nodeId) ?? string.Empty;
string previousHash = await _oplogStore.GetLastEntryHashAsync(_nodeId) ?? string.Empty;
var fallbackEntry = new OplogEntry(
collection: "Users",
key: userId,
operation: operationType,
payload: payload,
timestamp: NextTimestamp(),
previousHash: previousHash);
"Users",
userId,
operationType,
payload,
NextTimestamp(),
previousHash);
await _oplogStore.AppendOplogEntryAsync(fallbackEntry);
await Context.SaveChangesAsync();
@@ -457,7 +438,7 @@ public class ClusterCrudSyncE2ETests
private HlcTimestamp NextTimestamp()
{
var now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (now > _lastPhysicalTime)
{
_lastPhysicalTime = now;
@@ -473,14 +454,11 @@ public class ClusterCrudSyncE2ETests
private static void TryDeleteDirectory(string path)
{
if (!Directory.Exists(path))
{
return;
}
if (!Directory.Exists(path)) return;
try
{
Directory.Delete(path, recursive: true);
Directory.Delete(path, true);
}
catch
{
@@ -514,7 +492,7 @@ public class ClusterCrudSyncE2ETests
private PeerNodeConfiguration _configuration;
/// <summary>
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider"/> class.
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider" /> class.
/// </summary>
/// <param name="configuration">The initial peer node configuration.</param>
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
@@ -545,7 +523,7 @@ public class ClusterCrudSyncE2ETests
private readonly string _nodeId;
/// <summary>
/// Initializes a new instance of the <see cref="InMemoryLogSink"/> class.
/// Initializes a new instance of the <see cref="InMemoryLogSink" /> class.
/// </summary>
/// <param name="nodeId">The node identifier associated with emitted logs.</param>
public InMemoryLogSink(string nodeId)
@@ -554,7 +532,7 @@ public class ClusterCrudSyncE2ETests
}
/// <summary>
/// Adds a log entry to the in-memory sink.
/// Adds a log entry to the in-memory sink.
/// </summary>
/// <param name="category">The log category.</param>
/// <param name="level">The log level.</param>
@@ -563,10 +541,7 @@ public class ClusterCrudSyncE2ETests
public void Add(string category, LogLevel level, string message, Exception? exception)
{
var text = $"[{DateTime.UtcNow:O}] {_nodeId} {level} {category}: {message}";
if (exception is not null)
{
text = $"{text}{Environment.NewLine}{exception}";
}
if (exception is not null) text = $"{text}{Environment.NewLine}{exception}";
_entries.Enqueue(text);
while (_entries.Count > 500 && _entries.TryDequeue(out _))
@@ -575,17 +550,14 @@ public class ClusterCrudSyncE2ETests
}
/// <summary>
/// Gets the most recent log entries from the sink.
/// Gets the most recent log entries from the sink.
/// </summary>
/// <param name="max">The maximum number of entries to return.</param>
/// <returns>A newline-delimited string of recent log entries, or a placeholder when none exist.</returns>
public string GetRecent(int max)
{
var entries = _entries.ToArray();
if (entries.Length == 0)
{
return "<no logs>";
}
string[] entries = _entries.ToArray();
if (entries.Length == 0) return "<no logs>";
return string.Join(Environment.NewLine, entries.TakeLast(max));
}
@@ -596,7 +568,7 @@ public class ClusterCrudSyncE2ETests
private readonly InMemoryLogSink _sink;
/// <summary>
/// Initializes a new instance of the <see cref="InMemoryLoggerProvider"/> class.
/// Initializes a new instance of the <see cref="InMemoryLoggerProvider" /> class.
/// </summary>
/// <param name="sink">The shared sink used to capture log messages.</param>
public InMemoryLoggerProvider(InMemoryLogSink sink)
@@ -622,7 +594,7 @@ public class ClusterCrudSyncE2ETests
private readonly InMemoryLogSink _sink;
/// <summary>
/// Initializes a new instance of the <see cref="InMemoryLogger"/> class.
/// Initializes a new instance of the <see cref="InMemoryLogger" /> class.
/// </summary>
/// <param name="categoryName">The logger category name.</param>
/// <param name="sink">The sink that stores emitted log messages.</param>
@@ -665,4 +637,4 @@ public class ClusterCrudSyncE2ETests
{
}
}
}
}

View File

@@ -1,2 +1,2 @@
global using Shouldly;
global using ZB.MOM.WW.CBDDC.Sample.Console;
global using ZB.MOM.WW.CBDDC.Sample.Console;

View File

@@ -1,33 +1,33 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.E2E.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.E2E.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.E2E.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.E2E.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.E2E.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.E2E.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4"/>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1"/>
<PackageReference Include="Shouldly" Version="4.3.0"/>
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4"/>
<PackageReference Include="xunit.v3" Version="3.2.0"/>
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit"/>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\samples\ZB.MOM.WW.CBDDC.Sample.Console\ZB.MOM.WW.CBDDC.Sample.Console.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\samples\ZB.MOM.WW.CBDDC.Sample.Console\ZB.MOM.WW.CBDDC.Sample.Console.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj"/>
</ItemGroup>
</Project>

View File

@@ -9,7 +9,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class CBDDCHealthCheckTests
{
/// <summary>
/// Verifies that health is reported as healthy when persistence is available and all peers are within lag thresholds.
/// Verifies that health is reported as healthy when persistence is available and all peers are within lag thresholds.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenPersistenceOkAndPeersWithinLagThreshold_ReturnsHealthyWithPayload()
@@ -52,7 +52,7 @@ public class CBDDCHealthCheckTests
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 20, criticalLagThresholdMs: 50));
CreateOptions(20, 50));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
@@ -69,7 +69,7 @@ public class CBDDCHealthCheckTests
}
/// <summary>
/// Verifies that health is reported as degraded when at least one peer is lagging or has no confirmation.
/// Verifies that health is reported as degraded when at least one peer is lagging or has no confirmation.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenPeersLaggingOrUnconfirmed_ReturnsDegradedWithPayload()
@@ -113,7 +113,7 @@ public class CBDDCHealthCheckTests
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 30, criticalLagThresholdMs: 100));
CreateOptions(30, 100));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
@@ -130,7 +130,7 @@ public class CBDDCHealthCheckTests
}
/// <summary>
/// Verifies that health is reported as unhealthy when critical lag threshold is exceeded.
/// Verifies that health is reported as unhealthy when critical lag threshold is exceeded.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenCriticalLagBreached_ReturnsUnhealthyWithPayload()
@@ -158,7 +158,7 @@ public class CBDDCHealthCheckTests
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 30, criticalLagThresholdMs: 80));
CreateOptions(30, 80));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
@@ -168,7 +168,7 @@ public class CBDDCHealthCheckTests
}
/// <summary>
/// Verifies that worst-case lag is used when a peer has multiple source confirmations.
/// Verifies that worst-case lag is used when a peer has multiple source confirmations.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenPeerHasMultipleSourceConfirmations_UsesWorstCaseLag()
@@ -205,7 +205,7 @@ public class CBDDCHealthCheckTests
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 80, criticalLagThresholdMs: 150));
CreateOptions(80, 150));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
@@ -215,7 +215,7 @@ public class CBDDCHealthCheckTests
}
/// <summary>
/// Verifies that health is reported as unhealthy when the persistence store throws.
/// Verifies that health is reported as unhealthy when the persistence store throws.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenStoreThrows_ReturnsUnhealthy()
@@ -253,4 +253,4 @@ public class CBDDCHealthCheckTests
}
};
}
}
}

View File

@@ -12,7 +12,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class CBDDCHostingExtensionsTests
{
/// <summary>
/// Verifies that adding CBDDC hosting throws when the service collection is null.
/// Verifies that adding CBDDC hosting throws when the service collection is null.
/// </summary>
[Fact]
public void AddCBDDCHosting_WithNullServices_ThrowsArgumentNullException()
@@ -22,7 +22,7 @@ public class CBDDCHostingExtensionsTests
}
/// <summary>
/// Verifies that adding CBDDC hosting throws when the configuration delegate is null.
/// Verifies that adding CBDDC hosting throws when the configuration delegate is null.
/// </summary>
[Fact]
public void AddCBDDCHosting_WithNullConfigure_ThrowsArgumentNullException()
@@ -33,7 +33,7 @@ public class CBDDCHostingExtensionsTests
}
/// <summary>
/// Verifies that single-cluster hosting registers expected services and configured options.
/// Verifies that single-cluster hosting registers expected services and configured options.
/// </summary>
[Fact]
public void AddCBDDCHostingSingleCluster_RegistersExpectedServicesAndOptions()
@@ -73,7 +73,7 @@ public class CBDDCHostingExtensionsTests
}
/// <summary>
/// Verifies that single-cluster hosting uses default options when no configuration delegate is provided.
/// Verifies that single-cluster hosting uses default options when no configuration delegate is provided.
/// </summary>
[Fact]
public void AddCBDDCHostingSingleCluster_WithNullConfigure_UsesDefaults()
@@ -90,17 +90,14 @@ public class CBDDCHostingExtensionsTests
}
/// <summary>
/// Verifies that health check registration is skipped when health checks are disabled.
/// Verifies that health check registration is skipped when health checks are disabled.
/// </summary>
[Fact]
public void AddCBDDCHosting_WithHealthChecksDisabled_DoesNotRegisterCBDDCHealthCheck()
{
var services = new ServiceCollection();
services.AddCBDDCHosting(options =>
{
options.EnableHealthChecks = false;
});
services.AddCBDDCHosting(options => { options.EnableHealthChecks = false; });
services.Any(d => d.ServiceType == typeof(IConfigureOptions<HealthCheckServiceOptions>))
.ShouldBeFalse();
@@ -121,4 +118,4 @@ public class CBDDCHostingExtensionsTests
d.ImplementationType == typeof(THostedService))
.ShouldBeTrue();
}
}
}

View File

@@ -1,2 +1,2 @@
global using NSubstitute;
global using Shouldly;
global using Shouldly;

View File

@@ -7,7 +7,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class HostedServicesTests
{
/// <summary>
/// Verifies that the TCP sync server hosted service starts and stops the server lifecycle.
/// Verifies that the TCP sync server hosted service starts and stops the server lifecycle.
/// </summary>
[Fact]
public async Task TcpSyncServerHostedService_StartAndStop_CallsServerLifecycle()
@@ -24,7 +24,7 @@ public class HostedServicesTests
}
/// <summary>
/// Verifies that the discovery hosted service starts and stops the discovery lifecycle.
/// Verifies that the discovery hosted service starts and stops the discovery lifecycle.
/// </summary>
[Fact]
public async Task DiscoveryServiceHostedService_StartAndStop_CallsDiscoveryLifecycle()
@@ -39,4 +39,4 @@ public class HostedServicesTests
await discoveryService.Received(1).Start();
await discoveryService.Received(1).Stop();
}
}
}

View File

@@ -5,7 +5,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class NoOpServicesTests
{
/// <summary>
/// Verifies that no-op discovery service lifecycle calls complete and no peers are returned.
/// Verifies that no-op discovery service lifecycle calls complete and no peers are returned.
/// </summary>
[Fact]
public async Task NoOpDiscoveryService_ReturnsNoPeers_AndCompletesLifecycleCalls()
@@ -20,7 +20,7 @@ public class NoOpServicesTests
}
/// <summary>
/// Verifies that no-op sync orchestrator lifecycle calls complete without exceptions.
/// Verifies that no-op sync orchestrator lifecycle calls complete without exceptions.
/// </summary>
[Fact]
public async Task NoOpSyncOrchestrator_CompletesLifecycleCalls()
@@ -32,4 +32,4 @@ public class NoOpServicesTests
orchestrator.Dispose();
}
}
}

View File

@@ -1,32 +1,32 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Hosting.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Hosting.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Hosting.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Hosting.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Hosting.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Hosting.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="8.0.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4"/>
<PackageReference Include="NSubstitute" Version="5.3.0"/>
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="8.0.0"/>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1"/>
<PackageReference Include="Shouldly" Version="4.3.0"/>
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4"/>
<PackageReference Include="xunit.v3" Version="3.2.0"/>
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit"/>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Hosting\ZB.MOM.WW.CBDDC.Hosting.csproj" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Hosting\ZB.MOM.WW.CBDDC.Hosting.csproj"/>
</ItemGroup>
</Project>

View File

@@ -1,4 +1,3 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Network.Leadership;
@@ -21,7 +20,7 @@ public class BullyLeaderElectionServiceTests
}
/// <summary>
/// Verifies that a single node elects itself as leader.
/// Verifies that a single node elects itself as leader.
/// </summary>
[Fact]
public async Task SingleNode_ShouldBecomeLeader()
@@ -48,15 +47,15 @@ public class BullyLeaderElectionServiceTests
}
/// <summary>
/// Verifies that the smallest node ID is elected as leader among LAN peers.
/// Verifies that the smallest node ID is elected as leader among LAN peers.
/// </summary>
[Fact]
public async Task MultipleNodes_SmallestNodeIdShouldBeLeader()
{
var peers = new List<PeerNode>
{
new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
new("node-C", "192.168.1.3:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow),
new("node-C", "192.168.1.3:9000", DateTimeOffset.UtcNow)
};
var electionService = new BullyLeaderElectionService(
@@ -74,15 +73,15 @@ public class BullyLeaderElectionServiceTests
}
/// <summary>
/// Verifies that the local node is not elected when it is not the smallest node ID.
/// Verifies that the local node is not elected when it is not the smallest node ID.
/// </summary>
[Fact]
public async Task LocalNodeNotSmallest_ShouldNotBeLeader()
{
var peers = new List<PeerNode>
{
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow),
new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow)
};
var electionService = new BullyLeaderElectionService(
@@ -100,14 +99,14 @@ public class BullyLeaderElectionServiceTests
}
/// <summary>
/// Verifies that leadership is re-elected when the current leader fails.
/// Verifies that leadership is re-elected when the current leader fails.
/// </summary>
[Fact]
public async Task LeaderFailure_ShouldReelect()
{
var peers = new List<PeerNode>
{
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow)
};
var electionService = new BullyLeaderElectionService(
@@ -136,14 +135,14 @@ public class BullyLeaderElectionServiceTests
}
/// <summary>
/// Verifies that cloud peers are excluded from LAN gateway election.
/// Verifies that cloud peers are excluded from LAN gateway election.
/// </summary>
[Fact]
public async Task CloudPeersExcludedFromElection()
{
var peers = new List<PeerNode>
{
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow),
new("cloud-node-Z", "cloud.example.com:9000", DateTimeOffset.UtcNow, PeerType.CloudRemote)
};
@@ -159,4 +158,4 @@ public class BullyLeaderElectionServiceTests
await electionService.Stop();
}
}
}

View File

@@ -1,17 +1,16 @@
using System.IO;
using System.Net.Sockets;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class ConnectionTests
{
/// <summary>
/// Verifies that the server rejects new clients when the configured connection limit is reached.
/// Verifies that the server rejects new clients when the configured connection limit is reached.
/// </summary>
[Fact]
public async Task Server_Should_Reject_Clients_When_Limit_Reached()
@@ -22,9 +21,11 @@ public class ConnectionTests
.Returns(new HlcTimestamp(0, 0, "node"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(new VectorClock());
oplogStore.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
oplogStore.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(),
Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
oplogStore.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
oplogStore.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(),
Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
@@ -44,7 +45,8 @@ public class ConnectionTests
authenticator.ValidateAsync(Arg.Any<string>(), Arg.Any<string>()).Returns(true);
var handshakeService = Substitute.For<IPeerHandshakeService>();
handshakeService.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
handshakeService.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(),
Arg.Any<CancellationToken>())
.Returns((CipherState?)null);
var server = new TcpSyncServer(
@@ -59,7 +61,7 @@ public class ConnectionTests
server.MaxConnections = 2;
await server.Start();
var port = server.ListeningPort ?? throw new Exception("Server not started");
int port = server.ListeningPort ?? throw new Exception("Server not started");
using var client1 = new TcpClient();
using var client2 = new TcpClient();
@@ -76,7 +78,7 @@ public class ConnectionTests
// Assert
var stream3 = client3.GetStream();
var buffer = new byte[10];
var read = await stream3.ReadAsync(buffer, 0, 10);
int read = await stream3.ReadAsync(buffer, 0, 10);
read.ShouldBe(0, "Server should close connection immediately for client 3");
client1.Connected.ShouldBeTrue();
@@ -87,4 +89,4 @@ public class ConnectionTests
await server.Stop();
}
}
}
}

View File

@@ -1,42 +1,41 @@
using System.Security.Cryptography;
using ZB.MOM.WW.CBDDC.Network.Security;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class CryptoHelperTests
{
/// <summary>
/// Verifies that encrypted data can be decrypted back to the original payload.
/// </summary>
[Fact]
public void EncryptDecrypt_ShouldPreserveData()
{
public class CryptoHelperTests
{
/// <summary>
/// Verifies that encrypted data can be decrypted back to the original payload.
/// </summary>
[Fact]
public void EncryptDecrypt_ShouldPreserveData()
{
// Arrange
var key = new byte[32]; // 256 bits
RandomNumberGenerator.Fill(key);
RandomNumberGenerator.Fill(key);
var original = new byte[] { 1, 2, 3, 4, 5, 255, 0, 10 };
// Act
var (ciphertext, iv, tag) = CryptoHelper.Encrypt(original, key);
var decrypted = CryptoHelper.Decrypt(ciphertext, iv, tag, key);
(byte[] ciphertext, byte[] iv, byte[] tag) = CryptoHelper.Encrypt(original, key);
byte[] decrypted = CryptoHelper.Decrypt(ciphertext, iv, tag, key);
// Assert
decrypted.ShouldBe(original);
}
/// <summary>
/// Verifies that decryption fails when ciphertext is tampered with.
/// </summary>
[Fact]
public void Decrypt_ShouldFail_IfTampered()
{
/// <summary>
/// Verifies that decryption fails when ciphertext is tampered with.
/// </summary>
[Fact]
public void Decrypt_ShouldFail_IfTampered()
{
// Arrange
var key = new byte[32];
RandomNumberGenerator.Fill(key);
var original = new byte[] { 1, 2, 3 };
var (ciphertext, iv, tag) = CryptoHelper.Encrypt(original, key);
(byte[] ciphertext, byte[] iv, byte[] tag) = CryptoHelper.Encrypt(original, key);
// Tamper ciphertext
ciphertext[0] ^= 0xFF;
@@ -45,6 +44,6 @@ public class CryptoHelperTests
Action act = () => CryptoHelper.Decrypt(ciphertext, iv, tag, key);
// Assert
Should.Throw<CryptographicException>(act);
Should.Throw<CryptographicException>(act);
}
}
}

View File

@@ -1,2 +1,2 @@
global using NSubstitute;
global using Shouldly;
global using Shouldly;

View File

@@ -1,17 +1,16 @@
using System.IO;
using System.Net.Sockets;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class HandshakeRegressionTests
{
/// <summary>
/// Verifies that the server invokes the handshake service when a client connects.
/// Verifies that the server invokes the handshake service when a client connects.
/// </summary>
[Fact]
public async Task Server_Should_Call_HandshakeService_On_Client_Connection()
@@ -22,9 +21,11 @@ public class HandshakeRegressionTests
.Returns(new HlcTimestamp(0, 0, "node"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(new VectorClock());
oplogStore.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
oplogStore.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(),
Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
oplogStore.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
oplogStore.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(),
Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
@@ -44,7 +45,8 @@ public class HandshakeRegressionTests
authenticator.ValidateAsync(Arg.Any<string>(), Arg.Any<string>()).Returns(true);
var handshakeService = Substitute.For<IPeerHandshakeService>();
handshakeService.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
handshakeService.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(),
Arg.Any<CancellationToken>())
.Returns((CipherState?)null);
var server = new TcpSyncServer(
@@ -57,7 +59,7 @@ public class HandshakeRegressionTests
handshakeService);
await server.Start();
var port = server.ListeningPort ?? throw new Exception("Server did not start or report port");
int port = server.ListeningPort ?? throw new Exception("Server did not start or report port");
// Act
using (var client = new TcpClient())
@@ -72,4 +74,4 @@ public class HandshakeRegressionTests
await handshakeService.Received(1)
.HandshakeAsync(Arg.Any<Stream>(), false, "server-node", Arg.Any<CancellationToken>());
}
}
}

View File

@@ -1,178 +1,172 @@
using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
using Google.Protobuf;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class ProtocolTests
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class ProtocolTests
{
private readonly ProtocolHandler _handler;
/// <summary>
/// Initializes a new instance of the <see cref="ProtocolTests" /> class.
/// </summary>
public ProtocolTests()
{
private readonly ProtocolHandler _handler;
_handler = new ProtocolHandler(NullLogger<ProtocolHandler>.Instance);
}
/// <summary>
/// Verifies a plain message can be written and read without transformation.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWorks_WithPlainMessage()
{
// Arrange
var stream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "node-1", AuthToken = "token" };
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, null);
stream.Position = 0; // Reset for reading
(var type, byte[] payload) = await _handler.ReadMessageAsync(stream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("node-1");
decoded.AuthToken.ShouldBe("token");
}
/// <summary>
/// Verifies a compressed message can be written and read successfully.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithCompression()
{
// Arrange
var stream = new MemoryStream();
// Create a large message to trigger compression logic (threshold is small but let's be safe)
string largeData = string.Join("", Enumerable.Repeat("ABCDEF0123456789", 100));
var message = new HandshakeRequest { NodeId = largeData, AuthToken = "token" };
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, null);
stream.Position = 0;
(var type, byte[] payload) = await _handler.ReadMessageAsync(stream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe(largeData);
}
/// <summary>
/// Verifies an encrypted message can be written and read successfully.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithEncryption()
{
// Arrange
var stream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "secure-node", AuthToken = "secure-token" };
// Mock CipherState
var key = new byte[32]; // 256-bit key
new Random().NextBytes(key);
var cipherState = new CipherState(key, key); // Encrypt and Decrypt with same key for loopback
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, cipherState);
stream.Position = 0;
(var type, byte[] payload) = await _handler.ReadMessageAsync(stream, cipherState);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("secure-node");
}
/// <summary>
/// Verifies a message can be round-tripped when both compression and encryption are enabled.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithEncryption_And_Compression()
{
// Arrange
var stream = new MemoryStream();
string largeData = string.Join("", Enumerable.Repeat("SECURECOMPRESSION", 100));
var message = new HandshakeRequest { NodeId = largeData };
var key = new byte[32];
new Random().NextBytes(key);
var cipherState = new CipherState(key, key);
// Act: Compress THEN Encrypt
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, cipherState);
stream.Position = 0;
// Verify wire encryption (should be MessageType.SecureEnv)
// But ReadMessageAsync abstracts this away.
// We can peek at the stream if we want, but let's trust ReadMessageAsync handles it.
(var type, byte[] payload) = await _handler.ReadMessageAsync(stream, cipherState);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe(largeData);
}
/// <summary>
/// Verifies that message reads succeed when bytes arrive in small fragments.
/// </summary>
[Fact]
public async Task ReadMessage_ShouldHandle_Fragmentation()
{
// Arrange
var fullStream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "fragmented" };
await _handler.SendMessageAsync(fullStream, MessageType.HandshakeReq, message, false, null);
byte[] completeBytes = fullStream.ToArray();
var fragmentedStream = new FragmentedMemoryStream(completeBytes, 2); // Read 2 bytes at a time
// Act
(var type, byte[] payload) = await _handler.ReadMessageAsync(fragmentedStream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("fragmented");
}
// Helper Stream for fragmentation test
private class FragmentedMemoryStream : MemoryStream
{
private readonly int _chunkSize;
/// <summary>
/// Initializes a new instance of the <see cref="ProtocolTests"/> class.
/// Initializes a new instance of the <see cref="FragmentedMemoryStream" /> class.
/// </summary>
public ProtocolTests()
/// <param name="buffer">The backing stream buffer.</param>
/// <param name="chunkSize">The maximum bytes returned per read.</param>
public FragmentedMemoryStream(byte[] buffer, int chunkSize) : base(buffer)
{
_handler = new ProtocolHandler(NullLogger<ProtocolHandler>.Instance);
_chunkSize = chunkSize;
}
/// <summary>
/// Verifies a plain message can be written and read without transformation.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWorks_WithPlainMessage()
/// <inheritdoc />
public override async Task<int> ReadAsync(byte[] buffer, int offset, int count,
CancellationToken cancellationToken)
{
// Arrange
var stream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "node-1", AuthToken = "token" };
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, null);
stream.Position = 0; // Reset for reading
var (type, payload) = await _handler.ReadMessageAsync(stream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("node-1");
decoded.AuthToken.ShouldBe("token");
// Force read to be max _chunkSize, even if more is requested
int toRead = Math.Min(count, _chunkSize);
return await base.ReadAsync(buffer, offset, toRead, cancellationToken);
}
/// <summary>
/// Verifies a compressed message can be written and read successfully.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithCompression()
{
// Arrange
var stream = new MemoryStream();
// Create a large message to trigger compression logic (threshold is small but let's be safe)
var largeData = string.Join("", Enumerable.Repeat("ABCDEF0123456789", 100));
var message = new HandshakeRequest { NodeId = largeData, AuthToken = "token" };
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, null);
stream.Position = 0;
var (type, payload) = await _handler.ReadMessageAsync(stream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe(largeData);
}
/// <summary>
/// Verifies an encrypted message can be written and read successfully.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithEncryption()
{
// Arrange
var stream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "secure-node", AuthToken = "secure-token" };
// Mock CipherState
var key = new byte[32]; // 256-bit key
new Random().NextBytes(key);
var cipherState = new CipherState(key, key); // Encrypt and Decrypt with same key for loopback
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, cipherState);
stream.Position = 0;
var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("secure-node");
}
/// <summary>
/// Verifies a message can be round-tripped when both compression and encryption are enabled.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithEncryption_And_Compression()
{
// Arrange
var stream = new MemoryStream();
var largeData = string.Join("", Enumerable.Repeat("SECURECOMPRESSION", 100));
var message = new HandshakeRequest { NodeId = largeData };
var key = new byte[32];
new Random().NextBytes(key);
var cipherState = new CipherState(key, key);
// Act: Compress THEN Encrypt
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, cipherState);
stream.Position = 0;
// Verify wire encryption (should be MessageType.SecureEnv)
// But ReadMessageAsync abstracts this away.
// We can peek at the stream if we want, but let's trust ReadMessageAsync handles it.
var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe(largeData);
}
/// <summary>
/// Verifies that message reads succeed when bytes arrive in small fragments.
/// </summary>
[Fact]
public async Task ReadMessage_ShouldHandle_Fragmentation()
{
// Arrange
var fullStream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "fragmented" };
await _handler.SendMessageAsync(fullStream, MessageType.HandshakeReq, message, false, null);
byte[] completeBytes = fullStream.ToArray();
var fragmentedStream = new FragmentedMemoryStream(completeBytes, chunkSize: 2); // Read 2 bytes at a time
// Act
var (type, payload) = await _handler.ReadMessageAsync(fragmentedStream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("fragmented");
}
// Helper Stream for fragmentation test
private class FragmentedMemoryStream : MemoryStream
{
private readonly int _chunkSize;
/// <summary>
/// Initializes a new instance of the <see cref="FragmentedMemoryStream"/> class.
/// </summary>
/// <param name="buffer">The backing stream buffer.</param>
/// <param name="chunkSize">The maximum bytes returned per read.</param>
public FragmentedMemoryStream(byte[] buffer, int chunkSize) : base(buffer)
{
_chunkSize = chunkSize;
}
/// <inheritdoc />
public override async Task<int> ReadAsync(byte[] buffer, int offset, int count, System.Threading.CancellationToken cancellationToken)
{
// Force read to be max _chunkSize, even if more is requested
int toRead = Math.Min(count, _chunkSize);
return await base.ReadAsync(buffer, offset, toRead, cancellationToken);
}
}
}
}
}
}

View File

@@ -1,177 +1,219 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class SecureHandshakeTests
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Network.Security;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class SecureHandshakeTests
{
/// <summary>
/// Verifies handshake negotiation succeeds between initiator and responder services.
/// </summary>
[Fact]
public async Task Handshake_Should_Succeed_Between_Two_Services()
{
/// <summary>
/// Verifies handshake negotiation succeeds between initiator and responder services.
/// </summary>
[Fact]
public async Task Handshake_Should_Succeed_Between_Two_Services()
// Arrange
var clientStream = new PipeStream();
var serverStream = new PipeStream();
// Client writes to clientStream, server reads from clientStream
// Server writes to serverStream, client reads from serverStream
var clientSocket = new DuplexStream(serverStream, clientStream); // Read from server, Write to client
var serverSocket = new DuplexStream(clientStream, serverStream); // Read from client, Write to server
var clientService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
var serverService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
// Act
var clientTask = clientService.HandshakeAsync(clientSocket, true, "client", cts.Token);
var serverTask = serverService.HandshakeAsync(serverSocket, false, "server", cts.Token);
await Task.WhenAll(clientTask, serverTask);
// Assert
var clientState = clientTask.Result;
var serverState = serverTask.Result;
clientState.ShouldNotBeNull();
serverState.ShouldNotBeNull();
// Keys should match (Symmetric)
clientState!.EncryptKey.ShouldBe(serverState!.DecryptKey);
clientState.DecryptKey.ShouldBe(serverState.EncryptKey);
}
// Simulates a pipe. Writes go to buffer, Reads drain buffer.
private class SimplexStream : MemoryStream
{
// Simple approach: Use one MemoryStream as a shared buffer?
// No, MemoryStream is not thread safe for concurrent Read/Write in this pipe manner really.
// Better to use a producer/consumer stream but for simplicity let's use a basic blocking queue logic or just wait.
// Actually, for unit tests, strictly ordered operations are better. But handshake is interactive.
// We need a proper pipe.
}
// Let's use a simple PipeStream implementation using SemaphoreSlim for sync
private class PipeStream : Stream
{
private readonly MemoryStream _buffer = new();
private readonly object _lock = new();
private readonly SemaphoreSlim _readSemaphore = new(0);
/// <inheritdoc />
public override bool CanRead => true;
/// <inheritdoc />
public override bool CanSeek => false;
/// <inheritdoc />
public override bool CanWrite => true;
/// <inheritdoc />
public override long Length => _buffer.Length;
/// <inheritdoc />
public override long Position
{
// Arrange
var clientStream = new PipeStream();
var serverStream = new PipeStream();
get => _buffer.Position;
set => throw new NotSupportedException();
}
// Client writes to clientStream, server reads from clientStream
// Server writes to serverStream, client reads from serverStream
var clientSocket = new DuplexStream(serverStream, clientStream); // Read from server, Write to client
var serverSocket = new DuplexStream(clientStream, serverStream); // Read from client, Write to server
var clientService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
var serverService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
// Act
var clientTask = clientService.HandshakeAsync(clientSocket, isInitiator: true, myNodeId: "client", token: cts.Token);
var serverTask = serverService.HandshakeAsync(serverSocket, isInitiator: false, myNodeId: "server", token: cts.Token);
await Task.WhenAll(clientTask, serverTask);
// Assert
var clientState = clientTask.Result;
var serverState = serverTask.Result;
clientState.ShouldNotBeNull();
serverState.ShouldNotBeNull();
// Keys should match (Symmetric)
clientState!.EncryptKey.ShouldBe(serverState!.DecryptKey);
clientState.DecryptKey.ShouldBe(serverState.EncryptKey);
}
// Simulates a pipe. Writes go to buffer, Reads drain buffer.
class SimplexStream : MemoryStream
{
// Simple approach: Use one MemoryStream as a shared buffer?
// No, MemoryStream is not thread safe for concurrent Read/Write in this pipe manner really.
// Better to use a producer/consumer stream but for simplicity let's use a basic blocking queue logic or just wait.
// Actually, for unit tests, strictly ordered operations are better. But handshake is interactive.
// We need a proper pipe.
}
// Let's use a simple PipeStream implementation using SemaphoreSlim for sync
class PipeStream : Stream
/// <inheritdoc />
public override void Flush()
{
private readonly MemoryStream _buffer = new MemoryStream();
private readonly SemaphoreSlim _readSemaphore = new SemaphoreSlim(0);
private readonly object _lock = new object();
}
/// <inheritdoc />
public override bool CanRead => true;
/// <inheritdoc />
public override bool CanSeek => false;
/// <inheritdoc />
public override bool CanWrite => true;
/// <inheritdoc />
public override long Length => _buffer.Length;
/// <inheritdoc />
public override long Position { get => _buffer.Position; set => throw new NotSupportedException(); }
/// <inheritdoc />
public override int Read(byte[] buffer, int offset, int count)
{
throw new NotImplementedException("Use Async");
}
/// <inheritdoc />
public override void Flush() { }
/// <inheritdoc />
public override int Read(byte[] buffer, int offset, int count) => throw new NotImplementedException("Use Async");
/// <inheritdoc />
public override async Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
/// <inheritdoc />
public override async Task<int> ReadAsync(byte[] buffer, int offset, int count,
CancellationToken cancellationToken)
{
await _readSemaphore.WaitAsync(cancellationToken);
lock (_lock)
{
await _readSemaphore.WaitAsync(cancellationToken);
lock (_lock)
{
_buffer.Position = 0;
int read = _buffer.Read(buffer, offset, count);
_buffer.Position = 0;
int read = _buffer.Read(buffer, offset, count);
// Compact buffer (inefficient but works for unit tests)
byte[] remaining = _buffer.ToArray().Skip(read).ToArray();
_buffer.SetLength(0);
_buffer.Write(remaining, 0, remaining.Length);
// Compact buffer (inefficient but works for unit tests)
byte[] remaining = _buffer.ToArray().Skip(read).ToArray();
_buffer.SetLength(0);
_buffer.Write(remaining, 0, remaining.Length);
if (_buffer.Length > 0) _readSemaphore.Release(); // Signal if data remains
if (_buffer.Length > 0) _readSemaphore.Release(); // Signal if data remains
return read;
}
return read;
}
}
/// <inheritdoc />
public override long Seek(long offset, SeekOrigin origin)
{
throw new NotSupportedException();
}
/// <inheritdoc />
public override void SetLength(long value)
{
throw new NotSupportedException();
}
/// <inheritdoc />
public override void Write(byte[] buffer, int offset, int count)
{
lock (_lock)
{
long pos = _buffer.Position;
_buffer.Seek(0, SeekOrigin.End);
_buffer.Write(buffer, offset, count);
_buffer.Position = pos;
}
/// <inheritdoc />
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
/// <inheritdoc />
public override void SetLength(long value) => throw new NotSupportedException();
/// <inheritdoc />
public override void Write(byte[] buffer, int offset, int count)
{
lock (_lock)
{
long pos = _buffer.Position;
_buffer.Seek(0, SeekOrigin.End);
_buffer.Write(buffer, offset, count);
_buffer.Position = pos;
}
_readSemaphore.Release();
}
}
class DuplexStream : Stream
{
private readonly Stream _readSource;
private readonly Stream _writeTarget;
/// <summary>
/// Initializes a new instance of the <see cref="DuplexStream"/> class.
/// </summary>
/// <param name="readSource">The underlying stream used for read operations.</param>
/// <param name="writeTarget">The underlying stream used for write operations.</param>
public DuplexStream(Stream readSource, Stream writeTarget)
{
_readSource = readSource;
_writeTarget = writeTarget;
}
/// <inheritdoc />
public override bool CanRead => true;
/// <inheritdoc />
public override bool CanSeek => false;
/// <inheritdoc />
public override bool CanWrite => true;
/// <inheritdoc />
public override long Length => 0;
/// <inheritdoc />
public override long Position { get => 0; set { } }
/// <inheritdoc />
public override void Flush() => _writeTarget.Flush();
/// <inheritdoc />
public override int Read(byte[] buffer, int offset, int count) => _readSource.Read(buffer, offset, count);
/// <inheritdoc />
public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
=> _readSource.ReadAsync(buffer, offset, count, cancellationToken);
/// <inheritdoc />
public override void Write(byte[] buffer, int offset, int count) => _writeTarget.Write(buffer, offset, count);
/// <inheritdoc />
public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
=> _writeTarget.WriteAsync(buffer, offset, count, cancellationToken);
/// <inheritdoc />
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
/// <inheritdoc />
public override void SetLength(long value) => throw new NotSupportedException();
_readSemaphore.Release();
}
}
}
private class DuplexStream : Stream
{
    private readonly Stream _readSource;
    private readonly Stream _writeTarget;

    /// <summary>
    ///     Initializes a new instance of the <see cref="DuplexStream" /> class,
    ///     pairing one stream for reads with a second stream for writes.
    /// </summary>
    /// <param name="readSource">The underlying stream used for read operations.</param>
    /// <param name="writeTarget">The underlying stream used for write operations.</param>
    public DuplexStream(Stream readSource, Stream writeTarget)
    {
        _readSource = readSource;
        _writeTarget = writeTarget;
    }

    /// <inheritdoc />
    public override bool CanRead => true;

    /// <inheritdoc />
    public override bool CanSeek => false;

    /// <inheritdoc />
    public override bool CanWrite => true;

    /// <inheritdoc />
    public override long Length => 0;

    /// <inheritdoc />
    public override long Position
    {
        get => 0;
        set { }
    }

    /// <inheritdoc />
    public override void Flush() => _writeTarget.Flush();

    /// <inheritdoc />
    public override int Read(byte[] buffer, int offset, int count) =>
        _readSource.Read(buffer, offset, count);

    /// <inheritdoc />
    public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) =>
        _readSource.ReadAsync(buffer, offset, count, cancellationToken);

    /// <inheritdoc />
    public override void Write(byte[] buffer, int offset, int count) =>
        _writeTarget.Write(buffer, offset, count);

    /// <inheritdoc />
    public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) =>
        _writeTarget.WriteAsync(buffer, offset, count, cancellationToken);

    /// <inheritdoc />
    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();

    /// <inheritdoc />
    public override void SetLength(long value) => throw new NotSupportedException();
}
}

View File

@@ -1,287 +1,286 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using System.Reflection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class SnapshotReconnectRegressionTests
{
// Subclass to expose private method
private class TestableSyncOrchestrator : SyncOrchestrator
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class SnapshotReconnectRegressionTests
{
/// <summary>
///     Builds an <see cref="ISnapshotMetadataStore" /> substitute that reports no stored
///     snapshots: metadata and hash lookups return null, the listing returns an empty array.
/// </summary>
private static ISnapshotMetadataStore CreateSnapshotMetadataStore()
{
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
snapshotMetadataStore.GetSnapshotMetadataAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((SnapshotMetadata?)null);
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((string?)null);
snapshotMetadataStore.GetAllSnapshotMetadataAsync(Arg.Any<CancellationToken>())
.Returns(Array.Empty<SnapshotMetadata>());
return snapshotMetadataStore;
}
/// <summary>
///     Builds an <see cref="ISnapshotService" /> substitute whose create, replace, and merge
///     operations all complete immediately without touching any stream.
/// </summary>
private static ISnapshotService CreateSnapshotService()
{
var snapshotService = Substitute.For<ISnapshotService>();
snapshotService.CreateSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.ReplaceDatabaseAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.MergeSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return snapshotService;
}
/// <summary>
///     Builds an <see cref="IDocumentStore" /> substitute interested in "Users" and
///     "TodoLists": reads return null/empty, writes and deletes report success, and
///     the single-document merge echoes its input back.
/// </summary>
private static IDocumentStore CreateDocumentStore()
{
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(["Users", "TodoLists"]);
documentStore.GetDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((Document?)null);
documentStore.GetDocumentsByCollectionAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.GetDocumentsAsync(Arg.Any<List<(string Collection, string Key)>>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.PutDocumentAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.InsertBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.UpdateBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteBatchDocumentsAsync(Arg.Any<IEnumerable<string>>(), Arg.Any<CancellationToken>())
.Returns(true);
// Merge echoes the incoming document so callers see their own payload.
documentStore.MergeAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(ci => ci.ArgAt<Document>(0));
documentStore.DropAsync(Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
documentStore.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<Document>());
documentStore.ImportAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
documentStore.MergeAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return documentStore;
}
/// <summary>
///     Builds an <see cref="IOplogStore" /> substitute whose head-hash lookup returns the
///     supplied value and whose batch apply completes immediately.
/// </summary>
/// <param name="localHeadHash">The hash the store should report as its last entry hash.</param>
private static IOplogStore CreateOplogStore(string? localHeadHash)
{
var oplogStore = Substitute.For<IOplogStore>();
oplogStore.GetLastEntryHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(localHeadHash);
oplogStore.ApplyBatchAsync(Arg.Any<IEnumerable<OplogEntry>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return oplogStore;
}
/// <summary>
///     Builds a <see cref="TcpPeerClient" /> substitute whose
///     <c>GetChainRangeAsync</c> always faults with <see cref="SnapshotRequiredException" />.
/// </summary>
private static TcpPeerClient CreateSnapshotRequiredClient()
{
var logger = Substitute.For<ILogger<TcpPeerClient>>();
// Constructor arguments: endpoint, logger, handshake service, telemetry service.
var client = Substitute.For<TcpPeerClient>(
"127.0.0.1:0",
logger,
null,
null);
client.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(_ => Task.FromException<List<OplogEntry>>(new SnapshotRequiredException()));
return client;
}
/// <summary>
///     Builds an <see cref="IDiscoveryService" /> substitute that reports no active peers
///     and whose start/stop operations complete immediately.
/// </summary>
private static IDiscoveryService CreateDiscovery()
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
discovery.Start().Returns(Task.CompletedTask);
discovery.Stop().Returns(Task.CompletedTask);
return discovery;
}
/// <summary>
///     Builds an <see cref="IPeerNodeConfigurationProvider" /> substitute that always
///     returns a configuration whose node identifier is "local".
/// </summary>
private static IPeerNodeConfigurationProvider CreateConfig()
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "local" });
return configProvider;
}
/// <summary>
///     Builds an <see cref="IPeerOplogConfirmationStore" /> substitute: registration,
///     confirmation updates, removal, import and merge complete immediately, and all
///     query methods return empty collections.
/// </summary>
private static IPeerOplogConfirmationStore CreatePeerOplogConfirmationStore()
{
var store = Substitute.For<IPeerOplogConfirmationStore>();
store.EnsurePeerRegisteredAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<PeerType>(),
Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.UpdateConfirmationAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<string>(),
Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.GetConfirmationsAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.GetConfirmationsForPeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
store.RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<string>());
store.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.ImportAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.MergeAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return store;
}
/// <summary>
/// Verifies that gap recovery is skipped when an inbound entry already matches the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldSkipGapRecovery_WhenEntryMatchesSnapshotBoundary()
{
// Arrange
var oplogStore = CreateOplogStore("snapshot-boundary-hash");
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
CreateDiscovery(),
oplogStore,
CreateDocumentStore(),
snapshotMetadataStore,
snapshotService,
CreateConfig(),
CreatePeerOplogConfirmationStore());
using var client = CreateSnapshotRequiredClient();
// Incoming entry that connects to snapshot boundary
var entries = new List<OplogEntry>
{
/// <summary>
/// Initializes a new instance of the <see cref="TestableSyncOrchestrator"/> class.
/// </summary>
/// <param name="discovery">The discovery service.</param>
/// <param name="oplogStore">The oplog store.</param>
/// <param name="documentStore">The document store.</param>
/// <param name="snapshotMetadataStore">The snapshot metadata store.</param>
/// <param name="snapshotService">The snapshot service.</param>
/// <param name="peerNodeConfigurationProvider">The peer node configuration provider.</param>
/// <param name="peerOplogConfirmationStore">The peer oplog confirmation store.</param>
public TestableSyncOrchestrator(
IDiscoveryService discovery,
IOplogStore oplogStore,
IDocumentStore documentStore,
ISnapshotMetadataStore snapshotMetadataStore,
ISnapshotService snapshotService,
IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
IPeerOplogConfirmationStore peerOplogConfirmationStore)
: base(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
peerNodeConfigurationProvider,
NullLoggerFactory.Instance,
peerOplogConfirmationStore)
new(
"col", "key", OperationType.Put, null,
new HlcTimestamp(100, 1, "remote-node"),
"snapshot-boundary-hash" // PreviousHash matches SnapshotHash!
)
};
// Act
string result = await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None);
// Assert
result.ShouldBe("Success");
await client.DidNotReceive()
.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that gap recovery is attempted when the inbound entry does not match the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldTryRecovery_WhenSnapshotMismatch()
{
// Arrange
var oplogStore = CreateOplogStore("some-old-hash");
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
CreateDiscovery(),
oplogStore,
CreateDocumentStore(),
snapshotMetadataStore,
snapshotService,
CreateConfig(),
CreatePeerOplogConfirmationStore());
using var client = CreateSnapshotRequiredClient();
var entries = new List<OplogEntry>
{
new(
"col", "key", OperationType.Put, null,
new HlcTimestamp(100, 1, "remote-node"),
"different-hash" // Mismatch!
)
};
// Act & Assert
// When gap recovery triggers, the client throws SnapshotRequiredException.
// SyncOrchestrator catches SnapshotRequiredException and re-throws it to trigger full sync
// So we expect SnapshotRequiredException to bubble up (wrapped in TargetInvocationException/AggregateException if not unwrapped by helper)
await Should.ThrowAsync<SnapshotRequiredException>(async () =>
await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None));
await client.Received(1).GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
// Subclass to expose private method
private class TestableSyncOrchestrator : SyncOrchestrator
{
/// <summary>
///     Initializes a new instance of the <see cref="TestableSyncOrchestrator" /> class.
///     All collaborators are forwarded unchanged to the base <see cref="SyncOrchestrator" />;
///     logging goes to <see cref="NullLoggerFactory" />.Instance so tests stay silent.
/// </summary>
/// <param name="discovery">The discovery service.</param>
/// <param name="oplogStore">The oplog store.</param>
/// <param name="documentStore">The document store.</param>
/// <param name="snapshotMetadataStore">The snapshot metadata store.</param>
/// <param name="snapshotService">The snapshot service.</param>
/// <param name="peerNodeConfigurationProvider">The peer node configuration provider.</param>
/// <param name="peerOplogConfirmationStore">The peer oplog confirmation store.</param>
public TestableSyncOrchestrator(
IDiscoveryService discovery,
IOplogStore oplogStore,
IDocumentStore documentStore,
ISnapshotMetadataStore snapshotMetadataStore,
ISnapshotService snapshotService,
IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
IPeerOplogConfirmationStore peerOplogConfirmationStore)
: base(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
peerNodeConfigurationProvider,
NullLoggerFactory.Instance,
peerOplogConfirmationStore)
{
}
/// <summary>
/// Invokes the inbound batch processing path through reflection for regression testing.
/// </summary>
/// <param name="client">The peer client.</param>
/// <param name="peerNodeId">The peer node identifier.</param>
/// <param name="changes">The incoming oplog changes.</param>
/// <param name="token">The cancellation token.</param>
public async Task<string> TestProcessInboundBatchAsync(
TcpPeerClient client,
string peerNodeId,
IList<OplogEntry> changes,
CancellationToken token)
{
// Reflection to invoke private method since it's private not protected
var method = typeof(SyncOrchestrator).GetMethod(
"ProcessInboundBatchAsync",
BindingFlags.NonPublic | BindingFlags.Instance);
if (method == null)
throw new InvalidOperationException("ProcessInboundBatchAsync method not found.");
try
{
var task = (Task)method.Invoke(this, new object[] { client, peerNodeId, changes, token })!;
await task.ConfigureAwait(false);
// Access .Result via reflection because generic type is private
var resultProp = task.GetType().GetProperty("Result");
object? result = resultProp?.GetValue(task);
return result?.ToString() ?? "null";
}
catch (TargetInvocationException ex)
{
if (ex.InnerException != null) throw ex.InnerException;
throw;
}
/// <summary>
///     Invokes the inbound batch processing path through reflection for regression testing.
///     The target method, <c>ProcessInboundBatchAsync</c>, is private on
///     <see cref="SyncOrchestrator" />, so it is located via reflection and its result is
///     read back the same way.
/// </summary>
/// <param name="client">The peer client.</param>
/// <param name="peerNodeId">The peer node identifier.</param>
/// <param name="changes">The incoming oplog changes.</param>
/// <param name="token">The cancellation token.</param>
/// <returns>The stringified result of the private method, or "null" when it produced none.</returns>
public async Task<string> TestProcessInboundBatchAsync(
TcpPeerClient client,
string peerNodeId,
IList<OplogEntry> changes,
CancellationToken token)
{
// Reflection to invoke private method since it's private not protected
var method = typeof(SyncOrchestrator).GetMethod(
"ProcessInboundBatchAsync",
System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance);
if (method == null)
throw new InvalidOperationException("ProcessInboundBatchAsync method not found.");
try
{
var task = (Task)method.Invoke(this, new object[] { client, peerNodeId, changes, token })!;
await task.ConfigureAwait(false);
// Access .Result via reflection because generic type is private
var resultProp = task.GetType().GetProperty("Result");
var result = resultProp?.GetValue(task);
return result?.ToString() ?? "null";
}
catch (System.Reflection.TargetInvocationException ex)
{
// Unwrap so callers observe the orchestrator's own exception type.
if (ex.InnerException != null) throw ex.InnerException;
throw;
}
}
}
/// <summary>
///     Builds an <see cref="ISnapshotMetadataStore" /> substitute that reports no stored
///     snapshots: metadata and hash lookups return null, the listing returns an empty array.
/// </summary>
private static ISnapshotMetadataStore CreateSnapshotMetadataStore()
{
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
snapshotMetadataStore.GetSnapshotMetadataAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((SnapshotMetadata?)null);
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((string?)null);
snapshotMetadataStore.GetAllSnapshotMetadataAsync(Arg.Any<CancellationToken>())
.Returns(Array.Empty<SnapshotMetadata>());
return snapshotMetadataStore;
}
/// <summary>
///     Builds an <see cref="ISnapshotService" /> substitute whose create, replace, and merge
///     operations all complete immediately without touching any stream.
/// </summary>
private static ISnapshotService CreateSnapshotService()
{
var snapshotService = Substitute.For<ISnapshotService>();
snapshotService.CreateSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.ReplaceDatabaseAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.MergeSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return snapshotService;
}
/// <summary>
///     Builds an <see cref="IDocumentStore" /> substitute interested in "Users" and
///     "TodoLists": reads return null/empty, writes and deletes report success, and
///     the single-document merge echoes its input back.
/// </summary>
private static IDocumentStore CreateDocumentStore()
{
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(["Users", "TodoLists"]);
documentStore.GetDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((Document?)null);
documentStore.GetDocumentsByCollectionAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.GetDocumentsAsync(Arg.Any<List<(string Collection, string Key)>>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.PutDocumentAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.InsertBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.UpdateBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteBatchDocumentsAsync(Arg.Any<IEnumerable<string>>(), Arg.Any<CancellationToken>())
.Returns(true);
// Merge echoes the incoming document so callers see their own payload.
documentStore.MergeAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(ci => ci.ArgAt<Document>(0));
documentStore.DropAsync(Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
documentStore.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<Document>());
documentStore.ImportAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
documentStore.MergeAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
return documentStore;
}
/// <summary>
///     Builds an <see cref="IOplogStore" /> substitute whose head-hash lookup returns the
///     supplied value and whose batch apply completes immediately.
/// </summary>
/// <param name="localHeadHash">The hash the store should report as its last entry hash.</param>
private static IOplogStore CreateOplogStore(string? localHeadHash)
{
var oplogStore = Substitute.For<IOplogStore>();
oplogStore.GetLastEntryHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(localHeadHash);
oplogStore.ApplyBatchAsync(Arg.Any<IEnumerable<OplogEntry>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return oplogStore;
}
/// <summary>
///     Builds a <see cref="TcpPeerClient" /> substitute whose
///     <c>GetChainRangeAsync</c> always faults with <see cref="SnapshotRequiredException" />.
/// </summary>
private static TcpPeerClient CreateSnapshotRequiredClient()
{
var logger = Substitute.For<ILogger<TcpPeerClient>>();
// Constructor arguments: endpoint, logger, handshake service, telemetry service.
var client = Substitute.For<TcpPeerClient>(
"127.0.0.1:0",
logger,
(IPeerHandshakeService?)null,
(INetworkTelemetryService?)null);
client.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(_ => Task.FromException<List<OplogEntry>>(new SnapshotRequiredException()));
return client;
}
/// <summary>
///     Builds an <see cref="IDiscoveryService" /> substitute that reports no active peers
///     and whose start/stop operations complete immediately.
/// </summary>
private static IDiscoveryService CreateDiscovery()
{
    var discoveryService = Substitute.For<IDiscoveryService>();
    discoveryService.Start().Returns(Task.CompletedTask);
    discoveryService.Stop().Returns(Task.CompletedTask);
    discoveryService.GetActivePeers().Returns(Array.Empty<PeerNode>());
    return discoveryService;
}
/// <summary>
///     Builds an <see cref="IPeerNodeConfigurationProvider" /> substitute that always
///     returns a configuration whose node identifier is "local".
/// </summary>
private static IPeerNodeConfigurationProvider CreateConfig()
{
    var provider = Substitute.For<IPeerNodeConfigurationProvider>();
    var configuration = new PeerNodeConfiguration { NodeId = "local" };
    provider.GetConfiguration().Returns(configuration);
    return provider;
}
/// <summary>
///     Builds an <see cref="IPeerOplogConfirmationStore" /> substitute: registration,
///     confirmation updates, removal, import and merge complete immediately, and all
///     query methods return empty collections.
/// </summary>
private static IPeerOplogConfirmationStore CreatePeerOplogConfirmationStore()
{
var store = Substitute.For<IPeerOplogConfirmationStore>();
store.EnsurePeerRegisteredAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<PeerType>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.UpdateConfirmationAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.GetConfirmationsAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.GetConfirmationsForPeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<string>());
store.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.ImportAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.MergeAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
return store;
}
/// <summary>
///     Verifies that gap recovery is skipped when an inbound entry already matches the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldSkipGapRecovery_WhenEntryMatchesSnapshotBoundary()
{
// Arrange
// Local oplog head and the stored snapshot hash carry the same value, so the
// inbound entry below chains directly onto the snapshot boundary.
var oplogStore = CreateOplogStore("snapshot-boundary-hash");
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
CreateDiscovery(),
oplogStore,
CreateDocumentStore(),
snapshotMetadataStore,
snapshotService,
CreateConfig(),
CreatePeerOplogConfirmationStore());
using var client = CreateSnapshotRequiredClient();
// Incoming entry that connects to snapshot boundary
var entries = new List<OplogEntry>
{
new OplogEntry(
"col", "key", OperationType.Put, null,
new HlcTimestamp(100, 1, "remote-node"),
"snapshot-boundary-hash" // PreviousHash matches SnapshotHash!
)
};
// Act
var result = await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None);
// Assert
result.ShouldBe("Success");
// No chain-range fetch means gap recovery never ran.
await client.DidNotReceive().GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
/// <summary>
///     Verifies that gap recovery is attempted when the inbound entry does not match the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldTryRecovery_WhenSnapshotMismatch()
{
// Arrange
// Local head ("some-old-hash") differs from both the entry's PreviousHash and the
// stored snapshot hash, forcing the orchestrator into gap recovery.
var oplogStore = CreateOplogStore("some-old-hash");
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
CreateDiscovery(),
oplogStore,
CreateDocumentStore(),
snapshotMetadataStore,
snapshotService,
CreateConfig(),
CreatePeerOplogConfirmationStore());
using var client = CreateSnapshotRequiredClient();
var entries = new List<OplogEntry>
{
new OplogEntry(
"col", "key", OperationType.Put, null,
new HlcTimestamp(100, 1, "remote-node"),
"different-hash" // Mismatch!
)
};
// Act & Assert
// When gap recovery triggers, the client throws SnapshotRequiredException.
// SyncOrchestrator catches SnapshotRequiredException and re-throws it to trigger full sync
// So we expect SnapshotRequiredException to bubble up (wrapped in TargetInvocationException/AggregateException if not unwrapped by helper)
await Should.ThrowAsync<SnapshotRequiredException>(async () =>
await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None));
await client.Received(1).GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
}
}
}
}

View File

@@ -1,9 +1,4 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
@@ -13,7 +8,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class SyncOrchestratorConfirmationTests
{
/// <summary>
/// Verifies that merged peers are registered and the local node is skipped.
/// Verifies that merged peers are registered and the local node is skipped.
/// </summary>
[Fact]
public async Task EnsurePeersRegisteredAsync_ShouldRegisterMergedPeers_AndSkipLocalNode()
@@ -25,8 +20,8 @@ public class SyncOrchestratorConfirmationTests
var now = DateTimeOffset.UtcNow;
var discoveredPeers = new List<PeerNode>
{
new("local", "127.0.0.1:9000", now, PeerType.LanDiscovered),
new("peer-a", "10.0.0.1:9000", now, PeerType.LanDiscovered)
new("local", "127.0.0.1:9000", now),
new("peer-a", "10.0.0.1:9000", now)
};
var knownPeers = new List<PeerNode>
@@ -60,7 +55,7 @@ public class SyncOrchestratorConfirmationTests
}
/// <summary>
/// Verifies that a newly discovered node is auto-registered when peer lists are refreshed.
/// Verifies that a newly discovered node is auto-registered when peer lists are refreshed.
/// </summary>
[Fact]
public async Task EnsurePeersRegisteredAsync_WhenNewNodeJoins_ShouldAutoRegisterJoinedNode()
@@ -85,7 +80,7 @@ public class SyncOrchestratorConfirmationTests
var secondDiscovered = new List<PeerNode>
{
new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote),
new("peer-new", "10.0.0.25:9010", now, PeerType.LanDiscovered)
new("peer-new", "10.0.0.25:9010", now)
};
var secondMerged = SyncOrchestrator.BuildMergedPeerList(secondDiscovered, knownPeers, "local");
await orchestrator.EnsurePeersRegisteredAsync(secondMerged, "local", CancellationToken.None);
@@ -98,7 +93,7 @@ public class SyncOrchestratorConfirmationTests
}
/// <summary>
/// Verifies that confirmations advance only for nodes where remote vector-clock entries are at or ahead.
/// Verifies that confirmations advance only for nodes where remote vector-clock entries are at or ahead.
/// </summary>
[Fact]
public async Task AdvanceConfirmationsFromVectorClockAsync_ShouldAdvanceOnlyForRemoteAtOrAhead()
@@ -163,7 +158,7 @@ public class SyncOrchestratorConfirmationTests
}
/// <summary>
/// Verifies that pushed-batch confirmation uses the maximum timestamp and its matching hash.
/// Verifies that pushed-batch confirmation uses the maximum timestamp and its matching hash.
/// </summary>
[Fact]
public async Task AdvanceConfirmationForPushedBatchAsync_ShouldUseMaxTimestampAndHash()
@@ -179,7 +174,8 @@ public class SyncOrchestratorConfirmationTests
CreateEntry("source-1", 110, 5, "hash-110")
};
await orchestrator.AdvanceConfirmationForPushedBatchAsync("peer-1", "source-1", pushedChanges, CancellationToken.None);
await orchestrator.AdvanceConfirmationForPushedBatchAsync("peer-1", "source-1", pushedChanges,
CancellationToken.None);
await confirmationStore.Received(1).UpdateConfirmationAsync(
"peer-1",
@@ -190,7 +186,7 @@ public class SyncOrchestratorConfirmationTests
}
/// <summary>
/// Verifies that no confirmation update occurs when a pushed batch is empty.
/// Verifies that no confirmation update occurs when a pushed batch is empty.
/// </summary>
[Fact]
public async Task AdvanceConfirmationForPushedBatchAsync_ShouldSkipEmptyBatch()
@@ -213,7 +209,8 @@ public class SyncOrchestratorConfirmationTests
Arg.Any<CancellationToken>());
}
private static SyncOrchestrator CreateOrchestrator(IOplogStore oplogStore, IPeerOplogConfirmationStore confirmationStore)
private static SyncOrchestrator CreateOrchestrator(IOplogStore oplogStore,
IPeerOplogConfirmationStore confirmationStore)
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
@@ -243,9 +240,9 @@ public class SyncOrchestratorConfirmationTests
"users",
$"{nodeId}-{wall}-{logic}",
OperationType.Put,
payload: null,
timestamp: new HlcTimestamp(wall, logic, nodeId),
previousHash: string.Empty,
hash: hash);
null,
new HlcTimestamp(wall, logic, nodeId),
string.Empty,
hash);
}
}
}

View File

@@ -1,8 +1,4 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
@@ -12,7 +8,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class SyncOrchestratorMaintenancePruningTests
{
/// <summary>
/// Verifies that mixed peer confirmations produce the safest effective cutoff across peers and sources.
/// Verifies that mixed peer confirmations produce the safest effective cutoff across peers and sources.
/// </summary>
[Fact]
public async Task CalculateEffectiveCutoffAsync_MixedPeerStates_ShouldUseSafestConfirmationAcrossPeers()
@@ -33,16 +29,16 @@ public class SyncOrchestratorMaintenancePruningTests
confirmationStore.GetConfirmationsForPeerAsync("peer-a", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-a", "node-local", wall: 300, logic: 0, isActive: true),
CreateConfirmation("peer-a", "node-secondary", wall: 120, logic: 1, isActive: true),
CreateConfirmation("peer-a", "node-secondary", wall: 500, logic: 0, isActive: false)
CreateConfirmation("peer-a", "node-local", 300, 0, true),
CreateConfirmation("peer-a", "node-secondary", 120, 1, true),
CreateConfirmation("peer-a", "node-secondary", 500, 0, false)
});
confirmationStore.GetConfirmationsForPeerAsync("peer-b", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-b", "node-local", wall: 250, logic: 0, isActive: true),
CreateConfirmation("peer-b", "node-secondary", wall: 180, logic: 0, isActive: true)
CreateConfirmation("peer-b", "node-local", 250, 0, true),
CreateConfirmation("peer-b", "node-secondary", 180, 0, true)
});
var decision = await calculator.CalculateEffectiveCutoffAsync(
@@ -63,7 +59,7 @@ public class SyncOrchestratorMaintenancePruningTests
}
/// <summary>
/// Verifies that removing a peer from tracking immediately restores pruning eligibility.
/// Verifies that removing a peer from tracking immediately restores pruning eligibility.
/// </summary>
[Fact]
public async Task CalculateEffectiveCutoffAsync_RemovingPeerFromTracking_ShouldImmediatelyRestoreEligibility()
@@ -85,7 +81,7 @@ public class SyncOrchestratorMaintenancePruningTests
confirmationStore.GetConfirmationsForPeerAsync("peer-active", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-active", "node-local", wall: 150, logic: 0, isActive: true)
CreateConfirmation("peer-active", "node-local", 150, 0, true)
});
confirmationStore.GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
@@ -108,11 +104,12 @@ public class SyncOrchestratorMaintenancePruningTests
unblockedDecision.EffectiveCutoff.Value.NodeId.ShouldBe("node-local");
await confirmationStore.Received(1).GetConfirmationsForPeerAsync("peer-active", Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive()
.GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that maintenance does not prune when peer confirmation is missing in a two-node topology.
/// Verifies that maintenance does not prune when peer confirmation is missing in a two-node topology.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldNotPruneBeforePeerConfirmation()
@@ -145,7 +142,7 @@ public class SyncOrchestratorMaintenancePruningTests
}
/// <summary>
/// Verifies that maintenance prunes after peer confirmation is available in a two-node topology.
/// Verifies that maintenance prunes after peer confirmation is available in a two-node topology.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldPruneAfterPeerConfirmation()
@@ -194,7 +191,7 @@ public class SyncOrchestratorMaintenancePruningTests
}
/// <summary>
/// Verifies that deprecated-node removal unblocks pruning on a subsequent maintenance run.
/// Verifies that deprecated-node removal unblocks pruning on a subsequent maintenance run.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_DeprecatedNodeRemoval_ShouldUnblockPruning()
@@ -217,7 +214,7 @@ public class SyncOrchestratorMaintenancePruningTests
confirmationStore.GetConfirmationsForPeerAsync("node-active", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("node-active", "node-local", wall: 100, logic: 0, isActive: true)
CreateConfirmation("node-active", "node-local", 100, 0, true)
});
confirmationStore.GetConfirmationsForPeerAsync("node-deprecated", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
@@ -289,4 +286,4 @@ public class SyncOrchestratorMaintenancePruningTests
IsActive = isActive
};
}
}
}

View File

@@ -1,108 +1,103 @@
using System;
using System.IO;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
namespace ZB.MOM.WW.CBDDC.Network.Tests
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class TelemetryTests : IDisposable
{
public class TelemetryTests : IDisposable
{
private readonly string _tempFile;
/// <summary>
/// Initializes a new instance of the <see cref="TelemetryTests"/> class.
/// </summary>
public TelemetryTests()
{
_tempFile = Path.GetTempFileName();
}
/// <summary>
/// Cleans up temporary test artifacts created for telemetry persistence validation.
/// </summary>
public void Dispose()
{
if (File.Exists(_tempFile)) File.Delete(_tempFile);
}
/// <summary>
/// Verifies that telemetry metrics are recorded and persisted to disk.
/// </summary>
[Fact]
public async Task Should_Record_And_Persist_Metrics()
{
// Arrange
using var service = new NetworkTelemetryService(NullLogger<NetworkTelemetryService>.Instance, _tempFile);
private readonly string _tempFile;
// Act
// Record some values for CompressionRatio
service.RecordValue(MetricType.CompressionRatio, 0.5);
service.RecordValue(MetricType.CompressionRatio, 0.7);
// Record time metric
using (var timer = service.StartMetric(MetricType.EncryptionTime))
/// <summary>
/// Initializes a new instance of the <see cref="TelemetryTests" /> class.
/// </summary>
public TelemetryTests()
{
_tempFile = Path.GetTempFileName();
}
/// <summary>
/// Cleans up temporary test artifacts created for telemetry persistence validation.
/// </summary>
public void Dispose()
{
if (File.Exists(_tempFile)) File.Delete(_tempFile);
}
/// <summary>
/// Verifies that telemetry metrics are recorded and persisted to disk.
/// </summary>
[Fact]
public async Task Should_Record_And_Persist_Metrics()
{
// Arrange
using var service = new NetworkTelemetryService(NullLogger<NetworkTelemetryService>.Instance, _tempFile);
// Act
// Record some values for CompressionRatio
service.RecordValue(MetricType.CompressionRatio, 0.5);
service.RecordValue(MetricType.CompressionRatio, 0.7);
// Record time metric
using (var timer = service.StartMetric(MetricType.EncryptionTime))
{
await Task.Delay(10); // Should be > 0 ms
}
// Allow channel to process
await Task.Delay(500);
// Force persist to file
service.ForcePersist();
// Assert
File.Exists(_tempFile).ShouldBeTrue();
var fileInfo = new FileInfo(_tempFile);
fileInfo.Length.ShouldBeGreaterThan(0);
using var fs = File.OpenRead(_tempFile);
using var br = new BinaryReader(fs);
// Header
byte version = br.ReadByte();
version.ShouldBe((byte)1);
long timestamp = br.ReadInt64();
long now = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
timestamp.ShouldBeInRange(now - 5, now + 5);
// Metrics
// We expect all MetricTypes
int typeCount = Enum.GetValues(typeof(MetricType)).Length;
var foundCompression = false;
var foundEncryption = false;
for (var i = 0; i < typeCount; i++)
{
int typeInt = br.ReadInt32();
var type = (MetricType)typeInt;
// 4 Windows per type
for (var w = 0; w < 4; w++)
{
await Task.Delay(10); // Should be > 0 ms
}
int window = br.ReadInt32(); // 60, 300, 600, 1800
double avg = br.ReadDouble();
// Allow channel to process
await Task.Delay(500);
// Force persist to file
service.ForcePersist();
// Assert
File.Exists(_tempFile).ShouldBeTrue();
var fileInfo = new FileInfo(_tempFile);
fileInfo.Length.ShouldBeGreaterThan(0);
using var fs = File.OpenRead(_tempFile);
using var br = new BinaryReader(fs);
// Header
byte version = br.ReadByte();
version.ShouldBe((byte)1);
long timestamp = br.ReadInt64();
var now = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
timestamp.ShouldBeInRange(now - 5, now + 5);
// Metrics
// We expect all MetricTypes
int typeCount = Enum.GetValues(typeof(MetricType)).Length;
bool foundCompression = false;
bool foundEncryption = false;
for (int i = 0; i < typeCount; i++)
{
int typeInt = br.ReadInt32();
var type = (MetricType)typeInt;
// 4 Windows per type
for (int w = 0; w < 4; w++)
if (type == MetricType.CompressionRatio && window == 60)
{
int window = br.ReadInt32(); // 60, 300, 600, 1800
double avg = br.ReadDouble();
// Avg of 0.5 and 0.7 is 0.6
avg.ShouldBe(0.6, 0.001);
foundCompression = true;
}
if (type == MetricType.CompressionRatio && window == 60)
{
// Avg of 0.5 and 0.7 is 0.6
avg.ShouldBe(0.6, 0.001);
foundCompression = true;
}
if (type == MetricType.EncryptionTime && window == 60)
{
avg.ShouldBeGreaterThan(0);
foundEncryption = true;
}
if (type == MetricType.EncryptionTime && window == 60)
{
avg.ShouldBeGreaterThan(0);
foundEncryption = true;
}
}
foundCompression.ShouldBeTrue();
foundEncryption.ShouldBeTrue();
}
foundCompression.ShouldBeTrue();
foundEncryption.ShouldBeTrue();
}
}
}

View File

@@ -1,18 +1,13 @@
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class VectorClockSyncTests
{
/// <summary>
/// Verifies sync pull selection includes only nodes where the remote clock is ahead.
/// Verifies sync pull selection includes only nodes where the remote clock is ahead.
/// </summary>
[Fact]
public async Task VectorClockSync_ShouldPullOnlyNodesWithUpdates()
@@ -32,37 +27,37 @@ public class VectorClockSyncTests
// Add oplog entries for node1 in remote
remoteOplogEntries.Add(new OplogEntry(
"users", "user1", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"Alice\"}"),
JsonSerializer.Deserialize<JsonElement>("{\"name\":\"Alice\"}"),
new HlcTimestamp(150, 2, "node1"), "", "hash1"
));
remoteOplogEntries.Add(new OplogEntry(
"users", "user2", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"Bob\"}"),
JsonSerializer.Deserialize<JsonElement>("{\"name\":\"Bob\"}"),
new HlcTimestamp(200, 5, "node1"), "hash1", "hash2"
));
// Act
var localVC = await localStore.GetVectorClockAsync(default);
var localVC = await localStore.GetVectorClockAsync();
var remoteVC = remoteVectorClock;
var nodesToPull = localVC.GetNodesWithUpdates(remoteVC).ToList();
// Assert
nodesToPull.Count().ShouldBe(1);
nodesToPull.ShouldContain("node1");
// Simulate pull
foreach (var nodeId in nodesToPull)
{
var localTs = localVC.GetTimestamp(nodeId);
var changes = await remoteStore.GetOplogForNodeAfterAsync(nodeId, localTs, default);
var nodesToPull = localVC.GetNodesWithUpdates(remoteVC).ToList();
// Assert
nodesToPull.Count().ShouldBe(1);
nodesToPull.ShouldContain("node1");
// Simulate pull
foreach (string nodeId in nodesToPull)
{
var localTs = localVC.GetTimestamp(nodeId);
var changes = await remoteStore.GetOplogForNodeAfterAsync(nodeId, localTs);
changes.Count().ShouldBe(2);
}
}
/// <summary>
/// Verifies sync push selection includes only nodes where the local clock is ahead.
/// Verifies sync push selection includes only nodes where the local clock is ahead.
/// </summary>
[Fact]
public async Task VectorClockSync_ShouldPushOnlyNodesWithLocalUpdates()
@@ -82,32 +77,32 @@ public class VectorClockSyncTests
// Add oplog entries for node1 in local
localOplogEntries.Add(new OplogEntry(
"users", "user1", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"Charlie\"}"),
JsonSerializer.Deserialize<JsonElement>("{\"name\":\"Charlie\"}"),
new HlcTimestamp(150, 2, "node1"), "", "hash1"
));
// Act
var localVC = localVectorClock;
var remoteVC = remoteVectorClock;
var nodesToPush = localVC.GetNodesToPush(remoteVC).ToList();
// Assert
nodesToPush.Count().ShouldBe(1);
nodesToPush.ShouldContain("node1");
// Simulate push
foreach (var nodeId in nodesToPush)
{
var remoteTs = remoteVC.GetTimestamp(nodeId);
var changes = await localStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, default);
var nodesToPush = localVC.GetNodesToPush(remoteVC).ToList();
// Assert
nodesToPush.Count().ShouldBe(1);
nodesToPush.ShouldContain("node1");
// Simulate push
foreach (string nodeId in nodesToPush)
{
var remoteTs = remoteVC.GetTimestamp(nodeId);
var changes = await localStore.GetOplogForNodeAfterAsync(nodeId, remoteTs);
changes.Count().ShouldBe(1);
}
}
/// <summary>
/// Verifies split-brain clocks result in bidirectional synchronization requirements.
/// Verifies split-brain clocks result in bidirectional synchronization requirements.
/// </summary>
[Fact]
public async Task VectorClockSync_SplitBrain_ShouldSyncBothDirections()
@@ -128,72 +123,72 @@ public class VectorClockSyncTests
partition1OplogEntries.Add(new OplogEntry(
"users", "user1", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"P1User\"}"),
JsonSerializer.Deserialize<JsonElement>("{\"name\":\"P1User\"}"),
new HlcTimestamp(300, 5, "node1"), "", "hash_p1"
));
partition2OplogEntries.Add(new OplogEntry(
"users", "user2", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"P2User\"}"),
JsonSerializer.Deserialize<JsonElement>("{\"name\":\"P2User\"}"),
new HlcTimestamp(400, 8, "node3"), "", "hash_p2"
));
// Act
var vc1 = partition1VectorClock;
var vc2 = partition2VectorClock;
var relation = vc1.CompareTo(vc2);
var partition1NeedsToPull = vc1.GetNodesWithUpdates(vc2).ToList();
var partition1NeedsToPush = vc1.GetNodesToPush(vc2).ToList();
// Assert
relation.ShouldBe(CausalityRelation.Concurrent);
// Partition 1 needs to pull node3
partition1NeedsToPull.Count().ShouldBe(1);
partition1NeedsToPull.ShouldContain("node3");
// Partition 1 needs to push node1 and node2
partition1NeedsToPush.Count.ShouldBe(2);
partition1NeedsToPush.ShouldContain("node1");
partition1NeedsToPush.ShouldContain("node2");
// Verify data can be synced
var changesToPull = await partition2Store.GetOplogForNodeAfterAsync("node3", vc1.GetTimestamp("node3"), default);
changesToPull.Count().ShouldBe(1);
var changesToPush = await partition1Store.GetOplogForNodeAfterAsync("node1", vc2.GetTimestamp("node1"), default);
var relation = vc1.CompareTo(vc2);
var partition1NeedsToPull = vc1.GetNodesWithUpdates(vc2).ToList();
var partition1NeedsToPush = vc1.GetNodesToPush(vc2).ToList();
// Assert
relation.ShouldBe(CausalityRelation.Concurrent);
// Partition 1 needs to pull node3
partition1NeedsToPull.Count().ShouldBe(1);
partition1NeedsToPull.ShouldContain("node3");
// Partition 1 needs to push node1 and node2
partition1NeedsToPush.Count.ShouldBe(2);
partition1NeedsToPush.ShouldContain("node1");
partition1NeedsToPush.ShouldContain("node2");
// Verify data can be synced
var changesToPull = await partition2Store.GetOplogForNodeAfterAsync("node3", vc1.GetTimestamp("node3"));
changesToPull.Count().ShouldBe(1);
var changesToPush = await partition1Store.GetOplogForNodeAfterAsync("node1", vc2.GetTimestamp("node1"));
changesToPush.Count().ShouldBe(1);
}
/// <summary>
/// Verifies no pull or push is required when vector clocks are equal.
/// Verifies no pull or push is required when vector clocks are equal.
/// </summary>
[Fact]
public void VectorClockSync_EqualClocks_ShouldNotSync()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var relation = vc1.CompareTo(vc2);
var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList();
var nodesToPush = vc1.GetNodesToPush(vc2).ToList();
// Assert
relation.ShouldBe(CausalityRelation.Equal);
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var relation = vc1.CompareTo(vc2);
var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList();
var nodesToPush = vc1.GetNodesToPush(vc2).ToList();
// Assert
relation.ShouldBe(CausalityRelation.Equal);
nodesToPull.ShouldBeEmpty();
nodesToPush.ShouldBeEmpty();
}
/// <summary>
/// Verifies a newly observed node is detected as a required pull source.
/// Verifies a newly observed node is detected as a required pull source.
/// </summary>
[Fact]
public async Task VectorClockSync_NewNodeJoins_ShouldBeDetected()
@@ -210,24 +205,24 @@ public class VectorClockSyncTests
newNodeOplogEntries.Add(new OplogEntry(
"users", "user3", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"NewNode\"}"),
JsonSerializer.Deserialize<JsonElement>("{\"name\":\"NewNode\"}"),
new HlcTimestamp(50, 1, "node3"), "", "hash_new"
));
// Act
var existingVC = existingNodeVectorClock;
var newNodeVC = newNodeVectorClock;
var nodesToPull = existingVC.GetNodesWithUpdates(newNodeVC).ToList();
// Assert
nodesToPull.Count().ShouldBe(1);
nodesToPull.ShouldContain("node3");
var changes = await newNodeStore.GetOplogForNodeAfterAsync("node3", existingVC.GetTimestamp("node3"), default);
changes.Count().ShouldBe(1);
}
var nodesToPull = existingVC.GetNodesWithUpdates(newNodeVC).ToList();
// Assert
nodesToPull.Count().ShouldBe(1);
nodesToPull.ShouldContain("node3");
var changes = await newNodeStore.GetOplogForNodeAfterAsync("node3", existingVC.GetTimestamp("node3"));
changes.Count().ShouldBe(1);
}
private static (IOplogStore Store, VectorClock VectorClock, List<OplogEntry> OplogEntries) CreatePeerStore()
{
var vectorClock = new VectorClock();
@@ -248,17 +243,14 @@ public class VectorClockSyncTests
var since = callInfo.ArgAt<HlcTimestamp>(1);
var collections = callInfo.ArgAt<IEnumerable<string>?>(2)?.ToList();
IEnumerable<OplogEntry> query = oplogEntries
var query = oplogEntries
.Where(e => e.Timestamp.NodeId == nodeId && e.Timestamp.CompareTo(since) > 0);
if (collections is { Count: > 0 })
{
query = query.Where(e => collections.Contains(e.Collection));
}
if (collections is { Count: > 0 }) query = query.Where(e => collections.Contains(e.Collection));
return Task.FromResult<IEnumerable<OplogEntry>>(query.OrderBy(e => e.Timestamp).ToList());
});
return (store, vectorClock, oplogEntries);
}
}
}

View File

@@ -1,31 +1,31 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Network.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Network.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Network.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" />
</ItemGroup>
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Network.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Network.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Network.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4"/>
<PackageReference Include="NSubstitute" Version="5.3.0"/>
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1"/>
<PackageReference Include="Shouldly" Version="4.3.0"/>
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4"/>
<PackageReference Include="xunit.v3" Version="3.2.0"/>
</ItemGroup>
<ItemGroup>
<Using Include="Xunit"/>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj"/>
</ItemGroup>
</Project>

View File

@@ -1,30 +1,28 @@
using System.Text.Json;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
using Xunit;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations.
/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations.
/// </summary>
public class BLiteStoreExportImportTests : IDisposable
{
private readonly string _testDbPath;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLiteSnapshotMetadataStore<SampleDbContext> _snapshotMetadataStore;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly string _testDbPath;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteStoreExportImportTests"/> class.
/// Initializes a new instance of the <see cref="BLiteStoreExportImportTests" /> class.
/// </summary>
public BLiteStoreExportImportTests()
{
@@ -33,7 +31,8 @@ public class BLiteStoreExportImportTests : IDisposable
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger<SampleDocumentStore>.Instance);
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
NullLogger<SampleDocumentStore>.Instance);
_snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
_oplogStore = new BLiteOplogStore<SampleDbContext>(
@@ -45,10 +44,42 @@ public class BLiteStoreExportImportTests : IDisposable
_context, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
}
/// <summary>
/// Disposes test resources and removes the temporary database file.
/// </summary>
public void Dispose()
{
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
}
catch
{
}
}
private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId)
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = 5000,
AuthToken = "test-token",
OplogRetentionHours = 24,
MaintenanceIntervalMinutes = 60
});
return configProvider;
}
#region OplogStore Tests
/// <summary>
/// Verifies that exporting oplog entries returns all persisted records.
/// Verifies that exporting oplog entries returns all persisted records.
/// </summary>
[Fact]
public async Task OplogStore_ExportAsync_ReturnsAllEntries()
@@ -69,7 +100,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that importing oplog entries adds them to the store.
/// Verifies that importing oplog entries adds them to the store.
/// </summary>
[Fact]
public async Task OplogStore_ImportAsync_AddsEntries()
@@ -92,7 +123,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that merging oplog entries adds only entries that are not already present.
/// Verifies that merging oplog entries adds only entries that are not already present.
/// </summary>
[Fact]
public async Task OplogStore_MergeAsync_OnlyAddsNewEntries()
@@ -117,7 +148,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that chain range lookup resolves entries by hash and returns the expected range.
/// Verifies that chain range lookup resolves entries by hash and returns the expected range.
/// </summary>
[Fact]
public async Task OplogStore_GetChainRangeAsync_UsesHashLookup()
@@ -126,7 +157,8 @@ public class BLiteStoreExportImportTests : IDisposable
var payload1 = JsonDocument.Parse("{\"test\":\"k1\"}").RootElement;
var payload2 = JsonDocument.Parse("{\"test\":\"k2\"}").RootElement;
var entry1 = new OplogEntry("col1", "k1", OperationType.Put, payload1, new HlcTimestamp(1000, 0, "node1"), "");
var entry2 = new OplogEntry("col1", "k2", OperationType.Put, payload2, new HlcTimestamp(2000, 0, "node1"), entry1.Hash);
var entry2 = new OplogEntry("col1", "k2", OperationType.Put, payload2, new HlcTimestamp(2000, 0, "node1"),
entry1.Hash);
await _oplogStore.AppendOplogEntryAsync(entry1);
await _oplogStore.AppendOplogEntryAsync(entry2);
@@ -141,7 +173,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that dropping the oplog store removes all entries.
/// Verifies that dropping the oplog store removes all entries.
/// </summary>
[Fact]
public async Task OplogStore_DropAsync_ClearsAllEntries()
@@ -164,7 +196,7 @@ public class BLiteStoreExportImportTests : IDisposable
#region PeerConfigurationStore Tests
/// <summary>
/// Verifies that exporting peer configurations returns all persisted peers.
/// Verifies that exporting peer configurations returns all persisted peers.
/// </summary>
[Fact]
public async Task PeerConfigStore_ExportAsync_ReturnsAllPeers()
@@ -183,7 +215,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that importing peer configurations adds peers to the store.
/// Verifies that importing peer configurations adds peers to the store.
/// </summary>
[Fact]
public async Task PeerConfigStore_ImportAsync_AddsPeers()
@@ -204,7 +236,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that merging peer configurations adds only new peers.
/// Verifies that merging peer configurations adds only new peers.
/// </summary>
[Fact]
public async Task PeerConfigStore_MergeAsync_OnlyAddsNewPeers()
@@ -229,7 +261,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that dropping peer configurations removes all peers.
/// Verifies that dropping peer configurations removes all peers.
/// </summary>
[Fact]
public async Task PeerConfigStore_DropAsync_ClearsAllPeers()
@@ -252,7 +284,7 @@ public class BLiteStoreExportImportTests : IDisposable
#region SnapshotMetadataStore Tests
/// <summary>
/// Verifies that exporting snapshot metadata returns all persisted metadata entries.
/// Verifies that exporting snapshot metadata returns all persisted metadata entries.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_ExportAsync_ReturnsAllMetadata()
@@ -273,7 +305,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that importing snapshot metadata adds metadata entries to the store.
/// Verifies that importing snapshot metadata adds metadata entries to the store.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_ImportAsync_AddsMetadata()
@@ -294,7 +326,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that merging snapshot metadata adds only entries with new node identifiers.
/// Verifies that merging snapshot metadata adds only entries with new node identifiers.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_MergeAsync_OnlyAddsNewMetadata()
@@ -318,7 +350,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that dropping snapshot metadata removes all metadata entries.
/// Verifies that dropping snapshot metadata removes all metadata entries.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_DropAsync_ClearsAllMetadata()
@@ -340,7 +372,7 @@ public class BLiteStoreExportImportTests : IDisposable
#region DocumentStore Tests
/// <summary>
/// Verifies that exporting documents returns all persisted documents.
/// Verifies that exporting documents returns all persisted documents.
/// </summary>
[Fact]
public async Task DocumentStore_ExportAsync_ReturnsAllDocuments()
@@ -360,7 +392,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that importing documents adds them to the underlying store.
/// Verifies that importing documents adds them to the underlying store.
/// </summary>
[Fact]
public async Task DocumentStore_ImportAsync_AddsDocuments()
@@ -385,7 +417,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that document merge behavior honors conflict resolution.
/// Verifies that document merge behavior honors conflict resolution.
/// </summary>
[Fact]
public async Task DocumentStore_MergeAsync_UsesConflictResolution()
@@ -414,7 +446,7 @@ public class BLiteStoreExportImportTests : IDisposable
}
/// <summary>
/// Verifies that dropping documents removes all persisted documents.
/// Verifies that dropping documents removes all persisted documents.
/// </summary>
[Fact]
public async Task DocumentStore_DropAsync_ClearsAllDocuments()
@@ -468,38 +500,10 @@ public class BLiteStoreExportImportTests : IDisposable
private static Document CreateDocument<T>(string collection, string key, T entity) where T : class
{
var json = JsonSerializer.Serialize(entity);
string json = JsonSerializer.Serialize(entity);
var content = JsonDocument.Parse(json).RootElement;
return new Document(collection, key, content, new HlcTimestamp(0, 0, ""), false);
}
#endregion
/// <summary>
/// Disposes test resources and removes the temporary database file.
/// </summary>
public void Dispose()
{
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
{
try { File.Delete(_testDbPath); } catch { }
}
}
private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId)
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = 5000,
AuthToken = "test-token",
OplogRetentionHours = 24,
MaintenanceIntervalMinutes = 60
});
return configProvider;
}
}
}

View File

@@ -1,3 +1,3 @@
global using ZB.MOM.WW.CBDDC.Sample.Console;
global using NSubstitute;
global using Shouldly;
global using Shouldly;

View File

@@ -7,12 +7,12 @@ namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class PeerOplogConfirmationStoreTests : IDisposable
{
private readonly string _testDbPath;
private readonly SampleDbContext _context;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _store;
private readonly string _testDbPath;
/// <summary>
/// Initializes a new instance of the <see cref="PeerOplogConfirmationStoreTests"/> class.
/// Initializes a new instance of the <see cref="PeerOplogConfirmationStoreTests" /> class.
/// </summary>
public PeerOplogConfirmationStoreTests()
{
@@ -23,8 +23,22 @@ public class PeerOplogConfirmationStoreTests : IDisposable
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
}
/// <inheritdoc />
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
}
catch
{
}
}
/// <summary>
/// Verifies that ensuring peer registration multiple times remains idempotent.
/// Verifies that ensuring peer registration multiple times remains idempotent.
/// </summary>
[Fact]
public async Task EnsurePeerRegisteredAsync_IsIdempotent()
@@ -41,7 +55,7 @@ public class PeerOplogConfirmationStoreTests : IDisposable
}
/// <summary>
/// Verifies create, update, and read flows for peer oplog confirmations.
/// Verifies create, update, and read flows for peer oplog confirmations.
/// </summary>
[Fact]
public async Task ConfirmationStore_CrudFlow_Works()
@@ -74,7 +88,7 @@ public class PeerOplogConfirmationStoreTests : IDisposable
}
/// <summary>
/// Verifies that removing peer tracking deactivates tracking records for that peer.
/// Verifies that removing peer tracking deactivates tracking records for that peer.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_DeactivatesPeerTracking()
@@ -95,14 +109,4 @@ public class PeerOplogConfirmationStoreTests : IDisposable
peerARows.ShouldNotBeEmpty();
peerARows.All(x => !x.IsActive).ShouldBeTrue();
}
/// <inheritdoc />
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_testDbPath))
{
try { File.Delete(_testDbPath); } catch { }
}
}
}
}

View File

@@ -1,19 +1,12 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class SampleDbContextTests : IDisposable
{
private readonly string _dbPath;
private readonly SampleDbContext _context;
private readonly string _dbPath;
/// <summary>
/// Initializes a new test context backed by a temporary database file.
/// Initializes a new test context backed by a temporary database file.
/// </summary>
public SampleDbContextTests()
{
@@ -22,19 +15,23 @@ public class SampleDbContextTests : IDisposable
}
/// <summary>
/// Releases test resources and removes the temporary database file.
/// Releases test resources and removes the temporary database file.
/// </summary>
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_dbPath))
{
try { File.Delete(_dbPath); } catch { }
}
try
{
File.Delete(_dbPath);
}
catch
{
}
}
/// <summary>
/// Verifies that required collections are initialized in the context.
/// Verifies that required collections are initialized in the context.
/// </summary>
[Fact]
public void Context_ShouldInitializeCollections()
@@ -47,7 +44,7 @@ public class SampleDbContextTests : IDisposable
}
/// <summary>
/// Verifies that inserting a user persists the document.
/// Verifies that inserting a user persists the document.
/// </summary>
[Fact]
public async Task Users_Insert_ShouldPersist()
@@ -74,153 +71,153 @@ public class SampleDbContextTests : IDisposable
}
/// <summary>
/// Verifies that updating a user modifies the existing document.
/// Verifies that updating a user modifies the existing document.
/// </summary>
[Fact]
public async Task Users_Update_ShouldModifyExisting()
{
// Arrange
var user = new User { Id = "user2", Name = "Bob", Age = 25 };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Act
user.Age = 26;
user.Address = new Address { City = "Milan" };
await _context.Users.UpdateAsync(user);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.Users.FindById("user2");
retrieved.ShouldNotBeNull();
retrieved!.Age.ShouldBe(26);
retrieved.Address?.City.ShouldBe("Milan");
}
// Arrange
var user = new User { Id = "user2", Name = "Bob", Age = 25 };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Act
user.Age = 26;
user.Address = new Address { City = "Milan" };
await _context.Users.UpdateAsync(user);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.Users.FindById("user2");
retrieved.ShouldNotBeNull();
retrieved!.Age.ShouldBe(26);
retrieved.Address?.City.ShouldBe("Milan");
}
/// <summary>
/// Verifies that deleting a user removes the document.
/// Verifies that deleting a user removes the document.
/// </summary>
[Fact]
public async Task Users_Delete_ShouldRemove()
{
// Arrange
var user = new User { Id = "user3", Name = "Charlie", Age = 35 };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Act
await _context.Users.DeleteAsync("user3");
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.Users.FindById("user3");
retrieved.ShouldBeNull();
}
// Arrange
var user = new User { Id = "user3", Name = "Charlie", Age = 35 };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Act
await _context.Users.DeleteAsync("user3");
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.Users.FindById("user3");
retrieved.ShouldBeNull();
}
/// <summary>
/// Verifies that inserting a todo list with items persists nested data.
/// Verifies that inserting a todo list with items persists nested data.
/// </summary>
[Fact]
public async Task TodoLists_InsertWithItems_ShouldPersist()
{
// Arrange
var todoList = new TodoList
{
Id = "list1",
Name = "Shopping",
Items = new List<TodoItem>
{
new() { Task = "Buy milk", Completed = false },
new() { Task = "Buy bread", Completed = true }
}
};
// Act
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.TodoLists.FindById("list1");
retrieved.ShouldNotBeNull();
retrieved!.Name.ShouldBe("Shopping");
// Arrange
var todoList = new TodoList
{
Id = "list1",
Name = "Shopping",
Items = new List<TodoItem>
{
new() { Task = "Buy milk", Completed = false },
new() { Task = "Buy bread", Completed = true }
}
};
// Act
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.TodoLists.FindById("list1");
retrieved.ShouldNotBeNull();
retrieved!.Name.ShouldBe("Shopping");
retrieved.Items.Count.ShouldBe(2);
retrieved.Items.ShouldContain(i => i.Task == "Buy milk" && !i.Completed);
retrieved.Items.ShouldContain(i => i.Task == "Buy bread" && i.Completed);
}
retrieved.Items.ShouldContain(i => i.Task == "Buy milk" && !i.Completed);
retrieved.Items.ShouldContain(i => i.Task == "Buy bread" && i.Completed);
}
/// <summary>
/// Verifies that updating todo items modifies the nested collection.
/// Verifies that updating todo items modifies the nested collection.
/// </summary>
[Fact]
public async Task TodoLists_UpdateItems_ShouldModifyNestedCollection()
{
// Arrange
var todoList = new TodoList
{
Id = "list2",
Name = "Work Tasks",
Items = new List<TodoItem>
{
new() { Task = "Write report", Completed = false }
}
};
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync();
// Act - Mark task as completed and add new task
todoList.Items[0].Completed = true;
todoList.Items.Add(new TodoItem { Task = "Review report", Completed = false });
await _context.TodoLists.UpdateAsync(todoList);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.TodoLists.FindById("list2");
retrieved.ShouldNotBeNull();
// Arrange
var todoList = new TodoList
{
Id = "list2",
Name = "Work Tasks",
Items = new List<TodoItem>
{
new() { Task = "Write report", Completed = false }
}
};
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync();
// Act - Mark task as completed and add new task
todoList.Items[0].Completed = true;
todoList.Items.Add(new TodoItem { Task = "Review report", Completed = false });
await _context.TodoLists.UpdateAsync(todoList);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.TodoLists.FindById("list2");
retrieved.ShouldNotBeNull();
retrieved!.Items.Count.ShouldBe(2);
retrieved.Items.First().Completed.ShouldBe(true);
retrieved.Items.Last().Completed.ShouldBe(false);
}
retrieved.Items.First().Completed.ShouldBe(true);
retrieved.Items.Last().Completed.ShouldBe(false);
}
/// <summary>
/// Verifies that querying all users returns all inserted users.
/// Verifies that querying all users returns all inserted users.
/// </summary>
[Fact]
public void Users_FindAll_ShouldReturnAllUsers()
{
// Arrange
_context.Users.InsertAsync(new User { Id = "u1", Name = "User1", Age = 20 }).Wait();
_context.Users.InsertAsync(new User { Id = "u2", Name = "User2", Age = 30 }).Wait();
_context.Users.InsertAsync(new User { Id = "u3", Name = "User3", Age = 40 }).Wait();
_context.SaveChangesAsync().Wait();
// Act
var allUsers = _context.Users.FindAll().ToList();
// Assert
// Arrange
_context.Users.InsertAsync(new User { Id = "u1", Name = "User1", Age = 20 }).Wait();
_context.Users.InsertAsync(new User { Id = "u2", Name = "User2", Age = 30 }).Wait();
_context.Users.InsertAsync(new User { Id = "u3", Name = "User3", Age = 40 }).Wait();
_context.SaveChangesAsync().Wait();
// Act
var allUsers = _context.Users.FindAll().ToList();
// Assert
allUsers.Count.ShouldBe(3);
allUsers.Select(u => u.Name).ShouldContain("User1");
allUsers.Select(u => u.Name).ShouldContain("User2");
allUsers.Select(u => u.Name).ShouldContain("User3");
}
}
/// <summary>
/// Verifies that predicate-based queries return only matching users.
/// Verifies that predicate-based queries return only matching users.
/// </summary>
[Fact]
public void Users_Find_WithPredicate_ShouldFilterCorrectly()
{
// Arrange
_context.Users.InsertAsync(new User { Id = "f1", Name = "Young", Age = 18 }).Wait();
_context.Users.InsertAsync(new User { Id = "f2", Name = "Adult", Age = 30 }).Wait();
_context.Users.InsertAsync(new User { Id = "f3", Name = "Senior", Age = 65 }).Wait();
_context.SaveChangesAsync().Wait();
// Act
var adults = _context.Users.Find(u => u.Age >= 30).ToList();
// Assert
// Arrange
_context.Users.InsertAsync(new User { Id = "f1", Name = "Young", Age = 18 }).Wait();
_context.Users.InsertAsync(new User { Id = "f2", Name = "Adult", Age = 30 }).Wait();
_context.Users.InsertAsync(new User { Id = "f3", Name = "Senior", Age = 65 }).Wait();
_context.SaveChangesAsync().Wait();
// Act
var adults = _context.Users.Find(u => u.Age >= 30).ToList();
// Assert
adults.Count.ShouldBe(2);
adults.Select(u => u.Name).ShouldContain("Adult");
adults.Select(u => u.Name).ShouldContain("Senior");
}
}
}
}

View File

@@ -1,29 +1,27 @@
using System.Text.Json;
using System.Text.Json.Nodes;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
using System.Text.Json.Nodes;
using Xunit;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class SnapshotStoreTests : IDisposable
{
private readonly string _testDbPath;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _peerConfirmationStore;
private readonly SnapshotStore _snapshotStore;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _peerConfirmationStore;
private readonly SnapshotStore _snapshotStore;
private readonly string _testDbPath;
/// <summary>
/// Initializes a new instance of the <see cref="SnapshotStoreTests"/> class.
/// Initializes a new instance of the <see cref="SnapshotStoreTests" /> class.
/// </summary>
public SnapshotStoreTests()
{
@@ -32,7 +30,8 @@ public class SnapshotStoreTests : IDisposable
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger<SampleDocumentStore>.Instance);
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
NullLogger<SampleDocumentStore>.Instance);
var snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context,
NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
@@ -43,25 +42,43 @@ public class SnapshotStoreTests : IDisposable
vectorClock,
snapshotMetadataStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
_peerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
_snapshotStore = new SnapshotStore(
_documentStore,
_peerConfigStore,
_oplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
_peerConfirmationStore);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
_peerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
_snapshotStore = new SnapshotStore(
_documentStore,
_peerConfigStore,
_oplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
_peerConfirmationStore);
}
/// <summary>
/// Releases resources created for test execution.
/// </summary>
public void Dispose()
{
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
}
catch
{
}
}
/// <summary>
/// Verifies that creating a snapshot writes valid JSON to the output stream.
/// Verifies that creating a snapshot writes valid JSON to the output stream.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_WritesValidJsonToStream()
@@ -80,7 +97,7 @@ public class SnapshotStoreTests : IDisposable
// Reset stream position and verify JSON is valid
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
string json = await new StreamReader(stream).ReadToEndAsync();
string.IsNullOrWhiteSpace(json).ShouldBeFalse("Snapshot JSON should not be empty");
json.Trim().ShouldStartWith("{");
@@ -90,14 +107,15 @@ public class SnapshotStoreTests : IDisposable
doc.ShouldNotBeNull();
// Verify structure
doc.RootElement.TryGetProperty("Version", out _).ShouldBeTrue("Should have Version property");
doc.RootElement.TryGetProperty("Documents", out _).ShouldBeTrue("Should have Documents property");
doc.RootElement.TryGetProperty("Oplog", out _).ShouldBeTrue("Should have Oplog property");
doc.RootElement.TryGetProperty("PeerConfirmations", out _).ShouldBeTrue("Should have PeerConfirmations property");
doc.RootElement.TryGetProperty("Version", out _).ShouldBeTrue("Should have Version property");
doc.RootElement.TryGetProperty("Documents", out _).ShouldBeTrue("Should have Documents property");
doc.RootElement.TryGetProperty("Oplog", out _).ShouldBeTrue("Should have Oplog property");
doc.RootElement.TryGetProperty("PeerConfirmations", out _)
.ShouldBeTrue("Should have PeerConfirmations property");
}
/// <summary>
/// Verifies that snapshot creation includes all persisted documents.
/// Verifies that snapshot creation includes all persisted documents.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_IncludesAllDocuments()
@@ -119,7 +137,7 @@ public class SnapshotStoreTests : IDisposable
// Assert
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
string json = await new StreamReader(stream).ReadToEndAsync();
var doc = JsonDocument.Parse(json);
var documents = doc.RootElement.GetProperty("Documents");
@@ -127,38 +145,39 @@ public class SnapshotStoreTests : IDisposable
}
/// <summary>
/// Verifies that creating and replacing a snapshot preserves document data.
/// Verifies that creating and replacing a snapshot preserves document data.
/// </summary>
[Fact]
public async Task RoundTrip_CreateAndReplace_PreservesData()
{
// Arrange - Add data to source
var originalUser = new User { Id = "user-rt", Name = "RoundTrip User", Age = 42 };
await _context.Users.InsertAsync(originalUser);
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-rt",
"source-rt",
new HlcTimestamp(500, 2, "source-rt"),
"hash-rt");
await _context.SaveChangesAsync();
public async Task RoundTrip_CreateAndReplace_PreservesData()
{
// Arrange - Add data to source
var originalUser = new User { Id = "user-rt", Name = "RoundTrip User", Age = 42 };
await _context.Users.InsertAsync(originalUser);
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-rt",
"source-rt",
new HlcTimestamp(500, 2, "source-rt"),
"hash-rt");
await _context.SaveChangesAsync();
// Create snapshot
using var snapshotStream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(snapshotStream);
snapshotStream.Position = 0;
var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();
var snapshotDoc = JsonDocument.Parse(snapshotJson);
snapshotDoc.RootElement.GetProperty("PeerConfirmations").GetArrayLength().ShouldBe(1);
snapshotStream.Position = 0;
// Create snapshot
using var snapshotStream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(snapshotStream);
snapshotStream.Position = 0;
string snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();
var snapshotDoc = JsonDocument.Parse(snapshotJson);
snapshotDoc.RootElement.GetProperty("PeerConfirmations").GetArrayLength().ShouldBe(1);
snapshotStream.Position = 0;
// Create a new context/stores (simulating a different node)
var newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite");
string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite");
try
{
using var newContext = new SampleDbContext(newDbPath);
var newConfigProvider = CreateConfigProvider("test-new-node");
var newVectorClock = new VectorClockService();
var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock, NullLogger<SampleDocumentStore>.Instance);
var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock,
NullLogger<SampleDocumentStore>.Instance);
var newSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
newContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var newOplogStore = new BLiteOplogStore<SampleDbContext>(
@@ -166,66 +185,72 @@ public class SnapshotStoreTests : IDisposable
newVectorClock,
newSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var newPeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
newContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
newContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
var newSnapshotStore = new SnapshotStore(
newDocStore,
newPeerStore,
newOplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
newPeerConfirmationStore);
var newPeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
newContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
newContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
var newSnapshotStore = new SnapshotStore(
newDocStore,
newPeerStore,
newOplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
newPeerConfirmationStore);
// Act - Replace database with snapshot
await newSnapshotStore.ReplaceDatabaseAsync(snapshotStream);
// Assert - Data should be restored
var restoredUser = newContext.Users.FindById("user-rt");
restoredUser.ShouldNotBeNull();
restoredUser.Name.ShouldBe("RoundTrip User");
restoredUser.Age.ShouldBe(42);
var restoredConfirmations = (await newPeerConfirmationStore.GetConfirmationsAsync()).ToList();
restoredConfirmations.Count.ShouldBe(1);
restoredConfirmations[0].PeerNodeId.ShouldBe("peer-rt");
restoredConfirmations[0].SourceNodeId.ShouldBe("source-rt");
restoredConfirmations[0].ConfirmedWall.ShouldBe(500);
restoredConfirmations[0].ConfirmedLogic.ShouldBe(2);
restoredConfirmations[0].ConfirmedHash.ShouldBe("hash-rt");
}
finally
{
restoredUser.ShouldNotBeNull();
restoredUser.Name.ShouldBe("RoundTrip User");
restoredUser.Age.ShouldBe(42);
var restoredConfirmations = (await newPeerConfirmationStore.GetConfirmationsAsync()).ToList();
restoredConfirmations.Count.ShouldBe(1);
restoredConfirmations[0].PeerNodeId.ShouldBe("peer-rt");
restoredConfirmations[0].SourceNodeId.ShouldBe("source-rt");
restoredConfirmations[0].ConfirmedWall.ShouldBe(500);
restoredConfirmations[0].ConfirmedLogic.ShouldBe(2);
restoredConfirmations[0].ConfirmedHash.ShouldBe("hash-rt");
}
finally
{
if (File.Exists(newDbPath))
try { File.Delete(newDbPath); } catch { }
try
{
File.Delete(newDbPath);
}
catch
{
}
}
}
/// <summary>
/// Verifies that merging a snapshot preserves existing data and adds new data.
/// Verifies that merging a snapshot preserves existing data and adds new data.
/// </summary>
[Fact]
public async Task MergeSnapshotAsync_MergesWithExistingData()
{
// Arrange - Add initial data
await _context.Users.InsertAsync(new User { Id = "existing", Name = "Existing User", Age = 30 });
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
new HlcTimestamp(100, 0, "source-a"),
"target-hash-old");
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-local-only",
"source-local",
new HlcTimestamp(50, 0, "source-local"),
"target-local-hash");
await _context.SaveChangesAsync();
public async Task MergeSnapshotAsync_MergesWithExistingData()
{
// Arrange - Add initial data
await _context.Users.InsertAsync(new User { Id = "existing", Name = "Existing User", Age = 30 });
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
new HlcTimestamp(100, 0, "source-a"),
"target-hash-old");
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-local-only",
"source-local",
new HlcTimestamp(50, 0, "source-local"),
"target-local-hash");
await _context.SaveChangesAsync();
// Create snapshot with different data
var sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite");
string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite");
MemoryStream snapshotStream;
try
@@ -236,7 +261,8 @@ public class SnapshotStoreTests : IDisposable
var sourceConfigProvider = CreateConfigProvider("test-source-node");
var sourceVectorClock = new VectorClockService();
var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock, NullLogger<SampleDocumentStore>.Instance);
var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock,
NullLogger<SampleDocumentStore>.Instance);
var sourceSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
sourceContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var sourceOplogStore = new BLiteOplogStore<SampleDbContext>(
@@ -244,29 +270,29 @@ public class SnapshotStoreTests : IDisposable
sourceVectorClock,
sourceSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var sourcePeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
sourceContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
sourceContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
new HlcTimestamp(200, 1, "source-a"),
"source-hash-new");
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-b",
new HlcTimestamp(300, 0, "source-b"),
"source-hash-b");
var sourceSnapshotStore = new SnapshotStore(
sourceDocStore,
sourcePeerStore,
sourceOplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
sourcePeerConfirmationStore);
var sourcePeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
sourceContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
sourceContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
new HlcTimestamp(200, 1, "source-a"),
"source-hash-new");
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-b",
new HlcTimestamp(300, 0, "source-b"),
"source-hash-b");
var sourceSnapshotStore = new SnapshotStore(
sourceDocStore,
sourcePeerStore,
sourceOplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
sourcePeerConfirmationStore);
snapshotStream = new MemoryStream();
await sourceSnapshotStore.CreateSnapshotAsync(snapshotStream);
@@ -275,7 +301,13 @@ public class SnapshotStoreTests : IDisposable
finally
{
if (File.Exists(sourceDbPath))
try { File.Delete(sourceDbPath); } catch { }
try
{
File.Delete(sourceDbPath);
}
catch
{
}
}
// Act - Merge snapshot into existing data
@@ -285,70 +317,71 @@ public class SnapshotStoreTests : IDisposable
var existingUser = _context.Users.FindById("existing");
var newUser = _context.Users.FindById("new-user");
existingUser.ShouldNotBeNull();
newUser.ShouldNotBeNull();
existingUser.Name.ShouldBe("Existing User");
newUser.Name.ShouldBe("New User");
var confirmations = (await _peerConfirmationStore.GetConfirmationsAsync())
.OrderBy(c => c.PeerNodeId)
.ThenBy(c => c.SourceNodeId)
.ToList();
confirmations.Count.ShouldBe(3);
confirmations[0].PeerNodeId.ShouldBe("peer-local-only");
confirmations[0].SourceNodeId.ShouldBe("source-local");
confirmations[0].ConfirmedWall.ShouldBe(50);
confirmations[0].ConfirmedHash.ShouldBe("target-local-hash");
confirmations[1].PeerNodeId.ShouldBe("peer-merge");
confirmations[1].SourceNodeId.ShouldBe("source-a");
confirmations[1].ConfirmedWall.ShouldBe(200);
confirmations[1].ConfirmedLogic.ShouldBe(1);
confirmations[1].ConfirmedHash.ShouldBe("source-hash-new");
confirmations[2].PeerNodeId.ShouldBe("peer-merge");
confirmations[2].SourceNodeId.ShouldBe("source-b");
confirmations[2].ConfirmedWall.ShouldBe(300);
confirmations[2].ConfirmedHash.ShouldBe("source-hash-b");
}
/// <summary>
/// Verifies that replace can consume legacy snapshots that do not include peer confirmations.
/// </summary>
[Fact]
public async Task ReplaceDatabaseAsync_LegacySnapshotWithoutPeerConfirmations_IsSupported()
{
// Arrange
await _context.Users.InsertAsync(new User { Id = "legacy-user", Name = "Legacy User", Age = 33 });
await _context.SaveChangesAsync();
using var snapshotStream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(snapshotStream);
snapshotStream.Position = 0;
var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();
var legacySnapshot = JsonNode.Parse(snapshotJson)!.AsObject();
legacySnapshot.Remove("PeerConfirmations");
using var legacyStream = new MemoryStream();
await using (var writer = new Utf8JsonWriter(legacyStream))
{
legacySnapshot.WriteTo(writer);
}
legacyStream.Position = 0;
// Act
await _snapshotStore.ReplaceDatabaseAsync(legacyStream);
// Assert
_context.Users.FindById("legacy-user").ShouldNotBeNull();
(await _peerConfirmationStore.GetConfirmationsAsync()).Count().ShouldBe(0);
}
/// <summary>
/// Verifies that snapshot creation succeeds for an empty database.
/// </summary>
existingUser.ShouldNotBeNull();
newUser.ShouldNotBeNull();
existingUser.Name.ShouldBe("Existing User");
newUser.Name.ShouldBe("New User");
var confirmations = (await _peerConfirmationStore.GetConfirmationsAsync())
.OrderBy(c => c.PeerNodeId)
.ThenBy(c => c.SourceNodeId)
.ToList();
confirmations.Count.ShouldBe(3);
confirmations[0].PeerNodeId.ShouldBe("peer-local-only");
confirmations[0].SourceNodeId.ShouldBe("source-local");
confirmations[0].ConfirmedWall.ShouldBe(50);
confirmations[0].ConfirmedHash.ShouldBe("target-local-hash");
confirmations[1].PeerNodeId.ShouldBe("peer-merge");
confirmations[1].SourceNodeId.ShouldBe("source-a");
confirmations[1].ConfirmedWall.ShouldBe(200);
confirmations[1].ConfirmedLogic.ShouldBe(1);
confirmations[1].ConfirmedHash.ShouldBe("source-hash-new");
confirmations[2].PeerNodeId.ShouldBe("peer-merge");
confirmations[2].SourceNodeId.ShouldBe("source-b");
confirmations[2].ConfirmedWall.ShouldBe(300);
confirmations[2].ConfirmedHash.ShouldBe("source-hash-b");
}
/// <summary>
/// Verifies that replace can consume legacy snapshots that do not include peer confirmations.
/// </summary>
[Fact]
public async Task ReplaceDatabaseAsync_LegacySnapshotWithoutPeerConfirmations_IsSupported()
{
// Arrange
await _context.Users.InsertAsync(new User { Id = "legacy-user", Name = "Legacy User", Age = 33 });
await _context.SaveChangesAsync();
using var snapshotStream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(snapshotStream);
snapshotStream.Position = 0;
string snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();
var legacySnapshot = JsonNode.Parse(snapshotJson)!.AsObject();
legacySnapshot.Remove("PeerConfirmations");
using var legacyStream = new MemoryStream();
await using (var writer = new Utf8JsonWriter(legacyStream))
{
legacySnapshot.WriteTo(writer);
}
legacyStream.Position = 0;
// Act
await _snapshotStore.ReplaceDatabaseAsync(legacyStream);
// Assert
_context.Users.FindById("legacy-user").ShouldNotBeNull();
(await _peerConfirmationStore.GetConfirmationsAsync()).Count().ShouldBe(0);
}
/// <summary>
/// Verifies that snapshot creation succeeds for an empty database.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_HandlesEmptyDatabase()
{
@@ -360,7 +393,7 @@ public class SnapshotStoreTests : IDisposable
(stream.Length > 0).ShouldBeTrue();
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
string json = await new StreamReader(stream).ReadToEndAsync();
var doc = JsonDocument.Parse(json);
var documents = doc.RootElement.GetProperty("Documents");
@@ -368,7 +401,7 @@ public class SnapshotStoreTests : IDisposable
}
/// <summary>
/// Verifies that snapshot creation includes oplog entries.
/// Verifies that snapshot creation includes oplog entries.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_IncludesOplogEntries()
@@ -394,27 +427,13 @@ public class SnapshotStoreTests : IDisposable
// Assert
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
string json = await new StreamReader(stream).ReadToEndAsync();
var doc = JsonDocument.Parse(json);
var oplog = doc.RootElement.GetProperty("Oplog");
(oplog.GetArrayLength() >= 1).ShouldBeTrue("Should have at least one oplog entry");
}
/// <summary>
/// Releases resources created for test execution.
/// </summary>
public void Dispose()
{
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
{
try { File.Delete(_testDbPath); } catch { }
}
}
private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId)
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
@@ -428,4 +447,4 @@ public class SnapshotStoreTests : IDisposable
});
return configProvider;
}
}
}

View File

@@ -1,32 +1,32 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Sample.Console.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Sample.Console.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Sample.Console.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\samples\ZB.MOM.WW.CBDDC.Sample.Console\ZB.MOM.WW.CBDDC.Sample.Console.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj" />
</ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
    <!-- Test project for the sample console app; not packable, net10.0, nullable enabled. -->
    <PropertyGroup>
        <AssemblyName>ZB.MOM.WW.CBDDC.Sample.Console.Tests</AssemblyName>
        <RootNamespace>ZB.MOM.WW.CBDDC.Sample.Console.Tests</RootNamespace>
        <PackageId>ZB.MOM.WW.CBDDC.Sample.Console.Tests</PackageId>
        <TargetFramework>net10.0</TargetFramework>
        <ImplicitUsings>enable</ImplicitUsings>
        <Nullable>enable</Nullable>
        <!-- xUnit1031/xUnit1051 suppressed project-wide; NOTE(review): confirm these blocking-call/cancellation-token warnings are intentionally accepted. -->
        <NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
        <IsPackable>false</IsPackable>
    </PropertyGroup>
    <!-- Test stack: xUnit v3 runner, NSubstitute mocks, Shouldly assertions, coverlet coverage. -->
    <ItemGroup>
        <PackageReference Include="coverlet.collector" Version="6.0.4"/>
        <PackageReference Include="NSubstitute" Version="5.3.0"/>
        <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1"/>
        <PackageReference Include="Shouldly" Version="4.3.0"/>
        <PackageReference Include="xunit.runner.visualstudio" Version="3.1.4"/>
        <PackageReference Include="xunit.v3" Version="3.2.0"/>
    </ItemGroup>
    <!-- Global using so test files do not need "using Xunit;" individually. -->
    <ItemGroup>
        <Using Include="Xunit"/>
    </ItemGroup>
    <!-- System under test (the sample console app) plus the persistence layer it depends on. -->
    <ItemGroup>
        <ProjectReference Include="..\..\samples\ZB.MOM.WW.CBDDC.Sample.Console\ZB.MOM.WW.CBDDC.Sample.Console.csproj"/>
        <ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj"/>
    </ItemGroup>
</Project>