Initial import of the CBDDC codebase with docs and tests. Add a .NET-focused gitignore to keep generated artifacts out of source control.

Joseph Doherty
2026-02-20 13:03:21 -05:00
commit 08bfc17218
218 changed files with 33910 additions and 0 deletions


@@ -0,0 +1,267 @@
using System.Reflection;
using System.Text.RegularExpressions;
using System.Xml.Linq;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class ArchitectureFitnessTests
{
/// <summary>
/// Verifies that the core assembly does not reference outer-layer assemblies.
/// </summary>
[Fact]
public void CoreAssembly_ShouldNotReferenceOuterAssemblies()
{
var references = typeof(OplogEntry).Assembly
.GetReferencedAssemblies()
.Select(a => a.Name)
.Where(a => !string.IsNullOrWhiteSpace(a))
.ToHashSet(StringComparer.Ordinal);
references.ShouldNotContain("ZB.MOM.WW.CBDDC.Network");
references.ShouldNotContain("ZB.MOM.WW.CBDDC.Persistence");
references.ShouldNotContain("ZB.MOM.WW.CBDDC.Hosting");
}
/// <summary>
/// Verifies that project references under src form an acyclic graph.
/// </summary>
[Fact]
public void SourceProjectGraph_ShouldBeAcyclic()
{
var repoRoot = FindRepoRoot();
var srcRoot = Path.Combine(repoRoot, "src");
var projectFiles = Directory
.EnumerateFiles(srcRoot, "*.csproj", SearchOption.AllDirectories)
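// Exclude generated copies under obj/ and bin/ so only real project files are analyzed.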
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal))
.ToList();
var nodes = projectFiles.ToDictionary(
p => Path.GetFileNameWithoutExtension(p),
p => new HashSet<string>(StringComparer.Ordinal));
foreach (var projectFile in projectFiles)
{
var projectName = Path.GetFileNameWithoutExtension(projectFile);
var doc = XDocument.Load(projectFile);
var refs = doc.Descendants("ProjectReference")
.Select(x => x.Attribute("Include")?.Value)
.Where(v => !string.IsNullOrWhiteSpace(v))
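// Normalize Windows-style separators so the project name resolves on any host OS.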
.Select(v => Path.GetFileNameWithoutExtension(v!.Replace('\\', '/')));
foreach (var reference in refs)
{
if (nodes.ContainsKey(reference))
{
nodes[projectName].Add(reference);
}
}
}
HasCycle(nodes).ShouldBeFalse();
}
/// <summary>
/// Verifies the allowed dependency graph between source projects.
/// </summary>
[Fact]
public void SourceProjectReferences_ShouldMatchAllowedDependencyGraph()
{
var repoRoot = FindRepoRoot();
var srcRoot = Path.Combine(repoRoot, "src");
var projectFiles = Directory
.EnumerateFiles(srcRoot, "*.csproj", SearchOption.AllDirectories)
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal))
.ToList();
var allowedDependencies = new Dictionary<string, HashSet<string>>(StringComparer.Ordinal)
{
["ZB.MOM.WW.CBDDC.Core"] = new HashSet<string>(StringComparer.Ordinal),
["ZB.MOM.WW.CBDDC.Network"] = new HashSet<string>(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" },
["ZB.MOM.WW.CBDDC.Persistence"] = new HashSet<string>(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" },
["ZB.MOM.WW.CBDDC.Hosting"] = new HashSet<string>(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Network" }
};
foreach (var projectFile in projectFiles)
{
var projectName = Path.GetFileNameWithoutExtension(projectFile);
allowedDependencies.ContainsKey(projectName)
.ShouldBeTrue($"Unexpected source project found: {projectName}");
var doc = XDocument.Load(projectFile);
var references = doc.Descendants("ProjectReference")
.Select(x => x.Attribute("Include")?.Value)
.Where(v => !string.IsNullOrWhiteSpace(v))
.Select(v => Path.GetFileNameWithoutExtension(v!.Replace('\\', '/')))
.ToHashSet(StringComparer.Ordinal);
var expected = allowedDependencies[projectName];
var extra = references.Where(r => !expected.Contains(r)).ToList();
var missing = expected.Where(e => !references.Contains(e)).ToList();
extra.ShouldBeEmpty($"Project {projectName} has disallowed references: {string.Join(", ", extra)}");
missing.ShouldBeEmpty($"Project {projectName} is missing required references: {string.Join(", ", missing)}");
}
}
/// <summary>
/// Verifies non-generic ILogger usage is restricted to explicit compatibility shims.
/// </summary>
[Fact]
public void SourceCode_ShouldRestrictNonGenericILoggerUsage()
{
var repoRoot = FindRepoRoot();
var srcRoot = Path.Combine(repoRoot, "src");
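// Match bare ILogger tokens but not generic ILogger<T>; the Factory lookahead is defensive,
// since the trailing \b already rejects ILoggerFactory.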
var loggerPattern = new Regex(@"\bILogger\b(?!\s*<|\s*Factory\b)", RegexOptions.Compiled);
var allowedSnippets = new[]
{
"private readonly ILogger _inner;",
"internal ProtocolHandler(ILogger logger",
"ILogger? logger = null)",
"CreateTypedLogger(ILogger? logger)",
"public ForwardingLogger(ILogger inner)"
};
var violations = new List<string>();
var sourceFiles = Directory.EnumerateFiles(srcRoot, "*.cs", SearchOption.AllDirectories)
.Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal)
&& !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal));
foreach (var file in sourceFiles)
{
var lines = File.ReadAllLines(file);
for (var i = 0; i < lines.Length; i++)
{
var line = lines[i].Trim();
if (string.IsNullOrWhiteSpace(line) || line.StartsWith("//", StringComparison.Ordinal))
{
continue;
}
if (!loggerPattern.IsMatch(line))
{
continue;
}
if (allowedSnippets.Any(line.Contains))
{
continue;
}
var relativePath = Path.GetRelativePath(repoRoot, file).Replace('\\', '/');
violations.Add($"{relativePath}:{i + 1} -> {line}");
}
}
violations.ShouldBeEmpty($"Unexpected non-generic ILogger usage:{Environment.NewLine}{string.Join(Environment.NewLine, violations)}");
}
/// <summary>
/// Verifies that boundary services push operation log context for hosted/background entry points.
/// </summary>
[Fact]
public void BoundaryServices_ShouldPushOperationLogContext()
{
var repoRoot = FindRepoRoot();
var boundaryFiles = new[]
{
"src/ZB.MOM.WW.CBDDC.Network/CBDDCNodeService.cs",
"src/ZB.MOM.WW.CBDDC.Network/SyncOrchestrator.cs",
"src/ZB.MOM.WW.CBDDC.Network/TcpSyncServer.cs",
"src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/DiscoveryServiceHostedService.cs",
"src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/TcpSyncServerHostedService.cs",
"src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpDiscoveryService.cs",
"src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpSyncOrchestrator.cs"
};
foreach (var relativePath in boundaryFiles)
{
var filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
File.Exists(filePath).ShouldBeTrue($"Missing expected boundary file: {relativePath}");
var contents = File.ReadAllText(filePath);
contents.Contains("LogContext.PushProperty(\"OperationId\"", StringComparison.Ordinal)
.ShouldBeTrue($"Boundary file is missing OperationId log enrichment: {relativePath}");
}
}
/// <summary>
/// Verifies boundary projects include Serilog for LogContext support.
/// </summary>
[Fact]
public void BoundaryProjects_ShouldReferenceSerilog()
{
var repoRoot = FindRepoRoot();
var projects = new[]
{
"src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj",
"src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj",
"samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj"
};
foreach (var relativePath in projects)
{
var filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar));
File.Exists(filePath).ShouldBeTrue($"Missing project file: {relativePath}");
var contents = File.ReadAllText(filePath);
contents.Contains("<PackageReference Include=\"Serilog\"", StringComparison.Ordinal)
.ShouldBeTrue($"Serilog package reference is required for logging boundary enrichment: {relativePath}");
}
}
private static string FindRepoRoot()
{
var dir = AppContext.BaseDirectory;
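// Walk up from the test output directory, bounded to 10 levels, until the solution file marks the repo root.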
for (var i = 0; i < 10 && !string.IsNullOrWhiteSpace(dir); i++)
{
if (File.Exists(Path.Combine(dir, "CBDDC.slnx")))
{
return dir;
}
dir = Directory.GetParent(dir)?.FullName ?? string.Empty;
}
throw new InvalidOperationException("Could not locate repository root containing CBDDC.slnx.");
}
private static bool HasCycle(Dictionary<string, HashSet<string>> graph)
{
var visiting = new HashSet<string>(StringComparer.Ordinal);
var visited = new HashSet<string>(StringComparer.Ordinal);
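// Depth-first search with a "visiting" stack set: re-entering a node that is
// still on the stack means a back edge, i.e. a cycle.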
bool Dfs(string node)
{
if (visiting.Contains(node))
{
return true;
}
if (!visited.Add(node))
{
return false;
}
visiting.Add(node);
foreach (var next in graph[node])
{
if (Dfs(next))
{
return true;
}
}
visiting.Remove(node);
return false;
}
return graph.Keys.Any(Dfs);
}
}


@@ -0,0 +1,2 @@
global using NSubstitute;
global using Shouldly;


@@ -0,0 +1,77 @@
using System;
using System.Globalization;
using System.Text.Json;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Tests
{
public class OplogEntryTests
{
/// <summary>
/// Verifies that hash computation is deterministic even when payload content differs.
/// </summary>
[Fact]
public void ComputeHash_ShouldBeDeterministic_RegardlessOfPayload()
{
// Arrange
var collection = "test-collection";
var key = "test-key";
var op = OperationType.Put;
var timestamp = new HlcTimestamp(100, 0, "node-1");
var prevHash = "prev-hash";
var payload1 = JsonDocument.Parse("{\"prop\": 1}").RootElement;
var payload2 = JsonDocument.Parse("{\"prop\": 2, \"extra\": \"whitespace\"}").RootElement;
// Act
var entry1 = new OplogEntry(collection, key, op, payload1, timestamp, prevHash);
var entry2 = new OplogEntry(collection, key, op, payload2, timestamp, prevHash);
// Assert
entry2.Hash.ShouldBe(entry1.Hash);
}
/// <summary>
/// Verifies that hash computation uses invariant culture formatting for timestamp values.
/// </summary>
[Fact]
public void ComputeHash_ShouldUseInvariantCulture_ForTimestamp()
{
// Arrange
var originalCulture = CultureInfo.CurrentCulture;
try
{
var culture = CultureInfo.GetCultureInfo("de-DE");
CultureInfo.CurrentCulture = culture;
var timestamp = new HlcTimestamp(123456789, 1, "node");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
// Act
var hash = entry.ComputeHash();
// Assert
CultureInfo.CurrentCulture = CultureInfo.InvariantCulture;
var expectedEntry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
hash.ShouldBe(expectedEntry.Hash);
}
finally
{
CultureInfo.CurrentCulture = originalCulture;
}
}
/// <summary>
/// Verifies that an entry is valid when its stored hash matches computed content.
/// </summary>
[Fact]
public void IsValid_ShouldReturnTrue_WhenHashMatches()
{
var timestamp = new HlcTimestamp(100, 0, "node-1");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
entry.IsValid().ShouldBeTrue();
}
}
}


@@ -0,0 +1,73 @@
using ZB.MOM.WW.CBDDC.Core.Management;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class PeerManagementServiceTests
{
/// <summary>
/// Verifies that removing peer tracking with remote removal enabled removes both tracking and remote peer configuration.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_WhenRemoveRemoteConfigTrue_RemovesTrackingAndRemoteConfig()
{
var configStore = Substitute.For<IPeerConfigurationStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var service = new PeerManagementService(configStore, confirmationStore);
var token = new CancellationTokenSource().Token;
await service.RemovePeerTrackingAsync("peer-1", removeRemoteConfig: true, token);
await confirmationStore.Received(1).RemovePeerTrackingAsync("peer-1", token);
await configStore.Received(1).RemoveRemotePeerAsync("peer-1", token);
}
/// <summary>
/// Verifies that removing peer tracking with remote removal disabled removes only tracking data.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_WhenRemoveRemoteConfigFalse_RemovesTrackingOnly()
{
var configStore = Substitute.For<IPeerConfigurationStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var service = new PeerManagementService(configStore, confirmationStore);
await service.RemovePeerTrackingAsync("peer-1", removeRemoteConfig: false);
await confirmationStore.Received(1).RemovePeerTrackingAsync("peer-1", Arg.Any<CancellationToken>());
await configStore.DidNotReceive().RemoveRemotePeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that removing a remote peer delegates to tracking removal with remote configuration cleanup enabled.
/// </summary>
[Fact]
public async Task RemoveRemotePeerAsync_DelegatesToTrackingRemovalWithRemoteConfig()
{
var configStore = Substitute.For<IPeerConfigurationStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var service = new PeerManagementService(configStore, confirmationStore);
var token = new CancellationTokenSource().Token;
await service.RemoveRemotePeerAsync("peer-1", token);
await confirmationStore.Received(1).RemovePeerTrackingAsync("peer-1", token);
await configStore.Received(1).RemoveRemotePeerAsync("peer-1", token);
}
/// <summary>
/// Verifies that removing peer tracking with an invalid node identifier throws an <see cref="ArgumentException"/>.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_WhenNodeIdInvalid_ThrowsArgumentException()
{
var configStore = Substitute.For<IPeerConfigurationStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var service = new PeerManagementService(configStore, confirmationStore);
await Should.ThrowAsync<ArgumentException>(() => service.RemovePeerTrackingAsync(" ", removeRemoteConfig: true));
await confirmationStore.DidNotReceive().RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>());
await configStore.DidNotReceive().RemoveRemotePeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>());
}
}


@@ -0,0 +1,127 @@
using System.Diagnostics;
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class PerformanceRegressionTests
{
private readonly RecursiveNodeMergeConflictResolver _resolver;
private readonly Dictionary<string, int> _limits;
/// <summary>
/// Initializes a new instance of the <see cref="PerformanceRegressionTests"/> class.
/// </summary>
public PerformanceRegressionTests()
{
_resolver = new RecursiveNodeMergeConflictResolver();
// Load performance limits from benchmark_limits.json (copied to the test output directory).
var json = File.ReadAllText("benchmark_limits.json");
_limits = JsonSerializer.Deserialize<Dictionary<string, int>>(json) ?? new Dictionary<string, int>();
}
private Document CreateDoc(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new Document("test", key, element, ts, false);
}
private OplogEntry CreateOp(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new OplogEntry("test", key, OperationType.Put, element, ts, string.Empty);
}
/// <summary>
/// Verifies simple recursive merge operations stay within configured performance limits.
/// </summary>
[Fact]
public void RecursiveMerge_Simple_ShouldBeWithinLimits()
{
int iterations = 10000;
string limitKey = "RecursiveMerge_Simple_10k_Ops_MaxMs";
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
var doc = CreateDoc("k1", new { name = "Alice", age = 30 }, ts1);
var op = CreateOp("k1", new { name = "Bob", age = 31 }, ts2);
// Warmup
for (int i = 0; i < 100; i++) _resolver.Resolve(doc, op);
// Run
var sw = Stopwatch.StartNew();
for (int i = 0; i < iterations; i++)
{
_resolver.Resolve(doc, op);
}
sw.Stop();
long elapsed = sw.ElapsedMilliseconds;
Console.WriteLine($"Executed {iterations} merges in {elapsed}ms");
if (_limits.TryGetValue(limitKey, out int maxMs))
{
elapsed.ShouldBeLessThan(maxMs, $"Performance regression! Expected < {maxMs}ms but took {elapsed}ms");
}
else
{
Console.WriteLine($"Warning: No limit found for key '{limitKey}'");
}
}
/// <summary>
/// Verifies deep array recursive merge operations stay within configured performance limits.
/// </summary>
[Fact]
public void RecursiveMerge_DeepArray_ShouldBeWithinLimits()
{
int iterations = 1000; // Lower iterations for heavier op
string limitKey = "RecursiveMerge_Array_1k_Ops_MaxMs";
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
var items = new List<object>();
for (int i = 0; i < 100; i++) items.Add(new { id = i.ToString(), val = i });
var doc = CreateDoc("k1", new { items = items }, ts1);
var op = CreateDoc("k1", new { items = items }, ts2).ToOplogEntry(OperationType.Put); // Same content so the resolver must traverse the id-based array merge path
// Warmup
_resolver.Resolve(doc, op);
// Run
var sw = Stopwatch.StartNew();
for (int i = 0; i < iterations; i++)
{
_resolver.Resolve(doc, op);
}
sw.Stop();
long elapsed = sw.ElapsedMilliseconds;
Console.WriteLine($"Executed {iterations} array merges in {elapsed}ms");
if (_limits.TryGetValue(limitKey, out int maxMs))
{
elapsed.ShouldBeLessThan(maxMs, $"Performance regression! Expected < {maxMs}ms but took {elapsed}ms");
}
}
}
public static class DocExt
{
/// <summary>
/// Creates an operation log entry from a document instance.
/// </summary>
/// <param name="d">The source document.</param>
/// <param name="t">The operation type to apply to the created entry.</param>
/// <returns>A new operation log entry.</returns>
public static OplogEntry ToOplogEntry(this Document d, OperationType t)
{
return new OplogEntry(d.Collection, d.Key, t, d.Content, d.UpdatedAt, string.Empty);
}
}


@@ -0,0 +1,173 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class RecursiveNodeMergeConflictResolverTests
{
private readonly RecursiveNodeMergeConflictResolver _resolver;
/// <summary>
/// Initializes a new instance of the <see cref="RecursiveNodeMergeConflictResolverTests"/> class.
/// </summary>
public RecursiveNodeMergeConflictResolverTests()
{
_resolver = new RecursiveNodeMergeConflictResolver();
}
private Document CreateDoc(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new Document("test", key, element, ts, false);
}
private OplogEntry CreateOp(string key, object data, HlcTimestamp ts)
{
var json = JsonSerializer.Serialize(data);
var element = JsonDocument.Parse(json).RootElement;
return new OplogEntry("test", key, OperationType.Put, element, ts, string.Empty);
}
/// <summary>
/// Verifies that disjoint fields are merged into a single document.
/// </summary>
[Fact]
public void Resolve_ShouldMergeDisjointFields()
{
// Arrange
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
var doc = CreateDoc("k1", new { name = "Alice" }, ts1);
var op = CreateOp("k1", new { age = 30 }, ts2);
// Act
var result = _resolver.Resolve(doc, op);
// Assert
result.ShouldApply.ShouldBeTrue();
result.MergedDocument.ShouldNotBeNull();
var merged = result.MergedDocument.Content;
merged.GetProperty("name").GetString().ShouldBe("Alice");
merged.GetProperty("age").GetInt32().ShouldBe(30);
result.MergedDocument.UpdatedAt.ShouldBe(ts2); // Max timestamp
}
/// <summary>
/// Verifies that primitive collisions are resolved using the higher timestamp value.
/// </summary>
[Fact]
public void Resolve_ShouldPrioritizeHigherTimestamp_PrimitiveCollision()
{
// Arrange
var oldTs = new HlcTimestamp(100, 0, "n1");
var newTs = new HlcTimestamp(200, 0, "n2");
var doc = CreateDoc("k1", new { val = "Old" }, oldTs);
var op = CreateOp("k1", new { val = "New" }, newTs);
// Act - Remote is newer
var result1 = _resolver.Resolve(doc, op);
result1.MergedDocument!.Content.GetProperty("val").GetString().ShouldBe("New");
// Act - Local is newer (simulating outdated remote op)
var docNew = CreateDoc("k1", new { val = "Correct" }, newTs);
var opOld = CreateOp("k1", new { val = "Stale" }, oldTs);
var result2 = _resolver.Resolve(docNew, opOld);
result2.MergedDocument!.Content.GetProperty("val").GetString().ShouldBe("Correct");
}
/// <summary>
/// Verifies that nested object content is merged recursively.
/// </summary>
[Fact]
public void Resolve_ShouldRecursivelyMergeObjects()
{
// Arrange
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
var doc = CreateDoc("k1", new { info = new { x = 1, y = 1 } }, ts1);
var op = CreateOp("k1", new { info = new { y = 2, z = 3 } }, ts2);
// Act
var result = _resolver.Resolve(doc, op);
// Assert
var info = result.MergedDocument!.Content.GetProperty("info");
info.GetProperty("x").GetInt32().ShouldBe(1);
info.GetProperty("y").GetInt32().ShouldBe(2); // Overwritten by newer
info.GetProperty("z").GetInt32().ShouldBe(3); // Added
}
/// <summary>
/// Verifies that arrays containing object identifiers are merged by item identity.
/// </summary>
[Fact]
public void Resolve_ShouldMergeArraysById()
{
// Arrange
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
var doc = CreateDoc("k1", new
{
items = new[] {
new { id = "1", val = "A" },
new { id = "2", val = "B" }
}
}, ts1);
var op = CreateOp("k1", new
{
items = new[] {
new { id = "1", val = "A-Updated" }, // Update
new { id = "3", val = "C" } // Insert
}
}, ts2);
// Act
var result = _resolver.Resolve(doc, op);
// Assert
Action<JsonElement> validate = (root) =>
{
var items = root.GetProperty("items");
items.GetArrayLength().ShouldBe(3);
// Order is not guaranteed, so items would normally be located by id;
// this simplified check only verifies the expected content is present.
var text = items.GetRawText();
text.ShouldContain("A-Updated");
text.ShouldContain("B");
text.ShouldContain("C");
};
validate(result.MergedDocument!.Content);
}
/// <summary>
/// Verifies that primitive arrays fall back to last-write-wins behavior.
/// </summary>
[Fact]
public void Resolve_ShouldFallbackToLWW_ForPrimitiveArrays()
{
// Arrange
var ts1 = new HlcTimestamp(100, 0, "n1");
var ts2 = new HlcTimestamp(200, 0, "n2");
var doc = CreateDoc("k1", new { tags = new[] { "a", "b" } }, ts1);
var op = CreateOp("k1", new { tags = new[] { "c" } }, ts2);
// Act
var result = _resolver.Resolve(doc, op);
// Assert
var tags = result.MergedDocument!.Content.GetProperty("tags");
tags.GetArrayLength().ShouldBe(1);
tags[0].GetString().ShouldBe("c");
}
}


@@ -0,0 +1,313 @@
using System.Linq;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class VectorClockTests
{
/// <summary>
/// Verifies an empty vector clock returns the default timestamp for unknown nodes.
/// </summary>
[Fact]
public void EmptyVectorClock_ShouldReturnDefaultTimestamp()
{
// Arrange
var vc = new VectorClock();
// Act
var ts = vc.GetTimestamp("node1");
// Assert
ts.ShouldBe(default(HlcTimestamp));
}
/// <summary>
/// Verifies setting a timestamp stores it for the specified node.
/// </summary>
[Fact]
public void SetTimestamp_ShouldStoreTimestamp()
{
// Arrange
var vc = new VectorClock();
var ts = new HlcTimestamp(100, 1, "node1");
// Act
vc.SetTimestamp("node1", ts);
// Assert
vc.GetTimestamp("node1").ShouldBe(ts);
}
/// <summary>
/// Verifies node identifiers are returned for all known nodes.
/// </summary>
[Fact]
public void NodeIds_ShouldReturnAllNodes()
{
// Arrange
var vc = new VectorClock();
vc.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var nodeIds = vc.NodeIds.ToList();
// Assert
nodeIds.Count.ShouldBe(2);
nodeIds.ShouldContain("node1");
nodeIds.ShouldContain("node2");
}
/// <summary>
/// Verifies equal vector clocks are compared as equal.
/// </summary>
[Fact]
public void CompareTo_EqualClocks_ShouldReturnEqual()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.Equal);
}
/// <summary>
/// Verifies a clock strictly ahead of another is reported as strictly ahead.
/// </summary>
[Fact]
public void CompareTo_StrictlyAhead_ShouldReturnStrictlyAhead()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Ahead
vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2")); // Same
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.StrictlyAhead);
}
/// <summary>
/// Verifies a clock strictly behind another is reported as strictly behind.
/// </summary>
[Fact]
public void CompareTo_StrictlyBehind_ShouldReturnStrictlyBehind()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); // Behind
vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2")); // Same
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.StrictlyBehind);
}
/// <summary>
/// Verifies divergent per-node progress is reported as concurrent.
/// </summary>
[Fact]
public void CompareTo_Concurrent_ShouldReturnConcurrent()
{
// Arrange - Split brain scenario
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Node1 ahead
vc1.SetTimestamp("node2", new HlcTimestamp(100, 2, "node2")); // Node2 behind
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); // Node1 behind
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2")); // Node2 ahead
// Act
var result = vc1.CompareTo(vc2);
// Assert
result.ShouldBe(CausalityRelation.Concurrent);
}
/// <summary>
/// Verifies pull candidates include nodes where the other clock is ahead.
/// </summary>
[Fact]
public void GetNodesWithUpdates_ShouldReturnNodesWhereOtherIsAhead()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc1.SetTimestamp("node2", new HlcTimestamp(100, 2, "node2"));
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Ahead
vc2.SetTimestamp("node2", new HlcTimestamp(100, 2, "node2")); // Same
// Act
var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList();
// Assert
nodesToPull.Count.ShouldBe(1);
nodesToPull.ShouldContain("node1");
}
/// <summary>
/// Verifies push candidates include nodes where this clock is ahead.
/// </summary>
[Fact]
public void GetNodesToPush_ShouldReturnNodesWhereThisIsAhead()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Ahead
vc1.SetTimestamp("node2", new HlcTimestamp(100, 2, "node2")); // Same
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(100, 2, "node2"));
// Act
var nodesToPush = vc1.GetNodesToPush(vc2).ToList();
// Assert
nodesToPush.Count.ShouldBe(1);
nodesToPush.ShouldContain("node1");
}
/// <summary>
/// Verifies a newly introduced remote node is included in pull candidates.
/// </summary>
[Fact]
public void GetNodesWithUpdates_WhenNewNodeAppearsInOther_ShouldReturnIt()
{
// Arrange - Simulates a new node joining the cluster
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node3", new HlcTimestamp(50, 1, "node3")); // New node
// Act
var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList();
// Assert
nodesToPull.Count.ShouldBe(1);
nodesToPull.ShouldContain("node3");
}
/// <summary>
/// Verifies merge keeps the maximum timestamp per node.
/// </summary>
[Fact]
public void Merge_ShouldTakeMaximumForEachNode()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1"));
vc1.SetTimestamp("node2", new HlcTimestamp(100, 2, "node2"));
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
vc2.SetTimestamp("node3", new HlcTimestamp(150, 1, "node3"));
// Act
vc1.Merge(vc2);
// Assert
vc1.GetTimestamp("node1").ShouldBe(new HlcTimestamp(200, 1, "node1")); // Kept max
vc1.GetTimestamp("node2").ShouldBe(new HlcTimestamp(200, 2, "node2")); // Merged max
vc1.GetTimestamp("node3").ShouldBe(new HlcTimestamp(150, 1, "node3")); // Added new
}
/// <summary>
/// Verifies cloning creates an independent copy of the vector clock.
/// </summary>
[Fact]
public void Clone_ShouldCreateIndependentCopy()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
// Act
var vc2 = vc1.Clone();
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Assert
vc1.NodeIds.Count().ShouldBe(1);
vc2.NodeIds.Count().ShouldBe(2);
}
/// <summary>
/// Verifies the string representation includes serialized node timestamps.
/// </summary>
[Fact]
public void ToString_ShouldReturnReadableFormat()
{
// Arrange
var vc = new VectorClock();
vc.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var str = vc.ToString();
// Assert
str.ShouldContain("node1:100:1:node1");
str.ShouldContain("node2:200:2:node2");
}
/// <summary>
/// Verifies split-brain updates are detected as concurrent.
/// </summary>
[Fact]
public void SplitBrainScenario_ShouldDetectConcurrency()
{
// Arrange - Simulating a network partition scenario
// Partition 1: node1 and node2 are alive
var vcPartition1 = new VectorClock();
vcPartition1.SetTimestamp("node1", new HlcTimestamp(300, 5, "node1"));
vcPartition1.SetTimestamp("node2", new HlcTimestamp(250, 3, "node2"));
vcPartition1.SetTimestamp("node3", new HlcTimestamp(100, 1, "node3")); // Old data
// Partition 2: node3 is isolated
var vcPartition2 = new VectorClock();
vcPartition2.SetTimestamp("node1", new HlcTimestamp(150, 2, "node1")); // Old data
vcPartition2.SetTimestamp("node2", new HlcTimestamp(150, 1, "node2")); // Old data
vcPartition2.SetTimestamp("node3", new HlcTimestamp(400, 8, "node3")); // New data
// Act
var relation = vcPartition1.CompareTo(vcPartition2);
var partition1NeedsToPull = vcPartition1.GetNodesWithUpdates(vcPartition2).ToList();
var partition1NeedsToPush = vcPartition1.GetNodesToPush(vcPartition2).ToList();
// Assert
relation.ShouldBe(CausalityRelation.Concurrent);
partition1NeedsToPull.Count.ShouldBe(1);
partition1NeedsToPull.ShouldContain("node3");
partition1NeedsToPush.Count.ShouldBe(2);
partition1NeedsToPush.ShouldContain("node1");
partition1NeedsToPush.ShouldContain("node2");
}
}


@@ -0,0 +1,37 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Core.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Core.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Core.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<None Update="benchmark_limits.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" />
</ItemGroup>
</Project>


@@ -0,0 +1,4 @@
{
"RecursiveMerge_Simple_10k_Ops_MaxMs": 500,
"RecursiveMerge_Array_1k_Ops_MaxMs": 1500
}


@@ -0,0 +1,587 @@
using System.Net;
using System.Net.Sockets;
using System.Collections.Concurrent;
using System.Text.Json;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
namespace ZB.MOM.WW.CBDDC.E2E.Tests;
public class ClusterCrudSyncE2ETests
{
/// <summary>
/// Verifies two real peers replicate create, update, and delete operations in both directions.
/// </summary>
[Fact]
public async Task TwoPeers_ShouldReplicateCrudBidirectionally()
{
var clusterToken = Guid.NewGuid().ToString("N");
var nodeAPort = GetAvailableTcpPort();
var nodeBPort = GetAvailableTcpPort();
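// Re-roll node B's port in the unlikely case the OS returns the same ephemeral port twice.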
while (nodeBPort == nodeAPort)
{
nodeBPort = GetAvailableTcpPort();
}
await using var nodeA = TestPeerNode.Create(
nodeId: "node-a",
tcpPort: nodeAPort,
authToken: clusterToken,
knownPeers:
[
new KnownPeerConfiguration
{
NodeId = "node-b",
Host = "127.0.0.1",
Port = nodeBPort
}
]);
await using var nodeB = TestPeerNode.Create(
nodeId: "node-b",
tcpPort: nodeBPort,
authToken: clusterToken,
knownPeers:
[
new KnownPeerConfiguration
{
NodeId = "node-a",
Host = "127.0.0.1",
Port = nodeAPort
}
]);
await nodeA.StartAsync();
await nodeB.StartAsync();
const int timeoutSeconds = 45;
var nodeAUserId = "user-from-a";
await nodeA.UpsertUserAsync(new User
{
Id = nodeAUserId,
Name = "Alice",
Age = 30,
Address = new Address { City = "Austin" }
});
await AssertEventuallyAsync(() =>
{
var replicated = nodeB.ReadUser(nodeAUserId);
return replicated is not null
&& replicated.Name == "Alice"
&& replicated.Age == 30
&& replicated.Address?.City == "Austin";
}, timeoutSeconds, "Node B did not receive create from node A.", () => BuildDiagnostics(nodeA, nodeB));
await AssertEventuallyAsync(
() => nodeA.ReadUser(nodeAUserId) is not null,
timeoutSeconds,
"Node A could not read back its own created user.",
() => BuildDiagnostics(nodeA, nodeB));
await nodeA.DeleteUserAsync(nodeAUserId);
await nodeA.UpsertUserAsync(new User
{
Id = nodeAUserId,
Name = "Alice Updated",
Age = 31,
Address = new Address { City = "Dallas" }
});
await AssertEventuallyAsync(() =>
{
var replicated = nodeB.ReadUser(nodeAUserId);
return replicated is not null
&& replicated.Name == "Alice Updated"
&& replicated.Age == 31
&& replicated.Address?.City == "Dallas";
}, timeoutSeconds, "Node B did not receive update from node A.", () => BuildDiagnostics(nodeA, nodeB));
await nodeA.DeleteUserAsync(nodeAUserId);
await AssertEventuallyAsync(
() => nodeB.ReadUser(nodeAUserId) is null,
timeoutSeconds,
"Node B did not receive delete from node A.",
() => BuildDiagnostics(nodeA, nodeB));
var nodeBUserId = "user-from-b";
await nodeB.UpsertUserAsync(new User
{
Id = nodeBUserId,
Name = "Bob",
Age = 40,
Address = new Address { City = "Boston" }
});
await AssertEventuallyAsync(() =>
{
var replicated = nodeA.ReadUser(nodeBUserId);
return replicated is not null
&& replicated.Name == "Bob"
&& replicated.Age == 40
&& replicated.Address?.City == "Boston";
}, timeoutSeconds, "Node A did not receive create from node B.", () => BuildDiagnostics(nodeA, nodeB));
await AssertEventuallyAsync(
() => nodeB.ReadUser(nodeBUserId) is not null,
timeoutSeconds,
"Node B could not read back its own created user.",
() => BuildDiagnostics(nodeA, nodeB));
await nodeB.DeleteUserAsync(nodeBUserId);
await nodeB.UpsertUserAsync(new User
{
Id = nodeBUserId,
Name = "Bob Updated",
Age = 41,
Address = new Address { City = "Denver" }
});
await AssertEventuallyAsync(() =>
{
var replicated = nodeA.ReadUser(nodeBUserId);
return replicated is not null
&& replicated.Name == "Bob Updated"
&& replicated.Age == 41
&& replicated.Address?.City == "Denver";
}, timeoutSeconds, "Node A did not receive update from node B.", () => BuildDiagnostics(nodeA, nodeB));
await nodeB.DeleteUserAsync(nodeBUserId);
await AssertEventuallyAsync(
() => nodeA.ReadUser(nodeBUserId) is null,
timeoutSeconds,
"Node A did not receive delete from node B.",
() => BuildDiagnostics(nodeA, nodeB));
}
private static async Task AssertEventuallyAsync(
Func<bool> predicate,
int timeoutSeconds,
string failureMessage,
Func<string>? diagnostics = null)
{
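// Poll every 250 ms until the predicate holds; on timeout, append the optional diagnostics to the failure.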
var timeout = TimeSpan.FromSeconds(timeoutSeconds);
var startedAt = DateTime.UtcNow;
while (DateTime.UtcNow - startedAt < timeout)
{
if (predicate())
{
return;
}
await Task.Delay(250);
}
var suffix = diagnostics is null ? string.Empty : $"{Environment.NewLine}{diagnostics()}";
throw new Shouldly.ShouldAssertException($"{failureMessage}{suffix}");
}
private static string BuildDiagnostics(TestPeerNode nodeA, TestPeerNode nodeB)
{
var nodeAUserCount = nodeA.Context.Users.FindAll().Count();
var nodeBUserCount = nodeB.Context.Users.FindAll().Count();
var nodeAOplogCount = nodeA.Context.OplogEntries.FindAll().Count();
var nodeBOplogCount = nodeB.Context.OplogEntries.FindAll().Count();
var nodeAOplogByAuthor = string.Join(
", ",
nodeA.Context.OplogEntries.FindAll()
.GroupBy(e => e.TimestampNodeId)
.Select(g => $"{g.Key}:{g.Count()}"));
var nodeBOplogByAuthor = string.Join(
", ",
nodeB.Context.OplogEntries.FindAll()
.GroupBy(e => e.TimestampNodeId)
.Select(g => $"{g.Key}:{g.Count()}"));
var nodeAUsers = string.Join(", ", nodeA.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}"));
var nodeBUsers = string.Join(", ", nodeB.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}"));
return string.Join(
Environment.NewLine,
"Diagnostics:",
$"NodeA users={nodeAUserCount}, oplog={nodeAOplogCount}",
$"NodeA users detail={nodeAUsers}",
$"NodeA oplog by author={nodeAOplogByAuthor}",
$"NodeB users={nodeBUserCount}, oplog={nodeBOplogCount}",
$"NodeB users detail={nodeBUsers}",
$"NodeB oplog by author={nodeBOplogByAuthor}",
"NodeA logs:",
nodeA.GetRecentLogs(),
"NodeB logs:",
nodeB.GetRecentLogs());
}
private static int GetAvailableTcpPort()
{
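// Bind to port 0 so the OS assigns a free ephemeral port; disposing the listener releases it
// for the node under test (a small race window with other processes is accepted).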
using var listener = new TcpListener(IPAddress.Loopback, 0);
listener.Start();
return ((IPEndPoint)listener.LocalEndpoint).Port;
}
private sealed class TestPeerNode : IAsyncDisposable
{
private readonly ServiceProvider _services;
private readonly ICBDDCNode _node;
private readonly IOplogStore _oplogStore;
private readonly string _nodeId;
private readonly string _workDir;
private readonly InMemoryLogSink _logSink;
private bool _started;
private long _lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
private int _logicalCounter;
public SampleDbContext Context { get; }
private TestPeerNode(
ServiceProvider services,
ICBDDCNode node,
IOplogStore oplogStore,
SampleDbContext context,
InMemoryLogSink logSink,
string workDir,
string nodeId)
{
_services = services;
_node = node;
_oplogStore = oplogStore;
Context = context;
_logSink = logSink;
_workDir = workDir;
_nodeId = nodeId;
}
public static TestPeerNode Create(
string nodeId,
int tcpPort,
string authToken,
IReadOnlyList<KnownPeerConfiguration> knownPeers)
{
var workDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}");
Directory.CreateDirectory(workDir);
var dbPath = Path.Combine(workDir, "node.blite");
var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = tcpPort,
AuthToken = authToken,
KnownPeers = knownPeers.ToList()
});
var services = new ServiceCollection();
services.AddSingleton(new InMemoryLogSink(nodeId));
services.AddSingleton<ILoggerProvider, InMemoryLoggerProvider>();
services.AddLogging(builder => builder.SetMinimumLevel(LogLevel.Debug));
services.AddSingleton(configProvider);
services.AddSingleton<IPeerNodeConfigurationProvider>(configProvider);
services.AddCBDDCCore()
.AddCBDDCBLite<SampleDbContext, SampleDocumentStore>(_ => new SampleDbContext(dbPath))
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(useHostedService: false);
// Deterministic tests: sync uses explicit known peers, so disable UDP discovery.
services.AddSingleton<IDiscoveryService, PassiveDiscoveryService>();
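// A no-op handshake lets test peers connect without the production auth exchange.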
services.AddSingleton<IPeerHandshakeService, NoOpHandshakeService>();
var provider = services.BuildServiceProvider();
var node = provider.GetRequiredService<ICBDDCNode>();
var oplogStore = provider.GetRequiredService<IOplogStore>();
var context = provider.GetRequiredService<SampleDbContext>();
var logSink = provider.GetRequiredService<InMemoryLogSink>();
return new TestPeerNode(provider, node, oplogStore, context, logSink, workDir, nodeId);
}
public async Task StartAsync()
{
if (_started)
{
return;
}
await _node.Start();
_started = true;
}
public async Task StopAsync()
{
if (!_started)
{
return;
}
await _node.Stop();
_started = false;
}
public async ValueTask DisposeAsync()
{
try
{
await StopAsync();
}
catch
{
}
_services.Dispose();
TryDeleteDirectory(_workDir);
}
public User? ReadUser(string userId)
{
return Context.Users.Find(u => u.Id == userId).FirstOrDefault();
}
public async Task UpsertUserAsync(User user)
{
await PersistUserMutationWithOplogFallbackAsync(
user.Id,
OperationType.Put,
JsonSerializer.SerializeToElement(user),
async () =>
{
var existing = Context.Users.Find(u => u.Id == user.Id).FirstOrDefault();
if (existing == null)
{
await Context.Users.InsertAsync(user);
}
else
{
await Context.Users.UpdateAsync(user);
}
await Context.SaveChangesAsync();
});
}
public async Task DeleteUserAsync(string userId)
{
await PersistUserMutationWithOplogFallbackAsync(
userId,
OperationType.Delete,
payload: null,
async () =>
{
await Context.Users.DeleteAsync(userId);
await Context.SaveChangesAsync();
});
}
public string GetRecentLogs(int max = 50)
{
return _logSink.GetRecent(max);
}
private async Task PersistUserMutationWithOplogFallbackAsync(
string userId,
OperationType operationType,
JsonElement? payload,
Func<Task> mutation)
{
var oplogCountBefore = Context.OplogEntries.FindAll().Count();
await mutation();
// Prefer the native CDC path; fall back to a manual oplog entry only when CDC fails to emit one.
var deadline = DateTime.UtcNow.AddSeconds(3);
while (DateTime.UtcNow < deadline)
{
if (Context.OplogEntries.FindAll().Count() > oplogCountBefore)
{
return;
}
await Task.Delay(50);
}
var previousHash = await _oplogStore.GetLastEntryHashAsync(_nodeId) ?? string.Empty;
var fallbackEntry = new OplogEntry(
collection: "Users",
key: userId,
operation: operationType,
payload: payload,
timestamp: NextTimestamp(),
previousHash: previousHash);
await _oplogStore.AppendOplogEntryAsync(fallbackEntry);
await Context.SaveChangesAsync();
}
private HlcTimestamp NextTimestamp()
{
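// Hybrid logical clock: take the wall clock when it advances, otherwise bump the
// logical counter so timestamps from this node remain strictly monotonic.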
var now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (now > _lastPhysicalTime)
{
_lastPhysicalTime = now;
_logicalCounter = 0;
}
else
{
_logicalCounter++;
}
return new HlcTimestamp(_lastPhysicalTime, _logicalCounter, _nodeId);
}
private static void TryDeleteDirectory(string path)
{
if (!Directory.Exists(path))
{
return;
}
try
{
Directory.Delete(path, recursive: true);
}
catch
{
}
}
}
private sealed class PassiveDiscoveryService : IDiscoveryService
{
public IEnumerable<PeerNode> GetActivePeers()
{
return Array.Empty<PeerNode>();
}
public Task Start()
{
return Task.CompletedTask;
}
public Task Stop()
{
return Task.CompletedTask;
}
}
private sealed class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider
{
private PeerNodeConfiguration _configuration;
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
{
_configuration = configuration;
}
public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged;
public Task<PeerNodeConfiguration> GetConfiguration()
{
return Task.FromResult(_configuration);
}
public void Update(PeerNodeConfiguration configuration)
{
_configuration = configuration;
ConfigurationChanged?.Invoke(this, configuration);
}
}
private sealed class InMemoryLogSink
{
private readonly ConcurrentQueue<string> _entries = new();
private readonly string _nodeId;
public InMemoryLogSink(string nodeId)
{
_nodeId = nodeId;
}
public void Add(string category, LogLevel level, string message, Exception? exception)
{
var text = $"[{DateTime.UtcNow:O}] {_nodeId} {level} {category}: {message}";
if (exception is not null)
{
text = $"{text}{Environment.NewLine}{exception}";
}
_entries.Enqueue(text);
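// Trim the queue so long-running tests keep only the most recent 500 entries.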
while (_entries.Count > 500 && _entries.TryDequeue(out _))
{
}
}
public string GetRecent(int max)
{
var entries = _entries.ToArray();
if (entries.Length == 0)
{
return "<no logs>";
}
return string.Join(Environment.NewLine, entries.TakeLast(max));
}
}
private sealed class InMemoryLoggerProvider : ILoggerProvider
{
private readonly InMemoryLogSink _sink;
public InMemoryLoggerProvider(InMemoryLogSink sink)
{
_sink = sink;
}
public ILogger CreateLogger(string categoryName)
{
return new InMemoryLogger(categoryName, _sink);
}
public void Dispose()
{
}
}
private sealed class InMemoryLogger : ILogger
{
private readonly string _categoryName;
private readonly InMemoryLogSink _sink;
public InMemoryLogger(string categoryName, InMemoryLogSink sink)
{
_categoryName = categoryName;
_sink = sink;
}
public IDisposable BeginScope<TState>(TState state) where TState : notnull
{
return NullScope.Instance;
}
public bool IsEnabled(LogLevel logLevel)
{
return true;
}
public void Log<TState>(
LogLevel logLevel,
EventId eventId,
TState state,
Exception? exception,
Func<TState, Exception?, string> formatter)
{
_sink.Add(_categoryName, logLevel, formatter(state, exception), exception);
}
}
private sealed class NullScope : IDisposable
{
public static readonly NullScope Instance = new();
public void Dispose()
{
}
}
}


@@ -0,0 +1,2 @@
global using Shouldly;
global using ZB.MOM.WW.CBDDC.Sample.Console;


@@ -0,0 +1,33 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.E2E.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.E2E.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.E2E.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\samples\ZB.MOM.WW.CBDDC.Sample.Console\ZB.MOM.WW.CBDDC.Sample.Console.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj" />
</ItemGroup>
</Project>


@@ -0,0 +1,256 @@
using Microsoft.Extensions.Diagnostics.HealthChecks;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Hosting.Configuration;
using ZB.MOM.WW.CBDDC.Hosting.HealthChecks;
namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class CBDDCHealthCheckTests
{
/// <summary>
/// Verifies that health is reported as healthy when persistence is available and all peers are within lag thresholds.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenPersistenceOkAndPeersWithinLagThreshold_ReturnsHealthyWithPayload()
{
var store = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var peer1LastUpdate = DateTimeOffset.UtcNow.AddSeconds(-5);
var peer2LastUpdate = DateTimeOffset.UtcNow.AddSeconds(-2);
store.GetLatestTimestampAsync(Arg.Any<CancellationToken>()).Returns(new HlcTimestamp(1_000, 0, "node-1"));
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<string>>(new[] { "peer-1", "peer-2" }));
confirmationStore.GetConfirmationsForPeerAsync("peer-1", Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<PeerOplogConfirmation>>(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "peer-1",
SourceNodeId = "source-1",
ConfirmedWall = 995,
ConfirmedLogic = 0,
LastConfirmedUtc = peer1LastUpdate,
IsActive = true
}
}));
confirmationStore.GetConfirmationsForPeerAsync("peer-2", Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<PeerOplogConfirmation>>(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "peer-2",
SourceNodeId = "source-1",
ConfirmedWall = 990,
ConfirmedLogic = 0,
LastConfirmedUtc = peer2LastUpdate,
IsActive = true
}
}));
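// Latest local oplog timestamp has wall 1000; the slowest confirmation has wall 990,
// so the expected max lag is 10 ms, under the 20 ms threshold.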
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 20, criticalLagThresholdMs: 50));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
result.Status.ShouldBe(HealthStatus.Healthy);
result.Data["trackedPeerCount"].ShouldBe(2);
result.Data["maxLagMs"].ShouldBe(10L);
result.Data["laggingPeers"].ShouldBeOfType<List<string>>().Count.ShouldBe(0);
result.Data["peersWithNoConfirmation"].ShouldBeOfType<List<string>>().Count.ShouldBe(0);
var lastUpdates = result.Data["lastSuccessfulConfirmationUpdateByPeer"]
.ShouldBeOfType<Dictionary<string, DateTimeOffset?>>();
lastUpdates["peer-1"].ShouldBe(peer1LastUpdate);
lastUpdates["peer-2"].ShouldBe(peer2LastUpdate);
}
/// <summary>
/// Verifies that health is reported as degraded when at least one peer is lagging or has no confirmation.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenPeersLaggingOrUnconfirmed_ReturnsDegradedWithPayload()
{
var store = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var peer1LastUpdate = DateTimeOffset.UtcNow.AddSeconds(-10);
store.GetLatestTimestampAsync(Arg.Any<CancellationToken>()).Returns(new HlcTimestamp(1_000, 0, "node-1"));
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<string>>(new[] { "peer-1", "peer-2", "peer-3" }));
confirmationStore.GetConfirmationsForPeerAsync("peer-1", Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<PeerOplogConfirmation>>(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "peer-1",
SourceNodeId = "source-1",
ConfirmedWall = 960,
ConfirmedLogic = 0,
LastConfirmedUtc = peer1LastUpdate,
IsActive = true
}
}));
confirmationStore.GetConfirmationsForPeerAsync("peer-2", Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<PeerOplogConfirmation>>(Array.Empty<PeerOplogConfirmation>()));
confirmationStore.GetConfirmationsForPeerAsync("peer-3", Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<PeerOplogConfirmation>>(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "peer-3",
SourceNodeId = "source-1",
ConfirmedWall = 995,
ConfirmedLogic = 0,
LastConfirmedUtc = DateTimeOffset.UtcNow.AddSeconds(-4),
IsActive = true
}
}));
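// peer-1 lags by 40 ms (1000 - 960), over the 30 ms threshold but under the 100 ms
// critical limit; peer-2 has no confirmation at all, so the check should degrade.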
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 30, criticalLagThresholdMs: 100));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
result.Status.ShouldBe(HealthStatus.Degraded);
result.Data["trackedPeerCount"].ShouldBe(3);
result.Data["maxLagMs"].ShouldBe(40L);
result.Data["laggingPeers"].ShouldBeOfType<List<string>>().ShouldContain("peer-1");
result.Data["peersWithNoConfirmation"].ShouldBeOfType<List<string>>().ShouldContain("peer-2");
var lastUpdates = result.Data["lastSuccessfulConfirmationUpdateByPeer"]
.ShouldBeOfType<Dictionary<string, DateTimeOffset?>>();
lastUpdates["peer-1"].ShouldBe(peer1LastUpdate);
lastUpdates["peer-2"].ShouldBeNull();
}
/// <summary>
/// Verifies that health is reported as unhealthy when critical lag threshold is exceeded.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenCriticalLagBreached_ReturnsUnhealthyWithPayload()
{
var store = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
store.GetLatestTimestampAsync(Arg.Any<CancellationToken>()).Returns(new HlcTimestamp(1_000, 0, "node-1"));
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<string>>(new[] { "peer-critical" }));
confirmationStore.GetConfirmationsForPeerAsync("peer-critical", Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<PeerOplogConfirmation>>(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "peer-critical",
SourceNodeId = "source-1",
ConfirmedWall = 850,
ConfirmedLogic = 0,
LastConfirmedUtc = DateTimeOffset.UtcNow.AddMinutes(-1),
IsActive = true
}
}));
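// Lag of 150 ms (1000 - 850) exceeds the 80 ms critical threshold, so the check should report Unhealthy.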
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 30, criticalLagThresholdMs: 80));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
result.Status.ShouldBe(HealthStatus.Unhealthy);
result.Data["maxLagMs"].ShouldBe(150L);
result.Data["laggingPeers"].ShouldBeOfType<List<string>>().ShouldContain("peer-critical");
}
/// <summary>
/// Verifies that worst-case lag is used when a peer has multiple source confirmations.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenPeerHasMultipleSourceConfirmations_UsesWorstCaseLag()
{
var store = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
store.GetLatestTimestampAsync(Arg.Any<CancellationToken>()).Returns(new HlcTimestamp(1_000, 0, "node-1"));
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<string>>(new[] { "peer-1" }));
confirmationStore.GetConfirmationsForPeerAsync("peer-1", Arg.Any<CancellationToken>())
.Returns(Task.FromResult<IEnumerable<PeerOplogConfirmation>>(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "peer-1",
SourceNodeId = "source-fast",
ConfirmedWall = 995,
ConfirmedLogic = 0,
LastConfirmedUtc = DateTimeOffset.UtcNow.AddSeconds(-1),
IsActive = true
},
new PeerOplogConfirmation
{
PeerNodeId = "peer-1",
SourceNodeId = "source-slow",
ConfirmedWall = 900,
ConfirmedLogic = 0,
LastConfirmedUtc = DateTimeOffset.UtcNow.AddSeconds(-10),
IsActive = true
}
}));
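// Two confirmations from the same peer: 5 ms of lag via source-fast and 100 ms via
// source-slow; the worst case (100 ms) should drive the Degraded status.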
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions(lagThresholdMs: 80, criticalLagThresholdMs: 150));
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
result.Status.ShouldBe(HealthStatus.Degraded);
result.Data["maxLagMs"].ShouldBe(100L);
result.Data["laggingPeers"].ShouldBeOfType<List<string>>().ShouldContain("peer-1");
}
/// <summary>
/// Verifies that health is reported as unhealthy when the persistence store throws.
/// </summary>
[Fact]
public async Task CheckHealthAsync_WhenStoreThrows_ReturnsUnhealthy()
{
var store = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var error = new InvalidOperationException("store unavailable");
store.GetLatestTimestampAsync(Arg.Any<CancellationToken>())
.Returns(Task.FromException<HlcTimestamp>(error));
var healthCheck = new CBDDCHealthCheck(
store,
confirmationStore,
CreateOptions());
var result = await healthCheck.CheckHealthAsync(new HealthCheckContext());
result.Status.ShouldBe(HealthStatus.Unhealthy);
result.Exception.ShouldBe(error);
result.Description.ShouldNotBeNull();
result.Description.ShouldContain("persistence layer is unavailable");
}
private static CBDDCHostingOptions CreateOptions(
long lagThresholdMs = 30_000,
long criticalLagThresholdMs = 120_000)
{
return new CBDDCHostingOptions
{
Cluster = new ClusterOptions
{
PeerConfirmationLagThresholdMs = lagThresholdMs,
PeerConfirmationCriticalLagThresholdMs = criticalLagThresholdMs
}
};
}
}


@@ -0,0 +1,124 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Diagnostics.HealthChecks;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Options;
using ZB.MOM.WW.CBDDC.Hosting.Configuration;
using ZB.MOM.WW.CBDDC.Hosting.HostedServices;
using ZB.MOM.WW.CBDDC.Hosting.Services;
using ZB.MOM.WW.CBDDC.Network;
namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class CBDDCHostingExtensionsTests
{
/// <summary>
/// Verifies that adding CBDDC hosting throws when the service collection is null.
/// </summary>
[Fact]
public void AddCBDDCHosting_WithNullServices_ThrowsArgumentNullException()
{
Should.Throw<ArgumentNullException>(() =>
CBDDCHostingExtensions.AddCBDDCHosting(null!, _ => { }));
}
/// <summary>
/// Verifies that adding CBDDC hosting throws when the configuration delegate is null.
/// </summary>
[Fact]
public void AddCBDDCHosting_WithNullConfigure_ThrowsArgumentNullException()
{
var services = new ServiceCollection();
Should.Throw<ArgumentNullException>(() => services.AddCBDDCHosting(null!));
}
/// <summary>
/// Verifies that single-cluster hosting registers expected services and configured options.
/// </summary>
[Fact]
public void AddCBDDCHostingSingleCluster_RegistersExpectedServicesAndOptions()
{
var services = new ServiceCollection();
services.AddCBDDCHostingSingleCluster(options =>
{
options.NodeId = "node-1";
options.TcpPort = 5055;
options.PeerConfirmationLagThresholdMs = 45_000;
options.PeerConfirmationCriticalLagThresholdMs = 180_000;
});
var optionsDescriptor = services.SingleOrDefault(d => d.ServiceType == typeof(CBDDCHostingOptions));
optionsDescriptor.ShouldNotBeNull();
var options = optionsDescriptor.ImplementationInstance.ShouldBeOfType<CBDDCHostingOptions>();
options.Cluster.NodeId.ShouldBe("node-1");
options.Cluster.TcpPort.ShouldBe(5055);
options.Cluster.PeerConfirmationLagThresholdMs.ShouldBe(45_000);
options.Cluster.PeerConfirmationCriticalLagThresholdMs.ShouldBe(180_000);
ShouldContainService<IDiscoveryService, NoOpDiscoveryService>(services);
ShouldContainService<ISyncOrchestrator, SyncOrchestrator>(services);
ShouldContainHostedService<TcpSyncServerHostedService>(services);
ShouldContainHostedService<DiscoveryServiceHostedService>(services);
using var provider = services.BuildServiceProvider();
var healthChecks = provider.GetRequiredService<IOptions<HealthCheckServiceOptions>>().Value;
var registration = healthChecks.Registrations.SingleOrDefault(r => r.Name == "cbddc");
registration.ShouldNotBeNull();
registration.FailureStatus.ShouldBe(HealthStatus.Unhealthy);
registration.Tags.ShouldContain("db");
registration.Tags.ShouldContain("ready");
}
/// <summary>
/// Verifies that single-cluster hosting uses default options when no configuration delegate is provided.
/// </summary>
[Fact]
public void AddCBDDCHostingSingleCluster_WithNullConfigure_UsesDefaults()
{
var services = new ServiceCollection();
services.AddCBDDCHostingSingleCluster();
var optionsDescriptor = services.SingleOrDefault(d => d.ServiceType == typeof(CBDDCHostingOptions));
optionsDescriptor.ShouldNotBeNull();
var options = optionsDescriptor.ImplementationInstance.ShouldBeOfType<CBDDCHostingOptions>();
options.Cluster.ShouldNotBeNull();
}
/// <summary>
/// Verifies that health check registration is skipped when health checks are disabled.
/// </summary>
[Fact]
public void AddCBDDCHosting_WithHealthChecksDisabled_DoesNotRegisterCBDDCHealthCheck()
{
var services = new ServiceCollection();
services.AddCBDDCHosting(options =>
{
options.EnableHealthChecks = false;
});
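        // Adding a health check registers an IConfigureOptions<HealthCheckServiceOptions>
        // entry, so its absence shows the CBDDC check was skipped entirely.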
services.Any(d => d.ServiceType == typeof(IConfigureOptions<HealthCheckServiceOptions>))
.ShouldBeFalse();
}
private static void ShouldContainService<TService, TImplementation>(IServiceCollection services)
{
services.Any(d =>
d.ServiceType == typeof(TService) &&
d.ImplementationType == typeof(TImplementation))
.ShouldBeTrue();
}
private static void ShouldContainHostedService<THostedService>(IServiceCollection services)
{
services.Any(d =>
d.ServiceType == typeof(IHostedService) &&
d.ImplementationType == typeof(THostedService))
.ShouldBeTrue();
}
}

View File

@@ -0,0 +1,2 @@
global using NSubstitute;
global using Shouldly;

View File

@@ -0,0 +1,42 @@
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Hosting.HostedServices;
using ZB.MOM.WW.CBDDC.Network;
namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class HostedServicesTests
{
/// <summary>
/// Verifies that the TCP sync server hosted service starts and stops the server lifecycle.
/// </summary>
[Fact]
public async Task TcpSyncServerHostedService_StartAndStop_CallsServerLifecycle()
{
var syncServer = Substitute.For<ISyncServer>();
var logger = Substitute.For<ILogger<TcpSyncServerHostedService>>();
var hostedService = new TcpSyncServerHostedService(syncServer, logger);
await hostedService.StartAsync(CancellationToken.None);
await hostedService.StopAsync(CancellationToken.None);
await syncServer.Received(1).Start();
await syncServer.Received(1).Stop();
}
/// <summary>
/// Verifies that the discovery hosted service starts and stops the discovery lifecycle.
/// </summary>
[Fact]
public async Task DiscoveryServiceHostedService_StartAndStop_CallsDiscoveryLifecycle()
{
var discoveryService = Substitute.For<IDiscoveryService>();
var logger = Substitute.For<ILogger<DiscoveryServiceHostedService>>();
var hostedService = new DiscoveryServiceHostedService(discoveryService, logger);
await hostedService.StartAsync(CancellationToken.None);
await hostedService.StopAsync(CancellationToken.None);
await discoveryService.Received(1).Start();
await discoveryService.Received(1).Stop();
}
}

View File

@@ -0,0 +1,35 @@
using ZB.MOM.WW.CBDDC.Hosting.Services;
namespace ZB.MOM.WW.CBDDC.Hosting.Tests;
public class NoOpServicesTests
{
/// <summary>
/// Verifies that no-op discovery service lifecycle calls complete and no peers are returned.
/// </summary>
[Fact]
public async Task NoOpDiscoveryService_ReturnsNoPeers_AndCompletesLifecycleCalls()
{
var service = new NoOpDiscoveryService();
service.GetActivePeers().ShouldBeEmpty();
(await Record.ExceptionAsync(() => service.Start())).ShouldBeNull();
(await Record.ExceptionAsync(() => service.Stop())).ShouldBeNull();
service.Dispose();
}
/// <summary>
/// Verifies that no-op sync orchestrator lifecycle calls complete without exceptions.
/// </summary>
[Fact]
public async Task NoOpSyncOrchestrator_CompletesLifecycleCalls()
{
var orchestrator = new NoOpSyncOrchestrator();
(await Record.ExceptionAsync(() => orchestrator.Start())).ShouldBeNull();
(await Record.ExceptionAsync(() => orchestrator.Stop())).ShouldBeNull();
orchestrator.Dispose();
}
}

View File

@@ -0,0 +1,32 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Hosting.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Hosting.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Hosting.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="8.0.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Hosting\ZB.MOM.WW.CBDDC.Hosting.csproj" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,162 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Network.Leadership;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class BullyLeaderElectionServiceTests
{
private static IDiscoveryService CreateDiscovery(IList<PeerNode> peers)
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(_ => peers);
return discovery;
}
private static IPeerNodeConfigurationProvider CreateConfig(string nodeId)
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = nodeId });
return configProvider;
}
/// <summary>
/// Verifies that a single node elects itself as leader.
/// </summary>
[Fact]
public async Task SingleNode_ShouldBecomeLeader()
{
var peers = new List<PeerNode>();
var electionService = new BullyLeaderElectionService(
CreateDiscovery(peers),
CreateConfig("node-A"),
electionInterval: TimeSpan.FromMilliseconds(100));
LeadershipChangedEventArgs? lastEvent = null;
electionService.LeadershipChanged += (_, e) => lastEvent = e;
await electionService.Start();
await Task.Delay(200);
electionService.IsCloudGateway.ShouldBeTrue();
electionService.CurrentGatewayNodeId.ShouldBe("node-A");
lastEvent.ShouldNotBeNull();
lastEvent!.IsLocalNodeGateway.ShouldBeTrue();
lastEvent.CurrentGatewayNodeId.ShouldBe("node-A");
await electionService.Stop();
}
/// <summary>
/// Verifies that the smallest node ID is elected as leader among LAN peers.
/// </summary>
[Fact]
public async Task MultipleNodes_SmallestNodeIdShouldBeLeader()
{
var peers = new List<PeerNode>
{
new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
new("node-C", "192.168.1.3:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
};
var electionService = new BullyLeaderElectionService(
CreateDiscovery(peers),
CreateConfig("node-A"),
electionInterval: TimeSpan.FromMilliseconds(100));
await electionService.Start();
await Task.Delay(200);
electionService.IsCloudGateway.ShouldBeTrue();
electionService.CurrentGatewayNodeId.ShouldBe("node-A");
await electionService.Stop();
}
/// <summary>
/// Verifies that the local node is not elected when it is not the smallest node ID.
/// </summary>
[Fact]
public async Task LocalNodeNotSmallest_ShouldNotBeLeader()
{
var peers = new List<PeerNode>
{
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
};
var electionService = new BullyLeaderElectionService(
CreateDiscovery(peers),
CreateConfig("node-C"),
electionInterval: TimeSpan.FromMilliseconds(100));
await electionService.Start();
await Task.Delay(200);
electionService.IsCloudGateway.ShouldBeFalse();
electionService.CurrentGatewayNodeId.ShouldBe("node-A");
await electionService.Stop();
}
/// <summary>
/// Verifies that leadership is re-elected when the current leader fails.
/// </summary>
[Fact]
public async Task LeaderFailure_ShouldReelect()
{
var peers = new List<PeerNode>
{
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered)
};
var electionService = new BullyLeaderElectionService(
CreateDiscovery(peers),
CreateConfig("node-B"),
electionInterval: TimeSpan.FromMilliseconds(100));
var leadershipChanges = new List<LeadershipChangedEventArgs>();
electionService.LeadershipChanged += (_, e) => leadershipChanges.Add(e);
await electionService.Start();
await Task.Delay(200);
electionService.CurrentGatewayNodeId.ShouldBe("node-A");
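        // Simulate leader failure: node-A disappears from the active peer list.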
peers.Clear();
await Task.Delay(200);
electionService.IsCloudGateway.ShouldBeTrue();
electionService.CurrentGatewayNodeId.ShouldBe("node-B");
leadershipChanges.ShouldNotBeEmpty();
leadershipChanges.Last().IsLocalNodeGateway.ShouldBeTrue();
leadershipChanges.Last().CurrentGatewayNodeId.ShouldBe("node-B");
await electionService.Stop();
}
/// <summary>
/// Verifies that cloud peers are excluded from LAN gateway election.
/// </summary>
[Fact]
public async Task CloudPeersExcludedFromElection()
{
var peers = new List<PeerNode>
{
new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered),
new("cloud-node-Z", "cloud.example.com:9000", DateTimeOffset.UtcNow, PeerType.CloudRemote)
};
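        // "cloud-node-Z" would win a smallest-id sort, so electing node-A proves
        // CloudRemote peers are excluded from the LAN election.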
var electionService = new BullyLeaderElectionService(
CreateDiscovery(peers),
CreateConfig("node-B"),
electionInterval: TimeSpan.FromMilliseconds(100));
await electionService.Start();
await Task.Delay(200);
electionService.CurrentGatewayNodeId.ShouldBe("node-A");
await electionService.Stop();
}
}

View File

@@ -0,0 +1,90 @@
using System.IO;
using System.Net.Sockets;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class ConnectionTests
{
/// <summary>
/// Verifies that the server rejects new clients when the configured connection limit is reached.
/// </summary>
[Fact]
public async Task Server_Should_Reject_Clients_When_Limit_Reached()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
oplogStore.GetLatestTimestampAsync(Arg.Any<CancellationToken>())
.Returns(new HlcTimestamp(0, 0, "node"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(new VectorClock());
oplogStore.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
oplogStore.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
{
NodeId = "server-node",
AuthToken = "auth-token",
TcpPort = 0
});
var snapshotService = Substitute.For<ISnapshotService>();
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(["Users", "TodoLists"]);
var authenticator = Substitute.For<IAuthenticator>();
authenticator.ValidateAsync(Arg.Any<string>(), Arg.Any<string>()).Returns(true);
var handshakeService = Substitute.For<IPeerHandshakeService>();
handshakeService.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((CipherState?)null);
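        // Stub the handshake to complete without negotiating a CipherState (no encryption in this test).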
var server = new TcpSyncServer(
oplogStore,
documentStore,
snapshotService,
configProvider,
NullLogger<TcpSyncServer>.Instance,
authenticator,
handshakeService);
server.MaxConnections = 2;
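        // With the limit set to two, a third concurrent client must be turned away.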
await server.Start();
        var port = server.ListeningPort ?? throw new InvalidOperationException("Server not started");
using var client1 = new TcpClient();
using var client2 = new TcpClient();
using var client3 = new TcpClient();
try
{
// Act
await client1.ConnectAsync("127.0.0.1", port);
await client2.ConnectAsync("127.0.0.1", port);
await Task.Delay(100);
await client3.ConnectAsync("127.0.0.1", port);
// Assert
var stream3 = client3.GetStream();
var buffer = new byte[10];
var read = await stream3.ReadAsync(buffer, 0, 10);
read.ShouldBe(0, "Server should close connection immediately for client 3");
client1.Connected.ShouldBeTrue();
client2.Connected.ShouldBeTrue();
}
finally
{
await server.Stop();
}
}
}

View File

@@ -0,0 +1,50 @@
using System.Security.Cryptography;
using ZB.MOM.WW.CBDDC.Network.Security;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class CryptoHelperTests
{
/// <summary>
/// Verifies that encrypted data can be decrypted back to the original payload.
/// </summary>
[Fact]
public void EncryptDecrypt_ShouldPreserveData()
{
// Arrange
var key = new byte[32]; // 256 bits
RandomNumberGenerator.Fill(key);
var original = new byte[] { 1, 2, 3, 4, 5, 255, 0, 10 };
// Act
var (ciphertext, iv, tag) = CryptoHelper.Encrypt(original, key);
var decrypted = CryptoHelper.Decrypt(ciphertext, iv, tag, key);
// Assert
decrypted.ShouldBe(original);
}
/// <summary>
/// Verifies that decryption fails when ciphertext is tampered with.
/// </summary>
[Fact]
public void Decrypt_ShouldFail_IfTampered()
{
// Arrange
var key = new byte[32];
RandomNumberGenerator.Fill(key);
var original = new byte[] { 1, 2, 3 };
var (ciphertext, iv, tag) = CryptoHelper.Encrypt(original, key);
// Tamper ciphertext
ciphertext[0] ^= 0xFF;
// Act
Action act = () => CryptoHelper.Decrypt(ciphertext, iv, tag, key);
// Assert
Should.Throw<CryptographicException>(act);
}
}

View File

@@ -0,0 +1,2 @@
global using NSubstitute;
global using Shouldly;

View File

@@ -0,0 +1,75 @@
using System.IO;
using System.Net.Sockets;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class HandshakeRegressionTests
{
/// <summary>
/// Verifies that the server invokes the handshake service when a client connects.
/// </summary>
[Fact]
public async Task Server_Should_Call_HandshakeService_On_Client_Connection()
{
// Arrange
var oplogStore = Substitute.For<IOplogStore>();
oplogStore.GetLatestTimestampAsync(Arg.Any<CancellationToken>())
.Returns(new HlcTimestamp(0, 0, "node"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(new VectorClock());
oplogStore.GetOplogAfterAsync(Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
oplogStore.GetOplogForNodeAfterAsync(Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<IEnumerable<string>?>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<OplogEntry>());
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
{
NodeId = "server-node",
AuthToken = "auth-token",
TcpPort = 0
});
var snapshotService = Substitute.For<ISnapshotService>();
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(["Users"]);
var authenticator = Substitute.For<IAuthenticator>();
authenticator.ValidateAsync(Arg.Any<string>(), Arg.Any<string>()).Returns(true);
var handshakeService = Substitute.For<IPeerHandshakeService>();
handshakeService.HandshakeAsync(Arg.Any<Stream>(), Arg.Any<bool>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((CipherState?)null);
var server = new TcpSyncServer(
oplogStore,
documentStore,
snapshotService,
configProvider,
NullLogger<TcpSyncServer>.Instance,
authenticator,
handshakeService);
await server.Start();
        var port = server.ListeningPort ?? throw new InvalidOperationException("Server did not start or report port");
// Act
using (var client = new TcpClient())
{
await client.ConnectAsync("127.0.0.1", port);
await Task.Delay(500);
}
await server.Stop();
// Assert
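        // The server side acts as the responder (isInitiator: false) under its own node id.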
await handshakeService.Received(1)
.HandshakeAsync(Arg.Any<Stream>(), false, "server-node", Arg.Any<CancellationToken>());
}
}

View File

@@ -0,0 +1,178 @@
using System;
using System.IO;
using System.Linq;
using System.Security.Cryptography;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
using Google.Protobuf;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class ProtocolTests
{
private readonly ProtocolHandler _handler;
/// <summary>
/// Initializes a new instance of the <see cref="ProtocolTests"/> class.
/// </summary>
public ProtocolTests()
{
_handler = new ProtocolHandler(NullLogger<ProtocolHandler>.Instance);
}
/// <summary>
/// Verifies a plain message can be written and read without transformation.
/// </summary>
[Fact]
        public async Task RoundTrip_ShouldWork_WithPlainMessage()
{
// Arrange
var stream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "node-1", AuthToken = "token" };
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, null);
stream.Position = 0; // Reset for reading
var (type, payload) = await _handler.ReadMessageAsync(stream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("node-1");
decoded.AuthToken.ShouldBe("token");
}
/// <summary>
/// Verifies a compressed message can be written and read successfully.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithCompression()
{
// Arrange
var stream = new MemoryStream();
            // Create a large payload so the compression path is exercised even with a small threshold
var largeData = string.Join("", Enumerable.Repeat("ABCDEF0123456789", 100));
var message = new HandshakeRequest { NodeId = largeData, AuthToken = "token" };
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, null);
stream.Position = 0;
var (type, payload) = await _handler.ReadMessageAsync(stream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe(largeData);
}
/// <summary>
/// Verifies an encrypted message can be written and read successfully.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithEncryption()
{
// Arrange
var stream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "secure-node", AuthToken = "secure-token" };
            // Build a symmetric CipherState: the same 256-bit key on both sides gives a loopback channel
            var key = new byte[32];
            RandomNumberGenerator.Fill(key);
            var cipherState = new CipherState(key, key); // Encrypt and decrypt with the same key
// Act
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, cipherState);
stream.Position = 0;
var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("secure-node");
}
/// <summary>
/// Verifies a message can be round-tripped when both compression and encryption are enabled.
/// </summary>
[Fact]
public async Task RoundTrip_ShouldWork_WithEncryption_And_Compression()
{
// Arrange
var stream = new MemoryStream();
var largeData = string.Join("", Enumerable.Repeat("SECURECOMPRESSION", 100));
var message = new HandshakeRequest { NodeId = largeData };
            var key = new byte[32];
            RandomNumberGenerator.Fill(key);
            var cipherState = new CipherState(key, key);
// Act: Compress THEN Encrypt
await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, cipherState);
stream.Position = 0;
            // On the wire this is a MessageType.SecureEnv frame;
            // ReadMessageAsync decrypts and decompresses it transparently.
var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe(largeData);
}
/// <summary>
/// Verifies that message reads succeed when bytes arrive in small fragments.
/// </summary>
[Fact]
public async Task ReadMessage_ShouldHandle_Fragmentation()
{
// Arrange
var fullStream = new MemoryStream();
var message = new HandshakeRequest { NodeId = "fragmented" };
await _handler.SendMessageAsync(fullStream, MessageType.HandshakeReq, message, false, null);
byte[] completeBytes = fullStream.ToArray();
var fragmentedStream = new FragmentedMemoryStream(completeBytes, chunkSize: 2); // Read 2 bytes at a time
// Act
var (type, payload) = await _handler.ReadMessageAsync(fragmentedStream, null);
// Assert
type.ShouldBe(MessageType.HandshakeReq);
var decoded = HandshakeRequest.Parser.ParseFrom(payload);
decoded.NodeId.ShouldBe("fragmented");
}
// Helper Stream for fragmentation test
private class FragmentedMemoryStream : MemoryStream
{
private readonly int _chunkSize;
/// <summary>
/// Initializes a new instance of the <see cref="FragmentedMemoryStream"/> class.
/// </summary>
/// <param name="buffer">The backing stream buffer.</param>
/// <param name="chunkSize">The maximum bytes returned per read.</param>
public FragmentedMemoryStream(byte[] buffer, int chunkSize) : base(buffer)
{
_chunkSize = chunkSize;
}
/// <inheritdoc />
public override async Task<int> ReadAsync(byte[] buffer, int offset, int count, System.Threading.CancellationToken cancellationToken)
{
// Force read to be max _chunkSize, even if more is requested
int toRead = Math.Min(count, _chunkSize);
return await base.ReadAsync(buffer, offset, toRead, cancellationToken);
}
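            /// <inheritdoc />
            public override async ValueTask<int> ReadAsync(Memory<byte> buffer, System.Threading.CancellationToken cancellationToken = default)
            {
                // Modern Stream callers read through the Memory<byte> overload, which
                // MemoryStream implements directly and would bypass the override above;
                // cap it as well so fragmentation is exercised on either read path
                // (a defensive assumption about which overload the handler uses).
                int toRead = Math.Min(buffer.Length, _chunkSize);
                return await base.ReadAsync(buffer.Slice(0, toRead), cancellationToken);
            }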
}
}
}

View File

@@ -0,0 +1,177 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class SecureHandshakeTests
{
/// <summary>
/// Verifies handshake negotiation succeeds between initiator and responder services.
/// </summary>
[Fact]
public async Task Handshake_Should_Succeed_Between_Two_Services()
{
// Arrange
var clientStream = new PipeStream();
var serverStream = new PipeStream();
// Client writes to clientStream, server reads from clientStream
// Server writes to serverStream, client reads from serverStream
var clientSocket = new DuplexStream(serverStream, clientStream); // Read from server, Write to client
var serverSocket = new DuplexStream(clientStream, serverStream); // Read from client, Write to server
var clientService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
var serverService = new SecureHandshakeService(NullLogger<SecureHandshakeService>.Instance);
var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5));
// Act
var clientTask = clientService.HandshakeAsync(clientSocket, isInitiator: true, myNodeId: "client", token: cts.Token);
var serverTask = serverService.HandshakeAsync(serverSocket, isInitiator: false, myNodeId: "server", token: cts.Token);
await Task.WhenAll(clientTask, serverTask);
// Assert
var clientState = clientTask.Result;
var serverState = serverTask.Result;
clientState.ShouldNotBeNull();
serverState.ShouldNotBeNull();
// Keys should match (Symmetric)
clientState!.EncryptKey.ShouldBe(serverState!.DecryptKey);
clientState.DecryptKey.ShouldBe(serverState.EncryptKey);
}
        // MemoryStream alone is not safe for concurrent reads and writes, and the
        // handshake is interactive, so a real pipe is needed: writes append to a
        // shared buffer and signal a semaphore that pending reads wait on.
class PipeStream : Stream
{
private readonly MemoryStream _buffer = new MemoryStream();
private readonly SemaphoreSlim _readSemaphore = new SemaphoreSlim(0);
private readonly object _lock = new object();
/// <inheritdoc />
public override bool CanRead => true;
/// <inheritdoc />
public override bool CanSeek => false;
/// <inheritdoc />
public override bool CanWrite => true;
/// <inheritdoc />
public override long Length => _buffer.Length;
/// <inheritdoc />
public override long Position { get => _buffer.Position; set => throw new NotSupportedException(); }
/// <inheritdoc />
public override void Flush() { }
/// <inheritdoc />
public override int Read(byte[] buffer, int offset, int count) => throw new NotImplementedException("Use Async");
/// <inheritdoc />
public override async Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
{
await _readSemaphore.WaitAsync(cancellationToken);
lock (_lock)
{
_buffer.Position = 0;
int read = _buffer.Read(buffer, offset, count);
// Compact buffer (inefficient but works for unit tests)
byte[] remaining = _buffer.ToArray().Skip(read).ToArray();
_buffer.SetLength(0);
_buffer.Write(remaining, 0, remaining.Length);
if (_buffer.Length > 0) _readSemaphore.Release(); // Signal if data remains
return read;
}
}
/// <inheritdoc />
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
/// <inheritdoc />
public override void SetLength(long value) => throw new NotSupportedException();
/// <inheritdoc />
public override void Write(byte[] buffer, int offset, int count)
{
lock (_lock)
{
long pos = _buffer.Position;
_buffer.Seek(0, SeekOrigin.End);
_buffer.Write(buffer, offset, count);
_buffer.Position = pos;
}
_readSemaphore.Release();
}
}
class DuplexStream : Stream
{
private readonly Stream _readSource;
private readonly Stream _writeTarget;
/// <summary>
/// Initializes a new instance of the <see cref="DuplexStream"/> class.
/// </summary>
/// <param name="readSource">The underlying stream used for read operations.</param>
/// <param name="writeTarget">The underlying stream used for write operations.</param>
public DuplexStream(Stream readSource, Stream writeTarget)
{
_readSource = readSource;
_writeTarget = writeTarget;
}
/// <inheritdoc />
public override bool CanRead => true;
/// <inheritdoc />
public override bool CanSeek => false;
/// <inheritdoc />
public override bool CanWrite => true;
/// <inheritdoc />
public override long Length => 0;
/// <inheritdoc />
public override long Position { get => 0; set { } }
/// <inheritdoc />
public override void Flush() => _writeTarget.Flush();
/// <inheritdoc />
public override int Read(byte[] buffer, int offset, int count) => _readSource.Read(buffer, offset, count);
/// <inheritdoc />
public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
=> _readSource.ReadAsync(buffer, offset, count, cancellationToken);
/// <inheritdoc />
public override void Write(byte[] buffer, int offset, int count) => _writeTarget.Write(buffer, offset, count);
/// <inheritdoc />
public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
=> _writeTarget.WriteAsync(buffer, offset, count, cancellationToken);
/// <inheritdoc />
public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
/// <inheritdoc />
public override void SetLength(long value) => throw new NotSupportedException();
}
}
}

View File

@@ -0,0 +1,287 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class SnapshotReconnectRegressionTests
{
        // Test subclass that surfaces the private inbound-batch path via reflection
private class TestableSyncOrchestrator : SyncOrchestrator
{
/// <summary>
/// Initializes a new instance of the <see cref="TestableSyncOrchestrator"/> class.
/// </summary>
/// <param name="discovery">The discovery service.</param>
/// <param name="oplogStore">The oplog store.</param>
/// <param name="documentStore">The document store.</param>
/// <param name="snapshotMetadataStore">The snapshot metadata store.</param>
/// <param name="snapshotService">The snapshot service.</param>
/// <param name="peerNodeConfigurationProvider">The peer node configuration provider.</param>
/// <param name="peerOplogConfirmationStore">The peer oplog confirmation store.</param>
public TestableSyncOrchestrator(
IDiscoveryService discovery,
IOplogStore oplogStore,
IDocumentStore documentStore,
ISnapshotMetadataStore snapshotMetadataStore,
ISnapshotService snapshotService,
IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
IPeerOplogConfirmationStore peerOplogConfirmationStore)
: base(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
peerNodeConfigurationProvider,
NullLoggerFactory.Instance,
peerOplogConfirmationStore)
{
}
/// <summary>
/// Invokes the inbound batch processing path through reflection for regression testing.
/// </summary>
/// <param name="client">The peer client.</param>
/// <param name="peerNodeId">The peer node identifier.</param>
/// <param name="changes">The incoming oplog changes.</param>
/// <param name="token">The cancellation token.</param>
public async Task<string> TestProcessInboundBatchAsync(
TcpPeerClient client,
string peerNodeId,
IList<OplogEntry> changes,
CancellationToken token)
{
                // ProcessInboundBatchAsync is private (not protected), so invoke it via reflection
var method = typeof(SyncOrchestrator).GetMethod(
"ProcessInboundBatchAsync",
System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance);
if (method == null)
throw new InvalidOperationException("ProcessInboundBatchAsync method not found.");
try
{
var task = (Task)method.Invoke(this, new object[] { client, peerNodeId, changes, token })!;
await task.ConfigureAwait(false);
// Access .Result via reflection because generic type is private
var resultProp = task.GetType().GetProperty("Result");
var result = resultProp?.GetValue(task);
return result?.ToString() ?? "null";
}
catch (System.Reflection.TargetInvocationException ex)
{
if (ex.InnerException != null) throw ex.InnerException;
throw;
}
}
}
private static ISnapshotMetadataStore CreateSnapshotMetadataStore()
{
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
snapshotMetadataStore.GetSnapshotMetadataAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((SnapshotMetadata?)null);
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((string?)null);
snapshotMetadataStore.GetAllSnapshotMetadataAsync(Arg.Any<CancellationToken>())
.Returns(Array.Empty<SnapshotMetadata>());
return snapshotMetadataStore;
}
private static ISnapshotService CreateSnapshotService()
{
var snapshotService = Substitute.For<ISnapshotService>();
snapshotService.CreateSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.ReplaceDatabaseAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.MergeSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return snapshotService;
}
private static IDocumentStore CreateDocumentStore()
{
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(["Users", "TodoLists"]);
documentStore.GetDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((Document?)null);
documentStore.GetDocumentsByCollectionAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.GetDocumentsAsync(Arg.Any<List<(string Collection, string Key)>>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<Document>());
documentStore.PutDocumentAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.InsertBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.UpdateBatchDocumentsAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteDocumentAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.DeleteBatchDocumentsAsync(Arg.Any<IEnumerable<string>>(), Arg.Any<CancellationToken>())
.Returns(true);
documentStore.MergeAsync(Arg.Any<Document>(), Arg.Any<CancellationToken>())
.Returns(ci => ci.ArgAt<Document>(0));
documentStore.DropAsync(Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
documentStore.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<Document>());
documentStore.ImportAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
documentStore.MergeAsync(Arg.Any<IEnumerable<Document>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
return documentStore;
}
private static IOplogStore CreateOplogStore(string? localHeadHash)
{
var oplogStore = Substitute.For<IOplogStore>();
oplogStore.GetLastEntryHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(localHeadHash);
oplogStore.ApplyBatchAsync(Arg.Any<IEnumerable<OplogEntry>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return oplogStore;
}
private static TcpPeerClient CreateSnapshotRequiredClient()
{
var logger = Substitute.For<ILogger<TcpPeerClient>>();
var client = Substitute.For<TcpPeerClient>(
"127.0.0.1:0",
logger,
(IPeerHandshakeService?)null,
(INetworkTelemetryService?)null);
client.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(_ => Task.FromException<List<OplogEntry>>(new SnapshotRequiredException()));
return client;
}
private static IDiscoveryService CreateDiscovery()
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
discovery.Start().Returns(Task.CompletedTask);
discovery.Stop().Returns(Task.CompletedTask);
return discovery;
}
private static IPeerNodeConfigurationProvider CreateConfig()
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "local" });
return configProvider;
}
private static IPeerOplogConfirmationStore CreatePeerOplogConfirmationStore()
{
var store = Substitute.For<IPeerOplogConfirmationStore>();
store.EnsurePeerRegisteredAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<PeerType>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.UpdateConfirmationAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.GetConfirmationsAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.GetConfirmationsForPeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<string>());
store.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.ImportAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.MergeAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
return store;
}
/// <summary>
/// Verifies that gap recovery is skipped when an inbound entry already matches the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldSkipGapRecovery_WhenEntryMatchesSnapshotBoundary()
{
// Arrange
var oplogStore = CreateOplogStore("snapshot-boundary-hash");
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
CreateDiscovery(),
oplogStore,
CreateDocumentStore(),
snapshotMetadataStore,
snapshotService,
CreateConfig(),
CreatePeerOplogConfirmationStore());
using var client = CreateSnapshotRequiredClient();
// Incoming entry that connects to snapshot boundary
var entries = new List<OplogEntry>
{
new OplogEntry(
"col", "key", OperationType.Put, null,
new HlcTimestamp(100, 1, "remote-node"),
"snapshot-boundary-hash" // PreviousHash matches SnapshotHash!
)
};
// Act
var result = await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None);
// Assert
result.ShouldBe("Success");
await client.DidNotReceive().GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that gap recovery is attempted when the inbound entry does not match the snapshot boundary hash.
/// </summary>
[Fact]
public async Task ProcessInboundBatch_ShouldTryRecovery_WhenSnapshotMismatch()
{
// Arrange
var oplogStore = CreateOplogStore("some-old-hash");
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
CreateDiscovery(),
oplogStore,
CreateDocumentStore(),
snapshotMetadataStore,
snapshotService,
CreateConfig(),
CreatePeerOplogConfirmationStore());
using var client = CreateSnapshotRequiredClient();
var entries = new List<OplogEntry>
{
new OplogEntry(
"col", "key", OperationType.Put, null,
new HlcTimestamp(100, 1, "remote-node"),
"different-hash" // Mismatch!
)
};
// Act & Assert
// When gap recovery triggers, the client throws SnapshotRequiredException.
// SyncOrchestrator catches SnapshotRequiredException and re-throws it to trigger full sync
// So we expect SnapshotRequiredException to bubble up (wrapped in TargetInvocationException/AggregateException if not unwrapped by helper)
await Should.ThrowAsync<SnapshotRequiredException>(async () =>
await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None));
await client.Received(1).GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
}
}

View File

@@ -0,0 +1,251 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class SyncOrchestratorConfirmationTests
{
/// <summary>
/// Verifies that merged peers are registered and the local node is skipped.
/// </summary>
[Fact]
public async Task EnsurePeersRegisteredAsync_ShouldRegisterMergedPeers_AndSkipLocalNode()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
var now = DateTimeOffset.UtcNow;
var discoveredPeers = new List<PeerNode>
{
new("local", "127.0.0.1:9000", now, PeerType.LanDiscovered),
new("peer-a", "10.0.0.1:9000", now, PeerType.LanDiscovered)
};
var knownPeers = new List<PeerNode>
{
new("peer-a", "10.99.0.1:9000", now, PeerType.StaticRemote),
new("peer-b", "10.0.0.2:9010", now, PeerType.StaticRemote)
};
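        // Merging favors the freshly discovered address for peer-a, keeps static-only
        // peer-b, and drops the local node.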
var mergedPeers = SyncOrchestrator.BuildMergedPeerList(discoveredPeers, knownPeers, "local");
mergedPeers.Count.ShouldBe(2);
await orchestrator.EnsurePeersRegisteredAsync(mergedPeers, "local", CancellationToken.None);
await confirmationStore.Received(1).EnsurePeerRegisteredAsync(
"peer-a",
"10.0.0.1:9000",
PeerType.LanDiscovered,
Arg.Any<CancellationToken>());
await confirmationStore.Received(1).EnsurePeerRegisteredAsync(
"peer-b",
"10.0.0.2:9010",
PeerType.StaticRemote,
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().EnsurePeerRegisteredAsync(
"local",
Arg.Any<string>(),
Arg.Any<PeerType>(),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that a newly discovered node is auto-registered when peer lists are refreshed.
/// </summary>
[Fact]
public async Task EnsurePeersRegisteredAsync_WhenNewNodeJoins_ShouldAutoRegisterJoinedNode()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
var now = DateTimeOffset.UtcNow;
var knownPeers = new List<PeerNode>
{
new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote)
};
var firstDiscovered = new List<PeerNode>
{
new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote)
};
var firstMerged = SyncOrchestrator.BuildMergedPeerList(firstDiscovered, knownPeers, "local");
await orchestrator.EnsurePeersRegisteredAsync(firstMerged, "local", CancellationToken.None);
var secondDiscovered = new List<PeerNode>
{
new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote),
new("peer-new", "10.0.0.25:9010", now, PeerType.LanDiscovered)
};
var secondMerged = SyncOrchestrator.BuildMergedPeerList(secondDiscovered, knownPeers, "local");
await orchestrator.EnsurePeersRegisteredAsync(secondMerged, "local", CancellationToken.None);
await confirmationStore.Received(1).EnsurePeerRegisteredAsync(
"peer-new",
"10.0.0.25:9010",
PeerType.LanDiscovered,
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that confirmations advance only for nodes where remote vector-clock entries are at or ahead.
/// </summary>
[Fact]
public async Task AdvanceConfirmationsFromVectorClockAsync_ShouldAdvanceOnlyForRemoteAtOrAhead()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
var local = new VectorClock();
local.SetTimestamp("node-equal", new HlcTimestamp(100, 1, "node-equal"));
local.SetTimestamp("node-ahead", new HlcTimestamp(200, 0, "node-ahead"));
local.SetTimestamp("node-behind", new HlcTimestamp(300, 0, "node-behind"));
local.SetTimestamp("node-local-only", new HlcTimestamp(150, 0, "node-local-only"));
var remote = new VectorClock();
remote.SetTimestamp("node-equal", new HlcTimestamp(100, 1, "node-equal"));
remote.SetTimestamp("node-ahead", new HlcTimestamp(250, 0, "node-ahead"));
remote.SetTimestamp("node-behind", new HlcTimestamp(299, 9, "node-behind"));
remote.SetTimestamp("node-remote-only", new HlcTimestamp(900, 0, "node-remote-only"));
oplogStore.GetLastEntryHashAsync("node-equal", Arg.Any<CancellationToken>())
.Returns("hash-equal");
oplogStore.GetLastEntryHashAsync("node-ahead", Arg.Any<CancellationToken>())
.Returns((string?)null);
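        // Expected: node-equal (remote == local) advances with its stored chain hash;
        // node-ahead (remote ahead) advances to the local timestamp with an empty hash;
        // node-behind, node-local-only, and node-remote-only must not advance.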
await orchestrator.AdvanceConfirmationsFromVectorClockAsync("peer-1", local, remote, CancellationToken.None);
await confirmationStore.Received(1).UpdateConfirmationAsync(
"peer-1",
"node-equal",
new HlcTimestamp(100, 1, "node-equal"),
"hash-equal",
Arg.Any<CancellationToken>());
await confirmationStore.Received(1).UpdateConfirmationAsync(
"peer-1",
"node-ahead",
new HlcTimestamp(200, 0, "node-ahead"),
string.Empty,
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
"peer-1",
"node-behind",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
"peer-1",
"node-local-only",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
"peer-1",
"node-remote-only",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that pushed-batch confirmation uses the maximum timestamp and its matching hash.
/// </summary>
[Fact]
public async Task AdvanceConfirmationForPushedBatchAsync_ShouldUseMaxTimestampAndHash()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
var pushedChanges = new List<OplogEntry>
{
CreateEntry("source-1", 100, 0, "hash-100"),
CreateEntry("source-1", 120, 1, "hash-120"),
CreateEntry("source-1", 110, 5, "hash-110")
};
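        // HLC ordering compares wall time before logic, so (120, 1) is the maximum
        // and its hash is the one confirmed.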
await orchestrator.AdvanceConfirmationForPushedBatchAsync("peer-1", "source-1", pushedChanges, CancellationToken.None);
await confirmationStore.Received(1).UpdateConfirmationAsync(
"peer-1",
"source-1",
new HlcTimestamp(120, 1, "source-1"),
"hash-120",
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that no confirmation update occurs when a pushed batch is empty.
/// </summary>
[Fact]
public async Task AdvanceConfirmationForPushedBatchAsync_ShouldSkipEmptyBatch()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore);
await orchestrator.AdvanceConfirmationForPushedBatchAsync(
"peer-1",
"source-1",
Array.Empty<OplogEntry>(),
CancellationToken.None);
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
private static SyncOrchestrator CreateOrchestrator(IOplogStore oplogStore, IPeerOplogConfirmationStore confirmationStore)
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(Array.Empty<string>());
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
var snapshotService = Substitute.For<ISnapshotService>();
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "local" });
return new SyncOrchestrator(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
configProvider,
NullLoggerFactory.Instance,
confirmationStore);
}
private static OplogEntry CreateEntry(string nodeId, long wall, int logic, string hash)
{
return new OplogEntry(
"users",
$"{nodeId}-{wall}-{logic}",
OperationType.Put,
payload: null,
timestamp: new HlcTimestamp(wall, logic, nodeId),
previousHash: string.Empty,
hash: hash);
}
}

View File

@@ -0,0 +1,292 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class SyncOrchestratorMaintenancePruningTests
{
/// <summary>
/// Verifies that mixed peer confirmations produce the safest effective cutoff across peers and sources.
/// </summary>
[Fact]
public async Task CalculateEffectiveCutoffAsync_MixedPeerStates_ShouldUseSafestConfirmationAcrossPeers()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(500, 0, "node-local"));
vectorClock.SetTimestamp("node-secondary", new HlcTimestamp(450, 0, "node-secondary"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(new[] { "peer-a", "peer-b", " " });
confirmationStore.GetConfirmationsForPeerAsync("peer-a", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-a", "node-local", wall: 300, logic: 0, isActive: true),
CreateConfirmation("peer-a", "node-secondary", wall: 120, logic: 1, isActive: true),
CreateConfirmation("peer-a", "node-secondary", wall: 500, logic: 0, isActive: false)
});
confirmationStore.GetConfirmationsForPeerAsync("peer-b", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-b", "node-local", wall: 250, logic: 0, isActive: true),
CreateConfirmation("peer-b", "node-secondary", wall: 180, logic: 0, isActive: true)
});
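        // The safest cutoff is the minimum active confirmation across all peers and
        // sources: peer-a/node-secondary at (120, 1). The inactive (500, 0) row and
        // the blank peer id must be ignored.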
var decision = await calculator.CalculateEffectiveCutoffAsync(
new PeerNodeConfiguration
{
NodeId = "node-local",
OplogRetentionHours = 24
},
CancellationToken.None);
decision.HasCutoff.ShouldBeTrue();
decision.ConfirmationCutoff.HasValue.ShouldBeTrue();
decision.EffectiveCutoff.HasValue.ShouldBeTrue();
decision.ConfirmationCutoff.Value.PhysicalTime.ShouldBe(120);
decision.ConfirmationCutoff.Value.LogicalCounter.ShouldBe(1);
decision.ConfirmationCutoff.Value.NodeId.ShouldBe("node-secondary");
decision.EffectiveCutoff.Value.ShouldBe(decision.ConfirmationCutoff.Value);
}
/// <summary>
/// Verifies that removing a peer from tracking immediately restores pruning eligibility.
/// </summary>
[Fact]
public async Task CalculateEffectiveCutoffAsync_RemovingPeerFromTracking_ShouldImmediatelyRestoreEligibility()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(200, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(
new[] { "peer-active", "peer-deprecated" },
new[] { "peer-active" });
confirmationStore.GetConfirmationsForPeerAsync("peer-active", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("peer-active", "node-local", wall: 150, logic: 0, isActive: true)
});
confirmationStore.GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
var configuration = new PeerNodeConfiguration
{
NodeId = "node-local",
OplogRetentionHours = 24
};
var blockedDecision = await calculator.CalculateEffectiveCutoffAsync(configuration, CancellationToken.None);
blockedDecision.HasCutoff.ShouldBeFalse();
confirmationStore.ClearReceivedCalls();
var unblockedDecision = await calculator.CalculateEffectiveCutoffAsync(configuration, CancellationToken.None);
unblockedDecision.HasCutoff.ShouldBeTrue();
unblockedDecision.EffectiveCutoff.HasValue.ShouldBeTrue();
unblockedDecision.EffectiveCutoff.Value.PhysicalTime.ShouldBe(150);
unblockedDecision.EffectiveCutoff.Value.NodeId.ShouldBe("node-local");
await confirmationStore.Received(1).GetConfirmationsForPeerAsync("peer-active", Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that maintenance does not prune when peer confirmation is missing in a two-node topology.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldNotPruneBeforePeerConfirmation()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore, calculator);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(200, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(new[] { "node-peer" });
confirmationStore.GetConfirmationsForPeerAsync("node-peer", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
var config = new PeerNodeConfiguration
{
NodeId = "node-local",
MaintenanceIntervalMinutes = 1,
OplogRetentionHours = 24
};
await orchestrator.RunMaintenanceIfDueAsync(config, DateTime.UtcNow, CancellationToken.None);
await oplogStore.DidNotReceive().PruneOplogAsync(Arg.Any<HlcTimestamp>(), Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that maintenance prunes after peer confirmation is available in a two-node topology.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldPruneAfterPeerConfirmation()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore, calculator);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(200, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(new[] { "node-peer" });
confirmationStore.GetConfirmationsForPeerAsync("node-peer", Arg.Any<CancellationToken>())
.Returns(new[]
{
new PeerOplogConfirmation
{
PeerNodeId = "node-peer",
SourceNodeId = "node-local",
ConfirmedWall = 100,
ConfirmedLogic = 0,
ConfirmedHash = "hash-100",
IsActive = true
}
});
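        // The peer has confirmed up to (100, 0), so pruning may advance to that timestamp.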
var config = new PeerNodeConfiguration
{
NodeId = "node-local",
MaintenanceIntervalMinutes = 1,
OplogRetentionHours = 24
};
await orchestrator.RunMaintenanceIfDueAsync(config, DateTime.UtcNow, CancellationToken.None);
await oplogStore.Received(1).PruneOplogAsync(
Arg.Is<HlcTimestamp>(timestamp =>
timestamp.PhysicalTime == 100 &&
timestamp.LogicalCounter == 0 &&
string.Equals(timestamp.NodeId, "node-local", StringComparison.Ordinal)),
Arg.Any<CancellationToken>());
}
/// <summary>
/// Verifies that deprecated-node removal unblocks pruning on a subsequent maintenance run.
/// </summary>
[Fact]
public async Task RunMaintenanceIfDueAsync_DeprecatedNodeRemoval_ShouldUnblockPruning()
{
var oplogStore = Substitute.For<IOplogStore>();
var confirmationStore = Substitute.For<IPeerOplogConfirmationStore>();
var calculator = new OplogPruneCutoffCalculator(oplogStore, confirmationStore);
var orchestrator = CreateOrchestrator(oplogStore, confirmationStore, calculator);
var vectorClock = new VectorClock();
vectorClock.SetTimestamp("node-local", new HlcTimestamp(220, 0, "node-local"));
oplogStore.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(vectorClock);
confirmationStore.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>())
.Returns(
new[] { "node-active", "node-deprecated" },
new[] { "node-active" });
confirmationStore.GetConfirmationsForPeerAsync("node-active", Arg.Any<CancellationToken>())
.Returns(new[]
{
CreateConfirmation("node-active", "node-local", wall: 100, logic: 0, isActive: true)
});
confirmationStore.GetConfirmationsForPeerAsync("node-deprecated", Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
var config = new PeerNodeConfiguration
{
NodeId = "node-local",
MaintenanceIntervalMinutes = 1,
OplogRetentionHours = 24
};
var now = DateTime.UtcNow;
await orchestrator.RunMaintenanceIfDueAsync(config, now, CancellationToken.None);
await oplogStore.DidNotReceive().PruneOplogAsync(Arg.Any<HlcTimestamp>(), Arg.Any<CancellationToken>());
await orchestrator.RunMaintenanceIfDueAsync(config, now.AddMinutes(2), CancellationToken.None);
await oplogStore.Received(1).PruneOplogAsync(
Arg.Is<HlcTimestamp>(timestamp =>
timestamp.PhysicalTime == 100 &&
timestamp.LogicalCounter == 0 &&
string.Equals(timestamp.NodeId, "node-local", StringComparison.Ordinal)),
Arg.Any<CancellationToken>());
}
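// Note: the first run is blocked because "node-deprecated" reports no
// confirmations; once it drops out of the active tracked set on the second
// read, the confirmation from "node-active" (100, 0) alone determines the
// cutoff. The second run happens two minutes later, past the configured
// MaintenanceIntervalMinutes of 1, so maintenance is due again.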
private static SyncOrchestrator CreateOrchestrator(
IOplogStore oplogStore,
IPeerOplogConfirmationStore confirmationStore,
IOplogPruneCutoffCalculator cutoffCalculator)
{
var discovery = Substitute.For<IDiscoveryService>();
discovery.GetActivePeers().Returns(Array.Empty<PeerNode>());
var documentStore = Substitute.For<IDocumentStore>();
documentStore.InterestedCollection.Returns(Array.Empty<string>());
var snapshotMetadataStore = Substitute.For<ISnapshotMetadataStore>();
var snapshotService = Substitute.For<ISnapshotService>();
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "node-local" });
return new SyncOrchestrator(
discovery,
oplogStore,
documentStore,
snapshotMetadataStore,
snapshotService,
configProvider,
NullLoggerFactory.Instance,
confirmationStore,
telemetry: null,
oplogPruneCutoffCalculator: cutoffCalculator);
}
private static PeerOplogConfirmation CreateConfirmation(
string peerNodeId,
string sourceNodeId,
long wall,
int logic,
bool isActive)
{
return new PeerOplogConfirmation
{
PeerNodeId = peerNodeId,
SourceNodeId = sourceNodeId,
ConfirmedWall = wall,
ConfirmedLogic = logic,
ConfirmedHash = $"hash-{wall}-{logic}",
IsActive = isActive
};
}
}

View File

@@ -0,0 +1,108 @@
using System;
using System.IO;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests
{
public class TelemetryTests : IDisposable
{
private readonly string _tempFile;
/// <summary>
/// Initializes a new instance of the <see cref="TelemetryTests"/> class.
/// </summary>
public TelemetryTests()
{
_tempFile = Path.GetTempFileName();
}
/// <summary>
/// Cleans up temporary test artifacts created for telemetry persistence validation.
/// </summary>
public void Dispose()
{
if (File.Exists(_tempFile)) File.Delete(_tempFile);
}
/// <summary>
/// Verifies that telemetry metrics are recorded and persisted to disk.
/// </summary>
[Fact]
public async Task Should_Record_And_Persist_Metrics()
{
// Arrange
using var service = new NetworkTelemetryService(NullLogger<NetworkTelemetryService>.Instance, _tempFile);
// Act
// Record some values for CompressionRatio
service.RecordValue(MetricType.CompressionRatio, 0.5);
service.RecordValue(MetricType.CompressionRatio, 0.7);
// Record time metric
using (var timer = service.StartMetric(MetricType.EncryptionTime))
{
await Task.Delay(10); // ensure a measurable elapsed time (> 0 ms)
}
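// Disposing the timer returned by StartMetric is what records the elapsed
// duration for EncryptionTime; the assertions below rely on that behavior.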
// Allow channel to process
await Task.Delay(500);
// Force persist to file
service.ForcePersist();
// Assert
File.Exists(_tempFile).ShouldBeTrue();
var fileInfo = new FileInfo(_tempFile);
fileInfo.Length.ShouldBeGreaterThan(0);
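// The reads below mirror the persisted binary layout as this test assumes it:
// [byte version][int64 unix-seconds timestamp], then one block per MetricType:
// [int32 type] followed by 4 windows of [int32 windowSeconds][double average].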
using var fs = File.OpenRead(_tempFile);
using var br = new BinaryReader(fs);
// Header
byte version = br.ReadByte();
version.ShouldBe((byte)1);
long timestamp = br.ReadInt64();
var now = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
timestamp.ShouldBeInRange(now - 5, now + 5);
// Metrics
// Expect one serialized block per MetricType value
int typeCount = Enum.GetValues(typeof(MetricType)).Length;
bool foundCompression = false;
bool foundEncryption = false;
for (int i = 0; i < typeCount; i++)
{
int typeInt = br.ReadInt32();
var type = (MetricType)typeInt;
// 4 Windows per type
for (int w = 0; w < 4; w++)
{
int window = br.ReadInt32(); // 60, 300, 600, 1800
double avg = br.ReadDouble();
if (type == MetricType.CompressionRatio && window == 60)
{
// Avg of 0.5 and 0.7 is 0.6
avg.ShouldBe(0.6, 0.001);
foundCompression = true;
}
if (type == MetricType.EncryptionTime && window == 60)
{
avg.ShouldBeGreaterThan(0);
foundEncryption = true;
}
}
}
foundCompression.ShouldBeTrue();
foundEncryption.ShouldBeTrue();
}
}
}

View File

@@ -0,0 +1,264 @@
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Xunit;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class VectorClockSyncTests
{
/// <summary>
/// Verifies sync pull selection includes only nodes where the remote clock is ahead.
/// </summary>
[Fact]
public async Task VectorClockSync_ShouldPullOnlyNodesWithUpdates()
{
// Arrange
var (localStore, localVectorClock, _) = CreatePeerStore();
var (remoteStore, remoteVectorClock, remoteOplogEntries) = CreatePeerStore();
// Local knows about node1 and node2
localVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
localVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
// Remote has updates for node1 only
remoteVectorClock.SetTimestamp("node1", new HlcTimestamp(200, 5, "node1"));
remoteVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
// Add oplog entries for node1 in remote
remoteOplogEntries.Add(new OplogEntry(
"users", "user1", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"Alice\"}"),
new HlcTimestamp(150, 2, "node1"), "", "hash1"
));
remoteOplogEntries.Add(new OplogEntry(
"users", "user2", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"Bob\"}"),
new HlcTimestamp(200, 5, "node1"), "hash1", "hash2"
));
// Act
var localVC = await localStore.GetVectorClockAsync(default);
var remoteVC = remoteVectorClock;
var nodesToPull = localVC.GetNodesWithUpdates(remoteVC).ToList();
// Assert
nodesToPull.Count.ShouldBe(1);
nodesToPull.ShouldContain("node1");
// Simulate pull
foreach (var nodeId in nodesToPull)
{
var localTs = localVC.GetTimestamp(nodeId);
var changes = await remoteStore.GetOplogForNodeAfterAsync(nodeId, localTs, default);
changes.Count().ShouldBe(2);
}
}
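// Note on ordering: HlcTimestamp(physicalTime, logicalCounter, nodeId) compares
// physical time first and then the logical counter (with the node id presumably
// as a final tiebreaker), which is why (200, 5) is ahead of (100, 1) for node1.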
/// <summary>
/// Verifies sync push selection includes only nodes where the local clock is ahead.
/// </summary>
[Fact]
public async Task VectorClockSync_ShouldPushOnlyNodesWithLocalUpdates()
{
// Arrange
var (localStore, localVectorClock, localOplogEntries) = CreatePeerStore();
var (_, remoteVectorClock, _) = CreatePeerStore();
// Local has updates for node1
localVectorClock.SetTimestamp("node1", new HlcTimestamp(200, 5, "node1"));
localVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
// Remote is behind on node1
remoteVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
remoteVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
// Add oplog entries for node1 in local
localOplogEntries.Add(new OplogEntry(
"users", "user1", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"Charlie\"}"),
new HlcTimestamp(150, 2, "node1"), "", "hash1"
));
// Act
var localVC = localVectorClock;
var remoteVC = remoteVectorClock;
var nodesToPush = localVC.GetNodesToPush(remoteVC).ToList();
// Assert
nodesToPush.Count.ShouldBe(1);
nodesToPush.ShouldContain("node1");
// Simulate push
foreach (var nodeId in nodesToPush)
{
var remoteTs = remoteVC.GetTimestamp(nodeId);
var changes = await localStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, default);
changes.Count().ShouldBe(1);
}
}
/// <summary>
/// Verifies that split-brain clocks require synchronization in both directions.
/// </summary>
[Fact]
public async Task VectorClockSync_SplitBrain_ShouldSyncBothDirections()
{
// Arrange - Simulating split brain
var (partition1Store, partition1VectorClock, partition1OplogEntries) = CreatePeerStore();
var (partition2Store, partition2VectorClock, partition2OplogEntries) = CreatePeerStore();
// Partition 1 has node1 and node2 updates
partition1VectorClock.SetTimestamp("node1", new HlcTimestamp(300, 5, "node1"));
partition1VectorClock.SetTimestamp("node2", new HlcTimestamp(200, 3, "node2"));
partition1VectorClock.SetTimestamp("node3", new HlcTimestamp(50, 1, "node3"));
// Partition 2 has node3 updates
partition2VectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
partition2VectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
partition2VectorClock.SetTimestamp("node3", new HlcTimestamp(400, 8, "node3"));
partition1OplogEntries.Add(new OplogEntry(
"users", "user1", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"P1User\"}"),
new HlcTimestamp(300, 5, "node1"), "", "hash_p1"
));
partition2OplogEntries.Add(new OplogEntry(
"users", "user2", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"P2User\"}"),
new HlcTimestamp(400, 8, "node3"), "", "hash_p2"
));
// Act
var vc1 = partition1VectorClock;
var vc2 = partition2VectorClock;
var relation = vc1.CompareTo(vc2);
var partition1NeedsToPull = vc1.GetNodesWithUpdates(vc2).ToList();
var partition1NeedsToPush = vc1.GetNodesToPush(vc2).ToList();
// Assert
relation.ShouldBe(CausalityRelation.Concurrent);
// Partition 1 needs to pull node3
partition1NeedsToPull.Count.ShouldBe(1);
partition1NeedsToPull.ShouldContain("node3");
// Partition 1 needs to push node1 and node2
partition1NeedsToPush.Count.ShouldBe(2);
partition1NeedsToPush.ShouldContain("node1");
partition1NeedsToPush.ShouldContain("node2");
// Verify data can be synced
var changesToPull = await partition2Store.GetOplogForNodeAfterAsync("node3", vc1.GetTimestamp("node3"), default);
changesToPull.Count().ShouldBe(1);
var changesToPush = await partition1Store.GetOplogForNodeAfterAsync("node1", vc2.GetTimestamp("node1"), default);
changesToPush.Count().ShouldBe(1);
}
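// Note: CausalityRelation.Concurrent means neither clock dominates the other;
// each partition is ahead on at least one node, so a full reconciliation
// requires both a pull and a push, as the assertions above demonstrate.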
/// <summary>
/// Verifies no pull or push is required when vector clocks are equal.
/// </summary>
[Fact]
public void VectorClockSync_EqualClocks_ShouldNotSync()
{
// Arrange
var vc1 = new VectorClock();
vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
var vc2 = new VectorClock();
vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2"));
// Act
var relation = vc1.CompareTo(vc2);
var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList();
var nodesToPush = vc1.GetNodesToPush(vc2).ToList();
// Assert
relation.ShouldBe(CausalityRelation.Equal);
nodesToPull.ShouldBeEmpty();
nodesToPush.ShouldBeEmpty();
}
/// <summary>
/// Verifies a newly observed node is detected as a required pull source.
/// </summary>
[Fact]
public async Task VectorClockSync_NewNodeJoins_ShouldBeDetected()
{
// Arrange - Simulating a new node joining the cluster
var (_, existingNodeVectorClock, _) = CreatePeerStore();
existingNodeVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
existingNodeVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
var (newNodeStore, newNodeVectorClock, newNodeOplogEntries) = CreatePeerStore();
newNodeVectorClock.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1"));
newNodeVectorClock.SetTimestamp("node2", new HlcTimestamp(100, 1, "node2"));
newNodeVectorClock.SetTimestamp("node3", new HlcTimestamp(50, 1, "node3")); // New node
newNodeOplogEntries.Add(new OplogEntry(
"users", "user3", OperationType.Put,
System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>("{\"name\":\"NewNode\"}"),
new HlcTimestamp(50, 1, "node3"), "", "hash_new"
));
// Act
var existingVC = existingNodeVectorClock;
var newNodeVC = newNodeVectorClock;
var nodesToPull = existingVC.GetNodesWithUpdates(newNodeVC).ToList();
// Assert
nodesToPull.Count.ShouldBe(1);
nodesToPull.ShouldContain("node3");
var changes = await newNodeStore.GetOplogForNodeAfterAsync("node3", existingVC.GetTimestamp("node3"), default);
changes.Count().ShouldBe(1);
}
private static (IOplogStore Store, VectorClock VectorClock, List<OplogEntry> OplogEntries) CreatePeerStore()
{
var vectorClock = new VectorClock();
var oplogEntries = new List<OplogEntry>();
var store = Substitute.For<IOplogStore>();
store.GetVectorClockAsync(Arg.Any<CancellationToken>())
.Returns(Task.FromResult(vectorClock));
store.GetOplogForNodeAfterAsync(
Arg.Any<string>(),
Arg.Any<HlcTimestamp>(),
Arg.Any<IEnumerable<string>?>(),
Arg.Any<CancellationToken>())
.Returns(callInfo =>
{
var nodeId = callInfo.ArgAt<string>(0);
var since = callInfo.ArgAt<HlcTimestamp>(1);
var collections = callInfo.ArgAt<IEnumerable<string>?>(2)?.ToList();
IEnumerable<OplogEntry> query = oplogEntries
.Where(e => e.Timestamp.NodeId == nodeId && e.Timestamp.CompareTo(since) > 0);
if (collections is { Count: > 0 })
{
query = query.Where(e => collections.Contains(e.Collection));
}
return Task.FromResult<IEnumerable<OplogEntry>>(query.OrderBy(e => e.Timestamp).ToList());
});
return (store, vectorClock, oplogEntries);
}
}

View File

@@ -0,0 +1,31 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Network.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Network.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Network.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" />
</ItemGroup>
</Project>

View File

@@ -0,0 +1,505 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
using Xunit;
using ZB.MOM.WW.CBDDC.Persistence;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations.
/// </summary>
public class BLiteStoreExportImportTests : IDisposable
{
private readonly string _testDbPath;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLiteSnapshotMetadataStore<SampleDbContext> _snapshotMetadataStore;
private readonly IPeerNodeConfigurationProvider _configProvider;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteStoreExportImportTests"/> class.
/// </summary>
public BLiteStoreExportImportTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.blite");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger<SampleDocumentStore>.Instance);
_snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
_oplogStore = new BLiteOplogStore<SampleDbContext>(
_context, _documentStore, new LastWriteWinsConflictResolver(),
vectorClock,
_snapshotMetadataStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
}
#region OplogStore Tests
/// <summary>
/// Verifies that exporting oplog entries returns all persisted records.
/// </summary>
[Fact]
public async Task OplogStore_ExportAsync_ReturnsAllEntries()
{
// Arrange
var entry1 = CreateOplogEntry("col1", "key1", "node1", 1000);
var entry2 = CreateOplogEntry("col2", "key2", "node1", 2000);
await _oplogStore.AppendOplogEntryAsync(entry1);
await _oplogStore.AppendOplogEntryAsync(entry2);
// Act
var exported = (await _oplogStore.ExportAsync()).ToList();
// Assert
exported.Count.ShouldBe(2);
exported.ShouldContain(e => e.Key == "key1");
exported.ShouldContain(e => e.Key == "key2");
}
/// <summary>
/// Verifies that importing oplog entries adds them to the store.
/// </summary>
[Fact]
public async Task OplogStore_ImportAsync_AddsEntries()
{
// Arrange
var entries = new[]
{
CreateOplogEntry("col1", "imported1", "node1", 1000),
CreateOplogEntry("col2", "imported2", "node1", 2000)
};
// Act
await _oplogStore.ImportAsync(entries);
// Assert
var exported = (await _oplogStore.ExportAsync()).ToList();
exported.Count.ShouldBe(2);
exported.ShouldContain(e => e.Key == "imported1");
exported.ShouldContain(e => e.Key == "imported2");
}
/// <summary>
/// Verifies that merging oplog entries adds only entries that are not already present.
/// </summary>
[Fact]
public async Task OplogStore_MergeAsync_OnlyAddsNewEntries()
{
// Arrange - Add existing entry
var existing = CreateOplogEntry("col1", "existing", "node1", 1000);
await _oplogStore.AppendOplogEntryAsync(existing);
// Create entries to merge (one duplicate hash, one new)
var toMerge = new[]
{
existing, // Same hash - should be skipped
CreateOplogEntry("col2", "new-entry", "node1", 2000)
};
// Act
await _oplogStore.MergeAsync(toMerge);
// Assert
var exported = (await _oplogStore.ExportAsync()).ToList();
exported.Count.ShouldBe(2); // existing + new, not 3
}
/// <summary>
/// Verifies that chain range lookup resolves entries by hash and returns the expected range.
/// </summary>
[Fact]
public async Task OplogStore_GetChainRangeAsync_UsesHashLookup()
{
// Arrange
var payload1 = JsonDocument.Parse("{\"test\":\"k1\"}").RootElement;
var payload2 = JsonDocument.Parse("{\"test\":\"k2\"}").RootElement;
var entry1 = new OplogEntry("col1", "k1", OperationType.Put, payload1, new HlcTimestamp(1000, 0, "node1"), "");
var entry2 = new OplogEntry("col1", "k2", OperationType.Put, payload2, new HlcTimestamp(2000, 0, "node1"), entry1.Hash);
await _oplogStore.AppendOplogEntryAsync(entry1);
await _oplogStore.AppendOplogEntryAsync(entry2);
await _context.SaveChangesAsync();
// Act
var range = (await _oplogStore.GetChainRangeAsync(entry1.Hash, entry2.Hash)).ToList();
// Assert
range.Count.ShouldBe(1);
range[0].Hash.ShouldBe(entry2.Hash);
}
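// Note: the assertion above implies the chain range is exclusive of the start
// hash and inclusive of the end hash (entry1.Hash .. entry2.Hash yields only
// entry2), with entry2 linked to entry1 via its previous-hash argument.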
/// <summary>
/// Verifies that dropping the oplog store removes all entries.
/// </summary>
[Fact]
public async Task OplogStore_DropAsync_ClearsAllEntries()
{
// Arrange
await _oplogStore.AppendOplogEntryAsync(CreateOplogEntry("col1", "key1", "node1", 1000));
await _oplogStore.AppendOplogEntryAsync(CreateOplogEntry("col2", "key2", "node1", 2000));
await _context.SaveChangesAsync();
// Act
await _oplogStore.DropAsync();
// Assert
var exported = (await _oplogStore.ExportAsync()).ToList();
exported.ShouldBeEmpty();
}
#endregion
#region PeerConfigurationStore Tests
/// <summary>
/// Verifies that exporting peer configurations returns all persisted peers.
/// </summary>
[Fact]
public async Task PeerConfigStore_ExportAsync_ReturnsAllPeers()
{
// Arrange
await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer1", "host1:5000"));
await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer2", "host2:5000"));
// Act
var exported = (await _peerConfigStore.ExportAsync()).ToList();
// Assert
exported.Count.ShouldBe(2);
exported.ShouldContain(p => p.NodeId == "peer1");
exported.ShouldContain(p => p.NodeId == "peer2");
}
/// <summary>
/// Verifies that importing peer configurations adds peers to the store.
/// </summary>
[Fact]
public async Task PeerConfigStore_ImportAsync_AddsPeers()
{
// Arrange
var peers = new[]
{
CreatePeerConfig("imported1", "host1:5000"),
CreatePeerConfig("imported2", "host2:5000")
};
// Act
await _peerConfigStore.ImportAsync(peers);
// Assert
var exported = (await _peerConfigStore.ExportAsync()).ToList();
exported.Count.ShouldBe(2);
}
/// <summary>
/// Verifies that merging peer configurations adds only new peers.
/// </summary>
[Fact]
public async Task PeerConfigStore_MergeAsync_OnlyAddsNewPeers()
{
// Arrange - Add existing peer
var existing = CreatePeerConfig("existing-peer", "host1:5000");
await _peerConfigStore.SaveRemotePeerAsync(existing);
await _context.SaveChangesAsync();
var toMerge = new[]
{
CreatePeerConfig("existing-peer", "host1-updated:5000"), // Same ID - should be skipped
CreatePeerConfig("new-peer", "host2:5000")
};
// Act
await _peerConfigStore.MergeAsync(toMerge);
// Assert
var exported = (await _peerConfigStore.ExportAsync()).ToList();
exported.Count.ShouldBe(2);
}
/// <summary>
/// Verifies that dropping peer configurations removes all peers.
/// </summary>
[Fact]
public async Task PeerConfigStore_DropAsync_ClearsAllPeers()
{
// Arrange
await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer1", "host1:5000"));
await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer2", "host2:5000"));
await _context.SaveChangesAsync();
// Act
await _peerConfigStore.DropAsync();
// Assert
var exported = (await _peerConfigStore.ExportAsync()).ToList();
exported.ShouldBeEmpty();
}
#endregion
#region SnapshotMetadataStore Tests
/// <summary>
/// Verifies that exporting snapshot metadata returns all persisted metadata entries.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_ExportAsync_ReturnsAllMetadata()
{
// Arrange
var meta1 = CreateSnapshotMetadata("node1", 1000);
var meta2 = CreateSnapshotMetadata("node2", 2000);
await _snapshotMetadataStore.InsertSnapshotMetadataAsync(meta1);
await _snapshotMetadataStore.InsertSnapshotMetadataAsync(meta2);
// Act
var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();
// Assert
exported.Count.ShouldBe(2);
exported.ShouldContain(m => m.NodeId == "node1");
exported.ShouldContain(m => m.NodeId == "node2");
}
/// <summary>
/// Verifies that importing snapshot metadata adds metadata entries to the store.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_ImportAsync_AddsMetadata()
{
// Arrange
var metadata = new[]
{
CreateSnapshotMetadata("imported1", 1000),
CreateSnapshotMetadata("imported2", 2000)
};
// Act
await _snapshotMetadataStore.ImportAsync(metadata);
// Assert
var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();
exported.Count.ShouldBe(2);
}
/// <summary>
/// Verifies that merging snapshot metadata adds only entries with new node identifiers.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_MergeAsync_OnlyAddsNewMetadata()
{
// Arrange - Add existing metadata
var existing = CreateSnapshotMetadata("existing-node", 1000);
await _snapshotMetadataStore.InsertSnapshotMetadataAsync(existing);
var toMerge = new[]
{
CreateSnapshotMetadata("existing-node", 9999), // Same NodeId - should be skipped
CreateSnapshotMetadata("new-node", 2000)
};
// Act
await _snapshotMetadataStore.MergeAsync(toMerge);
// Assert
var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();
exported.Count.ShouldBe(2);
}
/// <summary>
/// Verifies that dropping snapshot metadata removes all metadata entries.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_DropAsync_ClearsAllMetadata()
{
// Arrange
await _snapshotMetadataStore.InsertSnapshotMetadataAsync(CreateSnapshotMetadata("node1", 1000));
await _snapshotMetadataStore.InsertSnapshotMetadataAsync(CreateSnapshotMetadata("node2", 2000));
// Act
await _snapshotMetadataStore.DropAsync();
// Assert
var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();
exported.ShouldBeEmpty();
}
#endregion
#region DocumentStore Tests
/// <summary>
/// Verifies that exporting documents returns all persisted documents.
/// </summary>
[Fact]
public async Task DocumentStore_ExportAsync_ReturnsAllDocuments()
{
// Arrange
await _context.Users.InsertAsync(new User { Id = "u1", Name = "User 1", Age = 20 });
await _context.Users.InsertAsync(new User { Id = "u2", Name = "User 2", Age = 25 });
await _context.SaveChangesAsync();
// Act
var exported = (await _documentStore.ExportAsync()).ToList();
// Assert
exported.Count.ShouldBe(2);
exported.ShouldContain(d => d.Key == "u1");
exported.ShouldContain(d => d.Key == "u2");
}
/// <summary>
/// Verifies that importing documents adds them to the underlying store.
/// </summary>
[Fact]
public async Task DocumentStore_ImportAsync_AddsDocuments()
{
// Arrange
var docs = new[]
{
CreateDocument("Users", "imported1", new User { Id = "imported1", Name = "Imported 1", Age = 30 }),
CreateDocument("Users", "imported2", new User { Id = "imported2", Name = "Imported 2", Age = 35 })
};
// Act
await _documentStore.ImportAsync(docs);
// Assert
var u1 = _context.Users.FindById("imported1");
var u2 = _context.Users.FindById("imported2");
u1.ShouldNotBeNull();
u2.ShouldNotBeNull();
u1.Name.ShouldBe("Imported 1");
u2.Name.ShouldBe("Imported 2");
}
/// <summary>
/// Verifies that document merge behavior honors conflict resolution.
/// </summary>
[Fact]
public async Task DocumentStore_MergeAsync_UsesConflictResolution()
{
// Arrange - Add existing document
await _context.Users.InsertAsync(new User { Id = "merge-user", Name = "Original", Age = 20 });
await _context.SaveChangesAsync();
// Create document to merge with newer timestamp
var newerDoc = new Document(
"Users",
"merge-user",
JsonDocument.Parse("{\"Id\":\"merge-user\",\"Name\":\"Updated\",\"Age\":25}").RootElement,
new HlcTimestamp(DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + 10000, 0, "other-node"),
false
);
// Act
await _documentStore.MergeAsync([newerDoc]);
// Assert - With LastWriteWins, newer document should win
var user = _context.Users.FindById("merge-user");
user.ShouldNotBeNull();
user.Name.ShouldBe("Updated");
user.Age.ShouldBe(25);
}
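// Note: LastWriteWins resolves by HLC timestamp; the incoming document is
// stamped roughly ten seconds in the future, so it replaces the local copy.
// A merge carrying an older timestamp would presumably leave "Original" intact.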
/// <summary>
/// Verifies that dropping documents removes all persisted documents.
/// </summary>
[Fact]
public async Task DocumentStore_DropAsync_ClearsAllDocuments()
{
// Arrange
await _context.Users.InsertAsync(new User { Id = "drop1", Name = "User 1", Age = 20 });
await _context.Users.InsertAsync(new User { Id = "drop2", Name = "User 2", Age = 25 });
await _context.SaveChangesAsync();
// Act
await _documentStore.DropAsync();
// Assert
var exported = (await _documentStore.ExportAsync()).ToList();
exported.ShouldBeEmpty();
}
#endregion
#region Helpers
private static OplogEntry CreateOplogEntry(string collection, string key, string nodeId, long physicalTime)
{
var payload = JsonDocument.Parse($"{{\"test\": \"{key}\"}}").RootElement;
var timestamp = new HlcTimestamp(physicalTime, 0, nodeId);
return new OplogEntry(collection, key, OperationType.Put, payload, timestamp, "");
}
private static RemotePeerConfiguration CreatePeerConfig(string nodeId, string address)
{
return new RemotePeerConfiguration
{
NodeId = nodeId,
Address = address,
Type = PeerType.StaticRemote,
IsEnabled = true,
InterestingCollections = new List<string> { "Users" }
};
}
private static SnapshotMetadata CreateSnapshotMetadata(string nodeId, long physicalTime)
{
return new SnapshotMetadata
{
NodeId = nodeId,
TimestampPhysicalTime = physicalTime,
TimestampLogicalCounter = 0,
Hash = $"hash-{nodeId}"
};
}
private static Document CreateDocument<T>(string collection, string key, T entity) where T : class
{
var json = JsonSerializer.Serialize(entity);
var content = JsonDocument.Parse(json).RootElement;
return new Document(collection, key, content, new HlcTimestamp(0, 0, ""), false);
}
#endregion
/// <summary>
/// Disposes test resources and removes the temporary database file.
/// </summary>
public void Dispose()
{
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
{
try { File.Delete(_testDbPath); } catch { /* best-effort cleanup */ }
}
}
private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId)
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = 5000,
AuthToken = "test-token",
OplogRetentionHours = 24,
MaintenanceIntervalMinutes = 60
});
return configProvider;
}
}

View File

@@ -0,0 +1,3 @@
global using ZB.MOM.WW.CBDDC.Sample.Console;
global using NSubstitute;
global using Shouldly;

View File

@@ -0,0 +1,108 @@
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class PeerOplogConfirmationStoreTests : IDisposable
{
private readonly string _testDbPath;
private readonly SampleDbContext _context;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _store;
/// <summary>
/// Initializes a new instance of the <see cref="PeerOplogConfirmationStoreTests"/> class.
/// </summary>
public PeerOplogConfirmationStoreTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.blite");
_context = new SampleDbContext(_testDbPath);
_store = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
}
/// <summary>
/// Verifies that registering the same peer multiple times is idempotent.
/// </summary>
[Fact]
public async Task EnsurePeerRegisteredAsync_IsIdempotent()
{
await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
var active = (await _store.GetActiveTrackedPeersAsync()).ToList();
var exported = (await _store.ExportAsync()).ToList();
active.Count.ShouldBe(1);
active[0].ShouldBe("peer-a");
exported.Count(x => x.PeerNodeId == "peer-a" && x.SourceNodeId == "__peer_registration__").ShouldBe(1);
}
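// Note: registration appears to be persisted as a confirmation row whose
// SourceNodeId is the sentinel "__peer_registration__" (per the export
// assertion above); repeated registration must not duplicate that row.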
/// <summary>
/// Verifies create, update, and read flows for peer oplog confirmations.
/// </summary>
[Fact]
public async Task ConfirmationStore_CrudFlow_Works()
{
await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
await _store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");
var firstRead = (await _store.GetConfirmationsForPeerAsync("peer-a")).ToList();
firstRead.Count.ShouldBe(1);
firstRead[0].ConfirmedWall.ShouldBe(100);
firstRead[0].ConfirmedLogic.ShouldBe(1);
firstRead[0].ConfirmedHash.ShouldBe("hash-1");
await _store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(120, 2, "source-1"), "hash-2");
await _store.UpdateConfirmationAsync("peer-a", "source-2", new HlcTimestamp(130, 0, "source-2"), "hash-3");
var secondRead = (await _store.GetConfirmationsForPeerAsync("peer-a")).OrderBy(x => x.SourceNodeId).ToList();
var allConfirmations = (await _store.GetConfirmationsAsync()).ToList();
secondRead.Count.ShouldBe(2);
secondRead[0].SourceNodeId.ShouldBe("source-1");
secondRead[0].ConfirmedWall.ShouldBe(120);
secondRead[0].ConfirmedLogic.ShouldBe(2);
secondRead[0].ConfirmedHash.ShouldBe("hash-2");
secondRead[1].SourceNodeId.ShouldBe("source-2");
secondRead[1].ConfirmedWall.ShouldBe(130);
secondRead[1].ConfirmedLogic.ShouldBe(0);
secondRead[1].ConfirmedHash.ShouldBe("hash-3");
allConfirmations.Count.ShouldBe(2);
}
/// <summary>
/// Verifies that removing peer tracking deactivates tracking records for that peer.
/// </summary>
[Fact]
public async Task RemovePeerTrackingAsync_DeactivatesPeerTracking()
{
await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
await _store.EnsurePeerRegisteredAsync("peer-b", "10.0.0.11:5050", PeerType.StaticRemote);
await _store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 0, "source-1"), "hash-a");
await _store.UpdateConfirmationAsync("peer-b", "source-1", new HlcTimestamp(100, 0, "source-1"), "hash-b");
await _store.RemovePeerTrackingAsync("peer-a");
var activePeers = (await _store.GetActiveTrackedPeersAsync()).ToList();
var exported = (await _store.ExportAsync()).ToList();
var peerARows = exported.Where(x => x.PeerNodeId == "peer-a").ToList();
activePeers.ShouldContain("peer-b");
activePeers.ShouldNotContain("peer-a");
peerARows.ShouldNotBeEmpty();
peerARows.All(x => !x.IsActive).ShouldBeTrue();
}
/// <inheritdoc />
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_testDbPath))
{
try { File.Delete(_testDbPath); } catch { /* best-effort cleanup */ }
}
}
}

View File

@@ -0,0 +1,226 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class SampleDbContextTests : IDisposable
{
private readonly string _dbPath;
private readonly SampleDbContext _context;
/// <summary>
/// Initializes a new test context backed by a temporary database file.
/// </summary>
public SampleDbContextTests()
{
_dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.db");
_context = new SampleDbContext(_dbPath);
}
/// <summary>
/// Releases test resources and removes the temporary database file.
/// </summary>
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_dbPath))
{
try { File.Delete(_dbPath); } catch { /* best-effort cleanup */ }
}
}
/// <summary>
/// Verifies that required collections are initialized in the context.
/// </summary>
[Fact]
public void Context_ShouldInitializeCollections()
{
// Verify that the collections have been initialized
_context.ShouldNotBeNull();
_context.Users.ShouldNotBeNull("Users collection should be initialized by BLite");
_context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by BLite");
_context.OplogEntries.ShouldNotBeNull("OplogEntries collection should be initialized by BLite");
}
/// <summary>
/// Verifies that inserting a user persists the document.
/// </summary>
[Fact]
public async Task Users_Insert_ShouldPersist()
{
// Arrange
var user = new User
{
Id = "user1",
Name = "Alice",
Age = 30,
Address = new Address { City = "Rome" }
};
// Act
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.Users.FindById("user1");
retrieved.ShouldNotBeNull();
retrieved!.Name.ShouldBe("Alice");
retrieved.Age.ShouldBe(30);
retrieved.Address?.City.ShouldBe("Rome");
}
/// <summary>
/// Verifies that updating a user modifies the existing document.
/// </summary>
[Fact]
public async Task Users_Update_ShouldModifyExisting()
{
// Arrange
var user = new User { Id = "user2", Name = "Bob", Age = 25 };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Act
user.Age = 26;
user.Address = new Address { City = "Milan" };
await _context.Users.UpdateAsync(user);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.Users.FindById("user2");
retrieved.ShouldNotBeNull();
retrieved!.Age.ShouldBe(26);
retrieved.Address?.City.ShouldBe("Milan");
}
/// <summary>
/// Verifies that deleting a user removes the document.
/// </summary>
[Fact]
public async Task Users_Delete_ShouldRemove()
{
// Arrange
var user = new User { Id = "user3", Name = "Charlie", Age = 35 };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Act
await _context.Users.DeleteAsync("user3");
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.Users.FindById("user3");
retrieved.ShouldBeNull();
}
/// <summary>
/// Verifies that inserting a todo list with items persists nested data.
/// </summary>
[Fact]
public async Task TodoLists_InsertWithItems_ShouldPersist()
{
// Arrange
var todoList = new TodoList
{
Id = "list1",
Name = "Shopping",
Items = new List<TodoItem>
{
new() { Task = "Buy milk", Completed = false },
new() { Task = "Buy bread", Completed = true }
}
};
// Act
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.TodoLists.FindById("list1");
retrieved.ShouldNotBeNull();
retrieved!.Name.ShouldBe("Shopping");
retrieved.Items.Count.ShouldBe(2);
retrieved.Items.ShouldContain(i => i.Task == "Buy milk" && !i.Completed);
retrieved.Items.ShouldContain(i => i.Task == "Buy bread" && i.Completed);
}
/// <summary>
/// Verifies that updating todo items modifies the nested collection.
/// </summary>
[Fact]
public async Task TodoLists_UpdateItems_ShouldModifyNestedCollection()
{
// Arrange
var todoList = new TodoList
{
Id = "list2",
Name = "Work Tasks",
Items = new List<TodoItem>
{
new() { Task = "Write report", Completed = false }
}
};
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync();
// Act - Mark task as completed and add new task
todoList.Items[0].Completed = true;
todoList.Items.Add(new TodoItem { Task = "Review report", Completed = false });
await _context.TodoLists.UpdateAsync(todoList);
await _context.SaveChangesAsync();
// Assert
var retrieved = _context.TodoLists.FindById("list2");
retrieved.ShouldNotBeNull();
retrieved!.Items.Count.ShouldBe(2);
retrieved.Items.First().Completed.ShouldBe(true);
retrieved.Items.Last().Completed.ShouldBe(false);
}
/// <summary>
/// Verifies that querying all users returns all inserted users.
/// </summary>
[Fact]
public async Task Users_FindAll_ShouldReturnAllUsers()
{
// Arrange
await _context.Users.InsertAsync(new User { Id = "u1", Name = "User1", Age = 20 });
await _context.Users.InsertAsync(new User { Id = "u2", Name = "User2", Age = 30 });
await _context.Users.InsertAsync(new User { Id = "u3", Name = "User3", Age = 40 });
await _context.SaveChangesAsync();
// Act
var allUsers = _context.Users.FindAll().ToList();
// Assert
allUsers.Count.ShouldBe(3);
allUsers.Select(u => u.Name).ShouldContain("User1");
allUsers.Select(u => u.Name).ShouldContain("User2");
allUsers.Select(u => u.Name).ShouldContain("User3");
}
/// <summary>
/// Verifies that predicate-based queries return only matching users.
/// </summary>
[Fact]
public async Task Users_Find_WithPredicate_ShouldFilterCorrectly()
{
// Arrange
await _context.Users.InsertAsync(new User { Id = "f1", Name = "Young", Age = 18 });
await _context.Users.InsertAsync(new User { Id = "f2", Name = "Adult", Age = 30 });
await _context.Users.InsertAsync(new User { Id = "f3", Name = "Senior", Age = 65 });
await _context.SaveChangesAsync();
// Act
var adults = _context.Users.Find(u => u.Age >= 30).ToList();
// Assert
adults.Count.ShouldBe(2);
adults.Select(u => u.Name).ShouldContain("Adult");
adults.Select(u => u.Name).ShouldContain("Senior");
}
}

View File

@@ -0,0 +1,431 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
using System.Text.Json.Nodes;
using Xunit;
using ZB.MOM.WW.CBDDC.Persistence;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class SnapshotStoreTests : IDisposable
{
private readonly string _testDbPath;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _peerConfirmationStore;
private readonly SnapshotStore _snapshotStore;
private readonly IPeerNodeConfigurationProvider _configProvider;
/// <summary>
/// Initializes a new instance of the <see cref="SnapshotStoreTests"/> class.
/// </summary>
public SnapshotStoreTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.blite");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger<SampleDocumentStore>.Instance);
var snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context,
NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
_oplogStore = new BLiteOplogStore<SampleDbContext>(
_context,
_documentStore,
new LastWriteWinsConflictResolver(),
vectorClock,
snapshotMetadataStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
_peerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
_snapshotStore = new SnapshotStore(
_documentStore,
_peerConfigStore,
_oplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
_peerConfirmationStore);
}
/// <summary>
/// Verifies that creating a snapshot writes valid JSON to the output stream.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_WritesValidJsonToStream()
{
// Arrange - Add some data
var user = new User { Id = "user-1", Name = "Alice", Age = 30 };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// Act - Create snapshot
using var stream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(stream);
// Assert - Stream should contain valid JSON
stream.Length.ShouldBeGreaterThan(0, "Snapshot stream should not be empty");
// Reset stream position and verify JSON is valid
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
string.IsNullOrWhiteSpace(json).ShouldBeFalse("Snapshot JSON should not be empty");
json.Trim().ShouldStartWith("{");
// Verify it's valid JSON by parsing
var doc = JsonDocument.Parse(json);
doc.ShouldNotBeNull();
// Verify structure
doc.RootElement.TryGetProperty("Version", out _).ShouldBeTrue("Should have Version property");
doc.RootElement.TryGetProperty("Documents", out _).ShouldBeTrue("Should have Documents property");
doc.RootElement.TryGetProperty("Oplog", out _).ShouldBeTrue("Should have Oplog property");
doc.RootElement.TryGetProperty("PeerConfirmations", out _).ShouldBeTrue("Should have PeerConfirmations property");
}
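// For reference, the snapshot JSON shape implied by the assertions above
// (field values are illustrative, not captured from a real snapshot):
// {
//   "Version": 1,
//   "Documents": [ ... ],
//   "Oplog": [ ... ],
//   "PeerConfirmations": [ ... ]
// }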
/// <summary>
/// Verifies that snapshot creation includes all persisted documents.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_IncludesAllDocuments()
{
// Arrange - Add multiple documents
await _context.Users.InsertAsync(new User { Id = "u1", Name = "User 1", Age = 20 });
await _context.Users.InsertAsync(new User { Id = "u2", Name = "User 2", Age = 25 });
await _context.TodoLists.InsertAsync(new TodoList
{
Id = "t1",
Name = "My List",
Items = [new TodoItem { Task = "Task 1", Completed = false }]
});
await _context.SaveChangesAsync();
// Act
using var stream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(stream);
// Assert
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
var doc = JsonDocument.Parse(json);
var documents = doc.RootElement.GetProperty("Documents");
documents.GetArrayLength().ShouldBe(3);
}
/// <summary>
/// Verifies that creating and replacing a snapshot preserves document data.
/// </summary>
[Fact]
public async Task RoundTrip_CreateAndReplace_PreservesData()
{
// Arrange - Add data to source
var originalUser = new User { Id = "user-rt", Name = "RoundTrip User", Age = 42 };
await _context.Users.InsertAsync(originalUser);
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-rt",
"source-rt",
new HlcTimestamp(500, 2, "source-rt"),
"hash-rt");
await _context.SaveChangesAsync();
// Create snapshot
using var snapshotStream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(snapshotStream);
snapshotStream.Position = 0;
var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();
var snapshotDoc = JsonDocument.Parse(snapshotJson);
snapshotDoc.RootElement.GetProperty("PeerConfirmations").GetArrayLength().ShouldBe(1);
snapshotStream.Position = 0;
// Create a new context/stores (simulating a different node)
var newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite");
try
{
using var newContext = new SampleDbContext(newDbPath);
var newConfigProvider = CreateConfigProvider("test-new-node");
var newVectorClock = new VectorClockService();
var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock, NullLogger<SampleDocumentStore>.Instance);
var newSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
newContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var newOplogStore = new BLiteOplogStore<SampleDbContext>(
newContext, newDocStore, new LastWriteWinsConflictResolver(),
newVectorClock,
newSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var newPeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
newContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
newContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
var newSnapshotStore = new SnapshotStore(
newDocStore,
newPeerStore,
newOplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
newPeerConfirmationStore);
// Act - Replace database with snapshot
await newSnapshotStore.ReplaceDatabaseAsync(snapshotStream);
// Assert - Data should be restored
var restoredUser = newContext.Users.FindById("user-rt");
restoredUser.ShouldNotBeNull();
restoredUser.Name.ShouldBe("RoundTrip User");
restoredUser.Age.ShouldBe(42);
var restoredConfirmations = (await newPeerConfirmationStore.GetConfirmationsAsync()).ToList();
restoredConfirmations.Count.ShouldBe(1);
restoredConfirmations[0].PeerNodeId.ShouldBe("peer-rt");
restoredConfirmations[0].SourceNodeId.ShouldBe("source-rt");
restoredConfirmations[0].ConfirmedWall.ShouldBe(500);
restoredConfirmations[0].ConfirmedLogic.ShouldBe(2);
restoredConfirmations[0].ConfirmedHash.ShouldBe("hash-rt");
}
finally
{
if (File.Exists(newDbPath))
try { File.Delete(newDbPath); } catch { /* best-effort cleanup */ }
}
}
/// <summary>
/// Verifies that merging a snapshot preserves existing data and adds new data.
/// </summary>
[Fact]
public async Task MergeSnapshotAsync_MergesWithExistingData()
{
// Arrange - Add initial data
await _context.Users.InsertAsync(new User { Id = "existing", Name = "Existing User", Age = 30 });
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
new HlcTimestamp(100, 0, "source-a"),
"target-hash-old");
await _peerConfirmationStore.UpdateConfirmationAsync(
"peer-local-only",
"source-local",
new HlcTimestamp(50, 0, "source-local"),
"target-local-hash");
await _context.SaveChangesAsync();
// Create snapshot with different data
var sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite");
MemoryStream snapshotStream;
try
{
using var sourceContext = new SampleDbContext(sourceDbPath);
await sourceContext.Users.InsertAsync(new User { Id = "new-user", Name = "New User", Age = 25 });
await sourceContext.SaveChangesAsync();
var sourceConfigProvider = CreateConfigProvider("test-source-node");
var sourceVectorClock = new VectorClockService();
var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock, NullLogger<SampleDocumentStore>.Instance);
var sourceSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
sourceContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var sourceOplogStore = new BLiteOplogStore<SampleDbContext>(
sourceContext, sourceDocStore, new LastWriteWinsConflictResolver(),
sourceVectorClock,
sourceSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var sourcePeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
sourceContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
sourceContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
new HlcTimestamp(200, 1, "source-a"),
"source-hash-new");
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-b",
new HlcTimestamp(300, 0, "source-b"),
"source-hash-b");
var sourceSnapshotStore = new SnapshotStore(
sourceDocStore,
sourcePeerStore,
sourceOplogStore,
new LastWriteWinsConflictResolver(),
NullLogger<SnapshotStore>.Instance,
sourcePeerConfirmationStore);
snapshotStream = new MemoryStream();
await sourceSnapshotStore.CreateSnapshotAsync(snapshotStream);
snapshotStream.Position = 0;
}
finally
{
if (File.Exists(sourceDbPath))
try { File.Delete(sourceDbPath); } catch { /* best-effort cleanup */ }
}
// Act - Merge snapshot into existing data
await _snapshotStore.MergeSnapshotAsync(snapshotStream);
// Assert - Both users should exist
var existingUser = _context.Users.FindById("existing");
var newUser = _context.Users.FindById("new-user");
existingUser.ShouldNotBeNull();
newUser.ShouldNotBeNull();
existingUser.Name.ShouldBe("Existing User");
newUser.Name.ShouldBe("New User");
var confirmations = (await _peerConfirmationStore.GetConfirmationsAsync())
.OrderBy(c => c.PeerNodeId)
.ThenBy(c => c.SourceNodeId)
.ToList();
confirmations.Count.ShouldBe(3);
confirmations[0].PeerNodeId.ShouldBe("peer-local-only");
confirmations[0].SourceNodeId.ShouldBe("source-local");
confirmations[0].ConfirmedWall.ShouldBe(50);
confirmations[0].ConfirmedHash.ShouldBe("target-local-hash");
confirmations[1].PeerNodeId.ShouldBe("peer-merge");
confirmations[1].SourceNodeId.ShouldBe("source-a");
confirmations[1].ConfirmedWall.ShouldBe(200);
confirmations[1].ConfirmedLogic.ShouldBe(1);
confirmations[1].ConfirmedHash.ShouldBe("source-hash-new");
confirmations[2].PeerNodeId.ShouldBe("peer-merge");
confirmations[2].SourceNodeId.ShouldBe("source-b");
confirmations[2].ConfirmedWall.ShouldBe(300);
confirmations[2].ConfirmedHash.ShouldBe("source-hash-b");
}
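// Note on the merge semantics exercised above: confirmations are keyed by
// (PeerNodeId, SourceNodeId); the incoming row with the newer HLC wins
// (wall 100 replaced by 200), local-only rows survive untouched, and
// previously unknown pairs are added.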
/// <summary>
/// Verifies that replace can consume legacy snapshots that do not include peer confirmations.
/// </summary>
[Fact]
public async Task ReplaceDatabaseAsync_LegacySnapshotWithoutPeerConfirmations_IsSupported()
{
// Arrange
await _context.Users.InsertAsync(new User { Id = "legacy-user", Name = "Legacy User", Age = 33 });
await _context.SaveChangesAsync();
using var snapshotStream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(snapshotStream);
snapshotStream.Position = 0;
var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();
var legacySnapshot = JsonNode.Parse(snapshotJson)!.AsObject();
legacySnapshot.Remove("PeerConfirmations");
using var legacyStream = new MemoryStream();
await using (var writer = new Utf8JsonWriter(legacyStream))
{
legacySnapshot.WriteTo(writer);
}
legacyStream.Position = 0;
// Act
await _snapshotStore.ReplaceDatabaseAsync(legacyStream);
// Assert
_context.Users.FindById("legacy-user").ShouldNotBeNull();
(await _peerConfirmationStore.GetConfirmationsAsync()).ShouldBeEmpty();
}
/// <summary>
/// Verifies that snapshot creation succeeds for an empty database.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_HandlesEmptyDatabase()
{
// Act - Create snapshot from empty database
using var stream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(stream);
// Assert - Should still produce valid JSON
stream.Length.ShouldBeGreaterThan(0);
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
var doc = JsonDocument.Parse(json);
var documents = doc.RootElement.GetProperty("Documents");
documents.GetArrayLength().ShouldBe(0);
}
/// <summary>
/// Verifies that snapshot creation includes oplog entries.
/// </summary>
[Fact]
public async Task CreateSnapshotAsync_IncludesOplogEntries()
{
// Arrange - Create some oplog entries via document changes
await _context.Users.InsertAsync(new User { Id = "op-user", Name = "Oplog User", Age = 20 });
await _context.SaveChangesAsync();
// Manually add an oplog entry to ensure it's captured
var oplogEntry = new OplogEntry(
"Users",
"manual-key",
OperationType.Put,
JsonDocument.Parse("{\"test\": true}").RootElement,
new HlcTimestamp(DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(), 0, "test-node"),
""
);
await _oplogStore.AppendOplogEntryAsync(oplogEntry);
// Act
using var stream = new MemoryStream();
await _snapshotStore.CreateSnapshotAsync(stream);
// Assert
stream.Position = 0;
var json = await new StreamReader(stream).ReadToEndAsync();
var doc = JsonDocument.Parse(json);
var oplog = doc.RootElement.GetProperty("Oplog");
oplog.GetArrayLength().ShouldBeGreaterThanOrEqualTo(1, "Should have at least one oplog entry");
}
/// <summary>
/// Releases resources created for test execution.
/// </summary>
public void Dispose()
{
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
{
try { File.Delete(_testDbPath); } catch { /* best-effort cleanup */ }
}
}
private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId)
{
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = 5000,
AuthToken = "test-token",
OplogRetentionHours = 24,
MaintenanceIntervalMinutes = 60
});
return configProvider;
}
}

View File

@@ -0,0 +1,32 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyName>ZB.MOM.WW.CBDDC.Sample.Console.Tests</AssemblyName>
<RootNamespace>ZB.MOM.WW.CBDDC.Sample.Console.Tests</RootNamespace>
<PackageId>ZB.MOM.WW.CBDDC.Sample.Console.Tests</PackageId>
<TargetFramework>net10.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
<NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
<IsPackable>false</IsPackable>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="coverlet.collector" Version="6.0.4" />
<PackageReference Include="NSubstitute" Version="5.3.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
<PackageReference Include="Shouldly" Version="4.3.0" />
<PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
<PackageReference Include="xunit.v3" Version="3.2.0" />
</ItemGroup>
<ItemGroup>
<Using Include="Xunit" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\samples\ZB.MOM.WW.CBDDC.Sample.Console\ZB.MOM.WW.CBDDC.Sample.Console.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj" />
</ItemGroup>
</Project>