From 7ebc2cb567cc1cd00cd260e66165ff6489a2bb1c Mon Sep 17 00:00:00 2001 From: Joseph Doherty Date: Sat, 21 Feb 2026 07:53:53 -0500 Subject: [PATCH] Reformat/cleanup --- CBDDC.slnx | 42 +- Directory.Build.props | 10 +- .../ConsoleInteractiveService.cs | 106 +- .../ZB.MOM.WW.CBDDC.Sample.Console/Program.cs | 65 +- .../ZB.MOM.WW.CBDDC.Sample.Console/README.md | 35 +- .../SampleDbContext.cs | 35 +- .../SampleDocumentStore.cs | 74 +- .../TodoList.cs | 23 +- .../ZB.MOM.WW.CBDDC.Sample.Console/User.cs | 20 +- .../ZB.MOM.WW.CBDDC.Sample.Console.csproj | 70 +- .../appsettings.json | 94 +- .../Cache/DocumentCache.cs | 171 ++- .../Cache/IDocumentCache.cs | 75 +- .../ChangesAppliedEventArgs.cs | 30 +- .../Diagnostics/CBDDCHealthCheck.cs | 73 +- .../Diagnostics/DiagnosticsModels.cs | 58 +- .../Diagnostics/ICBDDCHealthCheck.cs | 27 +- .../Diagnostics/ISyncStatusTracker.cs | 105 +- .../Diagnostics/SyncStatusTracker.cs | 208 ++-- src/ZB.MOM.WW.CBDDC.Core/Document.cs | 109 +- .../Exceptions/CBDDCExceptions.cs | 175 +-- src/ZB.MOM.WW.CBDDC.Core/HlcTimestamp.cs | 142 ++- .../Management/IPeerManagementService.cs | 18 +- .../Management/PeerManagementService.cs | 66 +- .../Network/IPeerNodeConfigurationProvider.cs | 56 +- src/ZB.MOM.WW.CBDDC.Core/Network/NodeRole.cs | 14 +- src/ZB.MOM.WW.CBDDC.Core/Network/PeerNode.cs | 87 +- .../Network/PeerNodeConfiguration.cs | 73 +- src/ZB.MOM.WW.CBDDC.Core/Network/PeerType.cs | 18 +- .../Network/RemotePeerConfiguration.cs | 23 +- .../StaticPeerNodeConfigurationProvider.cs | 48 +- src/ZB.MOM.WW.CBDDC.Core/OplogEntry.cs | 131 ++- .../PeerOplogConfirmation.cs | 18 +- src/ZB.MOM.WW.CBDDC.Core/QueryNode.cs | 256 +++-- src/ZB.MOM.WW.CBDDC.Core/README.md | 27 +- .../Resilience/IRetryPolicy.cs | 53 +- .../Resilience/RetryPolicy.cs | 126 +-- src/ZB.MOM.WW.CBDDC.Core/SnapshotMetadata.cs | 17 +- .../Storage/CorruptDatabaseException.cs | 14 +- .../Storage/IDocumentMetadataStore.cs | 107 +- .../Storage/IDocumentStore.cs | 124 +- .../Storage/IOplogStore.cs | 43 
+- .../Storage/IPeerConfigurationStore.cs | 12 +- .../Storage/IPeerOplogConfirmationStore.cs | 16 +- .../Storage/ISnapshotMetadataStore.cs | 23 +- .../Storage/ISnapshotService.cs | 10 +- .../Storage/ISnapshotable.cs | 30 +- .../Storage/IVectorClockService.cs | 40 +- .../Sync/IConflictResolver.cs | 47 +- .../Sync/IOfflineQueue.cs | 68 +- .../Sync/LastWriteWinsConflictResolver.cs | 31 +- src/ZB.MOM.WW.CBDDC.Core/Sync/OfflineQueue.cs | 100 +- .../Sync/PendingOperation.cs | 32 +- .../RecursiveNodeMergeConflictResolver.cs | 132 ++- src/ZB.MOM.WW.CBDDC.Core/VectorClock.cs | 220 ++-- .../ZB.MOM.WW.CBDDC.Core.csproj | 56 +- .../CBDDCHostingExtensions.cs | 27 +- .../Configuration/CBDDCHostingOptions.cs | 10 +- .../Configuration/ClusterOptions.cs | 29 +- .../HealthChecks/CBDDCHealthCheck.cs | 56 +- .../DiscoveryServiceHostedService.cs | 12 +- .../TcpSyncServerHostedService.cs | 14 +- src/ZB.MOM.WW.CBDDC.Hosting/README.md | 13 +- .../Services/NoOpDiscoveryService.cs | 20 +- .../Services/NoOpSyncOrchestrator.cs | 17 +- .../ZB.MOM.WW.CBDDC.Hosting.csproj | 60 +- src/ZB.MOM.WW.CBDDC.Network/CBDDCNode.cs | 121 +- .../CBDDCNodeService.cs | 15 +- .../CBDDCServiceCollectionExtensions.cs | 8 +- .../CompositeDiscoveryService.cs | 55 +- .../CompressionHelper.cs | 64 +- src/ZB.MOM.WW.CBDDC.Network/ICBDDCNode.cs | 69 +- .../IDiscoveryService.cs | 47 +- .../IOplogPruneCutoffCalculator.cs | 8 +- .../ISyncOrchestrator.cs | 33 +- src/ZB.MOM.WW.CBDDC.Network/ISyncServer.cs | 42 +- .../Leadership/BullyLeaderElectionService.cs | 87 +- .../Leadership/ILeaderElectionService.cs | 52 +- .../OplogPruneCutoffCalculator.cs | 71 +- .../OplogPruneCutoffDecision.cs | 38 +- .../PeerDbNetworkExtensions.cs | 60 +- .../Protocol/ProtocolHandler.cs | 479 ++++---- src/ZB.MOM.WW.CBDDC.Network/README.md | 3 + .../Security/ClusterKeyAuthenticator.cs | 29 +- .../Security/CryptoHelper.cs | 66 +- .../Security/IAuthenticator.cs | 12 +- .../Security/IPeerHandshakeService.cs | 33 +- .../Security/NoOpHandshakeService.cs 
| 31 +- .../Security/SecureHandshakeService.cs | 121 +- .../SyncOrchestrator.cs | 1008 ++++++++--------- src/ZB.MOM.WW.CBDDC.Network/TcpPeerClient.cs | 576 +++++----- src/ZB.MOM.WW.CBDDC.Network/TcpSyncServer.cs | 301 ++--- .../Telemetry/INetworkTelemetryService.cs | 49 +- .../Telemetry/MetricType.cs | 2 +- .../Telemetry/NetworkTelemetryService.cs | 326 +++--- .../UdpDiscoveryService.cs | 317 +++--- .../ZB.MOM.WW.CBDDC.Network.csproj | 86 +- .../BLite/BLiteDocumentMetadataStore.cs | 50 +- .../BLite/BLiteDocumentStore.README.md | 59 +- .../BLite/BLiteDocumentStore.cs | 523 ++++----- .../BLite/BLiteOplogStore.cs | 92 +- .../BLite/BLitePeerConfigurationStore.cs | 67 +- .../BLite/BLitePeerOplogConfirmationStore.cs | 75 +- .../BLite/BLiteSnapshotMetadataStore.cs | 94 +- .../BLite/CBDDCBLiteExtensions.cs | 16 +- .../BLite/CBDDCDocumentDbContext.cs | 90 +- .../BLite/Entities/DocumentMetadataEntity.cs | 22 +- .../BLite/Entities/EntityMappers.cs | 249 ++-- .../BLite/Entities/OplogEntity.cs | 26 +- .../Entities/PeerOplogConfirmationEntity.cs | 20 +- .../BLite/Entities/RemotePeerEntity.cs | 20 +- .../BLite/Entities/SnapshotMetadataEntity.cs | 16 +- .../DocumentMetadataStore.cs | 24 +- .../NodeCacheEntry.cs | 8 +- src/ZB.MOM.WW.CBDDC.Persistence/OplogStore.cs | 168 +-- .../PeerConfigurationStore.cs | 31 +- .../PeerOplogConfirmationStore.cs | 10 +- src/ZB.MOM.WW.CBDDC.Persistence/README.md | 9 +- .../Snapshot/SnapshotDto.cs | 92 +- .../SnapshotMetadataStore.cs | 36 +- .../SnapshotStore.cs | 335 +++--- .../VectorClockService.cs | 26 +- .../ZB.MOM.WW.CBDDC.Persistence.csproj | 66 +- .../ArchitectureFitnessTests.cs | 136 +-- .../GlobalUsings.cs | 2 +- .../OplogEntryTests.cs | 130 ++- .../PeerManagementServiceTests.cs | 20 +- .../PerformanceRegressionTests.cs | 51 +- ...RecursiveNodeMergeConflictResolverTests.cs | 30 +- .../VectorClockTests.cs | 262 +++-- .../ZB.MOM.WW.CBDDC.Core.Tests.csproj | 60 +- .../benchmark_limits.json | 4 +- .../ClusterCrudSyncE2ETests.cs | 316 +++--- 
.../ZB.MOM.WW.CBDDC.E2E.Tests/GlobalUsings.cs | 2 +- .../ZB.MOM.WW.CBDDC.E2E.Tests.csproj | 52 +- .../CBDDCHealthCheckTests.cs | 20 +- .../CBDDCHostingExtensionsTests.cs | 17 +- .../GlobalUsings.cs | 2 +- .../HostedServicesTests.cs | 6 +- .../NoOpServicesTests.cs | 6 +- .../ZB.MOM.WW.CBDDC.Hosting.Tests.csproj | 50 +- .../BullyLeaderElectionServiceTests.cs | 25 +- .../ConnectionTests.cs | 20 +- .../CryptoHelperTests.cs | 43 +- .../GlobalUsings.cs | 2 +- .../HandshakeRegressionTests.cs | 18 +- .../ProtocolTests.cs | 332 +++--- .../SecureHandshakeTests.cs | 368 +++--- .../SnapshotReconnectRegressionTests.cs | 557 +++++---- .../SyncOrchestratorConfirmationTests.cs | 37 +- ...SyncOrchestratorMaintenancePruningTests.cs | 33 +- .../TelemetryTests.cs | 187 ++- .../VectorClockSyncTests.cs | 192 ++-- .../ZB.MOM.WW.CBDDC.Network.Tests.csproj | 60 +- .../BLiteStoreExportImportTests.cs | 120 +- .../GlobalUsings.cs | 2 +- .../PeerOplogConfirmationStoreTests.cs | 36 +- .../SampleDbContextTests.cs | 241 ++-- .../SnapshotStoreTests.cs | 443 ++++---- ...B.MOM.WW.CBDDC.Sample.Console.Tests.csproj | 64 +- 160 files changed, 7258 insertions(+), 7262 deletions(-) diff --git a/CBDDC.slnx b/CBDDC.slnx index 63a69f6..9c7bc09 100644 --- a/CBDDC.slnx +++ b/CBDDC.slnx @@ -1,23 +1,23 @@ - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + diff --git a/Directory.Build.props b/Directory.Build.props index dc63a45..dc0722a 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,7 +1,7 @@ - - true - latest - true - + + true + latest + true + diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/ConsoleInteractiveService.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/ConsoleInteractiveService.cs index 5d2dafe..1dbc701 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/ConsoleInteractiveService.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/ConsoleInteractiveService.cs @@ -1,37 +1,33 @@ +using Microsoft.Extensions.DependencyInjection; using 
Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; -using ZB.MOM.WW.CBDDC.Core; +using Serilog.Context; using ZB.MOM.WW.CBDDC.Core.Cache; using ZB.MOM.WW.CBDDC.Core.Diagnostics; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Network; -using ZB.MOM.WW.CBDDC.Persistence.BLite; -using Microsoft.Extensions.DependencyInjection; // For IServiceProvider if needed -using Serilog.Context; -using ZB.MOM.WW.CBDDC.Sample.Console; using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Sync; +using ZB.MOM.WW.CBDDC.Network; +using ZB.MOM.WW.CBDDC.Network.Security; +// For IServiceProvider if needed namespace ZB.MOM.WW.CBDDC.Sample.Console; public class ConsoleInteractiveService : BackgroundService { - private readonly ILogger _logger; - private readonly SampleDbContext _db; - private readonly ICBDDCNode _node; - private readonly IHostApplicationLifetime _lifetime; - - // Auxiliary services for status/commands private readonly IDocumentCache _cache; - private readonly IOfflineQueue _queue; - private readonly ICBDDCHealthCheck _healthCheck; - private readonly ISyncStatusTracker _syncTracker; - private readonly IServiceProvider _serviceProvider; private readonly IPeerNodeConfigurationProvider _configProvider; + private readonly SampleDbContext _db; + private readonly ICBDDCHealthCheck _healthCheck; + private readonly IHostApplicationLifetime _lifetime; + private readonly ILogger _logger; + private readonly ICBDDCNode _node; + private readonly IOfflineQueue _queue; + private readonly IServiceProvider _serviceProvider; + private readonly ISyncStatusTracker _syncTracker; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The logger used by the interactive service. /// The sample database context. 
@@ -72,7 +68,7 @@ public class ConsoleInteractiveService : BackgroundService { var config = await _configProvider.GetConfiguration(); - System.Console.WriteLine($"--- Interactive Console ---"); + System.Console.WriteLine("--- Interactive Console ---"); System.Console.WriteLine($"Node ID: {config.NodeId}"); PrintHelp(); @@ -85,7 +81,7 @@ public class ConsoleInteractiveService : BackgroundService continue; } - var input = System.Console.ReadLine(); + string? input = System.Console.ReadLine(); if (string.IsNullOrEmpty(input)) continue; try @@ -118,42 +114,53 @@ public class ConsoleInteractiveService : BackgroundService System.Console.WriteLine(" [n]ew (auto), [s]pam (5x), [c]ount, [t]odos"); System.Console.WriteLine(" [h]ealth, cac[h]e"); System.Console.WriteLine(" [r]esolver [lww|merge], [demo] conflict"); - } - + } + private async Task HandleInput(string input) { var config = await _configProvider.GetConfiguration(); if (input.StartsWith("n")) { var ts = DateTime.Now.ToString("HH:mm:ss.fff"); - var user = new User { Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), Address = new Address { City = "AutoCity" } }; + var user = new User + { + Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), + Address = new Address { City = "AutoCity" } + }; await _db.Users.InsertAsync(user); await _db.SaveChangesAsync(); System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}..."); } else if (input.StartsWith("s")) { - for (int i = 0; i < 5; i++) + for (var i = 0; i < 5; i++) { var ts = DateTime.Now.ToString("HH:mm:ss.fff"); - var user = new User { Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), Address = new Address { City = "SpamCity" } }; + var user = new User + { + Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), + Address = new Address { City = "SpamCity" } + }; await _db.Users.InsertAsync(user); 
System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}..."); await Task.Delay(100); } + await _db.SaveChangesAsync(); } else if (input.StartsWith("c")) { - var userCount = _db.Users.FindAll().Count(); - var todoCount = _db.TodoLists.FindAll().Count(); + int userCount = _db.Users.FindAll().Count(); + int todoCount = _db.TodoLists.FindAll().Count(); System.Console.WriteLine($"Collection 'Users': {userCount} documents"); System.Console.WriteLine($"Collection 'TodoLists': {todoCount} documents"); } else if (input.StartsWith("p")) { - var alice = new User { Id = Guid.NewGuid().ToString(), Name = "Alice", Age = 30, Address = new Address { City = "Paris" } }; - var bob = new User { Id = Guid.NewGuid().ToString(), Name = "Bob", Age = 25, Address = new Address { City = "Rome" } }; + var alice = new User + { Id = Guid.NewGuid().ToString(), Name = "Alice", Age = 30, Address = new Address { City = "Paris" } }; + var bob = new User + { Id = Guid.NewGuid().ToString(), Name = "Bob", Age = 25, Address = new Address { City = "Rome" } }; await _db.Users.InsertAsync(alice); await _db.Users.InsertAsync(bob); await _db.SaveChangesAsync(); @@ -162,17 +169,19 @@ public class ConsoleInteractiveService : BackgroundService else if (input.StartsWith("g")) { System.Console.Write("Enter user Id: "); - var id = System.Console.ReadLine(); + string? id = System.Console.ReadLine(); if (!string.IsNullOrEmpty(id)) { var u = _db.Users.FindById(id); - System.Console.WriteLine(u != null ? $"Got: {u.Name}, Age {u.Age}, City: {u.Address?.City}" : "Not found"); + System.Console.WriteLine(u != null + ? $"Got: {u.Name}, Age {u.Age}, City: {u.Address?.City}" + : "Not found"); } } else if (input.StartsWith("d")) { System.Console.Write("Enter user Id to delete: "); - var id = System.Console.ReadLine(); + string? 
id = System.Console.ReadLine(); if (!string.IsNullOrEmpty(id)) { await _db.Users.DeleteAsync(id); @@ -183,8 +192,8 @@ public class ConsoleInteractiveService : BackgroundService else if (input.StartsWith("l")) { var peers = _node.Discovery.GetActivePeers(); - var handshakeSvc = _serviceProvider.GetService(); - var secureIcon = handshakeSvc != null ? "πŸ”’" : "πŸ”“"; + var handshakeSvc = _serviceProvider.GetService(); + string secureIcon = handshakeSvc != null ? "πŸ”’" : "πŸ”“"; System.Console.WriteLine($"Active Peers ({secureIcon}):"); foreach (var p in peers) @@ -203,7 +212,7 @@ public class ConsoleInteractiveService : BackgroundService { var health = await _healthCheck.CheckAsync(); var syncStatus = _syncTracker.GetStatus(); - var handshakeSvc = _serviceProvider.GetService(); + var handshakeSvc = _serviceProvider.GetService(); System.Console.WriteLine("=== Health Check ==="); System.Console.WriteLine($"Database: {(health.DatabaseHealthy ? "βœ“" : "βœ—")}"); @@ -216,17 +225,18 @@ public class ConsoleInteractiveService : BackgroundService if (health.Errors.Any()) { System.Console.WriteLine("Errors:"); - foreach (var err in health.Errors.Take(3)) System.Console.WriteLine($" - {err}"); + foreach (string err in health.Errors.Take(3)) System.Console.WriteLine($" - {err}"); } } else if (input.StartsWith("ch") || input == "cache") { var stats = _cache.GetStatistics(); - System.Console.WriteLine($"=== Cache Stats ===\nSize: {stats.Size}\nHits: {stats.Hits}\nMisses: {stats.Misses}\nRate: {stats.HitRate:P1}"); + System.Console.WriteLine( + $"=== Cache Stats ===\nSize: {stats.Size}\nHits: {stats.Hits}\nMisses: {stats.Misses}\nRate: {stats.HitRate:P1}"); } else if (input.StartsWith("r") && input.Contains("resolver")) { - var parts = input.Split(' '); + string[] parts = input.Split(' '); if (parts.Length > 1) { var newResolver = parts[1].ToLower() switch @@ -240,7 +250,7 @@ public class ConsoleInteractiveService : BackgroundService { // Note: Requires restart to fully apply. 
For demo, we inform user. System.Console.WriteLine($"⚠️ Resolver changed to {parts[1].ToUpper()}. Restart node to apply."); - System.Console.WriteLine($" (Current session continues with previous resolver)"); + System.Console.WriteLine(" (Current session continues with previous resolver)"); } else { @@ -262,7 +272,7 @@ public class ConsoleInteractiveService : BackgroundService System.Console.WriteLine($"πŸ“‹ {list.Name} ({list.Items.Count} items)"); foreach (var item in list.Items) { - var status = item.Completed ? "βœ“" : " "; + string status = item.Completed ? "βœ“" : " "; System.Console.WriteLine($" [{status}] {item.Task}"); } } @@ -281,8 +291,8 @@ public class ConsoleInteractiveService : BackgroundService Name = "Shopping List", Items = new List { - new TodoItem { Task = "Buy milk", Completed = false }, - new TodoItem { Task = "Buy bread", Completed = false } + new() { Task = "Buy milk", Completed = false }, + new() { Task = "Buy bread", Completed = false } } }; @@ -325,24 +335,20 @@ public class ConsoleInteractiveService : BackgroundService System.Console.WriteLine($" List: {merged.Name}"); foreach (var item in merged.Items) { - var status = item.Completed ? "βœ“" : " "; + string status = item.Completed ? "βœ“" : " "; System.Console.WriteLine($" [{status}] {item.Task}"); } var resolver = _serviceProvider.GetRequiredService(); - var resolverType = resolver.GetType().Name; + string resolverType = resolver.GetType().Name; System.Console.WriteLine($"\nℹ️ Resolution Strategy: {resolverType}"); if (resolverType.Contains("Recursive")) - { System.Console.WriteLine(" β†’ Items merged by 'id', both edits preserved"); - } else - { System.Console.WriteLine(" β†’ Last write wins, Node B changes override Node A"); - } } System.Console.WriteLine("\nβœ“ Demo complete. 
Run 'todos' to see all lists.\n"); } -} +} \ No newline at end of file diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs index f033c88..356742b 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs @@ -1,33 +1,26 @@ using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Core.Cache; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Core.Diagnostics; -using ZB.MOM.WW.CBDDC.Core.Resilience; -using ZB.MOM.WW.CBDDC.Network; -using ZB.MOM.WW.CBDDC.Network.Security; -using ZB.MOM.WW.CBDDC.Persistence.BLite; -using ZB.MOM.WW.CBDDC.Sample.Console; using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; using Serilog; using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Sync; +using ZB.MOM.WW.CBDDC.Network; +using ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Sample.Console; // Local User/Address classes removed in favor of Shared project -class Program +internal class Program { - static async Task Main(string[] args) + private static async Task Main(string[] args) { var builder = Host.CreateApplicationBuilder(args); // Configuration builder.Configuration.SetBasePath(Directory.GetCurrentDirectory()) - .AddJsonFile("appsettings.json", optional: true, reloadOnChange: true); + .AddJsonFile("appsettings.json", true, true); // Logging builder.Logging.ClearProviders(); @@ -38,39 +31,36 @@ class Program .Enrich.WithProperty("Application", "CBDDC.Sample.Console") .WriteTo.Console()); - var randomPort = new Random().Next(1000, 9999); + int randomPort = new Random().Next(1000, 9999); // Node ID - string nodeId = args.Length > 0 ? args[0] : ("node-" + randomPort); + string nodeId = args.Length > 0 ? 
args[0] : "node-" + randomPort; int tcpPort = args.Length > 1 ? int.Parse(args[1]) : randomPort; // Conflict Resolution Strategy (can be switched at runtime via service replacement) - var useRecursiveMerge = args.Contains("--merge"); - if (useRecursiveMerge) - { - builder.Services.AddSingleton(); - } + bool useRecursiveMerge = args.Contains("--merge"); + if (useRecursiveMerge) builder.Services.AddSingleton(); IPeerNodeConfigurationProvider peerNodeConfigurationProvider = new StaticPeerNodeConfigurationProvider( new PeerNodeConfiguration { NodeId = nodeId, TcpPort = tcpPort, - AuthToken = "Test-Cluster-Key", + AuthToken = "Test-Cluster-Key" //KnownPeers = builder.Configuration.GetSection("CBDDC:KnownPeers").Get>() ?? new() }); - builder.Services.AddSingleton(peerNodeConfigurationProvider); + builder.Services.AddSingleton(peerNodeConfigurationProvider); // Database path - var dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data"); + string dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data"); Directory.CreateDirectory(dataPath); - var databasePath = Path.Combine(dataPath, $"{nodeId}.blite"); + string databasePath = Path.Combine(dataPath, $"{nodeId}.blite"); // Register CBDDC Services using Fluent Extensions with BLite, SampleDbContext, and SampleDocumentStore builder.Services.AddCBDDCCore() - .AddCBDDCBLite(sp => new SampleDbContext(databasePath)) - .AddCBDDCNetwork(); // useHostedService = true by default + .AddCBDDCBLite(sp => new SampleDbContext(databasePath)) + .AddCBDDCNetwork(); // useHostedService = true by default builder.Services.AddHostedService(); // Runs the Input Loop @@ -86,12 +76,7 @@ class Program private class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider { /// - /// Gets or sets the current peer node configuration. - /// - public PeerNodeConfiguration Configuration { get; set; } - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. 
/// /// The initial peer node configuration. public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration) @@ -100,12 +85,17 @@ class Program } /// - /// Occurs when the peer node configuration changes. + /// Gets or sets the current peer node configuration. + /// + public PeerNodeConfiguration Configuration { get; } + + /// + /// Occurs when the peer node configuration changes. /// public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged; /// - /// Gets the current peer node configuration. + /// Gets the current peer node configuration. /// /// A task that returns the current configuration. public Task GetConfiguration() @@ -114,7 +104,7 @@ class Program } /// - /// Raises the configuration changed event. + /// Raises the configuration changed event. /// /// The new configuration value. protected virtual void OnConfigurationChanged(PeerNodeConfiguration newConfig) @@ -122,5 +112,4 @@ class Program ConfigurationChanged?.Invoke(this, newConfig); } } - -} +} \ No newline at end of file diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md b/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md index 966279e..4c38012 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md @@ -5,21 +5,25 @@ This sample demonstrates the core features of CBDDC, a distributed peer-to-peer ## Features Demonstrated ### πŸ”‘ Primary Keys & Auto-Generation + - Automatic GUID generation for entities - Convention-based key detection (`Id` property) - `[PrimaryKey]` attribute support ### 🎯 Generic Type-Safe API + - `Collection()` for compile-time type safety - Keyless `Put(entity)` with auto-key extraction - IntelliSense-friendly operations ### πŸ” LINQ Query Support + - Expression-based queries - Paging and sorting - Complex predicates (>, >=, ==, !=, nested properties) ### 🌐 Network Synchronization + - UDP peer discovery - TCP synchronization - Automatic conflict resolution (Last-Write-Wins) 
@@ -35,16 +39,19 @@ dotnet run ### Multi-Node (Peer-to-Peer) Terminal 1: + ```bash dotnet run -- --node-id node1 --tcp-port 5001 --udp-port 6001 ``` Terminal 2: + ```bash dotnet run -- --node-id node2 --tcp-port 5002 --udp-port 6002 ``` Terminal 3: + ```bash dotnet run -- --node-id node3 --tcp-port 5003 --udp-port 6003 ``` @@ -53,20 +60,20 @@ Changes made on any node will automatically sync to all peers! ## Available Commands -| Command | Description | -|---------|-------------| -| `p` | Put Alice and Bob (auto-generated IDs) | -| `g` | Get user by ID (prompts for ID) | -| `d` | Delete user by ID (prompts for ID) | -| `n` | Create new user with auto-generated ID | -| `s` | Spam 5 users with auto-generated IDs | -| `c` | Count total documents | -| `f` | Demo various Find queries | -| `f2` | Demo Find with paging (skip/take) | -| `a` | Demo auto-generated primary keys | -| `t` | Demo generic typed API | -| `l` | List active peers | -| `q` | Quit | +| Command | Description | +|---------|----------------------------------------| +| `p` | Put Alice and Bob (auto-generated IDs) | +| `g` | Get user by ID (prompts for ID) | +| `d` | Delete user by ID (prompts for ID) | +| `n` | Create new user with auto-generated ID | +| `s` | Spam 5 users with auto-generated IDs | +| `c` | Count total documents | +| `f` | Demo various Find queries | +| `f2` | Demo Find with paging (skip/take) | +| `a` | Demo auto-generated primary keys | +| `t` | Demo generic typed API | +| `l` | List active peers | +| `q` | Quit | ## Example Session diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs index 9e94934..7943a78 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs @@ -2,28 +2,13 @@ using BLite.Core.Metadata; using BLite.Core.Storage; using ZB.MOM.WW.CBDDC.Persistence.BLite; -using System; -using System.Collections.Generic; -using 
System.Linq; -using System.Text; -using System.Threading.Tasks; namespace ZB.MOM.WW.CBDDC.Sample.Console; -public partial class SampleDbContext : CBDDCDocumentDbContext +public class SampleDbContext : CBDDCDocumentDbContext { /// - /// Gets the users collection. - /// - public DocumentCollection Users { get; private set; } = null!; - - /// - /// Gets the todo lists collection. - /// - public DocumentCollection TodoLists { get; private set; } = null!; - - /// - /// Initializes a new instance of the SampleDbContext class using the specified database file path. + /// Initializes a new instance of the SampleDbContext class using the specified database file path. /// /// The file system path to the database file. Cannot be null or empty. public SampleDbContext(string databasePath) : base(databasePath) @@ -31,8 +16,8 @@ public partial class SampleDbContext : CBDDCDocumentDbContext } /// - /// Initializes a new instance of the SampleDbContext class using the specified database file path and page file - /// configuration. + /// Initializes a new instance of the SampleDbContext class using the specified database file path and page file + /// configuration. /// /// The file system path to the database file. Cannot be null or empty. /// The configuration settings for the page file. Cannot be null. @@ -40,6 +25,16 @@ public partial class SampleDbContext : CBDDCDocumentDbContext { } + /// + /// Gets the users collection. + /// + public DocumentCollection Users { get; private set; } = null!; + + /// + /// Gets the todo lists collection. 
+ /// + public DocumentCollection TodoLists { get; private set; } = null!; + /// protected override void OnModelCreating(ModelBuilder modelBuilder) { @@ -52,4 +47,4 @@ public partial class SampleDbContext : CBDDCDocumentDbContext .ToCollection("TodoLists") .HasKey(t => t.Id); } -} +} \ No newline at end of file diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs index 6f867d0..f781176 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs @@ -1,16 +1,15 @@ -using ZB.MOM.WW.CBDDC.Core; +using System.Text.Json; +using Microsoft.Extensions.Logging; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Sync; using ZB.MOM.WW.CBDDC.Persistence.BLite; -using Microsoft.Extensions.Logging; -using System.Text.Json; namespace ZB.MOM.WW.CBDDC.Sample.Console; /// -/// Document store implementation for CBDDC Sample using BLite persistence. -/// Extends BLiteDocumentStore to automatically handle Oplog creation via CDC. +/// Document store implementation for CBDDC Sample using BLite persistence. +/// Extends BLiteDocumentStore to automatically handle Oplog creation via CDC. /// public class SampleDocumentStore : BLiteDocumentStore { @@ -18,7 +17,7 @@ public class SampleDocumentStore : BLiteDocumentStore private const string TodoListsCollection = "TodoLists"; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The sample database context. /// The peer node configuration provider. @@ -37,6 +36,16 @@ public class SampleDocumentStore : BLiteDocumentStore WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id); } + #region Helper Methods + + private static JsonElement? SerializeEntity(T? 
entity) where T : class + { + if (entity == null) return null; + return JsonSerializer.SerializeToElement(entity); + } + + #endregion + #region Abstract Method Implementations /// @@ -49,12 +58,10 @@ public class SampleDocumentStore : BLiteDocumentStore /// protected override async Task ApplyContentToEntitiesBatchAsync( - IEnumerable<(string Collection, string Key, JsonElement Content)> documents, CancellationToken cancellationToken) + IEnumerable<(string Collection, string Key, JsonElement Content)> documents, + CancellationToken cancellationToken) { - foreach (var (collection, key, content) in documents) - { - UpsertEntity(collection, key, content); - } + foreach ((string collection, string key, var content) in documents) UpsertEntity(collection, key, content); await _context.SaveChangesAsync(cancellationToken); } @@ -91,10 +98,10 @@ public class SampleDocumentStore : BLiteDocumentStore protected override Task GetEntityAsJsonAsync( string collection, string key, CancellationToken cancellationToken) { - return Task.FromResult(collection switch + return Task.FromResult(collection switch { - UsersCollection => SerializeEntity(_context.Users.Find(u => u.Id == key).FirstOrDefault()), - TodoListsCollection => SerializeEntity(_context.TodoLists.Find(t => t.Id == key).FirstOrDefault()), + UsersCollection => SerializeEntity(_context.Users.Find(u => u.Id == key).FirstOrDefault()), + TodoListsCollection => SerializeEntity(_context.TodoLists.Find(t => t.Id == key).FirstOrDefault()), _ => null }); } @@ -111,10 +118,7 @@ public class SampleDocumentStore : BLiteDocumentStore protected override async Task RemoveEntitiesBatchAsync( IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken) { - foreach (var (collection, key) in documents) - { - DeleteEntity(collection, key); - } + foreach ((string collection, string key) in documents) DeleteEntity(collection, key); await _context.SaveChangesAsync(cancellationToken); } @@ -140,25 +144,15 @@ public 
class SampleDocumentStore : BLiteDocumentStore { return await Task.Run(() => collection switch { - UsersCollection => _context.Users.FindAll() - .Select(u => (u.Id, SerializeEntity(u)!.Value)), - - TodoListsCollection => _context.TodoLists.FindAll() - .Select(t => (t.Id, SerializeEntity(t)!.Value)), - - _ => Enumerable.Empty<(string, JsonElement)>() - }, cancellationToken); - } - - #endregion - - #region Helper Methods - - private static JsonElement? SerializeEntity(T? entity) where T : class - { - if (entity == null) return null; - return JsonSerializer.SerializeToElement(entity); - } - - #endregion -} + UsersCollection => _context.Users.FindAll() + .Select(u => (u.Id, SerializeEntity(u)!.Value)), + + TodoListsCollection => _context.TodoLists.FindAll() + .Select(t => (t.Id, SerializeEntity(t)!.Value)), + + _ => Enumerable.Empty<(string, JsonElement)>() + }, cancellationToken); + } + + #endregion +} \ No newline at end of file diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/TodoList.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/TodoList.cs index 8a1f701..f28609c 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/TodoList.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/TodoList.cs @@ -1,23 +1,22 @@ -using System.Collections.Generic; -using System.ComponentModel.DataAnnotations; - -namespace ZB.MOM.WW.CBDDC.Sample.Console; - +using System.ComponentModel.DataAnnotations; + +namespace ZB.MOM.WW.CBDDC.Sample.Console; + public class TodoList { /// - /// Gets or sets the document identifier. + /// Gets or sets the document identifier. /// [Key] public string Id { get; set; } = Guid.NewGuid().ToString(); /// - /// Gets or sets the list name. + /// Gets or sets the list name. /// public string Name { get; set; } = string.Empty; /// - /// Gets or sets the todo items in the list. + /// Gets or sets the todo items in the list. 
/// public List Items { get; set; } = new(); } @@ -25,17 +24,17 @@ public class TodoList public class TodoItem { /// - /// Gets or sets the task description. + /// Gets or sets the task description. /// public string Task { get; set; } = string.Empty; /// - /// Gets or sets a value indicating whether the task is completed. + /// Gets or sets a value indicating whether the task is completed. /// public bool Completed { get; set; } /// - /// Gets or sets the UTC creation timestamp. + /// Gets or sets the UTC creation timestamp. /// public DateTime CreatedAt { get; set; } = DateTime.UtcNow; -} +} \ No newline at end of file diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/User.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/User.cs index ce2787b..cf5eab0 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/User.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/User.cs @@ -1,27 +1,27 @@ -using System.ComponentModel.DataAnnotations; - -namespace ZB.MOM.WW.CBDDC.Sample.Console; - +using System.ComponentModel.DataAnnotations; + +namespace ZB.MOM.WW.CBDDC.Sample.Console; + public class User { /// - /// Gets or sets the unique user identifier. + /// Gets or sets the unique user identifier. /// [Key] public string Id { get; set; } = ""; /// - /// Gets or sets the user name. + /// Gets or sets the user name. /// public string? Name { get; set; } /// - /// Gets or sets the user age. + /// Gets or sets the user age. /// public int Age { get; set; } /// - /// Gets or sets the user address. + /// Gets or sets the user address. /// public Address? Address { get; set; } } @@ -29,7 +29,7 @@ public class User public class Address { /// - /// Gets or sets the city value. + /// Gets or sets the city value. /// public string? 
City { get; set; } -} +} \ No newline at end of file diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj b/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj index 1b9c5bd..35d85c8 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj @@ -1,41 +1,41 @@ - - - - all - runtime; build; native; contentfiles; analyzers; buildtransitive - - - - - + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + + + + - - - - - - - - - + + + + + + + + + - - - PreserveNewest - - + + + PreserveNewest + + - - ZB.MOM.WW.CBDDC.Sample.Console - ZB.MOM.WW.CBDDC.Sample.Console - ZB.MOM.WW.CBDDC.Sample.Console - Exe - net10.0 - enable - enable - false - + + ZB.MOM.WW.CBDDC.Sample.Console + ZB.MOM.WW.CBDDC.Sample.Console + ZB.MOM.WW.CBDDC.Sample.Console + Exe + net10.0 + enable + enable + false + - + diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/appsettings.json b/samples/ZB.MOM.WW.CBDDC.Sample.Console/appsettings.json index de12b5e..52ec43c 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/appsettings.json +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/appsettings.json @@ -1,51 +1,51 @@ { -"Logging": { + "Logging": { "LogLevel": { - "Default": "Information", - "Microsoft": "Warning", - "System": "Warning", - "CBDDC": "Information", - "ZB.MOM.WW.CBDDC.Network.SyncOrchestrator": "Information", - "ZB.MOM.WW.CBDDC.Core.Storage.OplogCoordinator": "Warning", - "ZB.MOM.WW.CBDDC.Persistence": "Warning" - } -}, - "CBDDC": { - "Network": { - "TcpPort": 5001, - "UdpPort": 6000, - "AuthToken": "demo-secret-key", - "ConnectionTimeoutMs": 5000, - "RetryAttempts": 3, - "RetryDelayMs": 1000, - "LocalhostOnly": false - }, - "Persistence": { - "DatabasePath": "data/cbddc.db", - "EnableWalMode": true, - "CacheSizeMb": 50, - "EnableAutoBackup": true, - "BackupPath": "backups/", - "BusyTimeoutMs": 
5000 - }, - "Sync": { - "SyncIntervalMs": 5000, - "BatchSize": 100, - "EnableOfflineQueue": true, - "MaxQueueSize": 1000 - }, - "Logging": { - "LogLevel": "Information", - "LogFilePath": "logs/cbddc.log", - "MaxLogFileSizeMb": 10, - "MaxLogFiles": 5 - }, - "KnownPeers": [ - { - "NodeId": "AspNetSampleNode", - "Host": "localhost", - "Port": 6001 - } - ] + "Default": "Information", + "Microsoft": "Warning", + "System": "Warning", + "CBDDC": "Information", + "ZB.MOM.WW.CBDDC.Network.SyncOrchestrator": "Information", + "ZB.MOM.WW.CBDDC.Core.Storage.OplogCoordinator": "Warning", + "ZB.MOM.WW.CBDDC.Persistence": "Warning" } + }, + "CBDDC": { + "Network": { + "TcpPort": 5001, + "UdpPort": 6000, + "AuthToken": "demo-secret-key", + "ConnectionTimeoutMs": 5000, + "RetryAttempts": 3, + "RetryDelayMs": 1000, + "LocalhostOnly": false + }, + "Persistence": { + "DatabasePath": "data/cbddc.db", + "EnableWalMode": true, + "CacheSizeMb": 50, + "EnableAutoBackup": true, + "BackupPath": "backups/", + "BusyTimeoutMs": 5000 + }, + "Sync": { + "SyncIntervalMs": 5000, + "BatchSize": 100, + "EnableOfflineQueue": true, + "MaxQueueSize": 1000 + }, + "Logging": { + "LogLevel": "Information", + "LogFilePath": "logs/cbddc.log", + "MaxLogFileSizeMb": 10, + "MaxLogFiles": 5 + }, + "KnownPeers": [ + { + "NodeId": "AspNetSampleNode", + "Host": "localhost", + "Port": 6001 + } + ] + } } \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Cache/DocumentCache.cs b/src/ZB.MOM.WW.CBDDC.Core/Cache/DocumentCache.cs index 94693eb..b835d90 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Cache/DocumentCache.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Cache/DocumentCache.cs @@ -1,76 +1,75 @@ -using System; -using System.Collections.Generic; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using System.Threading.Tasks; +using System.Collections.Generic; +using System.Threading.Tasks; +using 
Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Cache; /// -/// LRU cache entry with linked list node. +/// LRU cache entry with linked list node. /// -internal class CacheEntry -{ - /// - /// Gets the cached document. - /// - public Document Document { get; } - - /// - /// Gets the linked-list node used for LRU tracking. - /// - public LinkedListNode Node { get; } - - /// - /// Initializes a new instance of the class. - /// - /// The cached document. - /// The linked-list node used for LRU tracking. - public CacheEntry(Document document, LinkedListNode node) - { - Document = document; - Node = node; +internal class CacheEntry +{ + /// + /// Initializes a new instance of the class. + /// + /// The cached document. + /// The linked-list node used for LRU tracking. + public CacheEntry(Document document, LinkedListNode node) + { + Document = document; + Node = node; } + + /// + /// Gets the cached document. + /// + public Document Document { get; } + + /// + /// Gets the linked-list node used for LRU tracking. + /// + public LinkedListNode Node { get; } } /// -/// In-memory LRU cache for documents. +/// In-memory LRU cache for documents. /// -public class DocumentCache : IDocumentCache -{ - private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; - private readonly Dictionary _cache = new(); - private readonly LinkedList _lru = new(); - private readonly ILogger _logger; +public class DocumentCache : IDocumentCache +{ + private readonly Dictionary _cache = new(); private readonly object _lock = new(); + private readonly ILogger _logger; + private readonly LinkedList _lru = new(); + private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; - // Statistics - private long _hits = 0; - private long _misses = 0; - - /// - /// Initializes a new instance of the class. - /// - /// The configuration provider used for cache size limits. 
- /// The logger instance. - public DocumentCache(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger? logger = null) - { - _peerNodeConfigurationProvider = peerNodeConfigurationProvider; - _logger = logger ?? NullLogger.Instance; - } + // Statistics + private long _hits; + private long _misses; - /// - /// Gets a document from cache. - /// - /// The document collection name. - /// The document key. - /// A task whose result is the cached document, or if not found. - public async Task Get(string collection, string key) - { - lock (_lock) - { + /// + /// Initializes a new instance of the class. + /// + /// The configuration provider used for cache size limits. + /// The logger instance. + public DocumentCache(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, + ILogger? logger = null) + { + _peerNodeConfigurationProvider = peerNodeConfigurationProvider; + _logger = logger ?? NullLogger.Instance; + } + + /// + /// Gets a document from cache. + /// + /// The document collection name. + /// The document key. + /// A task whose result is the cached document, or if not found. + public async Task Get(string collection, string key) + { + lock (_lock) + { var cacheKey = $"{collection}:{key}"; if (_cache.TryGetValue(cacheKey, out var entry)) @@ -90,16 +89,16 @@ public class DocumentCache : IDocumentCache } } - /// - /// Sets a document in cache. - /// - /// The document collection name. - /// The document key. - /// The document to cache. - /// A task that represents the asynchronous operation. - public async Task Set(string collection, string key, Document document) - { - var peerConfig = await _peerNodeConfigurationProvider.GetConfiguration(); + /// + /// Sets a document in cache. + /// + /// The document collection name. + /// The document key. + /// The document to cache. + /// A task that represents the asynchronous operation. 
+ public async Task Set(string collection, string key, Document document) + { + var peerConfig = await _peerNodeConfigurationProvider.GetConfiguration(); lock (_lock) { @@ -118,7 +117,7 @@ public class DocumentCache : IDocumentCache // Evict if full if (_cache.Count >= peerConfig.MaxDocumentCacheSize) { - var oldest = _lru.Last!.Value; + string oldest = _lru.Last!.Value; _lru.RemoveLast(); _cache.Remove(oldest); _logger.LogTrace("Evicted oldest cache entry {Key}", oldest); @@ -130,15 +129,15 @@ public class DocumentCache : IDocumentCache } } - /// - /// Removes a document from cache. - /// - /// The document collection name. - /// The document key. - public void Remove(string collection, string key) - { - lock (_lock) - { + /// + /// Removes a document from cache. + /// + /// The document collection name. + /// The document key. + public void Remove(string collection, string key) + { + lock (_lock) + { var cacheKey = $"{collection}:{key}"; if (_cache.TryGetValue(cacheKey, out var entry)) @@ -151,13 +150,13 @@ public class DocumentCache : IDocumentCache } /// - /// Clears all cached documents. + /// Clears all cached documents. /// public void Clear() { lock (_lock) { - var count = _cache.Count; + int count = _cache.Count; _cache.Clear(); _lru.Clear(); _logger.LogInformation("Cleared cache ({Count} entries)", count); @@ -165,15 +164,15 @@ public class DocumentCache : IDocumentCache } /// - /// Gets cache statistics. + /// Gets cache statistics. /// public (long Hits, long Misses, int Size, double HitRate) GetStatistics() { lock (_lock) { - var total = _hits + _misses; - var hitRate = total > 0 ? (double)_hits / total : 0; + long total = _hits + _misses; + double hitRate = total > 0 ? 
(double)_hits / total : 0; return (_hits, _misses, _cache.Count, hitRate); } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Cache/IDocumentCache.cs b/src/ZB.MOM.WW.CBDDC.Core/Cache/IDocumentCache.cs index 81e710d..6a151ff 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Cache/IDocumentCache.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Cache/IDocumentCache.cs @@ -1,45 +1,44 @@ -ο»Ώusing System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Core.Cache +ο»Ώusing System.Threading.Tasks; + +namespace ZB.MOM.WW.CBDDC.Core.Cache; + +/// +/// Defines operations for caching documents by collection and key. +/// +public interface IDocumentCache { /// - /// Defines operations for caching documents by collection and key. + /// Clears all cached documents. /// - public interface IDocumentCache - { - /// - /// Clears all cached documents. - /// - void Clear(); + void Clear(); - /// - /// Gets a cached document by collection and key. - /// - /// The collection name. - /// The document key. - /// The cached document, or if not found. - Task Get(string collection, string key); + /// + /// Gets a cached document by collection and key. + /// + /// The collection name. + /// The document key. + /// The cached document, or if not found. + Task Get(string collection, string key); - /// - /// Gets cache hit/miss statistics. - /// - /// A tuple containing hits, misses, current size, and hit rate. - (long Hits, long Misses, int Size, double HitRate) GetStatistics(); + /// + /// Gets cache hit/miss statistics. + /// + /// A tuple containing hits, misses, current size, and hit rate. + (long Hits, long Misses, int Size, double HitRate) GetStatistics(); - /// - /// Removes a cached document by collection and key. - /// - /// The collection name. - /// The document key. - void Remove(string collection, string key); + /// + /// Removes a cached document by collection and key. + /// + /// The collection name. + /// The document key. 
+ void Remove(string collection, string key); - /// - /// Adds or updates a cached document. - /// - /// The collection name. - /// The document key. - /// The document to cache. - /// A task that represents the asynchronous operation. - Task Set(string collection, string key, Document document); - } -} + /// + /// Adds or updates a cached document. + /// + /// The collection name. + /// The document key. + /// The document to cache. + /// A task that represents the asynchronous operation. + Task Set(string collection, string key, Document document); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/ChangesAppliedEventArgs.cs b/src/ZB.MOM.WW.CBDDC.Core/ChangesAppliedEventArgs.cs index 26c421c..711d96d 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/ChangesAppliedEventArgs.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/ChangesAppliedEventArgs.cs @@ -1,24 +1,24 @@ -using System; -using System.Collections.Generic; - -namespace ZB.MOM.WW.CBDDC.Core; - -/// -/// Event arguments for when changes are applied to the peer store. -/// +using System; +using System.Collections.Generic; + +namespace ZB.MOM.WW.CBDDC.Core; + +/// +/// Event arguments for when changes are applied to the peer store. +/// public class ChangesAppliedEventArgs : EventArgs { /// - /// Gets the changes that were applied. - /// - public IEnumerable Changes { get; } - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The changes that were applied. public ChangesAppliedEventArgs(IEnumerable changes) { Changes = changes; } -} + + /// + /// Gets the changes that were applied. 
+ /// + public IEnumerable Changes { get; } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/CBDDCHealthCheck.cs b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/CBDDCHealthCheck.cs index 0554c66..d9a50fe 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/CBDDCHealthCheck.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/CBDDCHealthCheck.cs @@ -1,45 +1,44 @@ using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using ZB.MOM.WW.CBDDC.Core.Storage; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Core.Diagnostics; /// -/// Provides health check functionality. +/// Provides health check functionality. /// -public class CBDDCHealthCheck : ICBDDCHealthCheck -{ - private readonly IOplogStore _store; - private readonly ISyncStatusTracker _syncTracker; - private readonly ILogger _logger; - - /// - /// Initializes a new instance of the class. - /// - /// The oplog store used for database health checks. - /// The tracker that provides synchronization status. - /// The logger instance. - public CBDDCHealthCheck( - IOplogStore store, - ISyncStatusTracker syncTracker, - ILogger? logger = null) - { - _store = store ?? throw new ArgumentNullException(nameof(store)); - _syncTracker = syncTracker ?? throw new ArgumentNullException(nameof(syncTracker)); - _logger = logger ?? NullLogger.Instance; - } +public class CBDDCHealthCheck : ICBDDCHealthCheck +{ + private readonly ILogger _logger; + private readonly IOplogStore _store; + private readonly ISyncStatusTracker _syncTracker; - /// - /// Performs a comprehensive health check. - /// - /// A token used to cancel the health check. 
- public async Task CheckAsync(CancellationToken cancellationToken = default) - { + /// + /// Initializes a new instance of the class. + /// + /// The oplog store used for database health checks. + /// The tracker that provides synchronization status. + /// The logger instance. + public CBDDCHealthCheck( + IOplogStore store, + ISyncStatusTracker syncTracker, + ILogger? logger = null) + { + _store = store ?? throw new ArgumentNullException(nameof(store)); + _syncTracker = syncTracker ?? throw new ArgumentNullException(nameof(syncTracker)); + _logger = logger ?? NullLogger.Instance; + } + + /// + /// Performs a comprehensive health check. + /// + /// A token used to cancel the health check. + public async Task CheckAsync(CancellationToken cancellationToken = default) + { var status = new HealthStatus(); // Check database health @@ -65,9 +64,7 @@ public class CBDDCHealthCheck : ICBDDCHealthCheck // Add error messages from sync tracker foreach (var error in syncStatus.SyncErrors.Take(5)) // Last 5 errors - { status.Errors.Add($"{error.Timestamp:yyyy-MM-dd HH:mm:ss} - {error.Message}"); - } // Add metadata status.Metadata["TotalDocumentsSynced"] = syncStatus.TotalDocumentsSynced; @@ -79,4 +76,4 @@ public class CBDDCHealthCheck : ICBDDCHealthCheck return status; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/DiagnosticsModels.cs b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/DiagnosticsModels.cs index 4b3a1f7..e4f2bc3 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/DiagnosticsModels.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/DiagnosticsModels.cs @@ -4,145 +4,145 @@ using System.Collections.Generic; namespace ZB.MOM.WW.CBDDC.Core.Diagnostics; /// -/// Represents the health status of an CBDDC instance. +/// Represents the health status of an CBDDC instance. /// public class HealthStatus { /// - /// Indicates if the database is healthy. + /// Indicates if the database is healthy. 
/// public bool DatabaseHealthy { get; set; } /// - /// Indicates if network connectivity is available. + /// Indicates if network connectivity is available. /// public bool NetworkHealthy { get; set; } /// - /// Number of currently connected peers. + /// Number of currently connected peers. /// public int ConnectedPeers { get; set; } /// - /// Timestamp of the last successful sync operation. + /// Timestamp of the last successful sync operation. /// public DateTime? LastSyncTime { get; set; } /// - /// List of recent errors. + /// List of recent errors. /// public List Errors { get; set; } = new(); /// - /// Overall health status. + /// Overall health status. /// public bool IsHealthy => DatabaseHealthy && NetworkHealthy && Errors.Count == 0; /// - /// Additional diagnostic information. + /// Additional diagnostic information. /// public Dictionary Metadata { get; set; } = new(); } /// -/// Represents the synchronization status. +/// Represents the synchronization status. /// public class SyncStatus { /// - /// Indicates if the node is currently online. + /// Indicates if the node is currently online. /// public bool IsOnline { get; set; } /// - /// Timestamp of the last sync operation. + /// Timestamp of the last sync operation. /// public DateTime? LastSyncTime { get; set; } /// - /// Number of pending operations in the offline queue. + /// Number of pending operations in the offline queue. /// public int PendingOperations { get; set; } /// - /// List of active peer nodes. + /// List of active peer nodes. /// public List ActivePeers { get; set; } = new(); /// - /// Recent sync errors. + /// Recent sync errors. /// public List SyncErrors { get; set; } = new(); /// - /// Total number of documents synced. + /// Total number of documents synced. /// public long TotalDocumentsSynced { get; set; } /// - /// Total bytes transferred. + /// Total bytes transferred. /// public long TotalBytesTransferred { get; set; } } /// -/// Information about a peer node. 
+/// Information about a peer node. /// public class PeerInfo { /// - /// Unique identifier of the peer. + /// Unique identifier of the peer. /// public string NodeId { get; set; } = ""; /// - /// Network address of the peer. + /// Network address of the peer. /// public string Address { get; set; } = ""; /// - /// Last time the peer was seen. + /// Last time the peer was seen. /// public DateTime LastSeen { get; set; } /// - /// Indicates if the peer is currently connected. + /// Indicates if the peer is currently connected. /// public bool IsConnected { get; set; } /// - /// Number of successful syncs with this peer. + /// Number of successful syncs with this peer. /// public int SuccessfulSyncs { get; set; } /// - /// Number of failed syncs with this peer. + /// Number of failed syncs with this peer. /// public int FailedSyncs { get; set; } } /// -/// Represents a synchronization error. +/// Represents a synchronization error. /// public class SyncError { /// - /// Timestamp when the error occurred. + /// Timestamp when the error occurred. /// public DateTime Timestamp { get; set; } /// - /// Error message. + /// Error message. /// public string Message { get; set; } = ""; /// - /// Peer node ID if applicable. + /// Peer node ID if applicable. /// public string? PeerNodeId { get; set; } /// - /// Error code. + /// Error code. /// public string? 
ErrorCode { get; set; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ICBDDCHealthCheck.cs b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ICBDDCHealthCheck.cs index 54761e9..ec8ed9d 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ICBDDCHealthCheck.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ICBDDCHealthCheck.cs @@ -1,15 +1,14 @@ -ο»Ώusing System.Threading; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Core.Diagnostics +ο»Ώusing System.Threading; +using System.Threading.Tasks; + +namespace ZB.MOM.WW.CBDDC.Core.Diagnostics; + +public interface ICBDDCHealthCheck { - public interface ICBDDCHealthCheck - { - /// - /// Performs a health check for the implementing component. - /// - /// Cancellation token. - /// The resulting health status. - Task CheckAsync(CancellationToken cancellationToken = default); - } -} + /// + /// Performs a health check for the implementing component. + /// + /// Cancellation token. + /// The resulting health status. + Task CheckAsync(CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ISyncStatusTracker.cs b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ISyncStatusTracker.cs index 674526a..a1759d6 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ISyncStatusTracker.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/ISyncStatusTracker.cs @@ -1,63 +1,62 @@ -ο»Ώusing System; - -namespace ZB.MOM.WW.CBDDC.Core.Diagnostics +ο»Ώusing System; + +namespace ZB.MOM.WW.CBDDC.Core.Diagnostics; + +/// +/// Tracks synchronization status and peer health metrics. +/// +public interface ISyncStatusTracker { /// - /// Tracks synchronization status and peer health metrics. + /// Removes peer entries that have been inactive longer than the specified threshold. /// - public interface ISyncStatusTracker - { - /// - /// Removes peer entries that have been inactive longer than the specified threshold. - /// - /// The inactivity threshold used to prune peers. 
- void CleanupInactivePeers(TimeSpan inactiveThreshold); + /// The inactivity threshold used to prune peers. + void CleanupInactivePeers(TimeSpan inactiveThreshold); - /// - /// Gets the current synchronization status snapshot. - /// - /// The current . - SyncStatus GetStatus(); + /// + /// Gets the current synchronization status snapshot. + /// + /// The current . + SyncStatus GetStatus(); - /// - /// Records an error encountered during synchronization. - /// - /// The error message. - /// The related peer node identifier, if available. - /// An optional error code. - void RecordError(string message, string? peerNodeId = null, string? errorCode = null); + /// + /// Records an error encountered during synchronization. + /// + /// The error message. + /// The related peer node identifier, if available. + /// An optional error code. + void RecordError(string message, string? peerNodeId = null, string? errorCode = null); - /// - /// Records a failed operation for the specified peer. - /// - /// The peer node identifier. - void RecordPeerFailure(string nodeId); + /// + /// Records a failed operation for the specified peer. + /// + /// The peer node identifier. + void RecordPeerFailure(string nodeId); - /// - /// Records a successful operation for the specified peer. - /// - /// The peer node identifier. - void RecordPeerSuccess(string nodeId); + /// + /// Records a successful operation for the specified peer. + /// + /// The peer node identifier. + void RecordPeerSuccess(string nodeId); - /// - /// Records synchronization throughput metrics. - /// - /// The number of synchronized documents. - /// The number of bytes transferred. - void RecordSync(int documentCount, long bytesTransferred); + /// + /// Records synchronization throughput metrics. + /// + /// The number of synchronized documents. + /// The number of bytes transferred. + void RecordSync(int documentCount, long bytesTransferred); - /// - /// Sets whether the local node is currently online. 
- /// - /// A value indicating whether the node is online. - void SetOnlineStatus(bool isOnline); + /// + /// Sets whether the local node is currently online. + /// + /// A value indicating whether the node is online. + void SetOnlineStatus(bool isOnline); - /// - /// Updates peer connectivity details. - /// - /// The peer node identifier. - /// The peer network address. - /// A value indicating whether the peer is connected. - void UpdatePeer(string nodeId, string address, bool isConnected); - } -} + /// + /// Updates peer connectivity details. + /// + /// The peer node identifier. + /// The peer network address. + /// A value indicating whether the peer is connected. + void UpdatePeer(string nodeId, string address, bool isConnected); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/SyncStatusTracker.cs b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/SyncStatusTracker.cs index 3eae21a..553f1f2 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/SyncStatusTracker.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Diagnostics/SyncStatusTracker.cs @@ -1,44 +1,44 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; +using System; +using System.Collections.Generic; +using System.Linq; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; namespace ZB.MOM.WW.CBDDC.Core.Diagnostics; /// -/// Tracks synchronization status and provides diagnostics. +/// Tracks synchronization status and provides diagnostics. /// -public class SyncStatusTracker : ISyncStatusTracker -{ - private readonly ILogger _logger; - private readonly object _lock = new(); - - private bool _isOnline = false; - private DateTime? 
_lastSyncTime; +public class SyncStatusTracker : ISyncStatusTracker +{ + private const int MaxErrorHistory = 50; private readonly List _activePeers = new(); + private readonly object _lock = new(); + private readonly ILogger _logger; private readonly Queue _recentErrors = new(); - private long _totalDocumentsSynced = 0; - private long _totalBytesTransferred = 0; - private const int MaxErrorHistory = 50; - - /// - /// Initializes a new instance of the class. - /// - /// Optional logger instance. - public SyncStatusTracker(ILogger? logger = null) - { - _logger = logger ?? NullLogger.Instance; - } - - /// - /// Updates online status. - /// - /// Whether the node is currently online. - public void SetOnlineStatus(bool isOnline) - { - lock (_lock) - { + + private bool _isOnline; + private DateTime? _lastSyncTime; + private long _totalBytesTransferred; + private long _totalDocumentsSynced; + + /// + /// Initializes a new instance of the class. + /// + /// Optional logger instance. + public SyncStatusTracker(ILogger? logger = null) + { + _logger = logger ?? NullLogger.Instance; + } + + /// + /// Updates online status. + /// + /// Whether the node is currently online. + public void SetOnlineStatus(bool isOnline) + { + lock (_lock) + { if (_isOnline != isOnline) { _isOnline = isOnline; @@ -47,15 +47,15 @@ public class SyncStatusTracker : ISyncStatusTracker } } - /// - /// Records a successful sync operation. - /// - /// The number of documents synchronized. - /// The number of bytes transferred. - public void RecordSync(int documentCount, long bytesTransferred) - { - lock (_lock) - { + /// + /// Records a successful sync operation. + /// + /// The number of documents synchronized. + /// The number of bytes transferred. 
+ public void RecordSync(int documentCount, long bytesTransferred) + { + lock (_lock) + { _lastSyncTime = DateTime.UtcNow; _totalDocumentsSynced += documentCount; _totalBytesTransferred += bytesTransferred; @@ -64,16 +64,16 @@ public class SyncStatusTracker : ISyncStatusTracker } } - /// - /// Records a sync error. - /// - /// The error message. - /// The related peer node identifier, if available. - /// The error code, if available. - public void RecordError(string message, string? peerNodeId = null, string? errorCode = null) - { - lock (_lock) - { + /// + /// Records a sync error. + /// + /// The error message. + /// The related peer node identifier, if available. + /// The error code, if available. + public void RecordError(string message, string? peerNodeId = null, string? errorCode = null) + { + lock (_lock) + { var error = new SyncError { Timestamp = DateTime.UtcNow, @@ -84,25 +84,22 @@ public class SyncStatusTracker : ISyncStatusTracker _recentErrors.Enqueue(error); - while (_recentErrors.Count > MaxErrorHistory) - { - _recentErrors.Dequeue(); - } + while (_recentErrors.Count > MaxErrorHistory) _recentErrors.Dequeue(); _logger.LogWarning("Sync error recorded: {Message} (Peer: {Peer})", message, peerNodeId ?? "N/A"); } } - /// - /// Updates peer information. - /// - /// The peer node identifier. - /// The peer address. - /// Whether the peer is currently connected. - public void UpdatePeer(string nodeId, string address, bool isConnected) - { - lock (_lock) - { + /// + /// Updates peer information. + /// + /// The peer node identifier. + /// The peer address. + /// Whether the peer is currently connected. + public void UpdatePeer(string nodeId, string address, bool isConnected) + { + lock (_lock) + { var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId); if (peer == null) @@ -126,40 +123,34 @@ public class SyncStatusTracker : ISyncStatusTracker } } - /// - /// Records successful sync with a peer. - /// - /// The peer node identifier. 
- public void RecordPeerSuccess(string nodeId) - { - lock (_lock) - { + /// + /// Records successful sync with a peer. + /// + /// The peer node identifier. + public void RecordPeerSuccess(string nodeId) + { + lock (_lock) + { var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId); - if (peer != null) - { - peer.SuccessfulSyncs++; - } - } - } - - /// - /// Records failed sync with a peer. - /// - /// The peer node identifier. - public void RecordPeerFailure(string nodeId) - { - lock (_lock) - { - var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId); - if (peer != null) - { - peer.FailedSyncs++; - } + if (peer != null) peer.SuccessfulSyncs++; } } /// - /// Gets current sync status. + /// Records failed sync with a peer. + /// + /// The peer node identifier. + public void RecordPeerFailure(string nodeId) + { + lock (_lock) + { + var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId); + if (peer != null) peer.FailedSyncs++; + } + } + + /// + /// Gets current sync status. /// public SyncStatus GetStatus() { @@ -178,21 +169,18 @@ public class SyncStatusTracker : ISyncStatusTracker } } - /// - /// Cleans up inactive peers. - /// - /// The inactivity threshold used to remove peers. - public void CleanupInactivePeers(TimeSpan inactiveThreshold) - { - lock (_lock) - { + /// + /// Cleans up inactive peers. + /// + /// The inactivity threshold used to remove peers. 
+ public void CleanupInactivePeers(TimeSpan inactiveThreshold) + { + lock (_lock) + { var cutoff = DateTime.UtcNow - inactiveThreshold; - var removed = _activePeers.RemoveAll(p => p.LastSeen < cutoff); + int removed = _activePeers.RemoveAll(p => p.LastSeen < cutoff); - if (removed > 0) - { - _logger.LogInformation("Removed {Count} inactive peers", removed); - } + if (removed > 0) _logger.LogInformation("Removed {Count} inactive peers", removed); } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Document.cs b/src/ZB.MOM.WW.CBDDC.Core/Document.cs index bae037e..6fd8ea9 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Document.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Document.cs @@ -1,41 +1,15 @@ -using ZB.MOM.WW.CBDDC.Core.Sync; -using System; -using System.Text.Json; - +using System.Text.Json; +using ZB.MOM.WW.CBDDC.Core.Sync; + namespace ZB.MOM.WW.CBDDC.Core; /// -/// Represents a stored document and its synchronization metadata. +/// Represents a stored document and its synchronization metadata. /// public class Document { /// - /// Gets the collection that contains the document. - /// - public string Collection { get; private set; } - - /// - /// Gets the document key. - /// - public string Key { get; private set; } - - /// - /// Gets the document content. - /// - public JsonElement Content { get; private set; } - - /// - /// Gets the timestamp of the latest applied update. - /// - public HlcTimestamp UpdatedAt { get; private set; } - - /// - /// Gets a value indicating whether the document is deleted. - /// - public bool IsDeleted { get; private set; } - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The collection that contains the document. /// The document key. @@ -52,32 +26,59 @@ public class Document } /// - /// Merges a remote operation into the current document using last-write-wins or a conflict resolver. + /// Gets the collection that contains the document. 
+ /// + public string Collection { get; } + + /// + /// Gets the document key. + /// + public string Key { get; } + + /// + /// Gets the document content. + /// + public JsonElement Content { get; private set; } + + /// + /// Gets the timestamp of the latest applied update. + /// + public HlcTimestamp UpdatedAt { get; private set; } + + /// + /// Gets a value indicating whether the document is deleted. + /// + public bool IsDeleted { get; private set; } + + /// + /// Merges a remote operation into the current document using last-write-wins or a conflict resolver. /// /// The remote operation to merge. /// An optional conflict resolver for custom merge behavior. public void Merge(OplogEntry oplogEntry, IConflictResolver? resolver = null) { if (oplogEntry == null) return; - if (Collection != oplogEntry.Collection) return; - if (Key != oplogEntry.Key) return; - if (resolver == null) - { - //last wins - if (UpdatedAt <= oplogEntry.Timestamp) - { - Content = oplogEntry.Payload ?? default; - UpdatedAt = oplogEntry.Timestamp; - IsDeleted = oplogEntry.Operation == OperationType.Delete; - } - return; - } - var resolutionResult = resolver.Resolve(this, oplogEntry); - if (resolutionResult.ShouldApply && resolutionResult.MergedDocument != null) - { - Content = resolutionResult.MergedDocument.Content; - UpdatedAt = resolutionResult.MergedDocument.UpdatedAt; - IsDeleted = resolutionResult.MergedDocument.IsDeleted; - } - } -} + if (Collection != oplogEntry.Collection) return; + if (Key != oplogEntry.Key) return; + if (resolver == null) + { + //last wins + if (UpdatedAt <= oplogEntry.Timestamp) + { + Content = oplogEntry.Payload ?? 
default; + UpdatedAt = oplogEntry.Timestamp; + IsDeleted = oplogEntry.Operation == OperationType.Delete; + } + + return; + } + + var resolutionResult = resolver.Resolve(this, oplogEntry); + if (resolutionResult.ShouldApply && resolutionResult.MergedDocument != null) + { + Content = resolutionResult.MergedDocument.Content; + UpdatedAt = resolutionResult.MergedDocument.UpdatedAt; + IsDeleted = resolutionResult.MergedDocument.IsDeleted; + } + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Exceptions/CBDDCExceptions.cs b/src/ZB.MOM.WW.CBDDC.Core/Exceptions/CBDDCExceptions.cs index 50847e0..c249388 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Exceptions/CBDDCExceptions.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Exceptions/CBDDCExceptions.cs @@ -1,19 +1,14 @@ -using System; - -namespace ZB.MOM.WW.CBDDC.Core.Exceptions; - -/// -/// Base exception for all CBDDC-related errors. -/// -public class CBDDCException : Exception -{ - /// - /// Error code for programmatic error handling. - /// - public string ErrorCode { get; } +using System; +namespace ZB.MOM.WW.CBDDC.Core.Exceptions; + +/// +/// Base exception for all CBDDC-related errors. +/// +public class CBDDCException : Exception +{ /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The application-specific error code. /// The exception message. @@ -24,7 +19,7 @@ public class CBDDCException : Exception } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The application-specific error code. /// The exception message. @@ -34,137 +29,151 @@ public class CBDDCException : Exception { ErrorCode = errorCode; } -} - -/// -/// Exception thrown when network operations fail. -/// + + /// + /// Error code for programmatic error handling. + /// + public string ErrorCode { get; } +} + +/// +/// Exception thrown when network operations fail. 
+/// public class NetworkException : CBDDCException { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. public NetworkException(string message) - : base("NETWORK_ERROR", message) { } + : base("NETWORK_ERROR", message) + { + } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. /// The exception that caused the current exception. public NetworkException(string message, Exception innerException) - : base("NETWORK_ERROR", message, innerException) { } + : base("NETWORK_ERROR", message, innerException) + { + } } - -/// -/// Exception thrown when persistence operations fail. -/// + +/// +/// Exception thrown when persistence operations fail. +/// public class PersistenceException : CBDDCException { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. public PersistenceException(string message) - : base("PERSISTENCE_ERROR", message) { } + : base("PERSISTENCE_ERROR", message) + { + } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. /// The exception that caused the current exception. public PersistenceException(string message, Exception innerException) - : base("PERSISTENCE_ERROR", message, innerException) { } + : base("PERSISTENCE_ERROR", message, innerException) + { + } } - -/// -/// Exception thrown when synchronization operations fail. -/// + +/// +/// Exception thrown when synchronization operations fail. +/// public class SyncException : CBDDCException { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. public SyncException(string message) - : base("SYNC_ERROR", message) { } + : base("SYNC_ERROR", message) + { + } /// - /// Initializes a new instance of the class. 
+ /// Initializes a new instance of the class. /// /// The exception message. /// The exception that caused the current exception. public SyncException(string message, Exception innerException) - : base("SYNC_ERROR", message, innerException) { } + : base("SYNC_ERROR", message, innerException) + { + } } - -/// -/// Exception thrown when configuration is invalid. -/// + +/// +/// Exception thrown when configuration is invalid. +/// public class ConfigurationException : CBDDCException { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. public ConfigurationException(string message) - : base("CONFIG_ERROR", message) { } + : base("CONFIG_ERROR", message) + { + } } - -/// -/// Exception thrown when database corruption is detected. -/// + +/// +/// Exception thrown when database corruption is detected. +/// public class DatabaseCorruptionException : PersistenceException { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. public DatabaseCorruptionException(string message) - : base(message) { } + : base(message) + { + } /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. /// The exception that caused the current exception. public DatabaseCorruptionException(string message, Exception innerException) - : base(message, innerException) { } + : base(message, innerException) + { + } } - -/// -/// Exception thrown when a timeout occurs. -/// + +/// +/// Exception thrown when a timeout occurs. +/// public class TimeoutException : CBDDCException { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The operation that timed out. /// The timeout in milliseconds. 
public TimeoutException(string operation, int timeoutMs) - : base("TIMEOUT_ERROR", $"Operation '{operation}' timed out after {timeoutMs}ms") { } + : base("TIMEOUT_ERROR", $"Operation '{operation}' timed out after {timeoutMs}ms") + { + } } - /// -/// Exception thrown when a document is not found in a collection. +/// Exception thrown when a document is not found in a collection. /// public class DocumentNotFoundException : PersistenceException { /// - /// Gets the document key that was not found. - /// - public string Key { get; } - - /// - /// Gets the collection where the document was searched. - /// - public string Collection { get; } - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The collection where the document was searched. /// The document key that was not found. @@ -174,16 +183,28 @@ public class DocumentNotFoundException : PersistenceException Collection = collection; Key = key; } + + /// + /// Gets the document key that was not found. + /// + public string Key { get; } + + /// + /// Gets the collection where the document was searched. + /// + public string Collection { get; } } /// -/// Exception thrown when a concurrency conflict occurs during persistence operations. +/// Exception thrown when a concurrency conflict occurs during persistence operations. /// public class CBDDCConcurrencyException : PersistenceException { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. 
- public CBDDCConcurrencyException(string message) : base(message) { } -} + public CBDDCConcurrencyException(string message) : base(message) + { + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/HlcTimestamp.cs b/src/ZB.MOM.WW.CBDDC.Core/HlcTimestamp.cs index eeb6163..245491a 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/HlcTimestamp.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/HlcTimestamp.cs @@ -1,31 +1,31 @@ -using System; - -namespace ZB.MOM.WW.CBDDC.Core; - -/// -/// Represents a Hybrid Logical Clock timestamp. -/// Provides a Total Ordering of events in a distributed system. -/// Implements value semantics and comparable interfaces. -/// +using System; + +namespace ZB.MOM.WW.CBDDC.Core; + +/// +/// Represents a Hybrid Logical Clock timestamp. +/// Provides a Total Ordering of events in a distributed system. +/// Implements value semantics and comparable interfaces. +/// public readonly struct HlcTimestamp : IComparable, IComparable, IEquatable { /// - /// Gets the physical time component of the timestamp. + /// Gets the physical time component of the timestamp. /// public long PhysicalTime { get; } /// - /// Gets the logical counter component used to order events with equal physical time. + /// Gets the logical counter component used to order events with equal physical time. /// public int LogicalCounter { get; } /// - /// Gets the node identifier that produced this timestamp. + /// Gets the node identifier that produced this timestamp. /// public string NodeId { get; } /// - /// Initializes a new instance of the struct. + /// Initializes a new instance of the struct. /// /// The physical time component. /// The logical counter component. @@ -35,36 +35,36 @@ public readonly struct HlcTimestamp : IComparable, IComparable, IE PhysicalTime = physicalTime; LogicalCounter = logicalCounter; NodeId = nodeId ?? throw new ArgumentNullException(nameof(nodeId)); - } - + } + /// - /// Compares two timestamps to establish a total order. 
- /// Order: PhysicalTime -> LogicalCounter -> NodeId (lexicographical tie-breaker). + /// Compares two timestamps to establish a total order. + /// Order: PhysicalTime -> LogicalCounter -> NodeId (lexicographical tie-breaker). /// /// The other timestamp to compare with this instance. /// - /// A value less than zero if this instance is earlier than , zero if they are equal, - /// or greater than zero if this instance is later than . + /// A value less than zero if this instance is earlier than , zero if they are equal, + /// or greater than zero if this instance is later than . /// public int CompareTo(HlcTimestamp other) { int timeComparison = PhysicalTime.CompareTo(other.PhysicalTime); if (timeComparison != 0) return timeComparison; - - int counterComparison = LogicalCounter.CompareTo(other.LogicalCounter); - if (counterComparison != 0) return counterComparison; - + + int counterComparison = LogicalCounter.CompareTo(other.LogicalCounter); + if (counterComparison != 0) return counterComparison; + // Use Ordinal comparison for consistent tie-breaking across cultures/platforms return string.Compare(NodeId, other.NodeId, StringComparison.Ordinal); } /// - /// Compares this instance with another object. + /// Compares this instance with another object. /// /// The object to compare with this instance. /// - /// A value less than zero if this instance is earlier than , zero if equal, or greater - /// than zero if later. + /// A value less than zero if this instance is earlier than , zero if equal, or greater + /// than zero if later. /// public int CompareTo(object? obj) { @@ -74,10 +74,10 @@ public readonly struct HlcTimestamp : IComparable, IComparable, IE } /// - /// Determines whether this instance and another timestamp are equal. + /// Determines whether this instance and another timestamp are equal. /// /// The other timestamp to compare. - /// if the timestamps are equal; otherwise, . + /// if the timestamps are equal; otherwise, . 
public bool Equals(HlcTimestamp other) { return PhysicalTime == other.PhysicalTime && @@ -96,42 +96,68 @@ public readonly struct HlcTimestamp : IComparable, IComparable, IE { unchecked { - var hashCode = PhysicalTime.GetHashCode(); - hashCode = (hashCode * 397) ^ LogicalCounter; - // Ensure HashCode uses the same comparison logic as Equals/CompareTo - // Handle null NodeId gracefully (possible via default(HlcTimestamp)) - hashCode = (hashCode * 397) ^ (NodeId != null ? StringComparer.Ordinal.GetHashCode(NodeId) : 0); - return hashCode; - } - } - - public static bool operator ==(HlcTimestamp left, HlcTimestamp right) => left.Equals(right); - public static bool operator !=(HlcTimestamp left, HlcTimestamp right) => !left.Equals(right); - - // Standard comparison operators making usage in SyncOrchestrator cleaner (e.g., remote > local) - public static bool operator <(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) < 0; - public static bool operator <=(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) <= 0; - public static bool operator >(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) > 0; - public static bool operator >=(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) >= 0; + int hashCode = PhysicalTime.GetHashCode(); + hashCode = (hashCode * 397) ^ LogicalCounter; + // Ensure HashCode uses the same comparison logic as Equals/CompareTo + // Handle null NodeId gracefully (possible via default(HlcTimestamp)) + hashCode = (hashCode * 397) ^ (NodeId != null ? 
StringComparer.Ordinal.GetHashCode(NodeId) : 0); + return hashCode; + } + } + + public static bool operator ==(HlcTimestamp left, HlcTimestamp right) + { + return left.Equals(right); + } + + public static bool operator !=(HlcTimestamp left, HlcTimestamp right) + { + return !left.Equals(right); + } + + // Standard comparison operators making usage in SyncOrchestrator cleaner (e.g., remote > local) + public static bool operator <(HlcTimestamp left, HlcTimestamp right) + { + return left.CompareTo(right) < 0; + } + + public static bool operator <=(HlcTimestamp left, HlcTimestamp right) + { + return left.CompareTo(right) <= 0; + } + + public static bool operator >(HlcTimestamp left, HlcTimestamp right) + { + return left.CompareTo(right) > 0; + } + + public static bool operator >=(HlcTimestamp left, HlcTimestamp right) + { + return left.CompareTo(right) >= 0; + } /// - public override string ToString() => FormattableString.Invariant($"{PhysicalTime}:{LogicalCounter}:{NodeId}"); + public override string ToString() + { + return FormattableString.Invariant($"{PhysicalTime}:{LogicalCounter}:{NodeId}"); + } /// - /// Parses a timestamp string. + /// Parses a timestamp string. /// /// The string to parse, in the format "PhysicalTime:LogicalCounter:NodeId". - /// The parsed . + /// The parsed . public static HlcTimestamp Parse(string s) { if (string.IsNullOrEmpty(s)) throw new ArgumentNullException(nameof(s)); - var parts = s.Split(':'); - if (parts.Length != 3) throw new FormatException("Invalid HlcTimestamp format. 
Expected 'PhysicalTime:LogicalCounter:NodeId'."); - if (!long.TryParse(parts[0], out var physicalTime)) - throw new FormatException("Invalid PhysicalTime component in HlcTimestamp."); - if (!int.TryParse(parts[1], out var logicalCounter)) - throw new FormatException("Invalid LogicalCounter component in HlcTimestamp."); - var nodeId = parts[2]; - return new HlcTimestamp(physicalTime, logicalCounter, nodeId); - } -} + string[] parts = s.Split(':'); + if (parts.Length != 3) + throw new FormatException("Invalid HlcTimestamp format. Expected 'PhysicalTime:LogicalCounter:NodeId'."); + if (!long.TryParse(parts[0], out long physicalTime)) + throw new FormatException("Invalid PhysicalTime component in HlcTimestamp."); + if (!int.TryParse(parts[1], out int logicalCounter)) + throw new FormatException("Invalid LogicalCounter component in HlcTimestamp."); + string nodeId = parts[2]; + return new HlcTimestamp(physicalTime, logicalCounter, nodeId); + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Management/IPeerManagementService.cs b/src/ZB.MOM.WW.CBDDC.Core/Management/IPeerManagementService.cs index 87b0855..1d19d54 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Management/IPeerManagementService.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Management/IPeerManagementService.cs @@ -6,13 +6,13 @@ using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Management; /// -/// Service for managing remote peer configurations. -/// Provides CRUD operations for adding, removing, enabling/disabling remote cloud nodes. +/// Service for managing remote peer configurations. +/// Provides CRUD operations for adding, removing, enabling/disabling remote cloud nodes. /// public interface IPeerManagementService { /// - /// Adds a static remote peer with simple authentication. + /// Adds a static remote peer with simple authentication. /// /// Unique identifier for the remote peer. /// Network address (hostname:port) of the remote peer. 
@@ -20,14 +20,14 @@ public interface IPeerManagementService Task AddStaticPeerAsync(string nodeId, string address, CancellationToken cancellationToken = default); /// - /// Removes a remote peer configuration. + /// Removes a remote peer configuration. /// /// Unique identifier of the peer to remove. /// Cancellation token. Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default); /// - /// Removes confirmation tracking for a peer and optionally removes static remote configuration. + /// Removes confirmation tracking for a peer and optionally removes static remote configuration. /// /// Unique identifier of the peer to untrack. /// When true, also removes static remote peer configuration. @@ -38,23 +38,23 @@ public interface IPeerManagementService CancellationToken cancellationToken = default); /// - /// Retrieves all configured remote peers. + /// Retrieves all configured remote peers. /// /// Cancellation token. /// Collection of remote peer configurations. Task> GetAllRemotePeersAsync(CancellationToken cancellationToken = default); /// - /// Enables synchronization with a remote peer. + /// Enables synchronization with a remote peer. /// /// Unique identifier of the peer to enable. /// Cancellation token. Task EnablePeerAsync(string nodeId, CancellationToken cancellationToken = default); /// - /// Disables synchronization with a remote peer (keeps configuration). + /// Disables synchronization with a remote peer (keeps configuration). /// /// Unique identifier of the peer to disable. /// Cancellation token. 
Task DisablePeerAsync(string nodeId, CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Management/PeerManagementService.cs b/src/ZB.MOM.WW.CBDDC.Core/Management/PeerManagementService.cs index 979e264..57e957b 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Management/PeerManagementService.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Management/PeerManagementService.cs @@ -2,29 +2,28 @@ using System; using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Storage; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Core.Management; /// -/// Implementation of peer management service. -/// Provides CRUD operations for managing remote peer configurations. -/// -/// Remote peer configurations are stored in a synchronized collection and automatically -/// replicated across all nodes in the cluster. Any change made on one node will be -/// synchronized to all other nodes through the normal CBDDC sync process. +/// Implementation of peer management service. +/// Provides CRUD operations for managing remote peer configurations. +/// Remote peer configurations are stored in a synchronized collection and automatically +/// replicated across all nodes in the cluster. Any change made on one node will be +/// synchronized to all other nodes through the normal CBDDC sync process. /// public class PeerManagementService : IPeerManagementService { - private readonly IPeerConfigurationStore _store; - private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore; private readonly ILogger _logger; + private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore; + private readonly IPeerConfigurationStore _store; /// - /// Initializes a new instance of the PeerManagementService class. 
+ /// Initializes a new instance of the PeerManagementService class. /// /// Database instance for accessing the synchronized collection. /// Peer confirmation tracking store. @@ -35,12 +34,13 @@ public class PeerManagementService : IPeerManagementService ILogger? logger = null) { _store = store ?? throw new ArgumentNullException(nameof(store)); - _peerOplogConfirmationStore = peerOplogConfirmationStore ?? throw new ArgumentNullException(nameof(peerOplogConfirmationStore)); + _peerOplogConfirmationStore = peerOplogConfirmationStore ?? + throw new ArgumentNullException(nameof(peerOplogConfirmationStore)); _logger = logger ?? NullLogger.Instance; } /// - /// Adds or updates a static remote peer configuration. + /// Adds or updates a static remote peer configuration. /// /// The unique node identifier of the peer. /// The peer network address in host:port format. @@ -60,22 +60,23 @@ public class PeerManagementService : IPeerManagementService }; await _store.SaveRemotePeerAsync(config, cancellationToken); - _logger.LogInformation("Added static remote peer: {NodeId} at {Address} (will sync to all cluster nodes)", nodeId, address); + _logger.LogInformation("Added static remote peer: {NodeId} at {Address} (will sync to all cluster nodes)", + nodeId, address); } /// - /// Removes a remote peer configuration. + /// Removes a remote peer configuration. /// /// The unique node identifier of the peer to remove. /// A token used to cancel the operation. /// A task that represents the asynchronous operation. public async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default) { - await RemovePeerTrackingAsync(nodeId, removeRemoteConfig: true, cancellationToken); + await RemovePeerTrackingAsync(nodeId, true, cancellationToken); } /// - /// Removes peer tracking and optionally removes remote peer configuration. + /// Removes peer tracking and optionally removes remote peer configuration. /// /// The unique node identifier of the peer to untrack. 
/// When true, also removes static remote peer configuration. @@ -93,7 +94,8 @@ public class PeerManagementService : IPeerManagementService if (removeRemoteConfig) { await _store.RemoveRemotePeerAsync(nodeId, cancellationToken); - _logger.LogInformation("Removed remote peer and tracking: {NodeId} (will sync to all cluster nodes)", nodeId); + _logger.LogInformation("Removed remote peer and tracking: {NodeId} (will sync to all cluster nodes)", + nodeId); return; } @@ -101,17 +103,18 @@ public class PeerManagementService : IPeerManagementService } /// - /// Gets all configured remote peers. + /// Gets all configured remote peers. /// /// A token used to cancel the operation. /// A task that represents the asynchronous operation. The task result contains remote peer configurations. - public async Task> GetAllRemotePeersAsync(CancellationToken cancellationToken = default) + public async Task> GetAllRemotePeersAsync( + CancellationToken cancellationToken = default) { return await _store.GetRemotePeersAsync(cancellationToken); } /// - /// Enables a configured remote peer. + /// Enables a configured remote peer. /// /// The unique node identifier of the peer to enable. /// A token used to cancel the operation. @@ -122,10 +125,7 @@ public class PeerManagementService : IPeerManagementService var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken); - if (peer == null) - { - return; // Peer not found, nothing to enable - } + if (peer == null) return; // Peer not found, nothing to enable if (!peer.IsEnabled) { @@ -136,7 +136,7 @@ public class PeerManagementService : IPeerManagementService } /// - /// Disables a configured remote peer. + /// Disables a configured remote peer. /// /// The unique node identifier of the peer to disable. /// A token used to cancel the operation. 
@@ -147,10 +147,7 @@ public class PeerManagementService : IPeerManagementService var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken); - if (peer == null) - { - return; // Peer not found, nothing to disable - } + if (peer == null) return; // Peer not found, nothing to disable if (peer.IsEnabled) { @@ -163,23 +160,16 @@ public class PeerManagementService : IPeerManagementService private static void ValidateNodeId(string nodeId) { if (string.IsNullOrWhiteSpace(nodeId)) - { throw new ArgumentException("NodeId cannot be null or empty", nameof(nodeId)); - } } private static void ValidateAddress(string address) { if (string.IsNullOrWhiteSpace(address)) - { throw new ArgumentException("Address cannot be null or empty", nameof(address)); - } // Basic format validation (should contain host:port) if (!address.Contains(':')) - { throw new ArgumentException("Address must be in format 'host:port'", nameof(address)); - } } - -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Network/IPeerNodeConfigurationProvider.cs b/src/ZB.MOM.WW.CBDDC.Core/Network/IPeerNodeConfigurationProvider.cs index d92620b..79d09e9 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Network/IPeerNodeConfigurationProvider.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Network/IPeerNodeConfigurationProvider.cs @@ -1,38 +1,40 @@ -using System; using System.Threading.Tasks; -namespace ZB.MOM.WW.CBDDC.Core.Network; - -/// -/// Represents a method that handles peer node configuration change notifications. -/// -/// The source of the event. -/// The updated peer node configuration. -public delegate void PeerNodeConfigurationChangedEventHandler(object? sender, PeerNodeConfiguration newConfig); +namespace ZB.MOM.WW.CBDDC.Core.Network; /// -/// Defines a contract for retrieving and monitoring configuration settings for a peer node. +/// Represents a method that handles peer node configuration change notifications. 
/// -/// Implementations of this interface provide access to the current configuration and notify subscribers -/// when configuration changes occur. This interface is typically used by components that require up-to-date -/// configuration information for peer-to-peer networking scenarios. +/// The source of the event. +/// The updated peer node configuration. +public delegate void PeerNodeConfigurationChangedEventHandler(object? sender, PeerNodeConfiguration newConfig); + +/// +/// Defines a contract for retrieving and monitoring configuration settings for a peer node. +/// +/// +/// Implementations of this interface provide access to the current configuration and notify subscribers +/// when configuration changes occur. This interface is typically used by components that require up-to-date +/// configuration information for peer-to-peer networking scenarios. +/// public interface IPeerNodeConfigurationProvider { - /// - /// Asynchronously retrieves the current configuration settings for the peer node. - /// - /// - /// A task that represents the asynchronous operation. The task result contains the current - /// . - /// - public Task GetConfiguration(); + /// + /// Asynchronously retrieves the current configuration settings for the peer node. + /// + /// + /// A task that represents the asynchronous operation. The task result contains the current + /// . + /// + public Task GetConfiguration(); /// - /// Occurs when the configuration of the peer node changes. + /// Occurs when the configuration of the peer node changes. /// - /// Subscribe to this event to be notified when any configuration settings for the peer node are - /// modified. Event handlers can use this notification to update dependent components or respond to configuration - /// changes as needed. - + /// + /// Subscribe to this event to be notified when any configuration settings for the peer node are + /// modified. 
Event handlers can use this notification to update dependent components or respond to configuration + /// changes as needed. + /// public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Network/NodeRole.cs b/src/ZB.MOM.WW.CBDDC.Core/Network/NodeRole.cs index 5864cdf..833ccc3 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Network/NodeRole.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Network/NodeRole.cs @@ -1,20 +1,20 @@ namespace ZB.MOM.WW.CBDDC.Core.Network; /// -/// Defines the role of a node in the distributed network cluster. +/// Defines the role of a node in the distributed network cluster. /// public enum NodeRole { /// - /// Standard member node that synchronizes only within the local area network. - /// Does not connect to cloud remote nodes. + /// Standard member node that synchronizes only within the local area network. + /// Does not connect to cloud remote nodes. /// Member = 0, /// - /// Leader node that acts as a gateway to cloud remote nodes. - /// Elected via the Bully algorithm (lexicographically smallest NodeId). - /// Responsible for synchronizing local cluster changes with cloud nodes. + /// Leader node that acts as a gateway to cloud remote nodes. + /// Elected via the Bully algorithm (lexicographically smallest NodeId). + /// Responsible for synchronizing local cluster changes with cloud nodes. /// CloudGateway = 1 -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNode.cs b/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNode.cs index 0c019a4..6ee13d9 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNode.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNode.cs @@ -1,53 +1,17 @@ using System; using System.Collections.Generic; -using System.Linq; namespace ZB.MOM.WW.CBDDC.Core.Network; /// -/// Represents a peer node in a distributed network, including its unique identifier, network address, and last seen -/// timestamp. 
+/// Represents a peer node in a distributed network, including its unique identifier, network address, and last seen +/// timestamp. /// public class PeerNode { /// - /// Gets the unique identifier for the node. - /// - public string NodeId { get; } - - /// - /// Gets the address associated with the current instance. - /// - public string Address { get; } - - /// - /// Gets the date and time when the entity was last observed or updated. - /// - public DateTimeOffset LastSeen { get; } - - /// - /// Gets the configuration settings for the peer node. - /// - public PeerNodeConfiguration? Configuration { get; } - - /// - /// Gets the type of the peer node (LanDiscovered, StaticRemote, or CloudRemote). - /// - public PeerType Type { get; } - - /// - /// Gets the role assigned to this node within the cluster. - /// - public NodeRole Role { get; } - - /// - /// Gets the list of collections this peer is interested in. - /// - public System.Collections.Generic.IReadOnlyList InterestingCollections { get; } - - /// - /// Initializes a new instance of the PeerNode class with the specified node identifier, network address, and last - /// seen timestamp. + /// Initializes a new instance of the PeerNode class with the specified node identifier, network address, and last + /// seen timestamp. /// /// The unique identifier for the peer node. Cannot be null or empty. /// The network address of the peer node. Cannot be null or empty. @@ -57,10 +21,10 @@ public class PeerNode /// The peer node configuration /// The list of collections this peer is interested in. public PeerNode( - string nodeId, - string address, + string nodeId, + string address, DateTimeOffset lastSeen, - PeerType type = PeerType.LanDiscovered, + PeerType type = PeerType.LanDiscovered, NodeRole role = NodeRole.Member, PeerNodeConfiguration? configuration = null, IEnumerable? 
interestingCollections = null) @@ -73,4 +37,39 @@ public class PeerNode Configuration = configuration; InterestingCollections = new List(interestingCollections ?? []).AsReadOnly(); } -} + + /// + /// Gets the unique identifier for the node. + /// + public string NodeId { get; } + + /// + /// Gets the address associated with the current instance. + /// + public string Address { get; } + + /// + /// Gets the date and time when the entity was last observed or updated. + /// + public DateTimeOffset LastSeen { get; } + + /// + /// Gets the configuration settings for the peer node. + /// + public PeerNodeConfiguration? Configuration { get; } + + /// + /// Gets the type of the peer node (LanDiscovered, StaticRemote, or CloudRemote). + /// + public PeerType Type { get; } + + /// + /// Gets the role assigned to this node within the cluster. + /// + public NodeRole Role { get; } + + /// + /// Gets the list of collections this peer is interested in. + /// + public IReadOnlyList InterestingCollections { get; } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNodeConfiguration.cs b/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNodeConfiguration.cs index 7c00b3d..1d7a9be 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNodeConfiguration.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Network/PeerNodeConfiguration.cs @@ -1,96 +1,101 @@ using System; +using System.Collections.Generic; namespace ZB.MOM.WW.CBDDC.Core.Network; /// -/// Represents the configuration settings for a peer node in a distributed network. +/// Represents the configuration settings for a peer node in a distributed network. /// -/// Use this class to specify identification, network port, and authentication details required for a -/// peer node to participate in a cluster or peer-to-peer environment. The property provides a -/// basic configuration suitable for development or testing scenarios. 
+/// +/// Use this class to specify identification, network port, and authentication details required for a +/// peer node to participate in a cluster or peer-to-peer environment. The property provides a +/// basic configuration suitable for development or testing scenarios. +/// public class PeerNodeConfiguration { /// - /// Gets or sets the unique identifier for the node. + /// Gets or sets the unique identifier for the node. /// - public string NodeId { get; set; } = string.Empty; + public string NodeId { get; set; } = string.Empty; /// - /// Gets or sets the TCP port number used for network communication. + /// Gets or sets the TCP port number used for network communication. /// public int TcpPort { get; set; } /// - /// Gets or sets the authentication token used to authorize API requests. + /// Gets or sets the authentication token used to authorize API requests. /// - public string AuthToken { get; set; } = string.Empty; + public string AuthToken { get; set; } = string.Empty; /// - /// Maximum size of the document cache items. Default: 10. + /// Maximum size of the document cache items. Default: 10. /// public int MaxDocumentCacheSize { get; set; } = 100; /// - /// Maximum size of offline queue. Default: 1000. + /// Maximum size of offline queue. Default: 1000. /// public int MaxQueueSize { get; set; } = 1000; /// - /// Number of retry attempts for failed network operations. Default: 3. + /// Number of retry attempts for failed network operations. Default: 3. /// public int RetryAttempts { get; set; } = 3; /// - /// Delay between retry attempts in milliseconds. Default: 1000ms. + /// Delay between retry attempts in milliseconds. Default: 1000ms. /// public int RetryDelayMs { get; set; } = 1000; /// - /// Interval between periodic maintenance operations (Oplog pruning) in minutes. Default: 60 minutes. + /// Interval between periodic maintenance operations (Oplog pruning) in minutes. Default: 60 minutes. 
/// public int MaintenanceIntervalMinutes { get; set; } = 60; /// - /// Oplog retention period in hours. Entries older than this will be pruned. Default: 24 hours. + /// Oplog retention period in hours. Entries older than this will be pruned. Default: 24 hours. /// public int OplogRetentionHours { get; set; } = 24; /// - /// Gets or sets a list of known peers to connect to directly, bypassing discovery. + /// Gets or sets a list of known peers to connect to directly, bypassing discovery. /// - public System.Collections.Generic.List KnownPeers { get; set; } = new(); + public List KnownPeers { get; set; } = new(); /// - /// Gets the default configuration settings for a peer node. + /// Gets the default configuration settings for a peer node. /// - /// Each access returns a new instance of the configuration with a unique node identifier. The - /// default settings use TCP port 9000 and a generated authentication token. Modify the returned instance as needed - /// before use. - public static PeerNodeConfiguration Default => new PeerNodeConfiguration - { - NodeId = Guid.NewGuid().ToString(), - TcpPort = 9000, - AuthToken = Guid.NewGuid().ToString("N") - }; + /// + /// Each access returns a new instance of the configuration with a unique node identifier. The + /// default settings use TCP port 9000 and a generated authentication token. Modify the returned instance as needed + /// before use. + /// + public static PeerNodeConfiguration Default => new() + { + NodeId = Guid.NewGuid().ToString(), + TcpPort = 9000, + AuthToken = Guid.NewGuid().ToString("N") + }; } /// -/// Configuration for a known peer node. +/// Configuration for a known peer node. /// public class KnownPeerConfiguration { /// - /// The unique identifier of the peer node. + /// The unique identifier of the peer node. /// - public string NodeId { get; set; } = string.Empty; + public string NodeId { get; set; } = string.Empty; /// - /// The hostname or IP address of the peer. 
+ /// The hostname or IP address of the peer. /// - public string Host { get; set; } = string.Empty; + public string Host { get; set; } = string.Empty; /// - /// The TCP port of the peer. + /// The TCP port of the peer. /// public int Port { get; set; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Network/PeerType.cs b/src/ZB.MOM.WW.CBDDC.Core/Network/PeerType.cs index 9803d7f..29d8140 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Network/PeerType.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Network/PeerType.cs @@ -1,26 +1,26 @@ namespace ZB.MOM.WW.CBDDC.Core.Network; /// -/// Defines the type of peer node in the distributed network. +/// Defines the type of peer node in the distributed network. /// public enum PeerType { /// - /// Peer discovered via UDP broadcast on the local area network. - /// These peers are ephemeral and removed after timeout when no longer broadcasting. + /// Peer discovered via UDP broadcast on the local area network. + /// These peers are ephemeral and removed after timeout when no longer broadcasting. /// LanDiscovered = 0, /// - /// Peer manually configured with a static address. - /// These peers are persistent across restarts and stored in the database. + /// Peer manually configured with a static address. + /// These peers are persistent across restarts and stored in the database. /// StaticRemote = 1, /// - /// Cloud remote node. - /// Always active if internet connectivity is available. - /// Synchronized only by the elected leader node to reduce overhead. + /// Cloud remote node. + /// Always active if internet connectivity is available. + /// Synchronized only by the elected leader node to reduce overhead. 
/// CloudRemote = 2 -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Network/RemotePeerConfiguration.cs b/src/ZB.MOM.WW.CBDDC.Core/Network/RemotePeerConfiguration.cs index 6f68754..811f57e 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Network/RemotePeerConfiguration.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Network/RemotePeerConfiguration.cs @@ -1,38 +1,39 @@ +using System.Collections.Generic; using System.ComponentModel.DataAnnotations; namespace ZB.MOM.WW.CBDDC.Core.Network; /// -/// Configuration for a remote peer node that is persistent across restarts. -/// This collection is automatically synchronized across all nodes in the cluster. +/// Configuration for a remote peer node that is persistent across restarts. +/// This collection is automatically synchronized across all nodes in the cluster. /// public class RemotePeerConfiguration { /// - /// Gets or sets the unique identifier for the remote peer node. + /// Gets or sets the unique identifier for the remote peer node. /// [Key] public string NodeId { get; set; } = ""; /// - /// Gets or sets the network address of the remote peer (hostname:port). + /// Gets or sets the network address of the remote peer (hostname:port). /// public string Address { get; set; } = ""; /// - /// Gets or sets the type of the peer (StaticRemote or CloudRemote). + /// Gets or sets the type of the peer (StaticRemote or CloudRemote). /// public PeerType Type { get; set; } /// - /// Gets or sets whether this peer is enabled for synchronization. - /// Disabled peers are stored but not used for sync. + /// Gets or sets whether this peer is enabled for synchronization. + /// Disabled peers are stored but not used for sync. /// public bool IsEnabled { get; set; } = true; /// - /// Gets or sets the list of collections this peer is interested in. - /// If empty, the peer is interested in all collections. + /// Gets or sets the list of collections this peer is interested in. + /// If empty, the peer is interested in all collections. 
/// - public System.Collections.Generic.List InterestingCollections { get; set; } = new(); -} + public List InterestingCollections { get; set; } = new(); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Network/StaticPeerNodeConfigurationProvider.cs b/src/ZB.MOM.WW.CBDDC.Core/Network/StaticPeerNodeConfigurationProvider.cs index 2824b13..1d6d010 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Network/StaticPeerNodeConfigurationProvider.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Network/StaticPeerNodeConfigurationProvider.cs @@ -1,32 +1,16 @@ -using System.Threading.Tasks; - +using System.Threading.Tasks; + namespace ZB.MOM.WW.CBDDC.Core.Network; /// -/// Provides peer node configuration from an in-memory static source. +/// Provides peer node configuration from an in-memory static source. /// public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider { private PeerNodeConfiguration _configuration = new(); /// - /// Gets or sets the current peer node configuration. - /// - public PeerNodeConfiguration Configuration - { - get => _configuration; - set - { - if (_configuration != value) - { - _configuration = value; - OnConfigurationChanged(_configuration); - } - } - } - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The initial peer node configuration. public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration) @@ -35,12 +19,28 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide } /// - /// Occurs when the peer node configuration changes. + /// Gets or sets the current peer node configuration. + /// + public PeerNodeConfiguration Configuration + { + get => _configuration; + set + { + if (_configuration != value) + { + _configuration = value; + OnConfigurationChanged(_configuration); + } + } + } + + /// + /// Occurs when the peer node configuration changes. /// public event PeerNodeConfigurationChangedEventHandler? 
ConfigurationChanged; /// - /// Gets the current peer node configuration. + /// Gets the current peer node configuration. /// /// A task whose result is the current configuration. public Task GetConfiguration() @@ -49,11 +49,11 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide } /// - /// Raises the event. + /// Raises the event. /// /// The new peer node configuration. protected virtual void OnConfigurationChanged(PeerNodeConfiguration newConfig) { ConfigurationChanged?.Invoke(this, newConfig); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/OplogEntry.cs b/src/ZB.MOM.WW.CBDDC.Core/OplogEntry.cs index 0f0add9..dbac33a 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/OplogEntry.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/OplogEntry.cs @@ -1,5 +1,7 @@ using System; -using System.ComponentModel.DataAnnotations; +using System.Globalization; +using System.Security.Cryptography; +using System.Text; using System.Text.Json; namespace ZB.MOM.WW.CBDDC.Core; @@ -10,84 +12,56 @@ public enum OperationType Delete } -public static class OplogEntryExtensions -{ - /// - /// Computes a deterministic hash for the specified oplog entry. - /// - /// The oplog entry to hash. - /// The lowercase hexadecimal SHA-256 hash of the entry. - public static string ComputeHash(this OplogEntry entry) - { - using var sha256 = System.Security.Cryptography.SHA256.Create(); - var sb = new System.Text.StringBuilder(); +public static class OplogEntryExtensions +{ + /// + /// Computes a deterministic hash for the specified oplog entry. + /// + /// The oplog entry to hash. + /// The lowercase hexadecimal SHA-256 hash of the entry. 
+ public static string ComputeHash(this OplogEntry entry) + { + using var sha256 = SHA256.Create(); + var sb = new StringBuilder(); sb.Append(entry.Collection); sb.Append('|'); sb.Append(entry.Key); sb.Append('|'); // Ensure stable string representation for Enum (integer value) - sb.Append(((int)entry.Operation).ToString(System.Globalization.CultureInfo.InvariantCulture)); + sb.Append(((int)entry.Operation).ToString(CultureInfo.InvariantCulture)); sb.Append('|'); // Payload excluded from hash to avoid serialization non-determinism // sb.Append(entry.Payload...); sb.Append('|'); // Timestamp.ToString() is now Invariant - sb.Append(entry.Timestamp.ToString()); + sb.Append(entry.Timestamp.ToString()); sb.Append('|'); sb.Append(entry.PreviousHash); - var bytes = System.Text.Encoding.UTF8.GetBytes(sb.ToString()); - var hashBytes = sha256.ComputeHash(bytes); + byte[] bytes = Encoding.UTF8.GetBytes(sb.ToString()); + byte[] hashBytes = sha256.ComputeHash(bytes); // Convert to hex string return BitConverter.ToString(hashBytes).Replace("-", "").ToLowerInvariant(); } } -public class OplogEntry -{ - /// - /// Gets the collection name associated with this entry. - /// - public string Collection { get; } - /// - /// Gets the document key associated with this entry. - /// - public string Key { get; } - /// - /// Gets the operation represented by this entry. - /// - public OperationType Operation { get; } - /// - /// Gets the serialized payload for the operation. - /// - public JsonElement? Payload { get; } - /// - /// Gets the logical timestamp for this entry. - /// - public HlcTimestamp Timestamp { get; } - /// - /// Gets the hash of this entry. - /// - public string Hash { get; } - /// - /// Gets the hash of the previous entry in the chain. - /// - public string PreviousHash { get; } - - /// - /// Initializes a new instance of the class. - /// - /// The collection name. - /// The document key. - /// The operation type. - /// The serialized payload. - /// The logical timestamp. 
- /// The previous entry hash. - /// The current entry hash. If null, it is computed. - public OplogEntry(string collection, string key, OperationType operation, JsonElement? payload, HlcTimestamp timestamp, string previousHash, string? hash = null) - { +public class OplogEntry +{ + /// + /// Initializes a new instance of the class. + /// + /// The collection name. + /// The document key. + /// The operation type. + /// The serialized payload. + /// The logical timestamp. + /// The previous entry hash. + /// The current entry hash. If null, it is computed. + public OplogEntry(string collection, string key, OperationType operation, JsonElement? payload, + HlcTimestamp timestamp, string previousHash, string? hash = null) + { Collection = collection; Key = key; Operation = operation; @@ -98,10 +72,45 @@ public class OplogEntry } /// - /// Verifies if the stored Hash matches the content. + /// Gets the collection name associated with this entry. + /// + public string Collection { get; } + + /// + /// Gets the document key associated with this entry. + /// + public string Key { get; } + + /// + /// Gets the operation represented by this entry. + /// + public OperationType Operation { get; } + + /// + /// Gets the serialized payload for the operation. + /// + public JsonElement? Payload { get; } + + /// + /// Gets the logical timestamp for this entry. + /// + public HlcTimestamp Timestamp { get; } + + /// + /// Gets the hash of this entry. + /// + public string Hash { get; } + + /// + /// Gets the hash of the previous entry in the chain. + /// + public string PreviousHash { get; } + + /// + /// Verifies if the stored Hash matches the content. 
/// public bool IsValid() { return Hash == this.ComputeHash(); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/PeerOplogConfirmation.cs b/src/ZB.MOM.WW.CBDDC.Core/PeerOplogConfirmation.cs index bae3dc0..89d74bd 100644 --- a/src/ZB.MOM.WW.CBDDC.Core/PeerOplogConfirmation.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/PeerOplogConfirmation.cs @@ -3,42 +3,42 @@ using System; namespace ZB.MOM.WW.CBDDC.Core; /// -/// Represents a persisted confirmation watermark for a tracked peer and source node. +/// Represents a persisted confirmation watermark for a tracked peer and source node. /// public class PeerOplogConfirmation { /// - /// Gets or sets the tracked peer node identifier. + /// Gets or sets the tracked peer node identifier. /// public string PeerNodeId { get; set; } = ""; /// - /// Gets or sets the source node identifier this confirmation applies to. + /// Gets or sets the source node identifier this confirmation applies to. /// public string SourceNodeId { get; set; } = ""; /// - /// Gets or sets the physical wall-clock component of the confirmed HLC timestamp. + /// Gets or sets the physical wall-clock component of the confirmed HLC timestamp. /// public long ConfirmedWall { get; set; } /// - /// Gets or sets the logical counter component of the confirmed HLC timestamp. + /// Gets or sets the logical counter component of the confirmed HLC timestamp. /// public int ConfirmedLogic { get; set; } /// - /// Gets or sets the confirmed hash at the watermark. + /// Gets or sets the confirmed hash at the watermark. /// public string ConfirmedHash { get; set; } = ""; /// - /// Gets or sets when this confirmation record was last updated in UTC. + /// Gets or sets when this confirmation record was last updated in UTC. /// public DateTimeOffset LastConfirmedUtc { get; set; } = DateTimeOffset.UtcNow; /// - /// Gets or sets whether this tracked peer is active for pruning/sync gating. + /// Gets or sets whether this tracked peer is active for pruning/sync gating. 
/// public bool IsActive { get; set; } = true; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/QueryNode.cs b/src/ZB.MOM.WW.CBDDC.Core/QueryNode.cs index 91d835b..e526951 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/QueryNode.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/QueryNode.cs @@ -1,225 +1,269 @@ -using System.Text.Json; - -namespace ZB.MOM.WW.CBDDC.Core; - -public abstract class QueryNode { } - +namespace ZB.MOM.WW.CBDDC.Core; + +public abstract class QueryNode +{ +} + public class Eq : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new equality query node. + /// + /// The field name to compare. + /// The value to compare against. + public Eq(string field, object value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the value to compare against. + /// Gets the value to compare against. /// public object Value { get; } - - /// - /// Initializes a new equality query node. - /// - /// The field name to compare. - /// The value to compare against. - public Eq(string field, object value) { Field = field; Value = value; } } public class Gt : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new greater-than query node. + /// + /// The field name to compare. + /// The threshold value. + public Gt(string field, object value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the threshold value. + /// Gets the threshold value. /// public object Value { get; } - - /// - /// Initializes a new greater-than query node. - /// - /// The field name to compare. - /// The threshold value. - public Gt(string field, object value) { Field = field; Value = value; } } public class Lt : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new less-than query node. + /// + /// The field name to compare. + /// The threshold value. 
+ public Lt(string field, object value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the threshold value. + /// Gets the threshold value. /// public object Value { get; } - - /// - /// Initializes a new less-than query node. - /// - /// The field name to compare. - /// The threshold value. - public Lt(string field, object value) { Field = field; Value = value; } } public class Gte : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new greater-than-or-equal query node. + /// + /// The field name to compare. + /// The threshold value. + public Gte(string field, object value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the threshold value. + /// Gets the threshold value. /// public object Value { get; } - - /// - /// Initializes a new greater-than-or-equal query node. - /// - /// The field name to compare. - /// The threshold value. - public Gte(string field, object value) { Field = field; Value = value; } } public class Lte : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new less-than-or-equal query node. + /// + /// The field name to compare. + /// The threshold value. + public Lte(string field, object value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the threshold value. + /// Gets the threshold value. /// public object Value { get; } - - /// - /// Initializes a new less-than-or-equal query node. - /// - /// The field name to compare. - /// The threshold value. - public Lte(string field, object value) { Field = field; Value = value; } } public class Neq : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new not-equal query node. + /// + /// The field name to compare. + /// The value to compare against. 
+ public Neq(string field, object value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the value to compare against. + /// Gets the value to compare against. /// public object Value { get; } - - /// - /// Initializes a new not-equal query node. - /// - /// The field name to compare. - /// The value to compare against. - public Neq(string field, object value) { Field = field; Value = value; } } public class In : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new in-list query node. + /// + /// The field name to compare. + /// The set of values to compare against. + public In(string field, object[] values) + { + Field = field; + Values = values; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the set of values to compare against. + /// Gets the set of values to compare against. /// public object[] Values { get; } - - /// - /// Initializes a new in-list query node. - /// - /// The field name to compare. - /// The set of values to compare against. - public In(string field, object[] values) { Field = field; Values = values; } } public class Contains : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new contains query node. + /// + /// The field name to compare. + /// The substring value to search for. + public Contains(string field, string value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the substring value to search for. + /// Gets the substring value to search for. /// public string Value { get; } - - /// - /// Initializes a new contains query node. - /// - /// The field name to compare. - /// The substring value to search for. 
- public Contains(string field, string value) { Field = field; Value = value; } } public class NotContains : QueryNode { /// - /// Gets the field name to compare. + /// Initializes a new not-contains query node. + /// + /// The field name to compare. + /// The substring value to exclude. + public NotContains(string field, string value) + { + Field = field; + Value = value; + } + + /// + /// Gets the field name to compare. /// public string Field { get; } /// - /// Gets the substring value to exclude. + /// Gets the substring value to exclude. /// public string Value { get; } - - /// - /// Initializes a new not-contains query node. - /// - /// The field name to compare. - /// The substring value to exclude. - public NotContains(string field, string value) { Field = field; Value = value; } } public class And : QueryNode { /// - /// Gets the left side of the logical operation. + /// Initializes a new logical AND query node. + /// + /// The left query node. + /// The right query node. + public And(QueryNode left, QueryNode right) + { + Left = left; + Right = right; + } + + /// + /// Gets the left side of the logical operation. /// public QueryNode Left { get; } /// - /// Gets the right side of the logical operation. + /// Gets the right side of the logical operation. /// public QueryNode Right { get; } - - /// - /// Initializes a new logical AND query node. - /// - /// The left query node. - /// The right query node. - public And(QueryNode left, QueryNode right) { Left = left; Right = right; } } public class Or : QueryNode { /// - /// Gets the left side of the logical operation. + /// Initializes a new logical OR query node. + /// + /// The left query node. + /// The right query node. + public Or(QueryNode left, QueryNode right) + { + Left = left; + Right = right; + } + + /// + /// Gets the left side of the logical operation. /// public QueryNode Left { get; } /// - /// Gets the right side of the logical operation. + /// Gets the right side of the logical operation. 
/// public QueryNode Right { get; } - - /// - /// Initializes a new logical OR query node. - /// - /// The left query node. - /// The right query node. - public Or(QueryNode left, QueryNode right) { Left = left; Right = right; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/README.md b/src/ZB.MOM.WW.CBDDC.Core/README.md index 32a898d..2c82dd4 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/README.md +++ b/src/ZB.MOM.WW.CBDDC.Core/README.md @@ -4,7 +4,9 @@ Core abstractions and logic for **CBDDC**, a peer-to-peer data synchronization m ## What Is CBDDC? -CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (BLite) and enables automatic P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC handles synchronization in the background. +CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (BLite) and enables automatic +P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC +handles synchronization in the background. 
## What's In This Package @@ -17,7 +19,7 @@ CBDDC is **not** a database ```bash # Pick a persistence provider -dotnet add package ZB.MOM.WW.CBDDC.Persistence # Embedded document DB +dotnet add package ZB.MOM.WW.CBDDC.Persistence # Embedded document DB # Add networking dotnet add package ZB.MOM.WW.CBDDC.Network @@ -65,12 +67,12 @@ builder.Services.AddCBDDCCore() ## Key Concepts -| Concept | Description | -|---------|-------------| -| **CDC** | Change Data Capture — watches collections registered via `WatchCollection()` | -| **Oplog** | Append-only hash-chained journal of changes per node | -| **VectorClock** | Tracks causal ordering across the mesh | -| **DocumentStore** | Your bridge between entities and the sync engine | +| Concept | Description | +|-------------------|------------------------------------------------------------------------------| +| **CDC** | Change Data Capture — watches collections registered via `WatchCollection()` | +| **Oplog** | Append-only hash-chained journal of changes per node | +| **VectorClock** | Tracks causal ordering across the mesh | +| **DocumentStore** | Your bridge between entities and the sync engine | ## Architecture @@ -91,15 +93,16 @@ Your App ? 
DbContext.SaveChangesAsync() ## Related Packages -- **ZB.MOM.WW.CBDDC.Persistence** οΏ½ BLite embedded provider (.NET 10+) -- **ZB.MOM.WW.CBDDC.Network** — P2P networking (UDP discovery, TCP sync, Gossip) +- **ZB.MOM.WW.CBDDC.Persistence** — BLite embedded provider (.NET 10+) +- **ZB.MOM.WW.CBDDC.Network** — P2P networking (UDP discovery, TCP sync, Gossip) ## Documentation - **[Complete Documentation](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net)** -- **[Sample Application](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/tree/main/samples/ZB.MOM.WW.CBDDC.Sample.Console)** +- **[Sample Application](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/tree/main/samples/ZB.MOM.WW.CBDDC.Sample.Console) + ** - **[Integration Guide](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net#integrating-with-your-database)** ## License -MIT — see [LICENSE](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/blob/main/LICENSE) +MIT — see [LICENSE](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/blob/main/LICENSE) diff --git a/src/ZB.MOM.WW.CBDDC.Core/Resilience/IRetryPolicy.cs b/src/ZB.MOM.WW.CBDDC.Core/Resilience/IRetryPolicy.cs index 22d19eb..ec752ff 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Resilience/IRetryPolicy.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Resilience/IRetryPolicy.cs @@ -1,27 +1,28 @@ -ο»Ώusing System; -using System.Threading; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Core.Resilience +ο»Ώusing System; +using System.Threading; +using System.Threading.Tasks; + +namespace ZB.MOM.WW.CBDDC.Core.Resilience; + +public interface IRetryPolicy { - public interface IRetryPolicy - { - /// - /// Executes an asynchronous operation with retry handling. - /// - /// The operation to execute. - /// The operation name used for diagnostics. - /// A token used to cancel the operation. - /// A task that represents the asynchronous execution. 
- Task ExecuteAsync(Func operation, string operationName, CancellationToken cancellationToken = default); - /// - /// Executes an asynchronous operation with retry handling and returns a result. - /// - /// The result type. - /// The operation to execute. - /// The operation name used for diagnostics. - /// A token used to cancel the operation. - /// A task that represents the asynchronous execution and yields the operation result. - Task ExecuteAsync(Func> operation, string operationName, CancellationToken cancellationToken = default); - } -} + /// + /// Executes an asynchronous operation with retry handling. + /// + /// The operation to execute. + /// The operation name used for diagnostics. + /// A token used to cancel the operation. + /// A task that represents the asynchronous execution. + Task ExecuteAsync(Func operation, string operationName, CancellationToken cancellationToken = default); + + /// + /// Executes an asynchronous operation with retry handling and returns a result. + /// + /// The result type. + /// The operation to execute. + /// The operation name used for diagnostics. + /// A token used to cancel the operation. + /// A task that represents the asynchronous execution and yields the operation result. 
+ Task ExecuteAsync(Func> operation, string operationName, + CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Resilience/RetryPolicy.cs b/src/ZB.MOM.WW.CBDDC.Core/Resilience/RetryPolicy.cs index 5889607..6198ed3 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Resilience/RetryPolicy.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Resilience/RetryPolicy.cs @@ -1,50 +1,53 @@ -using System; -using System.Threading; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Core.Exceptions; -using ZB.MOM.WW.CBDDC.Core.Network; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; +using System; +using System.IO; +using System.Net.Sockets; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Exceptions; +using ZB.MOM.WW.CBDDC.Core.Network; +using TimeoutException = ZB.MOM.WW.CBDDC.Core.Exceptions.TimeoutException; namespace ZB.MOM.WW.CBDDC.Core.Resilience; /// -/// Provides retry logic for transient failures. +/// Provides retry logic for transient failures. /// -public class RetryPolicy : IRetryPolicy -{ - private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; - private readonly ILogger _logger; - - /// - /// Initializes a new instance of the class. - /// - /// The provider for retry configuration values. - /// The logger instance. - public RetryPolicy(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger? logger = null) - { - _logger = logger ?? NullLogger.Instance; - _peerNodeConfigurationProvider = peerNodeConfigurationProvider - ?? throw new ArgumentNullException(nameof(peerNodeConfigurationProvider)); - } +public class RetryPolicy : IRetryPolicy +{ + private readonly ILogger _logger; + private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; - /// - /// Executes an operation with retry logic. 
- /// - /// The result type returned by the operation. - /// The asynchronous operation to execute. - /// The operation name used for logging. - /// A token used to cancel retry delays. - public async Task ExecuteAsync( - Func> operation, - string operationName, - CancellationToken cancellationToken = default) + /// + /// Initializes a new instance of the class. + /// + /// The provider for retry configuration values. + /// The logger instance. + public RetryPolicy(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, + ILogger? logger = null) + { + _logger = logger ?? NullLogger.Instance; + _peerNodeConfigurationProvider = peerNodeConfigurationProvider + ?? throw new ArgumentNullException(nameof(peerNodeConfigurationProvider)); + } + + /// + /// Executes an operation with retry logic. + /// + /// The result type returned by the operation. + /// The asynchronous operation to execute. + /// The operation name used for logging. + /// A token used to cancel retry delays. + public async Task ExecuteAsync( + Func> operation, + string operationName, + CancellationToken cancellationToken = default) { var config = await _peerNodeConfigurationProvider.GetConfiguration(); Exception? lastException = null; - for (int attempt = 1; attempt <= config.RetryAttempts; attempt++) - { + for (var attempt = 1; attempt <= config.RetryAttempts; attempt++) try { _logger.LogDebug("Executing {Operation} (attempt {Attempt}/{Max})", @@ -55,7 +58,7 @@ public class RetryPolicy : IRetryPolicy catch (Exception ex) when (attempt < config.RetryAttempts && IsTransient(ex)) { lastException = ex; - var delay = config.RetryDelayMs * attempt; // Exponential backoff + int delay = config.RetryDelayMs * attempt; // Exponential backoff _logger.LogWarning(ex, "Operation {Operation} failed (attempt {Attempt}/{Max}). 
Retrying in {Delay}ms...", @@ -63,36 +66,31 @@ public class RetryPolicy : IRetryPolicy await Task.Delay(delay, cancellationToken); } - } - if (lastException != null) - { - _logger.LogError(lastException, - "Operation {Operation} failed after {Attempts} attempts", - operationName, config.RetryAttempts); - } - else - { - _logger.LogError( - "Operation {Operation} failed after {Attempts} attempts", - operationName, config.RetryAttempts); - } + if (lastException != null) + _logger.LogError(lastException, + "Operation {Operation} failed after {Attempts} attempts", + operationName, config.RetryAttempts); + else + _logger.LogError( + "Operation {Operation} failed after {Attempts} attempts", + operationName, config.RetryAttempts); throw new CBDDCException("RETRY_EXHAUSTED", $"Operation '{operationName}' failed after {config.RetryAttempts} attempts", lastException!); } - /// - /// Executes an operation with retry logic (void return). - /// - /// The asynchronous operation to execute. - /// The operation name used for logging. - /// A token used to cancel retry delays. - public async Task ExecuteAsync( - Func operation, - string operationName, - CancellationToken cancellationToken = default) + /// + /// Executes an operation with retry logic (void return). + /// + /// The asynchronous operation to execute. + /// The operation name used for logging. + /// A token used to cancel retry delays. 
+ public async Task ExecuteAsync( + Func operation, + string operationName, + CancellationToken cancellationToken = default) { await ExecuteAsync(async () => { @@ -104,13 +102,13 @@ public class RetryPolicy : IRetryPolicy private static bool IsTransient(Exception ex) { // Network errors are typically transient - if (ex is NetworkException or System.Net.Sockets.SocketException or System.IO.IOException) + if (ex is NetworkException or SocketException or IOException) return true; // Timeout errors are transient - if (ex is Exceptions.TimeoutException or OperationCanceledException) + if (ex is TimeoutException or OperationCanceledException) return true; return false; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/SnapshotMetadata.cs b/src/ZB.MOM.WW.CBDDC.Core/SnapshotMetadata.cs index 385af8b..30736f8 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/SnapshotMetadata.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/SnapshotMetadata.cs @@ -1,21 +1,24 @@ -namespace ZB.MOM.WW.CBDDC.Core; - +namespace ZB.MOM.WW.CBDDC.Core; + public class SnapshotMetadata { /// - /// Gets or sets the node identifier associated with the snapshot. + /// Gets or sets the node identifier associated with the snapshot. /// public string NodeId { get; set; } = ""; + /// - /// Gets or sets the physical time component of the snapshot timestamp. + /// Gets or sets the physical time component of the snapshot timestamp. /// public long TimestampPhysicalTime { get; set; } + /// - /// Gets or sets the logical counter component of the snapshot timestamp. + /// Gets or sets the logical counter component of the snapshot timestamp. /// public int TimestampLogicalCounter { get; set; } + /// - /// Gets or sets the snapshot hash. + /// Gets or sets the snapshot hash. 
/// public string Hash { get; set; } = ""; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/CorruptDatabaseException.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/CorruptDatabaseException.cs index 4cad089..37a0e05 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/CorruptDatabaseException.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/CorruptDatabaseException.cs @@ -1,16 +1,18 @@ -using System; - +using System; + namespace ZB.MOM.WW.CBDDC.Core.Storage; /// -/// Represents an error that occurs when a database is found to be corrupt. +/// Represents an error that occurs when a database is found to be corrupt. /// public class CorruptDatabaseException : Exception { /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The exception message. /// The underlying exception that caused this error. - public CorruptDatabaseException(string message, Exception innerException) : base(message, innerException) { } -} + public CorruptDatabaseException(string message, Exception innerException) : base(message, innerException) + { + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentMetadataStore.cs index 81461e8..f8491f4 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentMetadataStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentMetadataStore.cs @@ -1,108 +1,115 @@ -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; - +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + namespace ZB.MOM.WW.CBDDC.Core.Storage; /// -/// Defines the contract for storing and retrieving document metadata for sync tracking. -/// Document metadata stores HLC timestamps and deleted state without modifying application entities. +/// Defines the contract for storing and retrieving document metadata for sync tracking. 
+/// Document metadata stores HLC timestamps and deleted state without modifying application entities. /// public interface IDocumentMetadataStore : ISnapshotable { /// - /// Gets the metadata for a specific document. + /// Gets the metadata for a specific document. /// /// The collection name. /// The document key. /// A cancellation token. /// The document metadata if found; otherwise null. - Task GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default); + Task GetMetadataAsync(string collection, string key, + CancellationToken cancellationToken = default); /// - /// Gets metadata for all documents in a collection. + /// Gets metadata for all documents in a collection. /// /// The collection name. /// A cancellation token. /// Enumerable of document metadata for the collection. - Task> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default); + Task> GetMetadataByCollectionAsync(string collection, + CancellationToken cancellationToken = default); /// - /// Upserts (inserts or updates) metadata for a document. + /// Upserts (inserts or updates) metadata for a document. /// /// The metadata to upsert. /// A cancellation token. Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default); /// - /// Upserts metadata for multiple documents in batch. + /// Upserts metadata for multiple documents in batch. /// /// The metadata items to upsert. /// A cancellation token. - Task UpsertMetadataBatchAsync(IEnumerable metadatas, CancellationToken cancellationToken = default); + Task UpsertMetadataBatchAsync(IEnumerable metadatas, + CancellationToken cancellationToken = default); /// - /// Marks a document as deleted by setting IsDeleted=true and updating the timestamp. + /// Marks a document as deleted by setting IsDeleted=true and updating the timestamp. /// /// The collection name. /// The document key. /// The HLC timestamp of the deletion. 
/// A cancellation token. - Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default); + Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, + CancellationToken cancellationToken = default); /// - /// Gets all document metadata with timestamps after the specified timestamp. - /// Used for incremental sync to find documents modified since last sync. + /// Gets all document metadata with timestamps after the specified timestamp. + /// Used for incremental sync to find documents modified since last sync. /// /// The timestamp to compare against. /// Optional collection filter. /// A cancellation token. /// Documents modified after the specified timestamp. - Task> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable? collections = null, CancellationToken cancellationToken = default); + Task> GetMetadataAfterAsync(HlcTimestamp since, + IEnumerable? collections = null, CancellationToken cancellationToken = default); } /// -/// Represents metadata for a document used in sync tracking. +/// Represents metadata for a document used in sync tracking. /// public class DocumentMetadata { /// - /// Gets or sets the collection name. + /// Initializes a new instance of the class. /// - public string Collection { get; set; } = ""; + public DocumentMetadata() + { + } /// - /// Gets or sets the document key. + /// Initializes a new instance of the class. /// - public string Key { get; set; } = ""; - - /// - /// Gets or sets the HLC timestamp of the last modification. - /// - public HlcTimestamp UpdatedAt { get; set; } - - /// - /// Gets or sets whether this document is marked as deleted (tombstone). - /// - public bool IsDeleted { get; set; } - - /// - /// Initializes a new instance of the class. - /// - public DocumentMetadata() { } - - /// - /// Initializes a new instance of the class. - /// - /// The collection name. - /// The document key. - /// The last update timestamp. 
- /// Whether the document is marked as deleted. - public DocumentMetadata(string collection, string key, HlcTimestamp updatedAt, bool isDeleted = false) - { - Collection = collection; + /// The collection name. + /// The document key. + /// The last update timestamp. + /// Whether the document is marked as deleted. + public DocumentMetadata(string collection, string key, HlcTimestamp updatedAt, bool isDeleted = false) + { + Collection = collection; Key = key; UpdatedAt = updatedAt; IsDeleted = isDeleted; } -} + + /// + /// Gets or sets the collection name. + /// + public string Collection { get; set; } = ""; + + /// + /// Gets or sets the document key. + /// + public string Key { get; set; } = ""; + + /// + /// Gets or sets the HLC timestamp of the last modification. + /// + public HlcTimestamp UpdatedAt { get; set; } + + /// + /// Gets or sets whether this document is marked as deleted (tombstone). + /// + public bool IsDeleted { get; set; } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentStore.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentStore.cs index 789d92e..6f02b24 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/IDocumentStore.cs @@ -1,91 +1,112 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; - +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + namespace ZB.MOM.WW.CBDDC.Core.Storage; /// -/// Handles basic CRUD operations for documents. +/// Handles basic CRUD operations for documents. /// -public interface IDocumentStore : ISnapshotable -{ - /// - /// Gets the collections this store is interested in. - /// - IEnumerable InterestedCollection { get; } +public interface IDocumentStore : ISnapshotable +{ + /// + /// Gets the collections this store is interested in. 
+ /// + IEnumerable InterestedCollection { get; } /// - /// Asynchronously retrieves a incoming from the specified collection by its key. + /// Asynchronously retrieves a incoming from the specified collection by its key. /// /// The name of the collection containing the incoming to retrieve. Cannot be null or empty. /// The unique key identifying the incoming within the collection. Cannot be null or empty. /// A cancellation token that can be used to cancel the operation. - /// A task that represents the asynchronous operation. The task result contains the incoming if found; otherwise, null. + /// + /// A task that represents the asynchronous operation. The task result contains the incoming if found; otherwise, + /// null. + /// Task GetDocumentAsync(string collection, string key, CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves all documents belonging to the specified collection. + /// Asynchronously retrieves all documents belonging to the specified collection. /// /// The name of the collection from which to retrieve documents. Cannot be null or empty. /// A cancellation token that can be used to cancel the asynchronous operation. - /// A task that represents the asynchronous operation. The task result contains an enumerable collection of - /// documents in the specified collection. The collection is empty if no documents are found. - Task> GetDocumentsByCollectionAsync(string collection, CancellationToken cancellationToken = default); + /// + /// A task that represents the asynchronous operation. The task result contains an enumerable collection of + /// documents in the specified collection. The collection is empty if no documents are found. + /// + Task> GetDocumentsByCollectionAsync(string collection, + CancellationToken cancellationToken = default); /// - /// Asynchronously inserts a batch of documents into the data store. + /// Asynchronously inserts a batch of documents into the data store. 
/// /// The collection of documents to insert. Cannot be null or contain null elements. /// A cancellation token that can be used to cancel the operation. - /// A task that represents the asynchronous operation. The task result is if all documents - /// were inserted successfully; otherwise, . - Task InsertBatchDocumentsAsync(IEnumerable documents, CancellationToken cancellationToken = default); + /// + /// A task that represents the asynchronous operation. The task result is if all documents + /// were inserted successfully; otherwise, . + /// + Task InsertBatchDocumentsAsync(IEnumerable documents, + CancellationToken cancellationToken = default); /// - /// Asynchronously updates the specified incoming in the data store. + /// Asynchronously updates the specified incoming in the data store. /// /// The incoming to update. Cannot be null. /// A cancellation token that can be used to cancel the update operation. - /// A task that represents the asynchronous operation. The task result is if the incoming was - /// successfully updated; otherwise, . + /// + /// A task that represents the asynchronous operation. The task result is if the incoming was + /// successfully updated; otherwise, . + /// Task PutDocumentAsync(Document document, CancellationToken cancellationToken = default); /// - /// Asynchronously updates a batch of documents in the data store. + /// Asynchronously updates a batch of documents in the data store. /// /// The collection of documents to update. Cannot be null or contain null elements. /// A cancellation token that can be used to cancel the operation. - /// A task that represents the asynchronous operation. The task result is if all documents - /// were updated successfully; otherwise, . - Task UpdateBatchDocumentsAsync(IEnumerable documents, CancellationToken cancellationToken = default); + /// + /// A task that represents the asynchronous operation. The task result is if all documents + /// were updated successfully; otherwise, . 
+ /// + Task UpdateBatchDocumentsAsync(IEnumerable documents, + CancellationToken cancellationToken = default); /// - /// Asynchronously deletes a incoming identified by the specified key from the given collection. + /// Asynchronously deletes a incoming identified by the specified key from the given collection. /// /// The name of the collection containing the incoming to delete. Cannot be null or empty. /// The unique key identifying the incoming to delete. Cannot be null or empty. /// A cancellation token that can be used to cancel the delete operation. - /// A task that represents the asynchronous delete operation. The task result is if the - /// incoming was successfully deleted; otherwise, . + /// + /// A task that represents the asynchronous delete operation. The task result is if the + /// incoming was successfully deleted; otherwise, . + /// Task DeleteDocumentAsync(string collection, string key, CancellationToken cancellationToken = default); - /// - /// Asynchronously deletes a batch of documents identified by their keys. - /// - /// - /// If any of the specified documents cannot be deleted, the method returns but does not - /// throw an exception. The operation is performed asynchronously and may complete partially if cancellation is requested. - /// - /// A collection of incoming keys that specify the documents to delete. Cannot be null or contain null or empty - /// values. + /// + /// Asynchronously deletes a batch of documents identified by their keys. + /// + /// + /// If any of the specified documents cannot be deleted, the method returns but does not + /// throw an exception. The operation is performed asynchronously and may complete partially if cancellation is + /// requested. + /// + /// + /// A collection of incoming keys that specify the documents to delete. Cannot be null or contain null or empty + /// values. + /// /// A cancellation token that can be used to cancel the delete operation. 
- /// A task that represents the asynchronous delete operation. The task result is if all - /// specified documents were successfully deleted; otherwise, . - Task DeleteBatchDocumentsAsync(IEnumerable documentKeys, CancellationToken cancellationToken = default); + /// + /// A task that represents the asynchronous delete operation. The task result is if all + /// specified documents were successfully deleted; otherwise, . + /// + Task DeleteBatchDocumentsAsync(IEnumerable documentKeys, + CancellationToken cancellationToken = default); /// - /// Asynchronously merges the specified incoming with existing data and returns the updated incoming. + /// Asynchronously merges the specified incoming with existing data and returns the updated incoming. /// /// The incoming to merge. Cannot be null. /// A cancellation token that can be used to cancel the merge operation. @@ -93,11 +114,14 @@ public interface IDocumentStore : ISnapshotable Task MergeAsync(Document incoming, CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves documents identified by the specified collection and key pairs. + /// Asynchronously retrieves documents identified by the specified collection and key pairs. /// - /// A list of tuples, each containing the collection name and the document key that uniquely identify the documents - /// to retrieve. Cannot be null or empty. + /// + /// A list of tuples, each containing the collection name and the document key that uniquely identify the documents + /// to retrieve. Cannot be null or empty. + /// /// A cancellation token that can be used to cancel the asynchronous operation. /// A task that represents the asynchronous retrieval operation. 
- Task> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, CancellationToken cancellationToken); -} + Task> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, + CancellationToken cancellationToken); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/IOplogStore.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/IOplogStore.cs index 0edb9d6..b6414bc 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/IOplogStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/IOplogStore.cs @@ -1,5 +1,4 @@ using System; -using System.Buffers; using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; @@ -7,17 +6,17 @@ using System.Threading.Tasks; namespace ZB.MOM.WW.CBDDC.Core.Storage; /// -/// Handles operations related to the Operation Log (Oplog), synchronization, and logical clocks. +/// Handles operations related to the Operation Log (Oplog), synchronization, and logical clocks. /// public interface IOplogStore : ISnapshotable { /// - /// Occurs when changes are applied to the store from external sources (sync). + /// Occurs when changes are applied to the store from external sources (sync). /// event EventHandler ChangesApplied; /// - /// Appends a new entry to the operation log asynchronously. + /// Appends a new entry to the operation log asynchronously. /// /// The operation log entry to append. Cannot be null. /// A cancellation token that can be used to cancel the append operation. @@ -25,57 +24,64 @@ public interface IOplogStore : ISnapshotable Task AppendOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves all oplog entries that occurred after the specified timestamp. + /// Asynchronously retrieves all oplog entries that occurred after the specified timestamp. /// /// The timestamp after which oplog entries should be returned. /// An optional collection of collection names to filter the results. 
/// A cancellation token that can be used to cancel the asynchronous operation. /// A task that represents the asynchronous operation containing matching oplog entries. - Task> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable? collections = null, CancellationToken cancellationToken = default); + Task> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable? collections = null, + CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves the latest observed hybrid logical clock (HLC) timestamp. + /// Asynchronously retrieves the latest observed hybrid logical clock (HLC) timestamp. /// /// A cancellation token that can be used to cancel the operation. /// A task that represents the asynchronous operation containing the latest HLC timestamp. Task GetLatestTimestampAsync(CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves the current vector clock representing the state of distributed events. + /// Asynchronously retrieves the current vector clock representing the state of distributed events. /// /// A cancellation token that can be used to cancel the asynchronous operation. /// A task that represents the asynchronous operation containing the current vector clock. Task GetVectorClockAsync(CancellationToken cancellationToken = default); /// - /// Retrieves a collection of oplog entries for the specified node that occurred after the given timestamp. + /// Retrieves a collection of oplog entries for the specified node that occurred after the given timestamp. /// /// The unique identifier of the node for which to retrieve oplog entries. Cannot be null or empty. /// The timestamp after which oplog entries should be returned. /// An optional collection of collection names to filter the oplog entries. /// A cancellation token that can be used to cancel the asynchronous operation. /// A task that represents the asynchronous operation containing oplog entries for the specified node. 
- Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable? collections = null, CancellationToken cancellationToken = default); + Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, + IEnumerable? collections = null, CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves the hash of the most recent entry for the specified node. + /// Asynchronously retrieves the hash of the most recent entry for the specified node. /// - /// The unique identifier of the node for which to retrieve the last entry hash. Cannot be null or empty. + /// + /// The unique identifier of the node for which to retrieve the last entry hash. Cannot be null or + /// empty. + /// /// A cancellation token that can be used to cancel the operation. /// A task that represents the asynchronous operation containing the hash string of the last entry or null. Task GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves a sequence of oplog entries representing the chain between the specified start and end hashes. + /// Asynchronously retrieves a sequence of oplog entries representing the chain between the specified start and end + /// hashes. /// /// The hash of the first entry in the chain range. Cannot be null or empty. /// The hash of the last entry in the chain range. Cannot be null or empty. /// A cancellation token that can be used to cancel the asynchronous operation. /// A task that represents the asynchronous operation containing OplogEntry objects in chain order. - Task> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default); + Task> GetChainRangeAsync(string startHash, string endHash, + CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves the oplog entry associated with the specified hash value. + /// Asynchronously retrieves the oplog entry associated with the specified hash value. 
/// /// The hash string identifying the oplog entry to retrieve. Cannot be null or empty. /// A cancellation token that can be used to cancel the asynchronous operation. @@ -83,7 +89,7 @@ public interface IOplogStore : ISnapshotable Task GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default); /// - /// Applies a batch of oplog entries asynchronously to the target data store. + /// Applies a batch of oplog entries asynchronously to the target data store. /// /// A collection of OplogEntry objects representing the operations to apply. Cannot be null. /// A cancellation token that can be used to cancel the batch operation. @@ -91,11 +97,10 @@ public interface IOplogStore : ISnapshotable Task ApplyBatchAsync(IEnumerable oplogEntries, CancellationToken cancellationToken = default); /// - /// Asynchronously removes entries from the oplog that are older than the specified cutoff timestamp. + /// Asynchronously removes entries from the oplog that are older than the specified cutoff timestamp. /// /// The timestamp that defines the upper bound for entries to be pruned. /// A cancellation token that can be used to cancel the prune operation. /// A task that represents the asynchronous prune operation. Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default); - -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerConfigurationStore.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerConfigurationStore.cs index 47f3fe4..48921db 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerConfigurationStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerConfigurationStore.cs @@ -6,26 +6,26 @@ using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Storage; /// -/// Handles storage and retrieval of remote peer configurations. +/// Handles storage and retrieval of remote peer configurations. 
/// public interface IPeerConfigurationStore : ISnapshotable { /// - /// Saves or updates a remote peer configuration in the persistent store. + /// Saves or updates a remote peer configuration in the persistent store. /// /// The remote peer configuration to save. /// Cancellation token. Task SaveRemotePeerAsync(RemotePeerConfiguration peer, CancellationToken cancellationToken = default); /// - /// Retrieves all remote peer configurations from the persistent store. + /// Retrieves all remote peer configurations from the persistent store. /// /// Cancellation token. /// Collection of remote peer configurations. Task> GetRemotePeersAsync(CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves the configuration for a remote peer identified by the specified node ID. + /// Asynchronously retrieves the configuration for a remote peer identified by the specified node ID. /// /// The unique identifier of the remote peer whose configuration is to be retrieved. /// A cancellation token that can be used to cancel the asynchronous operation. @@ -33,9 +33,9 @@ public interface IPeerConfigurationStore : ISnapshotable GetRemotePeerAsync(string nodeId, CancellationToken cancellationToken); /// - /// Removes a remote peer configuration from the persistent store. + /// Removes a remote peer configuration from the persistent store. /// /// The unique identifier of the peer to remove. /// Cancellation token. 
Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerOplogConfirmationStore.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerOplogConfirmationStore.cs index 5d6b7d3..8c27597 100644 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerOplogConfirmationStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/IPeerOplogConfirmationStore.cs @@ -6,12 +6,12 @@ using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Storage; /// -/// Defines persistence operations for peer oplog confirmation tracking. +/// Defines persistence operations for peer oplog confirmation tracking. /// public interface IPeerOplogConfirmationStore : ISnapshotable { /// - /// Ensures the specified peer is tracked for confirmation-based pruning. + /// Ensures the specified peer is tracked for confirmation-based pruning. /// /// The peer node identifier. /// The peer network address. @@ -24,7 +24,7 @@ public interface IPeerOplogConfirmationStore : ISnapshotable - /// Updates the confirmation watermark for a tracked peer and source node. + /// Updates the confirmation watermark for a tracked peer and source node. /// /// The tracked peer node identifier. /// The source node identifier of the confirmed oplog stream. @@ -39,14 +39,14 @@ public interface IPeerOplogConfirmationStore : ISnapshotable - /// Gets all persisted peer confirmations. + /// Gets all persisted peer confirmations. /// /// A cancellation token. /// All peer confirmations. Task> GetConfirmationsAsync(CancellationToken cancellationToken = default); /// - /// Gets persisted confirmations for a specific tracked peer. + /// Gets persisted confirmations for a specific tracked peer. /// /// The peer node identifier. /// A cancellation token. @@ -56,16 +56,16 @@ public interface IPeerOplogConfirmationStore : ISnapshotable - /// Deactivates tracking for the specified peer. + /// Deactivates tracking for the specified peer. 
/// /// The peer node identifier. /// A cancellation token. Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default); /// - /// Gets all active tracked peer identifiers. + /// Gets all active tracked peer identifiers. /// /// A cancellation token. /// Distinct active tracked peer identifiers. Task> GetActiveTrackedPeersAsync(CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotMetadataStore.cs index 5a2b564..ed836d4 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotMetadataStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotMetadataStore.cs @@ -7,16 +7,21 @@ namespace ZB.MOM.WW.CBDDC.Core.Storage; public interface ISnapshotMetadataStore : ISnapshotable { /// - /// Asynchronously retrieves the snapshot metadata associated with the specified node identifier. + /// Asynchronously retrieves the snapshot metadata associated with the specified node identifier. /// - /// The unique identifier of the node for which to retrieve snapshot metadata. Cannot be null or empty. + /// + /// The unique identifier of the node for which to retrieve snapshot metadata. Cannot be null or + /// empty. + /// /// A token to monitor for cancellation requests. - /// A task that represents the asynchronous operation. The task result contains the - /// for the specified node if found; otherwise, . + /// + /// A task that represents the asynchronous operation. The task result contains the + /// for the specified node if found; otherwise, . + /// Task GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default); /// - /// Asynchronously inserts the specified snapshot metadata into the data store. + /// Asynchronously inserts the specified snapshot metadata into the data store. /// /// The snapshot metadata to insert. Cannot be null. 
/// A cancellation token that can be used to cancel the asynchronous operation. @@ -24,7 +29,7 @@ public interface ISnapshotMetadataStore : ISnapshotable Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, CancellationToken cancellationToken = default); /// - /// Asynchronously updates the metadata for an existing snapshot. + /// Asynchronously updates the metadata for an existing snapshot. /// /// The metadata object representing the snapshot to update. Cannot be null. /// A cancellation token that can be used to cancel the asynchronous operation. @@ -32,7 +37,7 @@ public interface ISnapshotMetadataStore : ISnapshotable Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, CancellationToken cancellationToken = default); /// - /// Asynchronously retrieves the hash of the current snapshot for the specified node. + /// Asynchronously retrieves the hash of the current snapshot for the specified node. /// /// The unique identifier of the node for which to obtain the snapshot hash. /// A cancellation token that can be used to cancel the operation. @@ -40,9 +45,9 @@ public interface ISnapshotMetadataStore : ISnapshotable Task GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default); /// - /// Gets all snapshot metadata entries. Used for initializing VectorClock cache. + /// Gets all snapshot metadata entries. Used for initializing VectorClock cache. /// /// A cancellation token. /// All snapshot metadata entries. 
Task> GetAllSnapshotMetadataAsync(CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotService.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotService.cs index 31c8e63..cf391ce 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotService.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotService.cs @@ -5,12 +5,12 @@ using System.Threading.Tasks; namespace ZB.MOM.WW.CBDDC.Core.Storage; /// -/// Handles full database lifecycle operations such as snapshots, replacement, and clearing data. +/// Handles full database lifecycle operations such as snapshots, replacement, and clearing data. /// public interface ISnapshotService { /// - /// Asynchronously creates a snapshot of the current state and writes it to the specified destination stream. + /// Asynchronously creates a snapshot of the current state and writes it to the specified destination stream. /// /// The stream to which the snapshot data will be written. /// A cancellation token that can be used to cancel the snapshot creation operation. @@ -18,7 +18,7 @@ public interface ISnapshotService Task CreateSnapshotAsync(Stream destination, CancellationToken cancellationToken = default); /// - /// Replaces the existing database with the contents provided in the specified stream asynchronously. + /// Replaces the existing database with the contents provided in the specified stream asynchronously. /// /// A stream containing the new database data to be used for replacement. /// A cancellation token that can be used to cancel the operation. @@ -26,10 +26,10 @@ public interface ISnapshotService Task ReplaceDatabaseAsync(Stream databaseStream, CancellationToken cancellationToken = default); /// - /// Merges the provided snapshot stream into the current data store asynchronously. + /// Merges the provided snapshot stream into the current data store asynchronously. /// /// A stream containing the snapshot data to be merged. 
/// A cancellation token that can be used to cancel the merge operation. /// A task that represents the asynchronous merge operation. Task MergeSnapshotAsync(Stream snapshotStream, CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotable.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotable.cs index 7737ae2..9c12ca7 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotable.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/ISnapshotable.cs @@ -7,24 +7,28 @@ namespace ZB.MOM.WW.CBDDC.Core.Storage; public interface ISnapshotable { /// - /// Asynchronously deletes the underlying data store and all of its contents. + /// Asynchronously deletes the underlying data store and all of its contents. /// /// A cancellation token that can be used to cancel the drop operation. - /// After calling this method, the data store and all stored data will be permanently removed. - /// This operation cannot be undone. Any further operations on the data store may result in errors. + /// + /// After calling this method, the data store and all stored data will be permanently removed. + /// This operation cannot be undone. Any further operations on the data store may result in errors. + /// /// A task that represents the asynchronous drop operation. Task DropAsync(CancellationToken cancellationToken = default); /// - /// Asynchronously exports a collection of items of type T. + /// Asynchronously exports a collection of items of type T. /// /// A cancellation token that can be used to cancel the export operation. - /// A task that represents the asynchronous export operation. The task result contains an enumerable collection of - /// exported items of type T. + /// + /// A task that represents the asynchronous export operation. The task result contains an enumerable collection of + /// exported items of type T. 
+ /// Task> ExportAsync(CancellationToken cancellationToken = default); /// - /// Imports the specified collection of items asynchronously. + /// Imports the specified collection of items asynchronously. /// /// The collection of items to import. Cannot be null. Each item will be processed in sequence. /// A cancellation token that can be used to cancel the import operation. @@ -32,13 +36,15 @@ public interface ISnapshotable Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default); /// - /// Merges the specified collection of items into the target data store asynchronously. + /// Merges the specified collection of items into the target data store asynchronously. /// - /// If the operation is canceled via the provided cancellation token, the returned task will be - /// in a canceled state. The merge operation may update existing items or add new items, depending on the - /// implementation. + /// + /// If the operation is canceled via the provided cancellation token, the returned task will be + /// in a canceled state. The merge operation may update existing items or add new items, depending on the + /// implementation. + /// /// The collection of items to merge into the data store. Cannot be null. /// A cancellation token that can be used to cancel the merge operation. /// A task that represents the asynchronous merge operation. 
Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Storage/IVectorClockService.cs b/src/ZB.MOM.WW.CBDDC.Core/Storage/IVectorClockService.cs index 6bbed68..5db170f 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Storage/IVectorClockService.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Storage/IVectorClockService.cs @@ -1,49 +1,49 @@ -using System.Threading; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Core.Storage; - +using System.Threading; +using System.Threading.Tasks; + +namespace ZB.MOM.WW.CBDDC.Core.Storage; + /// -/// Manages Vector Clock state for the local node. -/// Tracks the latest timestamp and hash per node for sync coordination. +/// Manages Vector Clock state for the local node. +/// Tracks the latest timestamp and hash per node for sync coordination. /// public interface IVectorClockService { /// - /// Indicates whether the cache has been populated with initial data. - /// Reset to false by . + /// Indicates whether the cache has been populated with initial data. + /// Reset to false by . /// bool IsInitialized { get; set; } /// - /// Updates the cache with a new OplogEntry's timestamp and hash. - /// Called by both DocumentStore (local CDC) and OplogStore (remote sync). + /// Updates the cache with a new OplogEntry's timestamp and hash. + /// Called by both DocumentStore (local CDC) and OplogStore (remote sync). /// /// The oplog entry containing timestamp and hash data. void Update(OplogEntry entry); /// - /// Returns the current Vector Clock built from cached node timestamps. + /// Returns the current Vector Clock built from cached node timestamps. /// /// A token used to cancel the operation. Task GetVectorClockAsync(CancellationToken cancellationToken = default); /// - /// Returns the latest known timestamp across all nodes. + /// Returns the latest known timestamp across all nodes. /// /// A token used to cancel the operation. 
Task GetLatestTimestampAsync(CancellationToken cancellationToken = default); /// - /// Returns the last known hash for the specified node. - /// Returns null if the node is unknown. + /// Returns the last known hash for the specified node. + /// Returns null if the node is unknown. /// /// The node identifier. string? GetLastHash(string nodeId); /// - /// Updates the cache with a specific node's timestamp and hash. - /// Used for snapshot metadata fallback. + /// Updates the cache with a specific node's timestamp and hash. + /// Used for snapshot metadata fallback. /// /// The node identifier. /// The timestamp to store for the node. @@ -51,8 +51,8 @@ public interface IVectorClockService void UpdateNode(string nodeId, HlcTimestamp timestamp, string hash); /// - /// Clears the cache and resets to false, - /// forcing re-initialization on next access. + /// Clears the cache and resets to false, + /// forcing re-initialization on next access. /// void Invalidate(); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Sync/IConflictResolver.cs b/src/ZB.MOM.WW.CBDDC.Core/Sync/IConflictResolver.cs index 432f45b..10eb508 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Sync/IConflictResolver.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Sync/IConflictResolver.cs @@ -1,21 +1,9 @@ -using System.Text.Json; -using ZB.MOM.WW.CBDDC.Core; - -namespace ZB.MOM.WW.CBDDC.Core.Sync; - +namespace ZB.MOM.WW.CBDDC.Core.Sync; + public class ConflictResolutionResult { /// - /// Gets a value indicating whether the remote change should be applied. - /// - public bool ShouldApply { get; } - /// - /// Gets the merged document to apply when conflict resolution produced one. - /// - public Document? MergedDocument { get; } - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// Indicates whether the change should be applied. /// The merged document produced by resolution, if any. 
@@ -26,25 +14,42 @@ public class ConflictResolutionResult } /// - /// Creates a result indicating that the resolved document should be applied. + /// Gets a value indicating whether the remote change should be applied. + /// + public bool ShouldApply { get; } + + /// + /// Gets the merged document to apply when conflict resolution produced one. + /// + public Document? MergedDocument { get; } + + /// + /// Creates a result indicating that the resolved document should be applied. /// /// The merged document to apply. /// A resolution result that applies the provided document. - public static ConflictResolutionResult Apply(Document document) => new(true, document); + public static ConflictResolutionResult Apply(Document document) + { + return new ConflictResolutionResult(true, document); + } + /// - /// Creates a result indicating that the remote change should be ignored. + /// Creates a result indicating that the remote change should be ignored. /// /// A resolution result that skips applying the remote change. - public static ConflictResolutionResult Ignore() => new(false, null); + public static ConflictResolutionResult Ignore() + { + return new ConflictResolutionResult(false, null); + } } public interface IConflictResolver { /// - /// Resolves a conflict between local state and a remote oplog entry. + /// Resolves a conflict between local state and a remote oplog entry. /// /// The local document state, if present. /// The incoming remote oplog entry. /// The resolution outcome indicating whether and how to apply changes. ConflictResolutionResult Resolve(Document? 
local, OplogEntry remote); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Sync/IOfflineQueue.cs b/src/ZB.MOM.WW.CBDDC.Core/Sync/IOfflineQueue.cs index 2928136..ec1a284 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Sync/IOfflineQueue.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Sync/IOfflineQueue.cs @@ -1,40 +1,40 @@ -ο»Ώusing System; -using System.Threading; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Core.Sync +ο»Ώusing System; +using System.Threading; +using System.Threading.Tasks; + +namespace ZB.MOM.WW.CBDDC.Core.Sync; + +/// +/// Represents a queue for operations that should be executed when connectivity is restored. +/// +public interface IOfflineQueue { /// - /// Represents a queue for operations that should be executed when connectivity is restored. + /// Gets the number of pending operations in the queue. /// - public interface IOfflineQueue - { - /// - /// Gets the number of pending operations in the queue. - /// - int Count { get; } + int Count { get; } - /// - /// Clears all pending operations from the queue. - /// - /// A task that represents the asynchronous operation. - Task Clear(); + /// + /// Clears all pending operations from the queue. + /// + /// A task that represents the asynchronous operation. + Task Clear(); - /// - /// Enqueues a pending operation. - /// - /// The operation to enqueue. - /// A task that represents the asynchronous operation. - Task Enqueue(PendingOperation operation); + /// + /// Enqueues a pending operation. + /// + /// The operation to enqueue. + /// A task that represents the asynchronous operation. + Task Enqueue(PendingOperation operation); - /// - /// Flushes the queue by executing each pending operation. - /// - /// The delegate used to execute each operation. - /// A token used to cancel the flush operation. - /// - /// A task that returns a tuple containing the number of successful and failed operations. 
- /// - Task<(int Successful, int Failed)> FlushAsync(Func executor, CancellationToken cancellationToken = default); - } -} + /// + /// Flushes the queue by executing each pending operation. + /// + /// The delegate used to execute each operation. + /// A token used to cancel the flush operation. + /// + /// A task that returns a tuple containing the number of successful and failed operations. + /// + Task<(int Successful, int Failed)> FlushAsync(Func executor, + CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Sync/LastWriteWinsConflictResolver.cs b/src/ZB.MOM.WW.CBDDC.Core/Sync/LastWriteWinsConflictResolver.cs index 9192b12..2f6bef4 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Sync/LastWriteWinsConflictResolver.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Sync/LastWriteWinsConflictResolver.cs @@ -1,24 +1,22 @@ -using System.Text.Json; -using ZB.MOM.WW.CBDDC.Core; - namespace ZB.MOM.WW.CBDDC.Core.Sync; -public class LastWriteWinsConflictResolver : IConflictResolver -{ - /// - /// Resolves document conflicts by preferring the entry with the latest timestamp. - /// - /// The local document, if available. - /// The incoming remote oplog entry. - /// The conflict resolution result indicating whether to apply or ignore the remote change. - public ConflictResolutionResult Resolve(Document? local, OplogEntry remote) - { +public class LastWriteWinsConflictResolver : IConflictResolver +{ + /// + /// Resolves document conflicts by preferring the entry with the latest timestamp. + /// + /// The local document, if available. + /// The incoming remote oplog entry. + /// The conflict resolution result indicating whether to apply or ignore the remote change. + public ConflictResolutionResult Resolve(Document? local, OplogEntry remote) + { // If no local document exists, always apply remote change if (local == null) { // Construct new document from oplog entry var content = remote.Payload ?? 
default; - var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete); + var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, + remote.Operation == OperationType.Delete); return ConflictResolutionResult.Apply(newDoc); } @@ -27,11 +25,12 @@ public class LastWriteWinsConflictResolver : IConflictResolver { // Remote is newer, apply it var content = remote.Payload ?? default; - var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete); + var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, + remote.Operation == OperationType.Delete); return ConflictResolutionResult.Apply(newDoc); } // Local is newer or equal, ignore remote return ConflictResolutionResult.Ignore(); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Sync/OfflineQueue.cs b/src/ZB.MOM.WW.CBDDC.Core/Sync/OfflineQueue.cs index b324d48..74aa81d 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Sync/OfflineQueue.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Sync/OfflineQueue.cs @@ -1,37 +1,38 @@ -using ZB.MOM.WW.CBDDC.Core.Network; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using System; -using System.Collections.Generic; -using System.Linq; +using System; +using System.Collections.Generic; +using System.Linq; using System.Threading; using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Sync; /// -/// Queue for operations performed while offline. +/// Queue for operations performed while offline. 
/// -public class OfflineQueue : IOfflineQueue -{ - private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; - private readonly Queue _queue = new(); - private readonly ILogger _logger; - private readonly object _lock = new(); - - /// - /// Initializes a new instance of the class. - /// - /// The configuration provider used for queue limits. - /// The logger instance. - public OfflineQueue(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger? logger = null) - { - _peerNodeConfigurationProvider = peerNodeConfigurationProvider; - _logger = logger ?? NullLogger.Instance; - } +public class OfflineQueue : IOfflineQueue +{ + private readonly object _lock = new(); + private readonly ILogger _logger; + private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; + private readonly Queue _queue = new(); /// - /// Gets the number of pending operations. + /// Initializes a new instance of the class. + /// + /// The configuration provider used for queue limits. + /// The logger instance. + public OfflineQueue(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, + ILogger? logger = null) + { + _peerNodeConfigurationProvider = peerNodeConfigurationProvider; + _logger = logger ?? NullLogger.Instance; + } + + /// + /// Gets the number of pending operations. /// public int Count { @@ -44,15 +45,15 @@ public class OfflineQueue : IOfflineQueue } } - /// - /// Enqueues an operation for later execution. - /// - /// The pending operation to enqueue. - /// A task that represents the asynchronous enqueue operation. - public async Task Enqueue(PendingOperation operation) - { - var config = await _peerNodeConfigurationProvider.GetConfiguration(); - lock (_lock) + /// + /// Enqueues an operation for later execution. + /// + /// The pending operation to enqueue. + /// A task that represents the asynchronous enqueue operation. 
+ public async Task Enqueue(PendingOperation operation) + { + var config = await _peerNodeConfigurationProvider.GetConfiguration(); + lock (_lock) { if (_queue.Count >= config.MaxQueueSize) { @@ -67,15 +68,16 @@ public class OfflineQueue : IOfflineQueue } } - /// - /// Flushes all pending operations. - /// - /// The delegate that executes each pending operation. - /// A token used to cancel the operation. - /// A task whose result contains the number of successful and failed operations. - public async Task<(int Successful, int Failed)> FlushAsync(Func executor, CancellationToken cancellationToken = default) - { - List operations; + /// + /// Flushes all pending operations. + /// + /// The delegate that executes each pending operation. + /// A token used to cancel the operation. + /// A task whose result contains the number of successful and failed operations. + public async Task<(int Successful, int Failed)> FlushAsync(Func executor, + CancellationToken cancellationToken = default) + { + List operations; lock (_lock) { @@ -91,11 +93,10 @@ public class OfflineQueue : IOfflineQueue _logger.LogInformation("Flushing {Count} pending operations", operations.Count); - int successful = 0; - int failed = 0; + var successful = 0; + var failed = 0; foreach (var op in operations) - { try { await executor(op); @@ -107,7 +108,6 @@ public class OfflineQueue : IOfflineQueue _logger.LogError(ex, "Failed to execute pending {Type} operation for {Collection}:{Key}", op.Type, op.Collection, op.Key); } - } _logger.LogInformation("Flush completed: {Successful} successful, {Failed} failed", successful, failed); @@ -116,15 +116,15 @@ public class OfflineQueue : IOfflineQueue } /// - /// Clears all pending operations. + /// Clears all pending operations. 
/// public async Task Clear() { lock (_lock) { - var count = _queue.Count; + var count = _queue.Count; _queue.Clear(); _logger.LogInformation("Cleared {Count} pending operations", count); } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Sync/PendingOperation.cs b/src/ZB.MOM.WW.CBDDC.Core/Sync/PendingOperation.cs index 9d67ef6..4931ee9 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Sync/PendingOperation.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Sync/PendingOperation.cs @@ -1,32 +1,34 @@ -using System; -using System.Threading; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Core.Sync; - -/// -/// Represents a pending operation to be executed when connection is restored. -/// +using System; + +namespace ZB.MOM.WW.CBDDC.Core.Sync; + +/// +/// Represents a pending operation to be executed when connection is restored. +/// public class PendingOperation { /// - /// Gets or sets the operation type. + /// Gets or sets the operation type. /// public string Type { get; set; } = ""; + /// - /// Gets or sets the collection targeted by the operation. + /// Gets or sets the collection targeted by the operation. /// public string Collection { get; set; } = ""; + /// - /// Gets or sets the document key targeted by the operation. + /// Gets or sets the document key targeted by the operation. /// public string Key { get; set; } = ""; + /// - /// Gets or sets the payload associated with the operation. + /// Gets or sets the payload associated with the operation. /// public object? Data { get; set; } + /// - /// Gets or sets the UTC time when the operation was queued. + /// Gets or sets the UTC time when the operation was queued. 
/// public DateTime QueuedAt { get; set; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/Sync/RecursiveNodeMergeConflictResolver.cs b/src/ZB.MOM.WW.CBDDC.Core/Sync/RecursiveNodeMergeConflictResolver.cs index fe7c2ff..1fb5134 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/Sync/RecursiveNodeMergeConflictResolver.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/Sync/RecursiveNodeMergeConflictResolver.cs @@ -1,28 +1,27 @@ -using System; using System.Buffers; using System.Collections.Generic; -using System.IO; using System.Text.Json; -namespace ZB.MOM.WW.CBDDC.Core.Sync; - -/// -/// Resolves merge conflicts by recursively merging object and array nodes. -/// -public class RecursiveNodeMergeConflictResolver : IConflictResolver -{ - /// - /// Resolves a conflict between a local document and a remote operation. - /// - /// The local document, or if none exists. - /// The remote operation to apply. - /// The conflict resolution result indicating whether and what to apply. - public ConflictResolutionResult Resolve(Document? local, OplogEntry remote) - { - if (local == null) - { +namespace ZB.MOM.WW.CBDDC.Core.Sync; + +/// +/// Resolves merge conflicts by recursively merging object and array nodes. +/// +public class RecursiveNodeMergeConflictResolver : IConflictResolver +{ + /// + /// Resolves a conflict between a local document and a remote operation. + /// + /// The local document, or if none exists. + /// The remote operation to apply. + /// The conflict resolution result indicating whether and what to apply. + public ConflictResolutionResult Resolve(Document? local, OplogEntry remote) + { + if (local == null) + { var content = remote.Payload ?? 
default; - var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete); + var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, + remote.Operation == OperationType.Delete); return ConflictResolutionResult.Apply(newDoc); } @@ -33,6 +32,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver var newDoc = new Document(remote.Collection, remote.Key, default, remote.Timestamp, true); return ConflictResolutionResult.Apply(newDoc); } + return ConflictResolutionResult.Ignore(); } @@ -41,12 +41,14 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver var localTs = local.UpdatedAt; var remoteTs = remote.Timestamp; - if (localJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Apply(new Document(remote.Collection, remote.Key, remoteJson, remoteTs, false)); - if (remoteJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Ignore(); - - // Optimization: Use ArrayBufferWriter (Net6.0) or MemoryStream (NS2.0) - // Utf8JsonWriter works with both, but ArrayBufferWriter is more efficient for high throughput. - + if (localJson.ValueKind == JsonValueKind.Undefined) + return ConflictResolutionResult.Apply(new Document(remote.Collection, remote.Key, remoteJson, remoteTs, + false)); + if (remoteJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Ignore(); + + // Optimization: Use ArrayBufferWriter (Net6.0) or MemoryStream (NS2.0) + // Utf8JsonWriter works with both, but ArrayBufferWriter is more efficient for high throughput. 
+ JsonElement mergedDocJson; #if NET6_0_OR_GREATER @@ -55,7 +57,8 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver { MergeJson(writer, localJson, localTs, remoteJson, remoteTs); } - mergedDocJson = JsonDocument.Parse(bufferWriter.WrittenMemory).RootElement; + + mergedDocJson = JsonDocument.Parse(bufferWriter.WrittenMemory).RootElement; #else using (var ms = new MemoryStream()) { @@ -67,13 +70,14 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver mergedDocJson = JsonDocument.Parse(ms.ToArray()).RootElement; } #endif - + var maxTimestamp = remoteTs.CompareTo(localTs) > 0 ? remoteTs : localTs; var mergedDoc = new Document(remote.Collection, remote.Key, mergedDocJson, maxTimestamp, false); return ConflictResolutionResult.Apply(mergedDoc); } - private void MergeJson(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs) + private void MergeJson(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, + HlcTimestamp remoteTs) { if (local.ValueKind != remote.ValueKind) { @@ -93,7 +97,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver break; default: // Primitives - if (local.GetRawText() == remote.GetRawText()) + if (local.GetRawText() == remote.GetRawText()) { local.WriteTo(writer); } @@ -102,54 +106,51 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver if (remoteTs.CompareTo(localTs) > 0) remote.WriteTo(writer); else local.WriteTo(writer); } + break; } } - private void MergeObjects(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs) + private void MergeObjects(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, + HlcTimestamp remoteTs) { - writer.WriteStartObject(); - - // We need to iterate keys. To avoid double iteration efficiently, we can use a dictionary for the UNION of keys. 
- // But populating a dictionary is effectively what we did before. - // Can we do better? - // Yes: Iterate Local, write merged/local. Track handled keys. Then iterate Remote, write remaining. - + writer.WriteStartObject(); + + // We need to iterate keys. To avoid double iteration efficiently, we can use a dictionary for the UNION of keys. + // But populating a dictionary is effectively what we did before. + // Can we do better? + // Yes: Iterate Local, write merged/local. Track handled keys. Then iterate Remote, write remaining. + var processedKeys = new HashSet(); foreach (var prop in local.EnumerateObject()) { - var key = prop.Name; + string key = prop.Name; processedKeys.Add(key); // Mark as processed writer.WritePropertyName(key); if (remote.TryGetProperty(key, out var remoteVal)) - { // Collision -> Merge MergeJson(writer, prop.Value, localTs, remoteVal, remoteTs); - } else - { // Only local prop.Value.WriteTo(writer); - } } foreach (var prop in remote.EnumerateObject()) - { if (!processedKeys.Contains(prop.Name)) { // New from remote writer.WritePropertyName(prop.Name); prop.Value.WriteTo(writer); } - } writer.WriteEndObject(); } - private void MergeArrays(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs) + private void MergeArrays(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, + HlcTimestamp remoteTs) { // Heuristic check bool localIsObj = HasObjects(local); @@ -164,10 +165,10 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver } if (localIsObj != remoteIsObj) - { - // Mixed mistmatch LWW - if (remoteTs.CompareTo(localTs) > 0) remote.WriteTo(writer); - else local.WriteTo(writer); + { + // Mixed mistmatch LWW + if (remoteTs.CompareTo(localTs) > 0) remote.WriteTo(writer); + else local.WriteTo(writer); return; } @@ -184,44 +185,36 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver return; } - writer.WriteStartArray(); - - // We 
want to write Union of items by ID. - // To preserve some semblance of order (or just determinism), we can iterate local IDs first, then remote new IDs. - // Or just use the dictionary values. - - // NOTE: We cannot simply write to writer inside the map loop if we are creating a merged map. - // Let's iterate the union of keys similar to Objects. - + writer.WriteStartArray(); + + // We want to write Union of items by ID. + // To preserve some semblance of order (or just determinism), we can iterate local IDs first, then remote new IDs. + // Or just use the dictionary values. + + // NOTE: We cannot simply write to writer inside the map loop if we are creating a merged map. + // Let's iterate the union of keys similar to Objects. + var processedIds = new HashSet(); // 1. Process Local Items (Merge or Write) foreach (var kvp in localMap) { - var id = kvp.Key; + string id = kvp.Key; var localItem = kvp.Value; processedIds.Add(id); if (remoteMap.TryGetValue(id, out var remoteItem)) - { // Merge recursively MergeJson(writer, localItem, localTs, remoteItem, remoteTs); - } else - { // Keep local item localItem.WriteTo(writer); - } } // 2. Process New Remote Items foreach (var kvp in remoteMap) - { if (!processedIds.Contains(kvp.Key)) - { kvp.Value.WriteTo(writer); - } - } writer.WriteEndArray(); } @@ -249,6 +242,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver map[id] = item; } + return map; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/VectorClock.cs b/src/ZB.MOM.WW.CBDDC.Core/VectorClock.cs index f9281c1..12a6ada 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/VectorClock.cs +++ b/src/ZB.MOM.WW.CBDDC.Core/VectorClock.cs @@ -5,86 +5,84 @@ using System.Linq; namespace ZB.MOM.WW.CBDDC.Core; /// -/// Represents a Vector Clock for tracking causality in a distributed system. -/// Maps NodeId -> HlcTimestamp to track the latest known state of each node. +/// Represents a Vector Clock for tracking causality in a distributed system. 
+/// Maps NodeId -> HlcTimestamp to track the latest known state of each node. /// -public class VectorClock -{ - private readonly Dictionary _clock; - - /// - /// Initializes a new empty vector clock. - /// - public VectorClock() - { - _clock = new Dictionary(StringComparer.Ordinal); - } - - /// - /// Initializes a new vector clock from an existing clock state. - /// - /// The clock state to copy. - public VectorClock(Dictionary clock) - { - _clock = new Dictionary(clock, StringComparer.Ordinal); - } +public class VectorClock +{ + private readonly Dictionary _clock; /// - /// Gets all node IDs in this vector clock. + /// Initializes a new empty vector clock. + /// + public VectorClock() + { + _clock = new Dictionary(StringComparer.Ordinal); + } + + /// + /// Initializes a new vector clock from an existing clock state. + /// + /// The clock state to copy. + public VectorClock(Dictionary clock) + { + _clock = new Dictionary(clock, StringComparer.Ordinal); + } + + /// + /// Gets all node IDs in this vector clock. /// public IEnumerable NodeIds => _clock.Keys; - /// - /// Gets the timestamp for a specific node, or default if not present. - /// - /// The node identifier. - public HlcTimestamp GetTimestamp(string nodeId) - { - return _clock.TryGetValue(nodeId, out var ts) ? ts : default; - } + /// + /// Gets the timestamp for a specific node, or default if not present. + /// + /// The node identifier. + public HlcTimestamp GetTimestamp(string nodeId) + { + return _clock.TryGetValue(nodeId, out var ts) ? ts : default; + } - /// - /// Sets or updates the timestamp for a specific node. - /// - /// The node identifier. - /// The timestamp to set. - public void SetTimestamp(string nodeId, HlcTimestamp timestamp) - { - _clock[nodeId] = timestamp; - } + /// + /// Sets or updates the timestamp for a specific node. + /// + /// The node identifier. + /// The timestamp to set. 
+ public void SetTimestamp(string nodeId, HlcTimestamp timestamp) + { + _clock[nodeId] = timestamp; + } - /// - /// Merges another vector clock into this one, taking the maximum timestamp for each node. - /// - /// The vector clock to merge from. - public void Merge(VectorClock other) - { - foreach (var nodeId in other.NodeIds) - { + /// + /// Merges another vector clock into this one, taking the maximum timestamp for each node. + /// + /// The vector clock to merge from. + public void Merge(VectorClock other) + { + foreach (string nodeId in other.NodeIds) + { var otherTs = other.GetTimestamp(nodeId); if (!_clock.TryGetValue(nodeId, out var currentTs) || otherTs.CompareTo(currentTs) > 0) - { _clock[nodeId] = otherTs; - } } } /// - /// Compares this vector clock with another to determine causality. - /// Returns: - /// - Positive: This is strictly ahead (dominates other) - /// - Negative: Other is strictly ahead (other dominates this) - /// - Zero: Concurrent (neither dominates) - /// - /// The vector clock to compare with. - public CausalityRelation CompareTo(VectorClock other) - { - bool thisAhead = false; - bool otherAhead = false; + /// Compares this vector clock with another to determine causality. + /// Returns: + /// - Positive: This is strictly ahead (dominates other) + /// - Negative: Other is strictly ahead (other dominates this) + /// - Zero: Concurrent (neither dominates) + /// + /// The vector clock to compare with. 
+ public CausalityRelation CompareTo(VectorClock other) + { + var thisAhead = false; + var otherAhead = false; var allNodes = new HashSet(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal); - foreach (var nodeId in allNodes) + foreach (string nodeId in allNodes) { var thisTs = GetTimestamp(nodeId); var otherTs = other.GetTimestamp(nodeId); @@ -92,19 +90,11 @@ public class VectorClock int cmp = thisTs.CompareTo(otherTs); if (cmp > 0) - { thisAhead = true; - } - else if (cmp < 0) - { - otherAhead = true; - } + else if (cmp < 0) otherAhead = true; // Early exit if concurrent - if (thisAhead && otherAhead) - { - return CausalityRelation.Concurrent; - } + if (thisAhead && otherAhead) return CausalityRelation.Concurrent; } if (thisAhead && !otherAhead) @@ -115,65 +105,56 @@ public class VectorClock return CausalityRelation.Equal; } - /// - /// Determines which nodes have updates that this vector clock doesn't have. - /// Returns node IDs where the other vector clock is ahead. - /// - /// The vector clock to compare against. - public IEnumerable GetNodesWithUpdates(VectorClock other) - { - var allNodes = new HashSet(_clock.Keys, StringComparer.Ordinal); - foreach (var nodeId in other._clock.Keys) - { - allNodes.Add(nodeId); - } + /// + /// Determines which nodes have updates that this vector clock doesn't have. + /// Returns node IDs where the other vector clock is ahead. + /// + /// The vector clock to compare against. + public IEnumerable GetNodesWithUpdates(VectorClock other) + { + var allNodes = new HashSet(_clock.Keys, StringComparer.Ordinal); + foreach (string nodeId in other._clock.Keys) allNodes.Add(nodeId); - foreach (var nodeId in allNodes) + foreach (string nodeId in allNodes) { var thisTs = GetTimestamp(nodeId); var otherTs = other.GetTimestamp(nodeId); - if (otherTs.CompareTo(thisTs) > 0) - { - yield return nodeId; - } - } - } - - /// - /// Determines which nodes have updates that the other vector clock doesn't have. 
- /// Returns node IDs where this vector clock is ahead. - /// - /// The vector clock to compare against. - public IEnumerable GetNodesToPush(VectorClock other) - { - var allNodes = new HashSet(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal); - - foreach (var nodeId in allNodes) - { - var thisTs = GetTimestamp(nodeId); - var otherTs = other.GetTimestamp(nodeId); - - if (thisTs.CompareTo(otherTs) > 0) - { - yield return nodeId; - } + if (otherTs.CompareTo(thisTs) > 0) yield return nodeId; } } /// - /// Creates a copy of this vector clock. + /// Determines which nodes have updates that the other vector clock doesn't have. + /// Returns node IDs where this vector clock is ahead. + /// + /// The vector clock to compare against. + public IEnumerable GetNodesToPush(VectorClock other) + { + var allNodes = new HashSet(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal); + + foreach (string nodeId in allNodes) + { + var thisTs = GetTimestamp(nodeId); + var otherTs = other.GetTimestamp(nodeId); + + if (thisTs.CompareTo(otherTs) > 0) yield return nodeId; + } + } + + /// + /// Creates a copy of this vector clock. /// public VectorClock Clone() { return new VectorClock(new Dictionary(_clock, StringComparer.Ordinal)); } - /// - public override string ToString() - { - if (_clock.Count == 0) - return "{}"; + /// + public override string ToString() + { + if (_clock.Count == 0) + return "{}"; var entries = _clock.Select(kvp => $"{kvp.Key}:{kvp.Value}"); return "{" + string.Join(", ", entries) + "}"; @@ -181,16 +162,19 @@ public class VectorClock } /// -/// Represents the causality relationship between two vector clocks. +/// Represents the causality relationship between two vector clocks. /// public enum CausalityRelation { /// Both vector clocks are equal. Equal, + /// This vector clock is strictly ahead (dominates). StrictlyAhead, + /// This vector clock is strictly behind (dominated). StrictlyBehind, + /// Vector clocks are concurrent (neither dominates). 
Concurrent -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj b/src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj index 668fb7b..bf088e0 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj +++ b/src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj @@ -1,33 +1,33 @@ - - ZB.MOM.WW.CBDDC.Core - ZB.MOM.WW.CBDDC.Core - ZB.MOM.WW.CBDDC.Core - net10.0 - latest - enable - 1.0.3 - MrDevRobot - Core abstractions and logic for CBDDC, a lightweight P2P mesh database. - MIT - p2p;mesh;database;gossip;cbddc;lan;offline-first;distributed - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - git - README.md - + + ZB.MOM.WW.CBDDC.Core + ZB.MOM.WW.CBDDC.Core + ZB.MOM.WW.CBDDC.Core + net10.0 + latest + enable + 1.0.3 + MrDevRobot + Core abstractions and logic for CBDDC, a lightweight P2P mesh database. + MIT + p2p;mesh;database;gossip;cbddc;lan;offline-first;distributed + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + git + README.md + - - - - - - - - - - - + + + + + + + + + + + diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/CBDDCHostingExtensions.cs b/src/ZB.MOM.WW.CBDDC.Hosting/CBDDCHostingExtensions.cs index 346b791..d4baceb 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/CBDDCHostingExtensions.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/CBDDCHostingExtensions.cs @@ -1,22 +1,22 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; using ZB.MOM.WW.CBDDC.Hosting.Configuration; using ZB.MOM.WW.CBDDC.Hosting.HealthChecks; using ZB.MOM.WW.CBDDC.Hosting.HostedServices; using ZB.MOM.WW.CBDDC.Hosting.Services; using ZB.MOM.WW.CBDDC.Network; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Diagnostics.HealthChecks; -using 
Microsoft.Extensions.Hosting; namespace ZB.MOM.WW.CBDDC.Hosting; /// -/// Extension methods for configuring CBDDC in ASP.NET Core applications. +/// Extension methods for configuring CBDDC in ASP.NET Core applications. /// public static class CBDDCHostingExtensions { /// - /// Adds CBDDC ASP.NET integration with the specified configuration. + /// Adds CBDDC ASP.NET integration with the specified configuration. /// /// The service collection. /// Action to configure CBDDC options. @@ -43,7 +43,7 @@ public static class CBDDCHostingExtensions } /// - /// Adds CBDDC ASP.NET integration for single-cluster mode. + /// Adds CBDDC ASP.NET integration for single-cluster mode. /// /// The service collection. /// Action to configure single-cluster options. @@ -51,10 +51,7 @@ public static class CBDDCHostingExtensions this IServiceCollection services, Action? configure = null) { - return services.AddCBDDCHosting(options => - { - configure?.Invoke(options.Cluster); - }); + return services.AddCBDDCHosting(options => { configure?.Invoke(options.Cluster); }); } private static void RegisterSingleClusterServices( @@ -81,12 +78,10 @@ public static class CBDDCHostingExtensions { // Health checks if (options.EnableHealthChecks) - { services.AddHealthChecks() .AddCheck( "cbddc", - failureStatus: HealthStatus.Unhealthy, - tags: new[] { "db", "ready" }); - } + HealthStatus.Unhealthy, + new[] { "db", "ready" }); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/CBDDCHostingOptions.cs b/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/CBDDCHostingOptions.cs index f3d35ae..23770ef 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/CBDDCHostingOptions.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/CBDDCHostingOptions.cs @@ -1,18 +1,18 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Configuration; /// -/// Configuration options for CBDDC ASP.NET integration. +/// Configuration options for CBDDC ASP.NET integration. 
/// public class CBDDCHostingOptions { /// - /// Gets or sets the cluster configuration. + /// Gets or sets the cluster configuration. /// public ClusterOptions Cluster { get; set; } = new(); /// - /// Gets or sets whether to enable health checks. - /// Default: true + /// Gets or sets whether to enable health checks. + /// Default: true /// public bool EnableHealthChecks { get; set; } = true; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/ClusterOptions.cs b/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/ClusterOptions.cs index b7e59ec..2f3ca0d 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/ClusterOptions.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/Configuration/ClusterOptions.cs @@ -1,40 +1,39 @@ -using System; - namespace ZB.MOM.WW.CBDDC.Hosting.Configuration; /// -/// Configuration options for cluster mode. +/// Configuration options for cluster mode. /// public class ClusterOptions { /// - /// Gets or sets the node identifier for this instance. + /// Gets or sets the node identifier for this instance. /// public string NodeId { get; set; } = Environment.MachineName; /// - /// Gets or sets the TCP port for sync operations. - /// Default: 5001 + /// Gets or sets the TCP port for sync operations. + /// Default: 5001 /// public int TcpPort { get; set; } = 5001; /// - /// Gets or sets whether to enable UDP discovery. - /// Default: false (disabled in server mode) + /// Gets or sets whether to enable UDP discovery. + /// Default: false (disabled in server mode) /// public bool EnableUdpDiscovery { get; set; } = false; /// - /// Gets or sets the lag threshold (in milliseconds) used to determine when a tracked peer is considered lagging. - /// Peers above this threshold degrade health status. - /// Default: 30,000 ms. + /// Gets or sets the lag threshold (in milliseconds) used to determine when a tracked peer is considered lagging. + /// Peers above this threshold degrade health status. + /// Default: 30,000 ms. 
/// public long PeerConfirmationLagThresholdMs { get; set; } = 30_000; /// - /// Gets or sets the critical lag threshold (in milliseconds) used to determine when a tracked peer causes unhealthy status. - /// Peers above this threshold mark health as unhealthy. - /// Default: 120,000 ms. + /// Gets or sets the critical lag threshold (in milliseconds) used to determine when a tracked peer causes unhealthy + /// status. + /// Peers above this threshold mark health as unhealthy. + /// Default: 120,000 ms. /// public long PeerConfirmationCriticalLagThresholdMs { get; set; } = 120_000; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/HealthChecks/CBDDCHealthCheck.cs b/src/ZB.MOM.WW.CBDDC.Hosting/HealthChecks/CBDDCHealthCheck.cs index da916ed..1f3d3bc 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/HealthChecks/CBDDCHealthCheck.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/HealthChecks/CBDDCHealthCheck.cs @@ -1,8 +1,3 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; using Microsoft.Extensions.Diagnostics.HealthChecks; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Hosting.Configuration; @@ -10,17 +5,17 @@ using ZB.MOM.WW.CBDDC.Hosting.Configuration; namespace ZB.MOM.WW.CBDDC.Hosting.HealthChecks; /// -/// Health check for CBDDC persistence layer. -/// Verifies that the database connection is healthy. +/// Health check for CBDDC persistence layer. +/// Verifies that the database connection is healthy. /// public class CBDDCHealthCheck : IHealthCheck { private readonly IOplogStore _oplogStore; - private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore; private readonly CBDDCHostingOptions _options; + private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The oplog store used to verify persistence health. 
/// The peer confirmation store used for confirmation lag health checks. @@ -31,16 +26,17 @@ public class CBDDCHealthCheck : IHealthCheck CBDDCHostingOptions options) { _oplogStore = oplogStore ?? throw new ArgumentNullException(nameof(oplogStore)); - _peerOplogConfirmationStore = peerOplogConfirmationStore ?? throw new ArgumentNullException(nameof(peerOplogConfirmationStore)); + _peerOplogConfirmationStore = peerOplogConfirmationStore ?? + throw new ArgumentNullException(nameof(peerOplogConfirmationStore)); _options = options ?? throw new ArgumentNullException(nameof(options)); } /// - /// Performs a health check against the CBDDC persistence layer. + /// Performs a health check against the CBDDC persistence layer. /// /// The health check execution context. /// A token used to cancel the health check. - /// A describing the health status. + /// A describing the health status. public async Task CheckHealthAsync( HealthCheckContext context, CancellationToken cancellationToken = default) @@ -58,15 +54,18 @@ public class CBDDCHealthCheck : IHealthCheck var peersWithNoConfirmation = new List(); var laggingPeers = new List(); var criticalLaggingPeers = new List(); - var lastSuccessfulConfirmationUpdateByPeer = new Dictionary(StringComparer.Ordinal); + var lastSuccessfulConfirmationUpdateByPeer = + new Dictionary(StringComparer.Ordinal); var maxLagMs = 0L; - var lagThresholdMs = Math.Max(0, _options.Cluster.PeerConfirmationLagThresholdMs); - var criticalLagThresholdMs = Math.Max(lagThresholdMs, _options.Cluster.PeerConfirmationCriticalLagThresholdMs); + long lagThresholdMs = Math.Max(0, _options.Cluster.PeerConfirmationLagThresholdMs); + long criticalLagThresholdMs = + Math.Max(lagThresholdMs, _options.Cluster.PeerConfirmationCriticalLagThresholdMs); - foreach (var peerNodeId in trackedPeers) + foreach (string peerNodeId in trackedPeers) { - var confirmations = (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken)) + var 
confirmations = + (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken)) .Where(confirmation => confirmation.IsActive) .ToList(); @@ -83,19 +82,14 @@ public class CBDDCHealthCheck : IHealthCheck .ThenBy(confirmation => confirmation.ConfirmedLogic) .First(); - var lagMs = Math.Max(0, localHead.PhysicalTime - oldestConfirmation.ConfirmedWall); + long lagMs = Math.Max(0, localHead.PhysicalTime - oldestConfirmation.ConfirmedWall); maxLagMs = Math.Max(maxLagMs, lagMs); - lastSuccessfulConfirmationUpdateByPeer[peerNodeId] = confirmations.Max(confirmation => confirmation.LastConfirmedUtc); + lastSuccessfulConfirmationUpdateByPeer[peerNodeId] = + confirmations.Max(confirmation => confirmation.LastConfirmedUtc); - if (lagMs > lagThresholdMs) - { - laggingPeers.Add(peerNodeId); - } + if (lagMs > lagThresholdMs) laggingPeers.Add(peerNodeId); - if (lagMs > criticalLagThresholdMs) - { - criticalLaggingPeers.Add(peerNodeId); - } + if (lagMs > criticalLagThresholdMs) criticalLaggingPeers.Add(peerNodeId); } var payload = new Dictionary @@ -108,18 +102,14 @@ public class CBDDCHealthCheck : IHealthCheck }; if (criticalLaggingPeers.Count > 0) - { return HealthCheckResult.Unhealthy( $"CBDDC is unhealthy. Critical lag detected for {criticalLaggingPeers.Count} tracked peer(s).", data: payload); - } if (peersWithNoConfirmation.Count > 0 || laggingPeers.Count > 0) - { return HealthCheckResult.Degraded( $"CBDDC is degraded. Lagging peers: {laggingPeers.Count}, unconfirmed peers: {peersWithNoConfirmation.Count}.", data: payload); - } return HealthCheckResult.Healthy( $"CBDDC is healthy. 
Latest timestamp: {localHead.PhysicalTime}.", @@ -129,7 +119,7 @@ public class CBDDCHealthCheck : IHealthCheck { return HealthCheckResult.Unhealthy( "CBDDC persistence layer is unavailable", - exception: ex); + ex); } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/DiscoveryServiceHostedService.cs b/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/DiscoveryServiceHostedService.cs index 2abe7f0..18180f6 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/DiscoveryServiceHostedService.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/DiscoveryServiceHostedService.cs @@ -1,5 +1,3 @@ -using System.Threading; -using System.Threading.Tasks; using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; using Serilog.Context; @@ -8,7 +6,7 @@ using ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Hosting.HostedServices; /// -/// Hosted service that manages the lifecycle of the discovery service. +/// Hosted service that manages the lifecycle of the discovery service. /// public class DiscoveryServiceHostedService : IHostedService { @@ -16,7 +14,7 @@ public class DiscoveryServiceHostedService : IHostedService private readonly ILogger _logger; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The discovery service to manage. /// The logger used for service lifecycle events. @@ -29,7 +27,7 @@ public class DiscoveryServiceHostedService : IHostedService } /// - /// Starts the discovery service. + /// Starts the discovery service. /// /// A token used to cancel the startup operation. /// A task that represents the asynchronous start operation. @@ -45,7 +43,7 @@ public class DiscoveryServiceHostedService : IHostedService } /// - /// Stops the discovery service. + /// Stops the discovery service. /// /// A token used to cancel the shutdown operation. /// A task that represents the asynchronous stop operation. 
@@ -59,4 +57,4 @@ public class DiscoveryServiceHostedService : IHostedService await _discoveryService.Stop(); _logger.LogInformation("Discovery Service stopped"); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/TcpSyncServerHostedService.cs b/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/TcpSyncServerHostedService.cs index bf2b691..cc188eb 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/TcpSyncServerHostedService.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/HostedServices/TcpSyncServerHostedService.cs @@ -1,5 +1,3 @@ -using System.Threading; -using System.Threading.Tasks; using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; using Serilog.Context; @@ -8,15 +6,15 @@ using ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Hosting.HostedServices; /// -/// Hosted service that manages the lifecycle of the TCP sync server. +/// Hosted service that manages the lifecycle of the TCP sync server. /// public class TcpSyncServerHostedService : IHostedService { - private readonly ISyncServer _syncServer; private readonly ILogger _logger; + private readonly ISyncServer _syncServer; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The sync server to start and stop. /// The logger instance. @@ -29,7 +27,7 @@ public class TcpSyncServerHostedService : IHostedService } /// - /// Starts the TCP sync server. + /// Starts the TCP sync server. /// /// A token used to cancel startup. public async Task StartAsync(CancellationToken cancellationToken) @@ -44,7 +42,7 @@ public class TcpSyncServerHostedService : IHostedService } /// - /// Stops the TCP sync server. + /// Stops the TCP sync server. /// /// A token used to cancel shutdown. 
public async Task StopAsync(CancellationToken cancellationToken) @@ -57,4 +55,4 @@ public class TcpSyncServerHostedService : IHostedService await _syncServer.Stop(); _logger.LogInformation("TCP Sync Server stopped"); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/README.md b/src/ZB.MOM.WW.CBDDC.Hosting/README.md index 672fc71..330af2d 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/README.md +++ b/src/ZB.MOM.WW.CBDDC.Hosting/README.md @@ -41,6 +41,7 @@ app.Run(); ## Health Checks CBDDC registers health checks that verify: + - Database connectivity - Latest timestamp retrieval @@ -53,6 +54,7 @@ curl http://localhost:5000/health ### Cluster Best for: + - Dedicated database servers - Simple deployments - Development/testing environments @@ -60,6 +62,7 @@ Best for: ## Server Behavior CBDDC servers operate in respond-only mode: + - Accept incoming sync connections - Respond to sync requests - Do not initiate outbound sync @@ -69,11 +72,11 @@ CBDDC servers operate in respond-only mode: ### ClusterOptions -| Property | Type | Default | Description | -|----------|------|---------|-------------| -| NodeId | string | MachineName | Unique node identifier | -| TcpPort | int | 5001 | TCP port for sync | -| EnableUdpDiscovery | bool | false | Enable UDP discovery | +| Property | Type | Default | Description | +|--------------------|--------|-------------|------------------------| +| NodeId | string | MachineName | Unique node identifier | +| TcpPort | int | 5001 | TCP port for sync | +| EnableUdpDiscovery | bool | false | Enable UDP discovery | ## Production Checklist diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpDiscoveryService.cs b/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpDiscoveryService.cs index 127ea79..ffc5b2a 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpDiscoveryService.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpDiscoveryService.cs @@ -1,5 +1,3 @@ -using System; -using System.Collections.Generic; using 
Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; using Serilog.Context; @@ -9,24 +7,24 @@ using ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Hosting.Services; /// -/// No-op implementation of IDiscoveryService for server scenarios. -/// Does not perform UDP broadcast discovery - relies on explicit peer configuration. +/// No-op implementation of IDiscoveryService for server scenarios. +/// Does not perform UDP broadcast discovery - relies on explicit peer configuration. /// public class NoOpDiscoveryService : IDiscoveryService { private readonly ILogger _logger; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - /// The logger instance to use, or to use a no-op logger. + /// The logger instance to use, or to use a no-op logger. public NoOpDiscoveryService(ILogger? logger = null) { _logger = logger ?? NullLogger.Instance; } /// - /// Gets the currently active peers. + /// Gets the currently active peers. /// /// An empty sequence because discovery is disabled. public IEnumerable GetActivePeers() @@ -35,7 +33,7 @@ public class NoOpDiscoveryService : IDiscoveryService } /// - /// Starts the discovery service. + /// Starts the discovery service. /// /// A completed task. public Task Start() @@ -49,7 +47,7 @@ public class NoOpDiscoveryService : IDiscoveryService } /// - /// Stops the discovery service. + /// Stops the discovery service. /// /// A completed task. public Task Stop() @@ -63,10 +61,10 @@ public class NoOpDiscoveryService : IDiscoveryService } /// - /// Releases resources used by this instance. + /// Releases resources used by this instance. 
/// public void Dispose() { _logger.LogDebug("NoOpDiscoveryService disposed"); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpSyncOrchestrator.cs b/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpSyncOrchestrator.cs index 8ef5bf2..6d4383c 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpSyncOrchestrator.cs +++ b/src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpSyncOrchestrator.cs @@ -1,4 +1,3 @@ -using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; using Serilog.Context; @@ -7,24 +6,24 @@ using ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Hosting.Services; /// -/// No-op implementation of ISyncOrchestrator for server scenarios. -/// Does not initiate outbound sync - only responds to incoming sync requests. +/// No-op implementation of ISyncOrchestrator for server scenarios. +/// Does not initiate outbound sync - only responds to incoming sync requests. /// public class NoOpSyncOrchestrator : ISyncOrchestrator { private readonly ILogger _logger; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - /// The logger instance to use, or for a no-op logger. + /// The logger instance to use, or for a no-op logger. public NoOpSyncOrchestrator(ILogger? logger = null) { _logger = logger ?? NullLogger.Instance; } /// - /// Starts the orchestrator lifecycle. + /// Starts the orchestrator lifecycle. /// /// A completed task. public Task Start() @@ -38,7 +37,7 @@ public class NoOpSyncOrchestrator : ISyncOrchestrator } /// - /// Stops the orchestrator lifecycle. + /// Stops the orchestrator lifecycle. /// /// A completed task. public Task Stop() @@ -52,10 +51,10 @@ public class NoOpSyncOrchestrator : ISyncOrchestrator } /// - /// Releases resources used by the orchestrator. + /// Releases resources used by the orchestrator. 
/// public void Dispose() { _logger.LogDebug("NoOpSyncOrchestrator disposed"); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj b/src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj index 7b3ab39..5c5e647 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj +++ b/src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj @@ -1,37 +1,37 @@ - - - + + + - - - - - - + + + + + + - - ZB.MOM.WW.CBDDC.Hosting - ZB.MOM.WW.CBDDC.Hosting - ZB.MOM.WW.CBDDC.Hosting - net10.0 - latest - enable - enable - 1.0.3 - MrDevRobot - ASP.NET Core integration for CBDDC with health checks and hosted services. - MIT - p2p;database;aspnetcore;healthcheck;hosting;cluster - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - git - README.md - + + ZB.MOM.WW.CBDDC.Hosting + ZB.MOM.WW.CBDDC.Hosting + ZB.MOM.WW.CBDDC.Hosting + net10.0 + latest + enable + enable + 1.0.3 + MrDevRobot + ASP.NET Core integration for CBDDC with health checks and hosted services. + MIT + p2p;database;aspnetcore;healthcheck;hosting;cluster + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + git + README.md + - - - + + + diff --git a/src/ZB.MOM.WW.CBDDC.Network/CBDDCNode.cs b/src/ZB.MOM.WW.CBDDC.Network/CBDDCNode.cs index 43c2681..a12f7b0 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/CBDDCNode.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/CBDDCNode.cs @@ -1,37 +1,21 @@ -using System; +using System.Net; +using System.Net.NetworkInformation; +using System.Net.Sockets; using Microsoft.Extensions.Logging; -using System.Linq; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Represents a single CBDDC Peer Node. -/// Acts as a facade to orchestrate the lifecycle of Networking, Discovery, and Synchronization components. +/// Represents a single CBDDC Peer Node. 
+/// Acts as a facade to orchestrate the lifecycle of Networking, Discovery, and Synchronization components. /// public class CBDDCNode : ICBDDCNode { private readonly ILogger _logger; - /// - /// Gets the Sync Server instance. - /// - public ISyncServer Server { get; } /// - /// Gets the Discovery Service instance. - /// - public IDiscoveryService Discovery { get; } - - /// - /// Gets the Synchronization Orchestrator instance. - /// - public ISyncOrchestrator Orchestrator { get; } - - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The TCP server for handling incoming sync requests. /// The UDP service for peer discovery. @@ -50,7 +34,22 @@ public class CBDDCNode : ICBDDCNode } /// - /// Starts all node components (Server, Discovery, Orchestrator). + /// Gets the Sync Server instance. + /// + public ISyncServer Server { get; } + + /// + /// Gets the Discovery Service instance. + /// + public IDiscoveryService Discovery { get; } + + /// + /// Gets the Synchronization Orchestrator instance. + /// + public ISyncOrchestrator Orchestrator { get; } + + /// + /// Starts all node components (Server, Discovery, Orchestrator). /// public async Task Start() { @@ -66,7 +65,7 @@ public class CBDDCNode : ICBDDCNode } /// - /// Stops all node components. + /// Stops all node components. /// public async Task Stop() { @@ -82,7 +81,7 @@ public class CBDDCNode : ICBDDCNode } /// - /// Gets the address information of this node. + /// Gets the address information of this node. /// public NodeAddress Address { @@ -93,12 +92,11 @@ public class CBDDCNode : ICBDDCNode { // If the server is listening on "Any" (0.0.0.0), we cannot advertise that as a connectable address. // We must resolve the actual machine IP address that peers can reach. 
- if (Equals(ep.Address, System.Net.IPAddress.Any) || Equals(ep.Address, System.Net.IPAddress.IPv6Any)) - { + if (Equals(ep.Address, IPAddress.Any) || Equals(ep.Address, IPAddress.IPv6Any)) return new NodeAddress(GetLocalIpAddress(), ep.Port); - } return new NodeAddress(ep.Address.ToString(), ep.Port); } + return new NodeAddress("Unknown", 0); } } @@ -107,20 +105,17 @@ public class CBDDCNode : ICBDDCNode { try { - var interfaces = System.Net.NetworkInformation.NetworkInterface.GetAllNetworkInterfaces() - .Where(i => i.OperationalStatus == System.Net.NetworkInformation.OperationalStatus.Up - && i.NetworkInterfaceType != System.Net.NetworkInformation.NetworkInterfaceType.Loopback); + var interfaces = NetworkInterface.GetAllNetworkInterfaces() + .Where(i => i.OperationalStatus == OperationalStatus.Up + && i.NetworkInterfaceType != NetworkInterfaceType.Loopback); foreach (var i in interfaces) { var props = i.GetIPProperties(); var ipInfo = props.UnicastAddresses - .FirstOrDefault(u => u.Address.AddressFamily == System.Net.Sockets.AddressFamily.InterNetwork); // Prefer IPv4 + .FirstOrDefault(u => u.Address.AddressFamily == AddressFamily.InterNetwork); // Prefer IPv4 - if (ipInfo != null) - { - return ipInfo.Address.ToString(); - } + if (ipInfo != null) return ipInfo.Address.ToString(); } return "127.0.0.1"; @@ -133,28 +128,32 @@ public class CBDDCNode : ICBDDCNode } } -public class NodeAddress -{ - /// - /// Gets the host portion of the node address. - /// - public string Host { get; } - /// - /// Gets the port portion of the node address. - /// - public int Port { get; } - - /// - /// Initializes a new instance of the class. - /// - /// The host name or IP address. - /// The port number. - public NodeAddress(string host, int port) - { - Host = host; - Port = port; - } - - /// - public override string ToString() => $"{Host}:{Port}"; -} +public class NodeAddress +{ + /// + /// Initializes a new instance of the class. + /// + /// The host name or IP address. 
+ /// The port number. + public NodeAddress(string host, int port) + { + Host = host; + Port = port; + } + + /// + /// Gets the host portion of the node address. + /// + public string Host { get; } + + /// + /// Gets the port portion of the node address. + /// + public int Port { get; } + + /// + public override string ToString() + { + return $"{Host}:{Port}"; + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/CBDDCNodeService.cs b/src/ZB.MOM.WW.CBDDC.Network/CBDDCNodeService.cs index ca40e18..44cc77a 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/CBDDCNodeService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/CBDDCNodeService.cs @@ -1,22 +1,19 @@ using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; using Serilog.Context; -using System; -using System.Threading; -using System.Threading.Tasks; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Hosted service that automatically starts and stops the CBDDC node. +/// Hosted service that automatically starts and stops the CBDDC node. /// public class CBDDCNodeService : IHostedService { - private readonly ICBDDCNode _node; private readonly ILogger _logger; + private readonly ICBDDCNode _node; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The CBDDC node to manage. /// The logger instance. @@ -27,7 +24,7 @@ public class CBDDCNodeService : IHostedService } /// - /// Starts the managed CBDDC node. + /// Starts the managed CBDDC node. /// /// A token used to cancel startup. /// A task that represents the asynchronous start operation. @@ -60,7 +57,7 @@ public class CBDDCNodeService : IHostedService } /// - /// Stops the managed CBDDC node. + /// Stops the managed CBDDC node. /// /// A token used to cancel shutdown. /// A task that represents the asynchronous stop operation. 
@@ -82,4 +79,4 @@ public class CBDDCNodeService : IHostedService // Don't rethrow during shutdown to avoid breaking the shutdown process } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/CBDDCServiceCollectionExtensions.cs b/src/ZB.MOM.WW.CBDDC.Network/CBDDCServiceCollectionExtensions.cs index 693ce36..ccc66d2 100644 --- a/src/ZB.MOM.WW.CBDDC.Network/CBDDCServiceCollectionExtensions.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/CBDDCServiceCollectionExtensions.cs @@ -8,15 +8,15 @@ using ZB.MOM.WW.CBDDC.Core.Sync; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Provides extension methods for registering core CBDDC services. +/// Provides extension methods for registering core CBDDC services. /// public static class CBDDCServiceCollectionExtensions { /// - /// Registers core CBDDC service dependencies. + /// Registers core CBDDC service dependencies. /// /// The service collection to update. - /// The same instance for chaining. + /// The same instance for chaining. public static IServiceCollection AddCBDDCCore(this IServiceCollection services) { ArgumentNullException.ThrowIfNull(services); @@ -29,4 +29,4 @@ public static class CBDDCServiceCollectionExtensions return services; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/CompositeDiscoveryService.cs b/src/ZB.MOM.WW.CBDDC.Network/CompositeDiscoveryService.cs index 8d140c5..9da10dd 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/CompositeDiscoveryService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/CompositeDiscoveryService.cs @@ -1,38 +1,33 @@ -using System; using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Storage; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Composite 
discovery service that combines UDP LAN discovery with persistent remote peers from the peerConfigurationStore. -/// Periodically refreshes the remote peer list and merges with actively discovered LAN peers. -/// -/// Remote peer configurations are stored in a synchronized collection that is automatically -/// replicated across all nodes in the cluster. Any node that adds a remote peer will have -/// it synchronized to all other nodes automatically. +/// Composite discovery service that combines UDP LAN discovery with persistent remote peers from the +/// peerConfigurationStore. +/// Periodically refreshes the remote peer list and merges with actively discovered LAN peers. +/// Remote peer configurations are stored in a synchronized collection that is automatically +/// replicated across all nodes in the cluster. Any node that adds a remote peer will have +/// it synchronized to all other nodes automatically. /// public class CompositeDiscoveryService : IDiscoveryService { - private readonly IDiscoveryService _udpDiscovery; - private readonly IPeerConfigurationStore _peerConfigurationStore; - private readonly ILogger _logger; - private readonly TimeSpan _refreshInterval; private const string RemotePeersCollectionName = "_system_remote_peers"; + private readonly ILogger _logger; + private readonly IPeerConfigurationStore _peerConfigurationStore; + private readonly TimeSpan _refreshInterval; + private readonly ConcurrentDictionary _remotePeers = new(); + private readonly object _startStopLock = new(); + private readonly IDiscoveryService _udpDiscovery; private CancellationTokenSource? _cts; - private readonly ConcurrentDictionary _remotePeers = new(); - private readonly object _startStopLock = new object(); /// - /// Initializes a new instance of the CompositeDiscoveryService class. + /// Initializes a new instance of the CompositeDiscoveryService class. /// /// UDP-based LAN discovery service. 
/// Database instance for accessing the synchronized remote peers collection. @@ -45,13 +40,14 @@ public class CompositeDiscoveryService : IDiscoveryService TimeSpan? refreshInterval = null) { _udpDiscovery = udpDiscovery ?? throw new ArgumentNullException(nameof(udpDiscovery)); - _peerConfigurationStore = peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore)); + _peerConfigurationStore = + peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore)); _logger = logger ?? NullLogger.Instance; _refreshInterval = refreshInterval ?? TimeSpan.FromMinutes(5); } /// - /// Gets the currently active peers from LAN discovery and configured remote peers. + /// Gets the currently active peers from LAN discovery and configured remote peers. /// /// A sequence of active peer nodes. public IEnumerable GetActivePeers() @@ -64,7 +60,7 @@ public class CompositeDiscoveryService : IDiscoveryService } /// - /// Starts peer discovery and the remote peer refresh loop. + /// Starts peer discovery and the remote peer refresh loop. /// /// A task that represents the asynchronous start operation. public async Task Start() @@ -76,6 +72,7 @@ public class CompositeDiscoveryService : IDiscoveryService _logger.LogWarning("Composite discovery service already started"); return; } + _cts = new CancellationTokenSource(); } @@ -103,7 +100,7 @@ public class CompositeDiscoveryService : IDiscoveryService } /// - /// Stops peer discovery and the remote peer refresh loop. + /// Stops peer discovery and the remote peer refresh loop. /// /// A task that represents the asynchronous stop operation. 
public async Task Stop() @@ -143,7 +140,6 @@ public class CompositeDiscoveryService : IDiscoveryService private async Task RefreshLoopAsync(CancellationToken cancellationToken) { while (!cancellationToken.IsCancellationRequested) - { try { await Task.Delay(_refreshInterval, cancellationToken); @@ -158,7 +154,6 @@ public class CompositeDiscoveryService : IDiscoveryService { _logger.LogError(ex, "Error during remote peer refresh"); } - } } private async Task RefreshRemotePeersAsync() @@ -178,18 +173,18 @@ public class CompositeDiscoveryService : IDiscoveryService config.NodeId, config.Address, now, // LastSeen is now for persistent peers (always considered active) - config.Type, - NodeRole.Member // Remote peers are always members, never gateways + config.Type // Remote peers are always members, never gateways ); _remotePeers[config.NodeId] = peerNode; } - _logger.LogInformation("Refreshed remote peers: {Count} enabled peers loaded from synchronized collection", _remotePeers.Count); + _logger.LogInformation("Refreshed remote peers: {Count} enabled peers loaded from synchronized collection", + _remotePeers.Count); } catch (Exception ex) { _logger.LogError(ex, "Failed to refresh remote peers from database"); } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/CompressionHelper.cs b/src/ZB.MOM.WW.CBDDC.Network/CompressionHelper.cs index 14eb6a2..fede9a6 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/CompressionHelper.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/CompressionHelper.cs @@ -1,18 +1,16 @@ -using System; -using System.IO; using System.IO.Compression; namespace ZB.MOM.WW.CBDDC.Network; -public static class CompressionHelper -{ - public const int THRESHOLD = 1024; // 1KB - - /// - /// Gets a value indicating whether Brotli compression is supported on the current target framework. 
- /// - public static bool IsBrotliSupported - { +public static class CompressionHelper +{ + public const int THRESHOLD = 1024; // 1KB + + /// + /// Gets a value indicating whether Brotli compression is supported on the current target framework. + /// + public static bool IsBrotliSupported + { get { #if NET6_0_OR_GREATER @@ -20,16 +18,16 @@ public static class CompressionHelper #else return false; #endif - } - } - - /// - /// Compresses the specified data when Brotli is supported and the payload exceeds the threshold. - /// - /// The input data to compress. - /// The compressed payload, or the original payload if compression is skipped. - public static byte[] Compress(byte[] data) - { + } + } + + /// + /// Compresses the specified data when Brotli is supported and the payload exceeds the threshold. + /// + /// The input data to compress. + /// The compressed payload, or the original payload if compression is skipped. + public static byte[] Compress(byte[] data) + { if (data.Length < THRESHOLD || !IsBrotliSupported) return data; #if NET6_0_OR_GREATER @@ -38,19 +36,20 @@ public static class CompressionHelper { brotli.Write(data, 0, data.Length); } + return output.ToArray(); #else return data; -#endif - } - - /// - /// Decompresses Brotli-compressed data. - /// - /// The compressed payload. - /// The decompressed payload. - public static byte[] Decompress(byte[] compressedData) - { +#endif + } + + /// + /// Decompresses Brotli-compressed data. + /// + /// The compressed payload. + /// The decompressed payload. 
+ public static byte[] Decompress(byte[] compressedData) + { #if NET6_0_OR_GREATER using var input = new MemoryStream(compressedData); using var output = new MemoryStream(); @@ -58,9 +57,10 @@ public static class CompressionHelper { brotli.CopyTo(output); } + return output.ToArray(); #else throw new NotSupportedException("Brotli decompression not supported on this platform."); #endif } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/ICBDDCNode.cs b/src/ZB.MOM.WW.CBDDC.Network/ICBDDCNode.cs index 103b1ed..c3d8f18 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/ICBDDCNode.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/ICBDDCNode.cs @@ -1,35 +1,36 @@ -ο»Ώusing System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Network -{ - public interface ICBDDCNode - { - /// - /// Gets the node address. - /// - NodeAddress Address { get; } - /// - /// Gets the discovery service. - /// - IDiscoveryService Discovery { get; } - /// - /// Gets the synchronization orchestrator. - /// - ISyncOrchestrator Orchestrator { get; } - /// - /// Gets the synchronization server. - /// - ISyncServer Server { get; } +ο»Ώnamespace ZB.MOM.WW.CBDDC.Network; - /// - /// Starts the node services. - /// - /// A task that represents the asynchronous start operation. - Task Start(); - /// - /// Stops the node services. - /// - /// A task that represents the asynchronous stop operation. - Task Stop(); - } -} +public interface ICBDDCNode +{ + /// + /// Gets the node address. + /// + NodeAddress Address { get; } + + /// + /// Gets the discovery service. + /// + IDiscoveryService Discovery { get; } + + /// + /// Gets the synchronization orchestrator. + /// + ISyncOrchestrator Orchestrator { get; } + + /// + /// Gets the synchronization server. + /// + ISyncServer Server { get; } + + /// + /// Starts the node services. + /// + /// A task that represents the asynchronous start operation. + Task Start(); + + /// + /// Stops the node services. 
+ /// + /// A task that represents the asynchronous stop operation. + Task Stop(); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/IDiscoveryService.cs b/src/ZB.MOM.WW.CBDDC.Network/IDiscoveryService.cs index b1e1bec..e31036c 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/IDiscoveryService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/IDiscoveryService.cs @@ -1,30 +1,27 @@ -ο»Ώusing ZB.MOM.WW.CBDDC.Core.Network; -using System.Collections.Generic; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Network +ο»Ώusing ZB.MOM.WW.CBDDC.Core.Network; + +namespace ZB.MOM.WW.CBDDC.Network; + +/// +/// Defines peer discovery operations. +/// +public interface IDiscoveryService { /// - /// Defines peer discovery operations. + /// Gets the currently active peers. /// - public interface IDiscoveryService - { - /// - /// Gets the currently active peers. - /// - /// The active peer nodes. - IEnumerable GetActivePeers(); + /// The active peer nodes. + IEnumerable GetActivePeers(); - /// - /// Starts the discovery service. - /// - /// A task that represents the asynchronous operation. - Task Start(); + /// + /// Starts the discovery service. + /// + /// A task that represents the asynchronous operation. + Task Start(); - /// - /// Stops the discovery service. - /// - /// A task that represents the asynchronous operation. - Task Stop(); - } -} + /// + /// Stops the discovery service. + /// + /// A task that represents the asynchronous operation. 
+ Task Stop(); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/IOplogPruneCutoffCalculator.cs b/src/ZB.MOM.WW.CBDDC.Network/IOplogPruneCutoffCalculator.cs index 246a875..1702f89 100644 --- a/src/ZB.MOM.WW.CBDDC.Network/IOplogPruneCutoffCalculator.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/IOplogPruneCutoffCalculator.cs @@ -1,16 +1,14 @@ -using System.Threading; -using System.Threading.Tasks; using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Calculates the effective oplog prune cutoff for maintenance. +/// Calculates the effective oplog prune cutoff for maintenance. /// public interface IOplogPruneCutoffCalculator { /// - /// Calculates the effective prune cutoff for the provided node configuration. + /// Calculates the effective prune cutoff for the provided node configuration. /// /// The local node configuration. /// The cancellation token. @@ -18,4 +16,4 @@ public interface IOplogPruneCutoffCalculator Task CalculateEffectiveCutoffAsync( PeerNodeConfiguration configuration, CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/ISyncOrchestrator.cs b/src/ZB.MOM.WW.CBDDC.Network/ISyncOrchestrator.cs index a605ddf..5ec329f 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/ISyncOrchestrator.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/ISyncOrchestrator.cs @@ -1,22 +1,19 @@ -ο»Ώusing System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Network +ο»Ώnamespace ZB.MOM.WW.CBDDC.Network; + +/// +/// Defines lifecycle operations for synchronization orchestration. +/// +public interface ISyncOrchestrator { /// - /// Defines lifecycle operations for synchronization orchestration. + /// Starts synchronization orchestration. /// - public interface ISyncOrchestrator - { - /// - /// Starts synchronization orchestration. - /// - /// A task that represents the asynchronous start operation. - Task Start(); + /// A task that represents the asynchronous start operation. 
+ Task Start(); - /// - /// Stops synchronization orchestration. - /// - /// A task that represents the asynchronous stop operation. - Task Stop(); - } -} + /// + /// Stops synchronization orchestration. + /// + /// A task that represents the asynchronous stop operation. + Task Stop(); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/ISyncServer.cs b/src/ZB.MOM.WW.CBDDC.Network/ISyncServer.cs index 0053e69..9307eb4 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/ISyncServer.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/ISyncServer.cs @@ -1,31 +1,33 @@ -ο»Ώusing System.Net; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Network; - -/// -/// Defines the contract for a server that supports starting, stopping, and reporting its listening network endpoint for -/// synchronization operations. -/// -/// Implementations of this interface are expected to provide asynchronous methods for starting and -/// stopping the server. The listening endpoint may be null if the server is not currently active or has not been -/// started. +ο»Ώusing System.Net; + +namespace ZB.MOM.WW.CBDDC.Network; + +/// +/// Defines the contract for a server that supports starting, stopping, and reporting its listening network endpoint +/// for +/// synchronization operations. +/// +/// +/// Implementations of this interface are expected to provide asynchronous methods for starting and +/// stopping the server. The listening endpoint may be null if the server is not currently active or has not been +/// started. +/// public interface ISyncServer { /// - /// Starts the synchronization server. + /// Gets the network endpoint currently used by the server for listening. + /// + IPEndPoint? ListeningEndpoint { get; } + + /// + /// Starts the synchronization server. /// /// A task that represents the asynchronous operation. Task Start(); /// - /// Stops the synchronization server. + /// Stops the synchronization server. /// /// A task that represents the asynchronous operation. 
Task Stop(); - - /// - /// Gets the network endpoint currently used by the server for listening. - /// - IPEndPoint? ListeningEndpoint { get; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Leadership/BullyLeaderElectionService.cs b/src/ZB.MOM.WW.CBDDC.Network/Leadership/BullyLeaderElectionService.cs index 6f7ea7d..fcff9a5 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Leadership/BullyLeaderElectionService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Leadership/BullyLeaderElectionService.cs @@ -1,48 +1,26 @@ -using System; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Network.Leadership; /// -/// Implements the Bully algorithm for leader election. -/// The node with the lexicographically smallest NodeId becomes the cloud gateway (leader). -/// Elections run periodically (every 5 seconds) to adapt to cluster changes. +/// Implements the Bully algorithm for leader election. +/// The node with the lexicographically smallest NodeId becomes the cloud gateway (leader). +/// Elections run periodically (every 5 seconds) to adapt to cluster changes. /// public class BullyLeaderElectionService : ILeaderElectionService { - private readonly IDiscoveryService _discoveryService; private readonly IPeerNodeConfigurationProvider _configProvider; - private readonly ILogger _logger; + private readonly IDiscoveryService _discoveryService; private readonly TimeSpan _electionInterval; + private readonly ILogger _logger; private CancellationTokenSource? _cts; private string? _localNodeId; - private string? _currentGatewayNodeId; - private bool _isCloudGateway; /// - /// Gets a value indicating whether this node is currently the cloud gateway leader. 
- /// - public bool IsCloudGateway => _isCloudGateway; - - /// - /// Gets the current gateway node identifier. - /// - public string? CurrentGatewayNodeId => _currentGatewayNodeId; - - /// - /// Occurs when leadership changes. - /// - public event EventHandler? LeadershipChanged; - - /// - /// Initializes a new instance of the BullyLeaderElectionService class. + /// Initializes a new instance of the BullyLeaderElectionService class. /// /// Service providing active peer information. /// Provider for local node configuration. @@ -61,7 +39,22 @@ public class BullyLeaderElectionService : ILeaderElectionService } /// - /// Starts the leader election loop. + /// Gets a value indicating whether this node is currently the cloud gateway leader. + /// + public bool IsCloudGateway { get; private set; } + + /// + /// Gets the current gateway node identifier. + /// + public string? CurrentGatewayNodeId { get; private set; } + + /// + /// Occurs when leadership changes. + /// + public event EventHandler? LeadershipChanged; + + /// + /// Starts the leader election loop. /// /// A task that represents the asynchronous start operation. public async Task Start() @@ -82,7 +75,7 @@ public class BullyLeaderElectionService : ILeaderElectionService } /// - /// Stops the leader election loop. + /// Stops the leader election loop. /// /// A task that represents the asynchronous stop operation. 
public Task Stop() @@ -100,7 +93,6 @@ public class BullyLeaderElectionService : ILeaderElectionService private async Task ElectionLoopAsync(CancellationToken cancellationToken) { while (!cancellationToken.IsCancellationRequested) - { try { await Task.Delay(_electionInterval, cancellationToken); @@ -115,7 +107,6 @@ public class BullyLeaderElectionService : ILeaderElectionService { _logger.LogError(ex, "Error during leader election"); } - } } private void RunElection() @@ -132,35 +123,31 @@ public class BullyLeaderElectionService : ILeaderElectionService lanPeers.Add(_localNodeId); // Bully algorithm: smallest NodeId wins (lexicographic comparison) - var newLeader = lanPeers.OrderBy(id => id, StringComparer.Ordinal).FirstOrDefault(); + string? newLeader = lanPeers.OrderBy(id => id, StringComparer.Ordinal).FirstOrDefault(); if (newLeader == null) - { // No peers available, local node is leader by default newLeader = _localNodeId; - } // Check if leadership changed - if (newLeader != _currentGatewayNodeId) + if (newLeader != CurrentGatewayNodeId) { - var wasLeader = _isCloudGateway; - _currentGatewayNodeId = newLeader; - _isCloudGateway = newLeader == _localNodeId; + bool wasLeader = IsCloudGateway; + CurrentGatewayNodeId = newLeader; + IsCloudGateway = newLeader == _localNodeId; - if (wasLeader != _isCloudGateway) + if (wasLeader != IsCloudGateway) { - if (_isCloudGateway) - { - _logger.LogInformation("πŸ” This node is now the CLOUD GATEWAY (Leader) - Will sync with remote cloud nodes"); - } + if (IsCloudGateway) + _logger.LogInformation( + "πŸ” This node is now the CLOUD GATEWAY (Leader) - Will sync with remote cloud nodes"); else - { - _logger.LogInformation("πŸ‘€ This node is now a MEMBER - Cloud sync handled by gateway: {Gateway}", _currentGatewayNodeId); - } + _logger.LogInformation("πŸ‘€ This node is now a MEMBER - Cloud sync handled by gateway: {Gateway}", + CurrentGatewayNodeId); // Raise event - LeadershipChanged?.Invoke(this, new 
LeadershipChangedEventArgs(_currentGatewayNodeId, _isCloudGateway)); + LeadershipChanged?.Invoke(this, new LeadershipChangedEventArgs(CurrentGatewayNodeId, IsCloudGateway)); } } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Leadership/ILeaderElectionService.cs b/src/ZB.MOM.WW.CBDDC.Network/Leadership/ILeaderElectionService.cs index 9b363be..cdb5931 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Leadership/ILeaderElectionService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Leadership/ILeaderElectionService.cs @@ -1,65 +1,65 @@ -using System; -using System.Threading.Tasks; - namespace ZB.MOM.WW.CBDDC.Network.Leadership; /// -/// Event arguments for leadership change events. +/// Event arguments for leadership change events. /// public class LeadershipChangedEventArgs : EventArgs { /// - /// Gets the NodeId of the current cloud gateway (leader). - /// Null if no leader is elected. + /// Initializes a new instance of the LeadershipChangedEventArgs class. /// - public string? CurrentGatewayNodeId { get; } - - /// - /// Gets whether the local node is now the cloud gateway. - /// - public bool IsLocalNodeGateway { get; } - - /// - /// Initializes a new instance of the LeadershipChangedEventArgs class. - /// - /// The NodeId of the current gateway node, or when none is elected. + /// + /// The NodeId of the current gateway node, or when none is + /// elected. + /// /// A value indicating whether the local node is the gateway. public LeadershipChangedEventArgs(string? currentGatewayNodeId, bool isLocalNodeGateway) { CurrentGatewayNodeId = currentGatewayNodeId; IsLocalNodeGateway = isLocalNodeGateway; } + + /// + /// Gets the NodeId of the current cloud gateway (leader). + /// Null if no leader is elected. + /// + public string? CurrentGatewayNodeId { get; } + + /// + /// Gets whether the local node is now the cloud gateway. + /// + public bool IsLocalNodeGateway { get; } } /// -/// Service for managing leader election in a distributed cluster. 
-/// Uses the Bully algorithm where the node with the lexicographically smallest NodeId becomes the leader. -/// Only the leader (Cloud Gateway) synchronizes with remote cloud nodes. +/// Service for managing leader election in a distributed cluster. +/// Uses the Bully algorithm where the node with the lexicographically smallest NodeId becomes the leader. +/// Only the leader (Cloud Gateway) synchronizes with remote cloud nodes. /// public interface ILeaderElectionService { /// - /// Gets whether the local node is currently the cloud gateway (leader). + /// Gets whether the local node is currently the cloud gateway (leader). /// bool IsCloudGateway { get; } /// - /// Gets the NodeId of the current cloud gateway, or null if no gateway is elected. + /// Gets the NodeId of the current cloud gateway, or null if no gateway is elected. /// string? CurrentGatewayNodeId { get; } /// - /// Event raised when leadership changes. + /// Event raised when leadership changes. /// event EventHandler? LeadershipChanged; /// - /// Starts the leader election service. + /// Starts the leader election service. /// Task Start(); /// - /// Stops the leader election service. + /// Stops the leader election service. /// Task Stop(); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffCalculator.cs b/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffCalculator.cs index 33cad41..b7a4248 100644 --- a/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffCalculator.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffCalculator.cs @@ -1,8 +1,3 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; @@ -10,7 +5,7 @@ using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Default implementation for effective oplog prune cutoff calculation. 
+/// Default implementation for effective oplog prune cutoff calculation. /// public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator { @@ -18,7 +13,7 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The oplog store. /// The optional peer confirmation store. @@ -39,23 +34,19 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator var retentionCutoff = BuildRetentionCutoff(configuration); if (_peerOplogConfirmationStore == null) - { return OplogPruneCutoffDecision.WithCutoff( retentionCutoff, - confirmationCutoff: null, - effectiveCutoff: retentionCutoff, - reason: "Confirmation tracking is not configured."); - } + null, + retentionCutoff, + "Confirmation tracking is not configured."); var relevantSources = await GetRelevantSourceNodesAsync(cancellationToken); if (relevantSources.Count == 0) - { return OplogPruneCutoffDecision.WithCutoff( retentionCutoff, - confirmationCutoff: null, - effectiveCutoff: retentionCutoff, - reason: "No local non-default oplog/vector-clock sources were found."); - } + null, + retentionCutoff, + "No local non-default oplog/vector-clock sources were found."); var activeTrackedPeers = (await _peerOplogConfirmationStore.GetActiveTrackedPeersAsync(cancellationToken)) .Where(peerNodeId => !string.IsNullOrWhiteSpace(peerNodeId)) @@ -63,19 +54,18 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator .ToList(); if (activeTrackedPeers.Count == 0) - { return OplogPruneCutoffDecision.WithCutoff( retentionCutoff, - confirmationCutoff: null, - effectiveCutoff: retentionCutoff, - reason: "No active tracked peers found for confirmation gating."); - } + null, + retentionCutoff, + "No active tracked peers found for confirmation gating."); HlcTimestamp? 
confirmationCutoff = null; - foreach (var peerNodeId in activeTrackedPeers) + foreach (string peerNodeId in activeTrackedPeers) { - var confirmationsForPeer = (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken)) + var confirmationsForPeer = + (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken)) .Where(confirmation => confirmation.IsActive) .Where(confirmation => !string.IsNullOrWhiteSpace(confirmation.SourceNodeId)) .GroupBy(confirmation => confirmation.SourceNodeId, StringComparer.Ordinal) @@ -87,30 +77,25 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator .Last(), StringComparer.Ordinal); - foreach (var sourceNodeId in relevantSources) + foreach (string sourceNodeId in relevantSources) { - if (!confirmationsForPeer.TryGetValue(sourceNodeId, out var confirmedTimestamp) || confirmedTimestamp == default) - { + if (!confirmationsForPeer.TryGetValue(sourceNodeId, out var confirmedTimestamp) || + confirmedTimestamp == default) return OplogPruneCutoffDecision.NoCutoff( retentionCutoff, $"Active tracked peer '{peerNodeId}' is missing confirmation for source '{sourceNodeId}'."); - } if (!confirmationCutoff.HasValue || confirmedTimestamp < confirmationCutoff.Value) - { confirmationCutoff = confirmedTimestamp; - } } } if (!confirmationCutoff.HasValue) - { return OplogPruneCutoffDecision.WithCutoff( retentionCutoff, - confirmationCutoff: null, - effectiveCutoff: retentionCutoff, - reason: "No confirmation cutoff could be determined."); - } + null, + retentionCutoff, + "No confirmation cutoff could be determined."); var effectiveCutoff = retentionCutoff <= confirmationCutoff.Value ? 
retentionCutoff @@ -124,7 +109,7 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator private static HlcTimestamp BuildRetentionCutoff(PeerNodeConfiguration configuration) { - var retentionTimestamp = DateTimeOffset.UtcNow + long retentionTimestamp = DateTimeOffset.UtcNow .AddHours(-configuration.OplogRetentionHours) .ToUnixTimeMilliseconds(); @@ -135,18 +120,12 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator { var localVectorClock = await _oplogStore.GetVectorClockAsync(cancellationToken); var sourceNodes = new HashSet(StringComparer.Ordinal); - foreach (var sourceNodeId in localVectorClock.NodeIds) + foreach (string sourceNodeId in localVectorClock.NodeIds) { - if (string.IsNullOrWhiteSpace(sourceNodeId)) - { - continue; - } + if (string.IsNullOrWhiteSpace(sourceNodeId)) continue; var timestamp = localVectorClock.GetTimestamp(sourceNodeId); - if (timestamp == default) - { - continue; - } + if (timestamp == default) continue; sourceNodes.Add(sourceNodeId); } @@ -161,4 +140,4 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator confirmation.ConfirmedLogic, confirmation.SourceNodeId ?? string.Empty); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffDecision.cs b/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffDecision.cs index bd15583..caceaad 100644 --- a/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffDecision.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/OplogPruneCutoffDecision.cs @@ -3,7 +3,7 @@ using ZB.MOM.WW.CBDDC.Core; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Represents the prune cutoff decision for an oplog maintenance cycle. +/// Represents the prune cutoff decision for an oplog maintenance cycle. /// public sealed class OplogPruneCutoffDecision { @@ -22,32 +22,32 @@ public sealed class OplogPruneCutoffDecision } /// - /// Gets a value indicating whether pruning is allowed for this decision. 
+ /// Gets a value indicating whether pruning is allowed for this decision. /// public bool HasCutoff { get; } /// - /// Gets the retention-based cutoff. + /// Gets the retention-based cutoff. /// public HlcTimestamp RetentionCutoff { get; } /// - /// Gets the confirmation-based cutoff, when available. + /// Gets the confirmation-based cutoff, when available. /// public HlcTimestamp? ConfirmationCutoff { get; } /// - /// Gets the effective cutoff to use for pruning when is true. + /// Gets the effective cutoff to use for pruning when is true. /// public HlcTimestamp? EffectiveCutoff { get; } /// - /// Gets the explanatory reason for skip/special handling decisions. + /// Gets the explanatory reason for skip/special handling decisions. /// public string Reason { get; } /// - /// Creates a prune-allowed decision with the provided cutoffs. + /// Creates a prune-allowed decision with the provided cutoffs. /// /// The cutoff derived from retention policy. /// The cutoff derived from peer confirmations, if available. @@ -60,25 +60,25 @@ public sealed class OplogPruneCutoffDecision string reason = "") { return new OplogPruneCutoffDecision( - hasCutoff: true, - retentionCutoff: retentionCutoff, - confirmationCutoff: confirmationCutoff, - effectiveCutoff: effectiveCutoff, - reason: reason); + true, + retentionCutoff, + confirmationCutoff, + effectiveCutoff, + reason); } /// - /// Creates a prune-blocked decision. + /// Creates a prune-blocked decision. /// /// The cutoff derived from retention policy. /// The explanatory reason associated with the decision. 
public static OplogPruneCutoffDecision NoCutoff(HlcTimestamp retentionCutoff, string reason) { return new OplogPruneCutoffDecision( - hasCutoff: false, - retentionCutoff: retentionCutoff, - confirmationCutoff: null, - effectiveCutoff: null, - reason: reason); + false, + retentionCutoff, + null, + null, + reason); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/PeerDbNetworkExtensions.cs b/src/ZB.MOM.WW.CBDDC.Network/PeerDbNetworkExtensions.cs index 5d05949..71d3395 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/PeerDbNetworkExtensions.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/PeerDbNetworkExtensions.cs @@ -1,57 +1,55 @@ -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; // For IMeshNetwork if we implement it -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Network.Security; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Hosting; -using System; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Network.Security; +using ZB.MOM.WW.CBDDC.Network.Telemetry; +// For IMeshNetwork if we implement it namespace ZB.MOM.WW.CBDDC.Network; public static class CBDDCNetworkExtensions { - /// - /// Adds CBDDC network services to the service collection. - /// - /// The peer node configuration provider implementation type. - /// The service collection to register services into. - /// If true, registers CBDDCNodeService as IHostedService to automatically start/stop the node. - public static IServiceCollection AddCBDDCNetwork( - this IServiceCollection services, - bool useHostedService = true) + /// + /// Adds CBDDC network services to the service collection. + /// + /// The peer node configuration provider implementation type. + /// The service collection to register services into. + /// + /// If true, registers CBDDCNodeService as IHostedService to automatically start/stop the + /// node. 
+ /// + public static IServiceCollection AddCBDDCNetwork( + this IServiceCollection services, + bool useHostedService = true) where TPeerNodeConfigurationProvider : class, IPeerNodeConfigurationProvider { services.TryAddSingleton(); - services.TryAddSingleton(); - + services.TryAddSingleton(); + services.TryAddSingleton(); services.TryAddSingleton(); - services.TryAddSingleton(sp => + services.TryAddSingleton(sp => { - var logger = sp.GetRequiredService>(); - var path = System.IO.Path.Combine(System.AppContext.BaseDirectory, "cbddc_metrics.bin"); - return new ZB.MOM.WW.CBDDC.Network.Telemetry.NetworkTelemetryService(logger, path); + var logger = sp.GetRequiredService>(); + string path = Path.Combine(AppContext.BaseDirectory, "cbddc_metrics.bin"); + return new NetworkTelemetryService(logger, path); }); - services.TryAddSingleton(); - - services.TryAddSingleton(); - - services.TryAddSingleton(); + services.TryAddSingleton(); + + services.TryAddSingleton(); + + services.TryAddSingleton(); services.TryAddSingleton(); // Optionally register hosted service for automatic node lifecycle management - if (useHostedService) - { - services.AddHostedService(); - } + if (useHostedService) services.AddHostedService(); return services; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Protocol/ProtocolHandler.cs b/src/ZB.MOM.WW.CBDDC.Network/Protocol/ProtocolHandler.cs index ab05ed1..80fe70c 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Protocol/ProtocolHandler.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Protocol/ProtocolHandler.cs @@ -1,259 +1,252 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; using Google.Protobuf; using Microsoft.Extensions.Logging; using ZB.MOM.WW.CBDDC.Network.Proto; using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Telemetry; -namespace ZB.MOM.WW.CBDDC.Network.Protocol +namespace ZB.MOM.WW.CBDDC.Network.Protocol; + +/// +/// Handles the low-level framing, compression, encryption, 
and serialization of CBDDC messages. +/// Encapsulates the wire format: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)] +/// +internal class ProtocolHandler { + private readonly ILogger _logger; + private readonly SemaphoreSlim _readLock = new(1, 1); + private readonly INetworkTelemetryService? _telemetry; + private readonly SemaphoreSlim _writeLock = new(1, 1); + /// - /// Handles the low-level framing, compression, encryption, and serialization of CBDDC messages. - /// Encapsulates the wire format: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)] + /// Initializes a new instance of the class. /// - internal class ProtocolHandler - { - private readonly ILogger _logger; - private readonly INetworkTelemetryService? _telemetry; - private readonly SemaphoreSlim _writeLock = new SemaphoreSlim(1, 1); - private readonly SemaphoreSlim _readLock = new SemaphoreSlim(1, 1); - - /// - /// Initializes a new instance of the class. - /// - /// The logger used for protocol diagnostics. - /// An optional telemetry service used to record network metrics. - public ProtocolHandler(ILogger logger, INetworkTelemetryService? telemetry = null) - { - _logger = logger; - _telemetry = telemetry; - } - - /// - /// Initializes a new instance of the class using a non-generic logger. - /// - /// The logger used for protocol diagnostics. - /// An optional telemetry service used to record network metrics. - internal ProtocolHandler(ILogger logger, INetworkTelemetryService? telemetry = null) - : this(new ForwardingLogger(logger), telemetry) - { - } - - /// - /// Serializes and sends a protocol message to the provided stream. - /// - /// The destination stream. - /// The protocol message type. - /// The message payload to serialize. - /// Whether payload compression should be attempted. - /// Optional cipher state used to encrypt outgoing payloads. - /// Cancellation token. - /// A task that represents the asynchronous send operation. 
- public async Task SendMessageAsync(Stream stream, MessageType type, IMessage message, bool useCompression, CipherState? cipherState, CancellationToken token = default) - { - if (stream == null) throw new ArgumentNullException(nameof(stream)); + /// The logger used for protocol diagnostics. + /// An optional telemetry service used to record network metrics. + public ProtocolHandler(ILogger logger, INetworkTelemetryService? telemetry = null) + { + _logger = logger; + _telemetry = telemetry; + } - // 1. Serialize - byte[] payloadBytes = message.ToByteArray(); - int originalSize = payloadBytes.Length; - byte compressionFlag = 0x00; + /// + /// Initializes a new instance of the class using a non-generic logger. + /// + /// The logger used for protocol diagnostics. + /// An optional telemetry service used to record network metrics. + internal ProtocolHandler(ILogger logger, INetworkTelemetryService? telemetry = null) + : this(new ForwardingLogger(logger), telemetry) + { + } - // 2. Compress (inner payload) - if (useCompression && payloadBytes.Length > CompressionHelper.THRESHOLD && type != MessageType.SecureEnv) - { - // Measure Compression Time - // using var _ = _telemetry?.StartMetric(MetricType.CompressionTime); // Oops, MetricType.CompressionTime not defined? Wait, user asked for "Compression Ratio". - // User asked for "performance della compressione brotli (% media di compressione)". - // That usually means ratio. But time is also good? - // Plan said: "MetricType: CompressionRatio, EncryptionTime..." - - // byte[] compressed; // Removed unused variable - // using (_telemetry?.StartMetric(MetricType.CompressionTime)) // Let's stick to Time if relevant? NO, MetricType only has Ratio. - // Ah I see MetricType enum: CompressionRatio, EncryptionTime, DecryptionTime, RoundTripTime. - // So for compression we only record Ratio. 
- - payloadBytes = CompressionHelper.Compress(payloadBytes); - compressionFlag = 0x01; // Brotli - - if (_telemetry != null && originalSize > 0) - { - double ratio = (double)payloadBytes.Length / originalSize; - _telemetry.RecordValue(MetricType.CompressionRatio, ratio); - } - } + /// + /// Serializes and sends a protocol message to the provided stream. + /// + /// The destination stream. + /// The protocol message type. + /// The message payload to serialize. + /// Whether payload compression should be attempted. + /// Optional cipher state used to encrypt outgoing payloads. + /// Cancellation token. + /// A task that represents the asynchronous send operation. + public async Task SendMessageAsync(Stream stream, MessageType type, IMessage message, bool useCompression, + CipherState? cipherState, CancellationToken token = default) + { + if (stream == null) throw new ArgumentNullException(nameof(stream)); - // 3. Encrypt - if (cipherState != null) + // 1. Serialize + byte[] payloadBytes = message.ToByteArray(); + int originalSize = payloadBytes.Length; + byte compressionFlag = 0x00; + + // 2. Compress (inner payload) + if (useCompression && payloadBytes.Length > CompressionHelper.THRESHOLD && type != MessageType.SecureEnv) + { + // Measure Compression Time + // using var _ = _telemetry?.StartMetric(MetricType.CompressionTime); // Oops, MetricType.CompressionTime not defined? Wait, user asked for "Compression Ratio". + // User asked for "performance della compressione brotli (% media di compressione)". + // That usually means ratio. But time is also good? + // Plan said: "MetricType: CompressionRatio, EncryptionTime..." + + // byte[] compressed; // Removed unused variable + // using (_telemetry?.StartMetric(MetricType.CompressionTime)) // Let's stick to Time if relevant? NO, MetricType only has Ratio. + // Ah I see MetricType enum: CompressionRatio, EncryptionTime, DecryptionTime, RoundTripTime. + // So for compression we only record Ratio. 
+ + payloadBytes = CompressionHelper.Compress(payloadBytes); + compressionFlag = 0x01; // Brotli + + if (_telemetry != null && originalSize > 0) { - using (_telemetry?.StartMetric(MetricType.EncryptionTime)) - { - // Inner data: [Type (1)] [Compression (1)] [Payload (N)] - var dataToEncrypt = new byte[2 + payloadBytes.Length]; - dataToEncrypt[0] = (byte)type; - dataToEncrypt[1] = compressionFlag; - Buffer.BlockCopy(payloadBytes, 0, dataToEncrypt, 2, payloadBytes.Length); - - var (ciphertext, iv, tag) = CryptoHelper.Encrypt(dataToEncrypt, cipherState.EncryptKey); - - var env = new SecureEnvelope - { - Ciphertext = ByteString.CopyFrom(ciphertext), - Nonce = ByteString.CopyFrom(iv), - AuthTag = ByteString.CopyFrom(tag) - }; - - payloadBytes = env.ToByteArray(); - type = MessageType.SecureEnv; - compressionFlag = 0x00; // Outer envelope is not compressed - } - } - - // 4. Thread-Safe Write - await _writeLock.WaitAsync(token); - try - { - _logger.LogDebug("Sending Message {Type}, OrgSize: {Org}, WireSize: {Wire}", type, originalSize, payloadBytes.Length); - - // Framing: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)] - var lengthBytes = BitConverter.GetBytes(payloadBytes.Length); - await stream.WriteAsync(lengthBytes, 0, 4, token); - stream.WriteByte((byte)type); - stream.WriteByte(compressionFlag); - await stream.WriteAsync(payloadBytes, 0, payloadBytes.Length, token); - await stream.FlushAsync(token); - } - finally - { - _writeLock.Release(); - } - } - - /// - /// Reads and decodes the next protocol message from the provided stream. - /// - /// The source stream. - /// Optional cipher state used to decrypt incoming payloads. - /// Cancellation token. - /// A tuple containing the decoded message type and payload bytes. - public async Task<(MessageType, byte[])> ReadMessageAsync(Stream stream, CipherState? 
cipherState, CancellationToken token = default) - { - await _readLock.WaitAsync(token); - try - { - var lenBuf = new byte[4]; - int read = await ReadExactAsync(stream, lenBuf, 0, 4, token); - if (read == 0) return (MessageType.Unknown, null!); - - int length = BitConverter.ToInt32(lenBuf, 0); - - int typeByte = stream.ReadByte(); - if (typeByte == -1) throw new EndOfStreamException("Connection closed abruptly (type byte)"); - - int compByte = stream.ReadByte(); - if (compByte == -1) throw new EndOfStreamException("Connection closed abruptly (comp byte)"); - - var payload = new byte[length]; - await ReadExactAsync(stream, payload, 0, length, token); - - var msgType = (MessageType)typeByte; - - // Handle Secure Envelope - if (msgType == MessageType.SecureEnv) - { - if (cipherState == null) throw new InvalidOperationException("Received encrypted message but no cipher state established"); - - byte[] decrypted; - using (_telemetry?.StartMetric(MetricType.DecryptionTime)) - { - var env = SecureEnvelope.Parser.ParseFrom(payload); - decrypted = CryptoHelper.Decrypt( - env.Ciphertext.ToByteArray(), - env.Nonce.ToByteArray(), - env.AuthTag.ToByteArray(), - cipherState.DecryptKey); - } - - if (decrypted.Length < 2) throw new InvalidDataException("Decrypted payload too short"); - - msgType = (MessageType)decrypted[0]; - int innerComp = decrypted[1]; - - var innerPayload = new byte[decrypted.Length - 2]; - Buffer.BlockCopy(decrypted, 2, innerPayload, 0, innerPayload.Length); - - if (innerComp == 0x01) - { - innerPayload = CompressionHelper.Decompress(innerPayload); - } - - return (msgType, innerPayload); - } - - // Handle Unencrypted Compression - if (compByte == 0x01) - { - payload = CompressionHelper.Decompress(payload); - } - - _logger.LogDebug("Read Message {Type}, Size: {Size}", msgType, payload.Length); - return (msgType, payload); - } - finally - { - _readLock.Release(); + double ratio = (double)payloadBytes.Length / originalSize; + 
_telemetry.RecordValue(MetricType.CompressionRatio, ratio); } } - private async Task ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token) - { - int total = 0; - while (total < count) - { - int read = await stream.ReadAsync(buffer, offset + total, count - total, token); - if (read == 0) return 0; // EOF - total += read; - } - return total; - } - - private sealed class ForwardingLogger : ILogger - { - private readonly ILogger _inner; - - /// - /// Initializes a new instance of the class. - /// - /// The underlying logger instance. - public ForwardingLogger(ILogger inner) - { - _inner = inner ?? throw new ArgumentNullException(nameof(inner)); - } - - /// - public IDisposable? BeginScope(TState state) where TState : notnull - { - return _inner.BeginScope(state); - } - - /// - public bool IsEnabled(LogLevel logLevel) - { - return _inner.IsEnabled(logLevel); - } - - /// - public void Log( - LogLevel logLevel, - EventId eventId, - TState state, - Exception? exception, - Func formatter) - { - _inner.Log(logLevel, eventId, state, exception, formatter); - } - } - } -} + // 3. Encrypt + if (cipherState != null) + using (_telemetry?.StartMetric(MetricType.EncryptionTime)) + { + // Inner data: [Type (1)] [Compression (1)] [Payload (N)] + var dataToEncrypt = new byte[2 + payloadBytes.Length]; + dataToEncrypt[0] = (byte)type; + dataToEncrypt[1] = compressionFlag; + Buffer.BlockCopy(payloadBytes, 0, dataToEncrypt, 2, payloadBytes.Length); + + (byte[] ciphertext, byte[] iv, byte[] tag) = + CryptoHelper.Encrypt(dataToEncrypt, cipherState.EncryptKey); + + var env = new SecureEnvelope + { + Ciphertext = ByteString.CopyFrom(ciphertext), + Nonce = ByteString.CopyFrom(iv), + AuthTag = ByteString.CopyFrom(tag) + }; + + payloadBytes = env.ToByteArray(); + type = MessageType.SecureEnv; + compressionFlag = 0x00; // Outer envelope is not compressed + } + + // 4. 
Thread-Safe Write + await _writeLock.WaitAsync(token); + try + { + _logger.LogDebug("Sending Message {Type}, OrgSize: {Org}, WireSize: {Wire}", type, originalSize, + payloadBytes.Length); + + // Framing: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)] + byte[] lengthBytes = BitConverter.GetBytes(payloadBytes.Length); + await stream.WriteAsync(lengthBytes, 0, 4, token); + stream.WriteByte((byte)type); + stream.WriteByte(compressionFlag); + await stream.WriteAsync(payloadBytes, 0, payloadBytes.Length, token); + await stream.FlushAsync(token); + } + finally + { + _writeLock.Release(); + } + } + + /// + /// Reads and decodes the next protocol message from the provided stream. + /// + /// The source stream. + /// Optional cipher state used to decrypt incoming payloads. + /// Cancellation token. + /// A tuple containing the decoded message type and payload bytes. + public async Task<(MessageType, byte[])> ReadMessageAsync(Stream stream, CipherState? cipherState, + CancellationToken token = default) + { + await _readLock.WaitAsync(token); + try + { + var lenBuf = new byte[4]; + int read = await ReadExactAsync(stream, lenBuf, 0, 4, token); + if (read == 0) return (MessageType.Unknown, null!); + + var length = BitConverter.ToInt32(lenBuf, 0); + + int typeByte = stream.ReadByte(); + if (typeByte == -1) throw new EndOfStreamException("Connection closed abruptly (type byte)"); + + int compByte = stream.ReadByte(); + if (compByte == -1) throw new EndOfStreamException("Connection closed abruptly (comp byte)"); + + var payload = new byte[length]; + await ReadExactAsync(stream, payload, 0, length, token); + + var msgType = (MessageType)typeByte; + + // Handle Secure Envelope + if (msgType == MessageType.SecureEnv) + { + if (cipherState == null) + throw new InvalidOperationException("Received encrypted message but no cipher state established"); + + byte[] decrypted; + using (_telemetry?.StartMetric(MetricType.DecryptionTime)) + { + var env = 
SecureEnvelope.Parser.ParseFrom(payload); + decrypted = CryptoHelper.Decrypt( + env.Ciphertext.ToByteArray(), + env.Nonce.ToByteArray(), + env.AuthTag.ToByteArray(), + cipherState.DecryptKey); + } + + if (decrypted.Length < 2) throw new InvalidDataException("Decrypted payload too short"); + + msgType = (MessageType)decrypted[0]; + int innerComp = decrypted[1]; + + var innerPayload = new byte[decrypted.Length - 2]; + Buffer.BlockCopy(decrypted, 2, innerPayload, 0, innerPayload.Length); + + if (innerComp == 0x01) innerPayload = CompressionHelper.Decompress(innerPayload); + + return (msgType, innerPayload); + } + + // Handle Unencrypted Compression + if (compByte == 0x01) payload = CompressionHelper.Decompress(payload); + + _logger.LogDebug("Read Message {Type}, Size: {Size}", msgType, payload.Length); + return (msgType, payload); + } + finally + { + _readLock.Release(); + } + } + + private async Task ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token) + { + var total = 0; + while (total < count) + { + int read = await stream.ReadAsync(buffer, offset + total, count - total, token); + if (read == 0) return 0; // EOF + total += read; + } + + return total; + } + + private sealed class ForwardingLogger : ILogger + { + private readonly ILogger _inner; + + /// + /// Initializes a new instance of the class. + /// + /// The underlying logger instance. + public ForwardingLogger(ILogger inner) + { + _inner = inner ?? throw new ArgumentNullException(nameof(inner)); + } + + /// + public IDisposable? BeginScope(TState state) where TState : notnull + { + return _inner.BeginScope(state); + } + + /// + public bool IsEnabled(LogLevel logLevel) + { + return _inner.IsEnabled(logLevel); + } + + /// + public void Log( + LogLevel logLevel, + EventId eventId, + TState state, + Exception? 
exception, + Func formatter) + { + _inner.Log(logLevel, eventId, state, exception, formatter); + } + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/README.md b/src/ZB.MOM.WW.CBDDC.Network/README.md index a240d16..da85032 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/README.md +++ b/src/ZB.MOM.WW.CBDDC.Network/README.md @@ -48,12 +48,15 @@ node.Start(); ## Features ### Automatic Discovery + Nodes broadcast their presence via UDP and automatically connect to peers on the same network. ### Secure Synchronization + All nodes must share the same authentication token to sync data. ### Scalable Gossip + Updates propagate exponentially - each node tells multiple peers, ensuring fast network-wide propagation. ## Documentation diff --git a/src/ZB.MOM.WW.CBDDC.Network/Security/ClusterKeyAuthenticator.cs b/src/ZB.MOM.WW.CBDDC.Network/Security/ClusterKeyAuthenticator.cs index 1bdaa61..7b3ac88 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Security/ClusterKeyAuthenticator.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Security/ClusterKeyAuthenticator.cs @@ -1,20 +1,19 @@ -using ZB.MOM.WW.CBDDC.Core.Network; -using System.Security.Cryptography; -using System.Text; -using System.Threading.Tasks; +using System.Security.Cryptography; +using System.Text; +using ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Network.Security; /// -/// Authenticator implementation that uses a shared secret (pre-shared key) to validate nodes. -/// Both nodes must possess the same key to successfully handshake. +/// Authenticator implementation that uses a shared secret (pre-shared key) to validate nodes. +/// Both nodes must possess the same key to successfully handshake. /// public class ClusterKeyAuthenticator : IAuthenticator { private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The provider for peer node configuration. 
public ClusterKeyAuthenticator(IPeerNodeConfigurationProvider peerNodeConfigurationProvider) @@ -23,11 +22,11 @@ public class ClusterKeyAuthenticator : IAuthenticator } /// - public async Task ValidateAsync(string nodeId, string token) - { - var config = await _peerNodeConfigurationProvider.GetConfiguration(); - var configuredHash = SHA256.HashData(Encoding.UTF8.GetBytes(config.AuthToken ?? string.Empty)); - var presentedHash = SHA256.HashData(Encoding.UTF8.GetBytes(token ?? string.Empty)); - return CryptographicOperations.FixedTimeEquals(configuredHash, presentedHash); - } -} + public async Task ValidateAsync(string nodeId, string token) + { + var config = await _peerNodeConfigurationProvider.GetConfiguration(); + byte[] configuredHash = SHA256.HashData(Encoding.UTF8.GetBytes(config.AuthToken ?? string.Empty)); + byte[] presentedHash = SHA256.HashData(Encoding.UTF8.GetBytes(token ?? string.Empty)); + return CryptographicOperations.FixedTimeEquals(configuredHash, presentedHash); + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Security/CryptoHelper.cs b/src/ZB.MOM.WW.CBDDC.Network/Security/CryptoHelper.cs index abffdbc..4bbf012 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Security/CryptoHelper.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Security/CryptoHelper.cs @@ -1,30 +1,28 @@ -using System; -using System.IO; using System.Security.Cryptography; namespace ZB.MOM.WW.CBDDC.Network.Security; -public static class CryptoHelper -{ +public static class CryptoHelper +{ private const int KeySize = 32; // 256 bits - private const int BlockSize = 16; // 128 bits - private const int MacSize = 32; // 256 bits (HMACSHA256) - - /// - /// Encrypts plaintext and computes an authentication tag. - /// - /// The plaintext bytes to encrypt. - /// The encryption and HMAC key. - /// The ciphertext, IV, and authentication tag. 
- public static (byte[] ciphertext, byte[] iv, byte[] tag) Encrypt(byte[] plaintext, byte[] key) - { + private const int BlockSize = 16; // 128 bits + private const int MacSize = 32; // 256 bits (HMACSHA256) + + /// + /// Encrypts plaintext and computes an authentication tag. + /// + /// The plaintext bytes to encrypt. + /// The encryption and HMAC key. + /// The ciphertext, IV, and authentication tag. + public static (byte[] ciphertext, byte[] iv, byte[] tag) Encrypt(byte[] plaintext, byte[] key) + { using var aes = Aes.Create(); aes.Key = key; aes.GenerateIV(); - var iv = aes.IV; + byte[] iv = aes.IV; using var encryptor = aes.CreateEncryptor(); - var ciphertext = encryptor.TransformFinalBlock(plaintext, 0, plaintext.Length); + byte[] ciphertext = encryptor.TransformFinalBlock(plaintext, 0, plaintext.Length); // Compute HMAC using var hmac = new HMACSHA256(key); @@ -32,32 +30,30 @@ public static class CryptoHelper var toSign = new byte[iv.Length + ciphertext.Length]; Buffer.BlockCopy(iv, 0, toSign, 0, iv.Length); Buffer.BlockCopy(ciphertext, 0, toSign, iv.Length, ciphertext.Length); - var tag = hmac.ComputeHash(toSign); + byte[] tag = hmac.ComputeHash(toSign); - return (ciphertext, iv, tag); - } - - /// - /// Verifies and decrypts ciphertext. - /// - /// The encrypted bytes. - /// The initialization vector used during encryption. - /// The authentication tag for integrity verification. - /// The encryption and HMAC key. - /// The decrypted plaintext bytes. - public static byte[] Decrypt(byte[] ciphertext, byte[] iv, byte[] tag, byte[] key) - { + return (ciphertext, iv, tag); + } + + /// + /// Verifies and decrypts ciphertext. + /// + /// The encrypted bytes. + /// The initialization vector used during encryption. + /// The authentication tag for integrity verification. + /// The encryption and HMAC key. + /// The decrypted plaintext bytes. 
+ public static byte[] Decrypt(byte[] ciphertext, byte[] iv, byte[] tag, byte[] key) + { // Verify HMAC using var hmac = new HMACSHA256(key); var toVerify = new byte[iv.Length + ciphertext.Length]; Buffer.BlockCopy(iv, 0, toVerify, 0, iv.Length); Buffer.BlockCopy(ciphertext, 0, toVerify, iv.Length, ciphertext.Length); - var computedTag = hmac.ComputeHash(toVerify); + byte[] computedTag = hmac.ComputeHash(toVerify); if (!FixedTimeEquals(tag, computedTag)) - { throw new CryptographicException("Authentication failed (HMAC mismatch)"); - } using var aes = Aes.Create(); aes.Key = key; @@ -78,4 +74,4 @@ public static class CryptoHelper return res == 0; #endif } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Security/IAuthenticator.cs b/src/ZB.MOM.WW.CBDDC.Network/Security/IAuthenticator.cs index 133e920..551e975 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Security/IAuthenticator.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Security/IAuthenticator.cs @@ -1,14 +1,12 @@ -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Network.Security; - +namespace ZB.MOM.WW.CBDDC.Network.Security; + public interface IAuthenticator { /// - /// Validates an authentication token for a node identifier. + /// Validates an authentication token for a node identifier. /// /// The node identifier to validate. /// The authentication token to validate. - /// if the token is valid for the node; otherwise . + /// if the token is valid for the node; otherwise . 
Task ValidateAsync(string nodeId, string token); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Security/IPeerHandshakeService.cs b/src/ZB.MOM.WW.CBDDC.Network/Security/IPeerHandshakeService.cs index 1843243..f8ad1b7 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Security/IPeerHandshakeService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Security/IPeerHandshakeService.cs @@ -1,36 +1,25 @@ -using System.Threading; -using System.Threading.Tasks; - -namespace ZB.MOM.WW.CBDDC.Network.Security; - +namespace ZB.MOM.WW.CBDDC.Network.Security; + public interface IPeerHandshakeService { /// - /// Performs a handshake to establishing identity and optional security context. + /// Performs a handshake to establishing identity and optional security context. /// /// The transport stream used for handshake message exchange. /// A value indicating whether the caller initiated the connection. /// The local node identifier. /// Cancellation token. /// A CipherState if encryption is established, or null if plaintext. - Task HandshakeAsync(System.IO.Stream stream, bool isInitiator, string myNodeId, CancellationToken token); + Task HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token); } public class CipherState { - /// - /// Gets the key used to encrypt outgoing messages. - /// - public byte[] EncryptKey { get; } - /// - /// Gets the key used to decrypt incoming messages. - /// - public byte[] DecryptKey { get; } // For simplicity using IV chaining or explicit IVs. // We'll store just the keys here and let the encryption helper handle IVs. /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The key used for encrypting outgoing payloads. /// The key used for decrypting incoming payloads. @@ -39,4 +28,14 @@ public class CipherState EncryptKey = encryptKey; DecryptKey = decryptKey; } -} + + /// + /// Gets the key used to encrypt outgoing messages. 
+ /// + public byte[] EncryptKey { get; } + + /// + /// Gets the key used to decrypt incoming messages. + /// + public byte[] DecryptKey { get; } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Security/NoOpHandshakeService.cs b/src/ZB.MOM.WW.CBDDC.Network/Security/NoOpHandshakeService.cs index 08d08c4..f0d5e5d 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Security/NoOpHandshakeService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Security/NoOpHandshakeService.cs @@ -1,29 +1,32 @@ -using System.IO; -using System.Threading; -using System.Threading.Tasks; - namespace ZB.MOM.WW.CBDDC.Network.Security; /// -/// Provides a no-operation implementation of the peer handshake service that performs no handshake and always returns -/// null. +/// Provides a no-operation implementation of the peer handshake service that performs no handshake and always returns +/// null. /// -/// This class can be used in scenarios where a handshake is not required or for testing purposes. All -/// handshake attempts using this service will result in no cipher state being established. +/// +/// This class can be used in scenarios where a handshake is not required or for testing purposes. All +/// handshake attempts using this service will result in no cipher state being established. +/// public class NoOpHandshakeService : IPeerHandshakeService { /// - /// Performs a handshake over the specified stream to establish a secure communication channel between two nodes - /// asynchronously. + /// Performs a handshake over the specified stream to establish a secure communication channel between two nodes + /// asynchronously. /// /// The stream used for exchanging handshake messages between nodes. Must be readable and writable. - /// true to initiate the handshake as the local node; otherwise, false to respond as the remote node. + /// + /// true to initiate the handshake as the local node; otherwise, false to respond as the remote + /// node. 
+ /// /// The unique identifier of the local node participating in the handshake. Cannot be null. /// A cancellation token that can be used to cancel the handshake operation. - /// A task that represents the asynchronous handshake operation. The task result contains a CipherState if the - /// handshake succeeds; otherwise, null. + /// + /// A task that represents the asynchronous handshake operation. The task result contains a CipherState if the + /// handshake succeeds; otherwise, null. + /// public Task HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token) { return Task.FromResult(null); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Security/SecureHandshakeService.cs b/src/ZB.MOM.WW.CBDDC.Network/Security/SecureHandshakeService.cs index f931775..fa79d65 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Security/SecureHandshakeService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Security/SecureHandshakeService.cs @@ -1,8 +1,4 @@ -using System; -using System.IO; using System.Security.Cryptography; -using System.Threading; -using System.Threading.Tasks; using Microsoft.Extensions.Logging; namespace ZB.MOM.WW.CBDDC.Network.Security; @@ -12,7 +8,7 @@ public class SecureHandshakeService : IPeerHandshakeService private readonly ILogger? _logger; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The optional logger instance. public SecureHandshakeService(ILogger? logger = null) @@ -26,44 +22,42 @@ public class SecureHandshakeService : IPeerHandshakeService // Both derive shared secret -> Split into SendKey/RecvKey using HKDF /// - /// Performs a secure key exchange handshake over the provided stream. + /// Performs a secure key exchange handshake over the provided stream. /// /// The transport stream used for the handshake. /// A value indicating whether the local node initiated the handshake. /// The local node identifier. /// A token used to cancel the handshake. 
/// - /// A task that returns the negotiated , or if unavailable. + /// A task that returns the negotiated , or if unavailable. /// - public async Task HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token) + public async Task HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, + CancellationToken token) { #if NET6_0_OR_GREATER - using var ecdh = ECDiffieHellman.Create(); - ecdh.KeySize = 256; - - // 1. ExportAsync & Send Public Key - var myPublicKey = ecdh.ExportSubjectPublicKeyInfo(); - var lenBytes = BitConverter.GetBytes(myPublicKey.Length); - await stream.WriteAsync(lenBytes, 0, 4, token); - await stream.WriteAsync(myPublicKey, 0, myPublicKey.Length, token); - await stream.FlushAsync(token); // CRITICAL: Ensure data is sent immediately - - // 2. Receive Peer Public Key - var peerLenBuf = new byte[4]; - await ReadExactAsync(stream, peerLenBuf, 0, 4, token); - int peerLen = BitConverter.ToInt32(peerLenBuf, 0); + using var ecdh = ECDiffieHellman.Create(); + ecdh.KeySize = 256; + + // 1. ExportAsync & Send Public Key + byte[] myPublicKey = ecdh.ExportSubjectPublicKeyInfo(); + byte[] lenBytes = BitConverter.GetBytes(myPublicKey.Length); + await stream.WriteAsync(lenBytes, 0, 4, token); + await stream.WriteAsync(myPublicKey, 0, myPublicKey.Length, token); + await stream.FlushAsync(token); // CRITICAL: Ensure data is sent immediately + + // 2. Receive Peer Public Key + var peerLenBuf = new byte[4]; + await ReadExactAsync(stream, peerLenBuf, 0, 4, token); + var peerLen = BitConverter.ToInt32(peerLenBuf, 0); // Validate peer key length to prevent DoS - if (peerLen <= 0 || peerLen > 10000) - { - throw new InvalidOperationException($"Invalid peer key length: {peerLen}"); - } - - var peerKeyBytes = new byte[peerLen]; - await ReadExactAsync(stream, peerKeyBytes, 0, peerLen, token); - - // 3. 
Import Peer Key & Derive Shared Secret - using var peerEcdh = ECDiffieHellman.Create(); + if (peerLen <= 0 || peerLen > 10000) throw new InvalidOperationException($"Invalid peer key length: {peerLen}"); + + var peerKeyBytes = new byte[peerLen]; + await ReadExactAsync(stream, peerKeyBytes, 0, peerLen, token); + + // 3. Import Peer Key & Derive Shared Secret + using var peerEcdh = ECDiffieHellman.Create(); peerEcdh.ImportSubjectPublicKeyInfo(peerKeyBytes, out _); byte[] sharedSecret = ecdh.DeriveKeyMaterial(peerEcdh.PublicKey); @@ -74,39 +68,40 @@ public class SecureHandshakeService : IPeerHandshakeService using var sha = SHA256.Create(); - var k1Input = new byte[sharedSecret.Length + 1]; - Buffer.BlockCopy(sharedSecret, 0, k1Input, 0, sharedSecret.Length); - k1Input[sharedSecret.Length] = 0; // "0" - var key1 = sha.ComputeHash(k1Input); + var k1Input = new byte[sharedSecret.Length + 1]; + Buffer.BlockCopy(sharedSecret, 0, k1Input, 0, sharedSecret.Length); + k1Input[sharedSecret.Length] = 0; // "0" + byte[] key1 = sha.ComputeHash(k1Input); - var k2Input = new byte[sharedSecret.Length + 1]; - Buffer.BlockCopy(sharedSecret, 0, k2Input, 0, sharedSecret.Length); - k2Input[sharedSecret.Length] = 1; // "1" - var key2 = sha.ComputeHash(k2Input); + var k2Input = new byte[sharedSecret.Length + 1]; + Buffer.BlockCopy(sharedSecret, 0, k2Input, 0, sharedSecret.Length); + k2Input[sharedSecret.Length] = 1; // "1" + byte[] key2 = sha.ComputeHash(k2Input); // If initiator: Encrypt with Key1, Decrypt with Key2 // If responder: Encrypt with Key2, Decrypt with Key1 - var encryptKey = isInitiator ? key1 : key2; - var decryptKey = isInitiator ? key2 : key1; - - return new CipherState(encryptKey, decryptKey); -#else - // For netstandard2.0, standard ECDH import is broken/hard without external libs. - // Returning null or throwing. 
- throw new PlatformNotSupportedException("Secure handshake requires .NET 6.0+"); -#endif - } - - private async Task ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token) - { - int total = 0; - while (total < count) - { - int read = await stream.ReadAsync(buffer, offset + total, count - total, token); - if (read == 0) throw new EndOfStreamException(); - total += read; - } - return total; - } -} + byte[] encryptKey = isInitiator ? key1 : key2; + byte[] decryptKey = isInitiator ? key2 : key1; + + return new CipherState(encryptKey, decryptKey); +#else + // For netstandard2.0, standard ECDH import is broken/hard without external libs. + // Returning null or throwing. + throw new PlatformNotSupportedException("Secure handshake requires .NET 6.0+"); +#endif + } + + private async Task ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token) + { + var total = 0; + while (total < count) + { + int read = await stream.ReadAsync(buffer, offset + total, count - total, token); + if (read == 0) throw new EndOfStreamException(); + total += read; + } + + return total; + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/SyncOrchestrator.cs b/src/ZB.MOM.WW.CBDDC.Network/SyncOrchestrator.cs index 22eba88..a88c604 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/SyncOrchestrator.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/SyncOrchestrator.cs @@ -1,116 +1,98 @@ +using System.Collections.Concurrent; +using System.Net.Sockets; +using Microsoft.Extensions.Logging; +using Serilog.Context; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Telemetry; -using Microsoft.Extensions.Logging; -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Net.Sockets; -using System.Threading; -using System.Threading.Tasks; 
-using Serilog.Context; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Orchestrates the synchronization process between the local node and discovered peers. -/// Manages anti-entropy sessions and data exchange. +/// Orchestrates the synchronization process between the local node and discovered peers. +/// Manages anti-entropy sessions and data exchange. /// -public class SyncOrchestrator : ISyncOrchestrator -{ - private readonly IDiscoveryService _discovery; - private readonly IOplogStore _oplogStore; - private readonly IOplogPruneCutoffCalculator? _oplogPruneCutoffCalculator; - private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore; - private readonly IDocumentStore _documentStore; - private readonly ISnapshotMetadataStore _snapshotMetadataStore; - private readonly ISnapshotService _snapshotService; - private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; - private readonly ILogger _logger; - private readonly ILoggerFactory _loggerFactory; - private CancellationTokenSource? _cts; - private readonly Random _random = new Random(); - private readonly object _startStopLock = new object(); - +public class SyncOrchestrator : ISyncOrchestrator +{ // Persistent clients pool private readonly ConcurrentDictionary _clients = new(); - private readonly ConcurrentDictionary _peerStates = new(); + private readonly IDiscoveryService _discovery; + private readonly IDocumentStore _documentStore; - private readonly IPeerHandshakeService? _handshakeService; - private readonly INetworkTelemetryService? _telemetry; - private class PeerStatus - { - /// - /// Gets or sets the number of consecutive failures for the peer. - /// - public int FailureCount { get; set; } - - /// - /// Gets or sets the next time a retry attempt is allowed. - /// - public DateTime NextRetryTime { get; set; } - } + private readonly IPeerHandshakeService? 
_handshakeService; + private readonly ILogger _logger; + private readonly ILoggerFactory _loggerFactory; + private readonly IOplogPruneCutoffCalculator? _oplogPruneCutoffCalculator; + private readonly IOplogStore _oplogStore; + private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; + private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore; + private readonly ConcurrentDictionary _peerStates = new(); + private readonly Random _random = new(); + private readonly ISnapshotMetadataStore _snapshotMetadataStore; + private readonly ISnapshotService _snapshotService; + private readonly object _startStopLock = new(); + private readonly INetworkTelemetryService? _telemetry; + private CancellationTokenSource? _cts; private DateTime _lastMaintenanceTime = DateTime.MinValue; - /// - /// Initializes a new instance of the class. - /// - /// The discovery service. - /// The oplog store. - /// The document store. - /// The snapshot metadata store. - /// The snapshot service. - /// The peer configuration provider. - /// The logger factory. - /// The optional peer confirmation watermark store. - /// The optional peer handshake service. - /// The optional network telemetry service. - /// The optional cutoff calculator for safe maintenance pruning. - public SyncOrchestrator( - IDiscoveryService discovery, - IOplogStore oplogStore, - IDocumentStore documentStore, - ISnapshotMetadataStore snapshotStore, - ISnapshotService snapshotService, - IPeerNodeConfigurationProvider peerNodeConfigurationProvider, - ILoggerFactory loggerFactory, - IPeerOplogConfirmationStore? peerOplogConfirmationStore = null, - IPeerHandshakeService? handshakeService = null, - INetworkTelemetryService? telemetry = null, - IOplogPruneCutoffCalculator? 
oplogPruneCutoffCalculator = null) - { - _discovery = discovery; - _oplogStore = oplogStore; - _oplogPruneCutoffCalculator = oplogPruneCutoffCalculator; - _peerOplogConfirmationStore = peerOplogConfirmationStore; - _documentStore = documentStore; - _snapshotMetadataStore = snapshotStore; - _snapshotService = snapshotService; + /// + /// Initializes a new instance of the class. + /// + /// The discovery service. + /// The oplog store. + /// The document store. + /// The snapshot metadata store. + /// The snapshot service. + /// The peer configuration provider. + /// The logger factory. + /// The optional peer confirmation watermark store. + /// The optional peer handshake service. + /// The optional network telemetry service. + /// The optional cutoff calculator for safe maintenance pruning. + public SyncOrchestrator( + IDiscoveryService discovery, + IOplogStore oplogStore, + IDocumentStore documentStore, + ISnapshotMetadataStore snapshotStore, + ISnapshotService snapshotService, + IPeerNodeConfigurationProvider peerNodeConfigurationProvider, + ILoggerFactory loggerFactory, + IPeerOplogConfirmationStore? peerOplogConfirmationStore = null, + IPeerHandshakeService? handshakeService = null, + INetworkTelemetryService? telemetry = null, + IOplogPruneCutoffCalculator? oplogPruneCutoffCalculator = null) + { + _discovery = discovery; + _oplogStore = oplogStore; + _oplogPruneCutoffCalculator = oplogPruneCutoffCalculator; + _peerOplogConfirmationStore = peerOplogConfirmationStore; + _documentStore = documentStore; + _snapshotMetadataStore = snapshotStore; + _snapshotService = snapshotService; _peerNodeConfigurationProvider = peerNodeConfigurationProvider; _loggerFactory = loggerFactory; _logger = loggerFactory.CreateLogger(); - _handshakeService = handshakeService; - _telemetry = telemetry; - } - - /// - /// Starts the synchronization orchestrator loop. - /// - /// A completed task once startup has been triggered. 
- public async Task Start() - { - lock (_startStopLock) + _handshakeService = handshakeService; + _telemetry = telemetry; + } + + /// + /// Starts the synchronization orchestrator loop. + /// + /// A completed task once startup has been triggered. + public async Task Start() + { + lock (_startStopLock) { if (_cts != null) { _logger.LogWarning("Sync Orchestrator already started"); return; } + _cts = new CancellationTokenSource(); } @@ -127,16 +109,16 @@ public class SyncOrchestrator : ISyncOrchestrator } }, token); - await Task.CompletedTask; - } - - /// - /// Stops the synchronization orchestrator loop and releases client resources. - /// - /// A completed task once shutdown has been triggered. - public async Task Stop() - { - CancellationTokenSource? ctsToDispose = null; + await Task.CompletedTask; + } + + /// + /// Stops the synchronization orchestrator loop and releases client resources. + /// + /// A completed task once shutdown has been triggered. + public async Task Stop() + { + CancellationTokenSource? ctsToDispose = null; lock (_startStopLock) { @@ -165,7 +147,6 @@ public class SyncOrchestrator : ISyncOrchestrator // Cleanup clients foreach (var client in _clients.Values) - { try { client.Dispose(); @@ -174,43 +155,40 @@ public class SyncOrchestrator : ISyncOrchestrator { _logger.LogWarning(ex, "Error disposing client during shutdown"); } - } + _clients.Clear(); await Task.CompletedTask; } /// - /// Main synchronization loop. Periodically selects random peers to gossip with. + /// Main synchronization loop. Periodically selects random peers to gossip with. 
/// - private async Task SyncLoopAsync(CancellationToken token) - { - _logger.LogInformation("Sync Orchestrator Started (Parallel P2P)"); + private async Task SyncLoopAsync(CancellationToken token) + { + _logger.LogInformation("Sync Orchestrator Started (Parallel P2P)"); while (!token.IsCancellationRequested) { var config = await _peerNodeConfigurationProvider.GetConfiguration(); - try - { - var discoveredPeers = _discovery.GetActivePeers(); - - var knownPeers = config.KnownPeers.Select(k => new PeerNode( - k.NodeId, - $"{k.Host}:{k.Port}", - DateTimeOffset.UtcNow, - PeerType.StaticRemote)); - - var allPeers = BuildMergedPeerList(discoveredPeers, knownPeers, config.NodeId); - - await EnsurePeersRegisteredAsync(allPeers, config.NodeId, token); - - // Filter peers based on backoff - var now = DateTime.UtcNow; - var eligiblePeers = allPeers.Where(p => + try + { + var discoveredPeers = _discovery.GetActivePeers(); + + var knownPeers = config.KnownPeers.Select(k => new PeerNode( + k.NodeId, + $"{k.Host}:{k.Port}", + DateTimeOffset.UtcNow, + PeerType.StaticRemote)); + + var allPeers = BuildMergedPeerList(discoveredPeers, knownPeers, config.NodeId); + + await EnsurePeersRegisteredAsync(allPeers, config.NodeId, token); + + // Filter peers based on backoff + var now = DateTime.UtcNow; + var eligiblePeers = allPeers.Where(p => { - if (_peerStates.TryGetValue(p.NodeId, out var status)) - { - return status.NextRetryTime <= now; - } + if (_peerStates.TryGetValue(p.NodeId, out var status)) return status.NextRetryTime <= now; return true; }).ToList(); @@ -226,7 +204,7 @@ public class SyncOrchestrator : ISyncOrchestrator var tasks = targets.Select(peer => TrySyncWithPeer(peer, token)); await Task.WhenAll(tasks); - await RunMaintenanceIfDueAsync(config, now, token); + await RunMaintenanceIfDueAsync(config, now, token); } catch (OperationCanceledException) { @@ -245,97 +223,91 @@ public class SyncOrchestrator : ISyncOrchestrator catch (OperationCanceledException) { break; - } - } - 
} - - /// - /// Runs periodic maintenance when the configured interval has elapsed. - /// - /// The current peer node configuration. - /// The current UTC time used for interval evaluation. - /// The cancellation token. - /// A task that represents the asynchronous maintenance operation. - internal async Task RunMaintenanceIfDueAsync(PeerNodeConfiguration config, DateTime now, CancellationToken token) - { - var maintenanceInterval = TimeSpan.FromMinutes(config.MaintenanceIntervalMinutes); - if ((now - _lastMaintenanceTime) < maintenanceInterval) - { - return; - } - - _logger.LogInformation("Running periodic maintenance (Oplog pruning)..."); - try - { - var cutoffDecision = await CalculatePruneCutoffAsync(config, token); - if (!cutoffDecision.HasCutoff || !cutoffDecision.EffectiveCutoff.HasValue) - { - _lastMaintenanceTime = now; - var reason = string.IsNullOrWhiteSpace(cutoffDecision.Reason) - ? "No effective cutoff was produced." - : cutoffDecision.Reason; - _logger.LogInformation("Skipping oplog prune for this maintenance cycle: {Reason}", reason); - return; - } - - await _oplogStore.PruneOplogAsync(cutoffDecision.EffectiveCutoff.Value, token); - _lastMaintenanceTime = now; - - if (cutoffDecision.ConfirmationCutoff.HasValue) - { - _logger.LogInformation( - "Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}, ConfirmationCutoff: {ConfirmationCutoff}).", - config.OplogRetentionHours, - cutoffDecision.EffectiveCutoff.Value, - cutoffDecision.ConfirmationCutoff.Value); - } - else - { - _logger.LogInformation( - "Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}).", - config.OplogRetentionHours, - cutoffDecision.EffectiveCutoff.Value); - } - } - catch (Exception maintenanceEx) - { - _logger.LogError(maintenanceEx, "Maintenance failed."); - } - } - - private async Task CalculatePruneCutoffAsync(PeerNodeConfiguration config, CancellationToken token) - { - if 
(_oplogPruneCutoffCalculator == null) - { - var retentionCutoff = new HlcTimestamp( - DateTimeOffset.UtcNow.AddHours(-config.OplogRetentionHours).ToUnixTimeMilliseconds(), - 0, - config.NodeId); - - return OplogPruneCutoffDecision.WithCutoff( - retentionCutoff, - confirmationCutoff: null, - effectiveCutoff: retentionCutoff, - reason: "Oplog prune cutoff calculator is not configured."); - } - - return await _oplogPruneCutoffCalculator.CalculateEffectiveCutoffAsync(config, token); - } - - /// - /// Attempts to synchronize with a specific peer. - /// Uses Vector Clock comparison to determine what to pull/push for each node. - /// Performs handshake, vector clock exchange, and data exchange (Push/Pull per node). + } + } + } + + /// + /// Runs periodic maintenance when the configured interval has elapsed. /// - private async Task TrySyncWithPeer(PeerNode peer, CancellationToken token) - { - using var operationContext = LogContext.PushProperty("OperationId", Guid.NewGuid().ToString("N")); - using var peerContext = LogContext.PushProperty("PeerNodeId", peer.NodeId); - using var peerAddressContext = LogContext.PushProperty("PeerAddress", peer.Address); - - TcpPeerClient? client = null; - bool shouldRemoveClient = false; - bool syncSuccessful = false; + /// The current peer node configuration. + /// The current UTC time used for interval evaluation. + /// The cancellation token. + /// A task that represents the asynchronous maintenance operation. 
+ internal async Task RunMaintenanceIfDueAsync(PeerNodeConfiguration config, DateTime now, CancellationToken token) + { + var maintenanceInterval = TimeSpan.FromMinutes(config.MaintenanceIntervalMinutes); + if (now - _lastMaintenanceTime < maintenanceInterval) return; + + _logger.LogInformation("Running periodic maintenance (Oplog pruning)..."); + try + { + var cutoffDecision = await CalculatePruneCutoffAsync(config, token); + if (!cutoffDecision.HasCutoff || !cutoffDecision.EffectiveCutoff.HasValue) + { + _lastMaintenanceTime = now; + string reason = string.IsNullOrWhiteSpace(cutoffDecision.Reason) + ? "No effective cutoff was produced." + : cutoffDecision.Reason; + _logger.LogInformation("Skipping oplog prune for this maintenance cycle: {Reason}", reason); + return; + } + + await _oplogStore.PruneOplogAsync(cutoffDecision.EffectiveCutoff.Value, token); + _lastMaintenanceTime = now; + + if (cutoffDecision.ConfirmationCutoff.HasValue) + _logger.LogInformation( + "Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}, ConfirmationCutoff: {ConfirmationCutoff}).", + config.OplogRetentionHours, + cutoffDecision.EffectiveCutoff.Value, + cutoffDecision.ConfirmationCutoff.Value); + else + _logger.LogInformation( + "Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}).", + config.OplogRetentionHours, + cutoffDecision.EffectiveCutoff.Value); + } + catch (Exception maintenanceEx) + { + _logger.LogError(maintenanceEx, "Maintenance failed."); + } + } + + private async Task CalculatePruneCutoffAsync(PeerNodeConfiguration config, + CancellationToken token) + { + if (_oplogPruneCutoffCalculator == null) + { + var retentionCutoff = new HlcTimestamp( + DateTimeOffset.UtcNow.AddHours(-config.OplogRetentionHours).ToUnixTimeMilliseconds(), + 0, + config.NodeId); + + return OplogPruneCutoffDecision.WithCutoff( + retentionCutoff, + null, + retentionCutoff, + "Oplog prune cutoff calculator is 
not configured."); + } + + return await _oplogPruneCutoffCalculator.CalculateEffectiveCutoffAsync(config, token); + } + + /// + /// Attempts to synchronize with a specific peer. + /// Uses Vector Clock comparison to determine what to pull/push for each node. + /// Performs handshake, vector clock exchange, and data exchange (Push/Pull per node). + /// + private async Task TrySyncWithPeer(PeerNode peer, CancellationToken token) + { + using var operationContext = LogContext.PushProperty("OperationId", Guid.NewGuid().ToString("N")); + using var peerContext = LogContext.PushProperty("PeerNodeId", peer.NodeId); + using var peerAddressContext = LogContext.PushProperty("PeerAddress", peer.Address); + + TcpPeerClient? client = null; + var shouldRemoveClient = false; + var syncSuccessful = false; try { @@ -349,13 +321,11 @@ public class SyncOrchestrator : ISyncOrchestrator _telemetry)); // Reconnect if disconnected - if (!client.IsConnected) - { - await client.ConnectAsync(token); - } + if (!client.IsConnected) await client.ConnectAsync(token); // Handshake (idempotent) - if (!await client.HandshakeAsync(config.NodeId, config.AuthToken, _documentStore.InterestedCollection, token)) + if (!await client.HandshakeAsync(config.NodeId, config.AuthToken, _documentStore.InterestedCollection, + token)) { _logger.LogWarning("Handshake rejected by {NodeId}", peer.NodeId); shouldRemoveClient = true; @@ -363,15 +333,15 @@ public class SyncOrchestrator : ISyncOrchestrator } // 1. Exchange Vector Clocks - var remoteVectorClock = await client.GetVectorClockAsync(token); - var localVectorClock = await _oplogStore.GetVectorClockAsync(token); - - _logger.LogDebug("Vector Clock - Local: {Local}, Remote: {Remote}", localVectorClock, remoteVectorClock); - - await AdvanceConfirmationsFromVectorClockAsync(peer.NodeId, localVectorClock, remoteVectorClock, token); - - // 2. 
Determine causality relationship - var causality = localVectorClock.CompareTo(remoteVectorClock); + var remoteVectorClock = await client.GetVectorClockAsync(token); + var localVectorClock = await _oplogStore.GetVectorClockAsync(token); + + _logger.LogDebug("Vector Clock - Local: {Local}, Remote: {Remote}", localVectorClock, remoteVectorClock); + + await AdvanceConfirmationsFromVectorClockAsync(peer.NodeId, localVectorClock, remoteVectorClock, token); + + // 2. Determine causality relationship + var causality = localVectorClock.CompareTo(remoteVectorClock); // 3. PULL: Identify nodes where remote is ahead var nodesToPull = localVectorClock.GetNodesWithUpdates(remoteVectorClock).ToList(); @@ -381,7 +351,7 @@ public class SyncOrchestrator : ISyncOrchestrator _logger.LogInformation("Pulling changes from {PeerNodeId} for {Count} nodes: {Nodes}", peer.NodeId, nodesToPull.Count, string.Join(", ", nodesToPull)); - foreach (var nodeId in nodesToPull) + foreach (string nodeId in nodesToPull) { var localTs = localVectorClock.GetTimestamp(nodeId); var remoteTs = remoteVectorClock.GetTimestamp(nodeId); @@ -390,13 +360,16 @@ public class SyncOrchestrator : ISyncOrchestrator nodeId, localTs, remoteTs); // PASS LOCAL INTERESTS TO PULL - var changes = await client.PullChangesFromNodeAsync(nodeId, localTs, _documentStore.InterestedCollection, token); + var changes = await client.PullChangesFromNodeAsync(nodeId, localTs, + _documentStore.InterestedCollection, token); if (changes != null && changes.Count > 0) { var result = await ProcessInboundBatchAsync(client, peer.NodeId, changes, token); if (result != SyncBatchResult.Success) { - _logger.LogWarning("Inbound batch processing failed with status {Status}. Aborting sync for this session.", result); + _logger.LogWarning( + "Inbound batch processing failed with status {Status}. 
Aborting sync for this session.", + result); RecordFailure(peer.NodeId); return; } @@ -410,33 +383,32 @@ public class SyncOrchestrator : ISyncOrchestrator _logger.LogInformation("Pushing changes to {PeerNodeId} for {Count} nodes: {Nodes}", peer.NodeId, nodesToPush.Count, string.Join(", ", nodesToPush)); - foreach (var nodeId in nodesToPush) + foreach (string nodeId in nodesToPush) { - var remoteTs = remoteVectorClock.GetTimestamp(nodeId); - - // PUSH FILTERING: Pass remote receiver's interests to oplogStore for efficient retrieval - var remoteInterests = client.RemoteInterests; - var changes = (await _oplogStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, remoteInterests, token)).ToList(); + var remoteTs = remoteVectorClock.GetTimestamp(nodeId); - if (changes.Any()) - { - _logger.LogDebug("Pushing {Count} filtered changes for Node {NodeId}", changes.Count, nodeId); - await client.PushChangesAsync(changes, token); - await AdvanceConfirmationForPushedBatchAsync(peer.NodeId, nodeId, changes, token); - } - } - } + // PUSH FILTERING: Pass remote receiver's interests to oplogStore for efficient retrieval + var remoteInterests = client.RemoteInterests; + var changes = + (await _oplogStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, remoteInterests, token)) + .ToList(); + + if (changes.Any()) + { + _logger.LogDebug("Pushing {Count} filtered changes for Node {NodeId}", changes.Count, nodeId); + await client.PushChangesAsync(changes, token); + await AdvanceConfirmationForPushedBatchAsync(peer.NodeId, nodeId, changes, token); + } + } + } // 5. Handle Concurrent/Equal cases if (causality == CausalityRelation.Equal) - { _logger.LogDebug("Vector clocks are equal with {PeerNodeId}. 
No sync needed.", peer.NodeId); - } else if (causality == CausalityRelation.Concurrent && !nodesToPull.Any() && !nodesToPush.Any()) - { - _logger.LogDebug("Vector clocks are concurrent with {PeerNodeId}, but no divergence detected.", peer.NodeId); - } - + _logger.LogDebug("Vector clocks are concurrent with {PeerNodeId}, but no divergence detected.", + peer.NodeId); + syncSuccessful = true; RecordSuccess(peer.NodeId); } @@ -446,27 +418,29 @@ public class SyncOrchestrator : ISyncOrchestrator _logger.LogWarning("Snapshot required for peer {NodeId}. Initiating merge sync.", peer.NodeId); if (client != null && client.IsConnected) { - try + try { await PerformSnapshotSyncAsync(client, true, token); syncSuccessful = true; RecordSuccess(peer.NodeId); } catch - { - RecordFailure(peer.NodeId); + { + RecordFailure(peer.NodeId); shouldRemoveClient = true; } } else - { - RecordFailure(peer.NodeId); + { + RecordFailure(peer.NodeId); shouldRemoveClient = true; } } catch (CorruptDatabaseException cex) { - _logger.LogCritical(cex, "Local database corruption detected during sync with {NodeId}. Initiating EMERGENCY SNAPSHOT RECOVERY.", peer.NodeId); + _logger.LogCritical(cex, + "Local database corruption detected during sync with {NodeId}. Initiating EMERGENCY SNAPSHOT RECOVERY.", + peer.NodeId); if (client != null && client.IsConnected) { try @@ -485,8 +459,8 @@ public class SyncOrchestrator : ISyncOrchestrator } } else - { - RecordFailure(peer.NodeId); + { + RecordFailure(peer.NodeId); shouldRemoveClient = true; } } @@ -498,7 +472,8 @@ public class SyncOrchestrator : ISyncOrchestrator } catch (SocketException sex) { - _logger.LogWarning("Network error syncing with {NodeId}: {Message}. Will retry later.", peer.NodeId, sex.Message); + _logger.LogWarning("Network error syncing with {NodeId}: {Message}. 
Will retry later.", peer.NodeId, + sex.Message); shouldRemoveClient = true; RecordFailure(peer.NodeId); } @@ -511,299 +486,268 @@ public class SyncOrchestrator : ISyncOrchestrator finally { if (shouldRemoveClient && client != null) - { if (_clients.TryRemove(peer.NodeId, out var removedClient)) - { - try { removedClient.Dispose(); } catch { /* Ignore disposal errors */ } - } - } + try + { + removedClient.Dispose(); + } + catch + { + /* Ignore disposal errors */ + } // Log successful sync outcome (failures are already logged in catch blocks) - if (syncSuccessful) + if (syncSuccessful) _logger.LogInformation("Sync with {NodeId} completed successfully.", peer.NodeId); + } + } + + private void RecordSuccess(string nodeId) + { + _peerStates.AddOrUpdate(nodeId, + new PeerStatus { FailureCount = 0, NextRetryTime = DateTime.MinValue }, + (k, v) => { - _logger.LogInformation("Sync with {NodeId} completed successfully.", peer.NodeId); + v.FailureCount = 0; + v.NextRetryTime = DateTime.MinValue; + return v; + }); + } + + /// + /// Merges discovered and configured peers into a distinct list that excludes the local node. + /// + /// The peers discovered dynamically. + /// The peers configured statically. + /// The local node identifier to exclude from results. + /// A de-duplicated list of peers eligible for synchronization. + internal static IReadOnlyList BuildMergedPeerList( + IEnumerable discoveredPeers, + IEnumerable knownPeers, + string localNodeId) + { + return discoveredPeers + .Concat(knownPeers) + .Where(p => !string.Equals(p.NodeId, localNodeId, StringComparison.Ordinal)) + .GroupBy(p => p.NodeId, StringComparer.Ordinal) + .Select(g => g.First()) + .ToList(); + } + + /// + /// Ensures peers are registered in the confirmation store when that store is available. + /// + /// The peers to register. + /// The local node identifier used to skip self-registration. + /// The cancellation token. + /// A task that represents the asynchronous registration operation. 
+ internal async Task EnsurePeersRegisteredAsync(IEnumerable peers, string localNodeId, + CancellationToken token) + { + if (_peerOplogConfirmationStore == null) return; + + foreach (var peer in peers) + { + if (string.Equals(peer.NodeId, localNodeId, StringComparison.Ordinal)) continue; + + try + { + await _peerOplogConfirmationStore.EnsurePeerRegisteredAsync(peer.NodeId, peer.Address, peer.Type, + token); + } + catch (OperationCanceledException) when (token.IsCancellationRequested) + { + throw; + } + catch (Exception ex) + { + _logger.LogWarning(ex, "Failed to register peer {PeerNodeId} in confirmation store", peer.NodeId); } } - } - - private void RecordSuccess(string nodeId) - { - _peerStates.AddOrUpdate(nodeId, - new PeerStatus { FailureCount = 0, NextRetryTime = DateTime.MinValue }, - (k, v) => { v.FailureCount = 0; v.NextRetryTime = DateTime.MinValue; return v; }); - } - - /// - /// Merges discovered and configured peers into a distinct list that excludes the local node. - /// - /// The peers discovered dynamically. - /// The peers configured statically. - /// The local node identifier to exclude from results. - /// A de-duplicated list of peers eligible for synchronization. - internal static IReadOnlyList BuildMergedPeerList( - IEnumerable discoveredPeers, - IEnumerable knownPeers, - string localNodeId) - { - return discoveredPeers - .Concat(knownPeers) - .Where(p => !string.Equals(p.NodeId, localNodeId, StringComparison.Ordinal)) - .GroupBy(p => p.NodeId, StringComparer.Ordinal) - .Select(g => g.First()) - .ToList(); - } - - /// - /// Ensures peers are registered in the confirmation store when that store is available. - /// - /// The peers to register. - /// The local node identifier used to skip self-registration. - /// The cancellation token. - /// A task that represents the asynchronous registration operation. 
- internal async Task EnsurePeersRegisteredAsync(IEnumerable peers, string localNodeId, CancellationToken token) - { - if (_peerOplogConfirmationStore == null) - { - return; - } - - foreach (var peer in peers) - { - if (string.Equals(peer.NodeId, localNodeId, StringComparison.Ordinal)) - { - continue; - } - - try - { - await _peerOplogConfirmationStore.EnsurePeerRegisteredAsync(peer.NodeId, peer.Address, peer.Type, token); - } - catch (OperationCanceledException) when (token.IsCancellationRequested) - { - throw; - } - catch (Exception ex) - { - _logger.LogWarning(ex, "Failed to register peer {PeerNodeId} in confirmation store", peer.NodeId); - } - } - } - - /// - /// Advances peer confirmation watermarks using local and remote vector clock state. - /// - /// The peer node identifier whose confirmations are being updated. - /// The local vector clock. - /// The remote vector clock. - /// The cancellation token. - /// A task that represents the asynchronous confirmation update operation. - internal async Task AdvanceConfirmationsFromVectorClockAsync( - string peerNodeId, - VectorClock localVectorClock, - VectorClock remoteVectorClock, - CancellationToken token) - { - if (_peerOplogConfirmationStore == null) - { - return; - } - - var nodeIds = new HashSet(localVectorClock.NodeIds, StringComparer.Ordinal); - foreach (var nodeId in remoteVectorClock.NodeIds) - { - nodeIds.Add(nodeId); - } - - foreach (var sourceNodeId in nodeIds) - { - var localTimestamp = localVectorClock.GetTimestamp(sourceNodeId); - if (localTimestamp == default) - { - continue; - } - - var remoteTimestamp = remoteVectorClock.GetTimestamp(sourceNodeId); - if (remoteTimestamp < localTimestamp) - { - continue; - } - - await UpdatePeerConfirmationAsync(peerNodeId, sourceNodeId, localTimestamp, token); - } - } - - /// - /// Advances the peer confirmation watermark after successfully pushing a batch of changes. - /// - /// The peer node identifier that received the changes. 
- /// The source node identifier associated with the pushed changes. - /// The pushed oplog entries. - /// The cancellation token. - /// A task that represents the asynchronous confirmation update operation. - internal async Task AdvanceConfirmationForPushedBatchAsync( - string peerNodeId, - string sourceNodeId, - IReadOnlyCollection pushedChanges, - CancellationToken token) - { - if (_peerOplogConfirmationStore == null || pushedChanges.Count == 0) - { - return; - } - - var maxPushed = pushedChanges - .OrderBy(entry => entry.Timestamp) - .Last(); - - try - { - await _peerOplogConfirmationStore.UpdateConfirmationAsync( - peerNodeId, - sourceNodeId, - maxPushed.Timestamp, - maxPushed.Hash ?? string.Empty, - token); - } - catch (OperationCanceledException) when (token.IsCancellationRequested) - { - throw; - } - catch (Exception ex) - { - _logger.LogWarning(ex, - "Failed to advance push confirmation watermark for peer {PeerNodeId} and source {SourceNodeId}", - peerNodeId, sourceNodeId); - } - } - - private async Task UpdatePeerConfirmationAsync( - string peerNodeId, - string sourceNodeId, - HlcTimestamp timestamp, - CancellationToken token) - { - if (_peerOplogConfirmationStore == null) - { - return; - } - - try - { - // Best-effort hash lookup: IOplogStore exposes latest hash per source node. - var hash = await _oplogStore.GetLastEntryHashAsync(sourceNodeId, token) ?? string.Empty; - await _peerOplogConfirmationStore.UpdateConfirmationAsync(peerNodeId, sourceNodeId, timestamp, hash, token); - } - catch (OperationCanceledException) when (token.IsCancellationRequested) - { - throw; - } - catch (Exception ex) - { - _logger.LogWarning(ex, - "Failed to advance confirmation watermark for peer {PeerNodeId} and source {SourceNodeId}", - peerNodeId, sourceNodeId); - } - } - - private void RecordFailure(string nodeId) - { - _peerStates.AddOrUpdate(nodeId, + } + + /// + /// Advances peer confirmation watermarks using local and remote vector clock state. 
+ /// + /// The peer node identifier whose confirmations are being updated. + /// The local vector clock. + /// The remote vector clock. + /// The cancellation token. + /// A task that represents the asynchronous confirmation update operation. + internal async Task AdvanceConfirmationsFromVectorClockAsync( + string peerNodeId, + VectorClock localVectorClock, + VectorClock remoteVectorClock, + CancellationToken token) + { + if (_peerOplogConfirmationStore == null) return; + + var nodeIds = new HashSet(localVectorClock.NodeIds, StringComparer.Ordinal); + foreach (string nodeId in remoteVectorClock.NodeIds) nodeIds.Add(nodeId); + + foreach (string sourceNodeId in nodeIds) + { + var localTimestamp = localVectorClock.GetTimestamp(sourceNodeId); + if (localTimestamp == default) continue; + + var remoteTimestamp = remoteVectorClock.GetTimestamp(sourceNodeId); + if (remoteTimestamp < localTimestamp) continue; + + await UpdatePeerConfirmationAsync(peerNodeId, sourceNodeId, localTimestamp, token); + } + } + + /// + /// Advances the peer confirmation watermark after successfully pushing a batch of changes. + /// + /// The peer node identifier that received the changes. + /// The source node identifier associated with the pushed changes. + /// The pushed oplog entries. + /// The cancellation token. + /// A task that represents the asynchronous confirmation update operation. + internal async Task AdvanceConfirmationForPushedBatchAsync( + string peerNodeId, + string sourceNodeId, + IReadOnlyCollection pushedChanges, + CancellationToken token) + { + if (_peerOplogConfirmationStore == null || pushedChanges.Count == 0) return; + + var maxPushed = pushedChanges + .OrderBy(entry => entry.Timestamp) + .Last(); + + try + { + await _peerOplogConfirmationStore.UpdateConfirmationAsync( + peerNodeId, + sourceNodeId, + maxPushed.Timestamp, + maxPushed.Hash ?? 
string.Empty, + token); + } + catch (OperationCanceledException) when (token.IsCancellationRequested) + { + throw; + } + catch (Exception ex) + { + _logger.LogWarning(ex, + "Failed to advance push confirmation watermark for peer {PeerNodeId} and source {SourceNodeId}", + peerNodeId, sourceNodeId); + } + } + + private async Task UpdatePeerConfirmationAsync( + string peerNodeId, + string sourceNodeId, + HlcTimestamp timestamp, + CancellationToken token) + { + if (_peerOplogConfirmationStore == null) return; + + try + { + // Best-effort hash lookup: IOplogStore exposes latest hash per source node. + string hash = await _oplogStore.GetLastEntryHashAsync(sourceNodeId, token) ?? string.Empty; + await _peerOplogConfirmationStore.UpdateConfirmationAsync(peerNodeId, sourceNodeId, timestamp, hash, token); + } + catch (OperationCanceledException) when (token.IsCancellationRequested) + { + throw; + } + catch (Exception ex) + { + _logger.LogWarning(ex, + "Failed to advance confirmation watermark for peer {PeerNodeId} and source {SourceNodeId}", + peerNodeId, sourceNodeId); + } + } + + private void RecordFailure(string nodeId) + { + _peerStates.AddOrUpdate(nodeId, new PeerStatus { FailureCount = 1, NextRetryTime = DateTime.UtcNow.AddSeconds(1) }, - (k, v) => + (k, v) => { v.FailureCount++; // Exponential backoff: 1s, 2s, 4s... max 60s - var delaySeconds = Math.Min(Math.Pow(2, v.FailureCount), 60); + double delaySeconds = Math.Min(Math.Pow(2, v.FailureCount), 60); v.NextRetryTime = DateTime.UtcNow.AddSeconds(delaySeconds); return v; }); } /// - /// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore. - /// Extracted to enforce Single Responsibility Principle. + /// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore. + /// Extracted to enforce Single Responsibility Principle. 
/// - private enum SyncBatchResult - { - Success, - GapDetected, - IntegrityError, - ChainBroken - } - - /// - /// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore. - /// Extracted to enforce Single Responsibility Principle. - /// - private async Task ProcessInboundBatchAsync(TcpPeerClient client, string peerNodeId, IList changes, CancellationToken token) + private async Task ProcessInboundBatchAsync(TcpPeerClient client, string peerNodeId, + IList changes, CancellationToken token) { _logger.LogInformation("Received {Count} changes from {NodeId}", changes.Count, peerNodeId); // 1. Validate internal integrity of the batch (Hash check) foreach (var entry in changes) - { if (!entry.IsValid()) - { // CHANGED: Log Critical Error but ACCEPT the entry to allow sync to progress (Soft Validation). // Throwing here would cause an unrecoverable state where this batch blocks sync forever. - _logger.LogError("Integrity Check Failed for Entry {Hash} (Node: {NodeId}). Expected: {computedHash}. ACCEPTING payload despite mismatch to maintain availability.", + _logger.LogError( + "Integrity Check Failed for Entry {Hash} (Node: {NodeId}). Expected: {computedHash}. ACCEPTING payload despite mismatch to maintain availability.", entry.Hash, entry.Timestamp.NodeId, entry.ComputeHash()); - } - } // 2. Group changes by Author Node to validate Source Chains independently var changesByNode = changes.GroupBy(c => c.Timestamp.NodeId); foreach (var group in changesByNode) { - var authorNodeId = group.Key; + string authorNodeId = group.Key; // FIX: Order by the full Timestamp (Physical + Logical), not just LogicalCounter. // LogicalCounter resets when PhysicalTime advances, so sorting by Counter alone breaks chronological order. 
var authorChain = group.OrderBy(c => c.Timestamp).ToList(); // Check linkage within the batch - for (int i = 1; i < authorChain.Count; i++) - { + for (var i = 1; i < authorChain.Count; i++) if (authorChain[i].PreviousHash != authorChain[i - 1].Hash) { _logger.LogError("Chain Broken in Batch for Node {AuthorId}", authorNodeId); return SyncBatchResult.ChainBroken; } - } // Check linkage with Local State var firstEntry = authorChain[0]; - var localHeadHash = await _oplogStore.GetLastEntryHashAsync(authorNodeId, token); + string? localHeadHash = await _oplogStore.GetLastEntryHashAsync(authorNodeId, token); - _logger.LogDebug("Processing chain for Node {AuthorId}: FirstEntry.PrevHash={PrevHash}, FirstEntry.Hash={Hash}, LocalHeadHash={LocalHead}", + _logger.LogDebug( + "Processing chain for Node {AuthorId}: FirstEntry.PrevHash={PrevHash}, FirstEntry.Hash={Hash}, LocalHeadHash={LocalHead}", authorNodeId, firstEntry.PreviousHash, firstEntry.Hash, localHeadHash ?? "(null)"); if (localHeadHash != null && firstEntry.PreviousHash != localHeadHash) { // Check if entry starts from snapshot boundary (valid case after pruning) - var snapshotHash = await _snapshotMetadataStore.GetSnapshotHashAsync(authorNodeId, token); - + string? snapshotHash = await _snapshotMetadataStore.GetSnapshotHashAsync(authorNodeId, token); + if (snapshotHash != null && firstEntry.PreviousHash == snapshotHash) { // Entry connects to snapshot boundary - this is expected after pruning/snapshot sync // This prevents infinite snapshot request loops when old nodes reconnect _logger.LogInformation( "Entry for Node {AuthorId} connects to snapshot boundary (Hash: {SnapshotHash}). Accepting without gap recovery. 
Network convergence in effect.", - authorNodeId, snapshotHash); - - // No gap recovery needed - this is a valid state + authorNodeId, snapshotHash); + + // No gap recovery needed - this is a valid state } else { // GAP DETECTED (not a snapshot boundary case) - _logger.LogWarning("Gap Detected for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Initiating Recovery.", + _logger.LogWarning( + "Gap Detected for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Initiating Recovery.", authorNodeId, localHeadHash, firstEntry.PreviousHash); // Gap Recovery (Range Sync) List? missingChain = null; try - { + { missingChain = await client.GetChainRangeAsync(localHeadHash, firstEntry.PreviousHash, token); } catch (SnapshotRequiredException) @@ -821,10 +765,11 @@ public class SyncOrchestrator : ISyncOrchestrator _logger.LogInformation("Gap Recovery: Retrieved {Count} missing entries.", missingChain.Count); // Validate Recovery Chain Linkage - bool linkValid = true; + var linkValid = true; if (missingChain[0].PreviousHash != localHeadHash) linkValid = false; - for (int i = 1; i < missingChain.Count; i++) - if (missingChain[i].PreviousHash != missingChain[i - 1].Hash) linkValid = false; + for (var i = 1; i < missingChain.Count; i++) + if (missingChain[i].PreviousHash != missingChain[i - 1].Hash) + linkValid = false; if (missingChain.Last().Hash != firstEntry.PreviousHash) linkValid = false; if (!linkValid) @@ -838,26 +783,28 @@ public class SyncOrchestrator : ISyncOrchestrator _logger.LogInformation("Gap Recovery Applied Successfully."); } else - { - // Gap recovery failed. This can happen if: - // 1. This is actually our first contact with this node's history - // 2. The peer doesn't have the full history - // 3. There's a true gap that cannot be recovered - - // DECISION: Accept the entries anyway but log a warning - // This allows forward progress even with partial history - _logger.LogWarning("Could not recover gap for Node {AuthorId}. 
Local Head: {Local}, Remote Prev: {Prev}. Accepting entries anyway (partial sync).", - authorNodeId, localHeadHash, firstEntry.PreviousHash); - - // Optionally: Mark this as a partial sync in metadata - // For now, we proceed and let the chain continue from this point + { + // Gap recovery failed. This can happen if: + // 1. This is actually our first contact with this node's history + // 2. The peer doesn't have the full history + // 3. There's a true gap that cannot be recovered + + // DECISION: Accept the entries anyway but log a warning + // This allows forward progress even with partial history + _logger.LogWarning( + "Could not recover gap for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Accepting entries anyway (partial sync).", + authorNodeId, localHeadHash, firstEntry.PreviousHash); + + // Optionally: Mark this as a partial sync in metadata + // For now, we proceed and let the chain continue from this point } } } else if (localHeadHash == null && !string.IsNullOrEmpty(firstEntry.PreviousHash)) { // Implicit Accept / Partial Sync warning - _logger.LogWarning("First contact with Node {AuthorId} at explicit state (Not Genesis). Accepting.", authorNodeId); + _logger.LogWarning("First contact with Node {AuthorId} at explicit state (Not Genesis). Accepting.", + authorNodeId); } // Apply original batch (grouped by node for clarity, but oplogStore usually handles bulk) @@ -869,31 +816,27 @@ public class SyncOrchestrator : ISyncOrchestrator private async Task PerformSnapshotSyncAsync(TcpPeerClient client, bool mergeOnly, CancellationToken token) { - _logger.LogInformation(mergeOnly ? "Starting Snapshot Merge..." : "Starting Full Database Replacement..."); - - var tempFile = Path.GetTempFileName(); + _logger.LogInformation(mergeOnly ? "Starting Snapshot Merge..." 
: "Starting Full Database Replacement..."); + + string tempFile = Path.GetTempFileName(); try { _logger.LogInformation("Downloading snapshot to {TempFile}...", tempFile); using (var fs = File.Create(tempFile)) { await client.GetSnapshotAsync(fs, token); - } - - _logger.LogInformation("Snapshot Downloaded. applying to store..."); - + } + + _logger.LogInformation("Snapshot Downloaded. applying to store..."); + using (var fs = File.OpenRead(tempFile)) { if (mergeOnly) - { await _snapshotService.MergeSnapshotAsync(fs, token); - } else - { await _snapshotService.ReplaceDatabaseAsync(fs, token); - } - } - + } + _logger.LogInformation("Snapshot applied successfully."); } catch (Exception ex) @@ -904,7 +847,6 @@ public class SyncOrchestrator : ISyncOrchestrator finally { if (File.Exists(tempFile)) - { try { File.Delete(tempFile); @@ -913,7 +855,31 @@ public class SyncOrchestrator : ISyncOrchestrator { _logger.LogWarning(ex, "Failed to delete temporary snapshot file {TempFile}", tempFile); } - } } } -} + + private class PeerStatus + { + /// + /// Gets or sets the number of consecutive failures for the peer. + /// + public int FailureCount { get; set; } + + /// + /// Gets or sets the next time a retry attempt is allowed. + /// + public DateTime NextRetryTime { get; set; } + } + + /// + /// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore. + /// Extracted to enforce Single Responsibility Principle. 
+ /// + private enum SyncBatchResult + { + Success, + GapDetected, + IntegrityError, + ChainBroken + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/TcpPeerClient.cs b/src/ZB.MOM.WW.CBDDC.Network/TcpPeerClient.cs index 92cefd6..3080a5b 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/TcpPeerClient.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/TcpPeerClient.cs @@ -1,44 +1,60 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; using System.Net.Sockets; -using System.Threading; -using System.Threading.Tasks; -using Google.Protobuf; +using System.Text.Json; using Microsoft.Extensions.Logging; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Network.Proto; -using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Protocol; +using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Telemetry; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Represents a TCP client connection to a remote peer for synchronization. +/// Represents a TCP client connection to a remote peer for synchronization. /// -public class TcpPeerClient : IDisposable -{ - private readonly TcpClient _client; - private readonly string _peerAddress; - private readonly ILogger _logger; - private readonly IPeerHandshakeService? _handshakeService; - private NetworkStream? _stream; - private CipherState? _cipherState; - private readonly object _connectionLock = new object(); - private bool _disposed = false; - +public class TcpPeerClient : IDisposable +{ private const int ConnectionTimeoutMs = 5000; - private const int OperationTimeoutMs = 30000; - - private readonly ProtocolHandler _protocol; - - /// - /// Gets a value indicating whether the client currently has an active connection. - /// - public bool IsConnected - { + private const int OperationTimeoutMs = 30000; + private readonly TcpClient _client; + private readonly object _connectionLock = new(); + private readonly IPeerHandshakeService? 
_handshakeService; + private readonly ILogger _logger; + private readonly string _peerAddress; + + private readonly ProtocolHandler _protocol; + + private readonly INetworkTelemetryService? _telemetry; + private CipherState? _cipherState; + private bool _disposed; + private List _remoteInterests = new(); + private NetworkStream? _stream; + + private bool _useCompression; // Negotiated after handshake + + /// + /// Initializes a new instance of the class. + /// + /// The remote peer address in host:port format. + /// The logger used for connection and protocol events. + /// The optional handshake service used to establish secure sessions. + /// The optional telemetry service for network metrics. + public TcpPeerClient(string peerAddress, ILogger logger, + IPeerHandshakeService? handshakeService = null, INetworkTelemetryService? telemetry = null) + { + _client = new TcpClient(); + _peerAddress = peerAddress; + _logger = logger; + _handshakeService = handshakeService; + _telemetry = telemetry; + _protocol = new ProtocolHandler(logger, telemetry); + } + + /// + /// Gets a value indicating whether the client currently has an active connection. + /// + public bool IsConnected + { get { lock (_connectionLock) @@ -46,89 +62,97 @@ public class TcpPeerClient : IDisposable return _client != null && _client.Connected && _stream != null && !_disposed; } } - } - - /// - /// Gets a value indicating whether the handshake with the remote peer has completed successfully. - /// - public bool HasHandshaked { get; private set; } - - private readonly INetworkTelemetryService? _telemetry; - - /// - /// Initializes a new instance of the class. - /// - /// The remote peer address in host:port format. - /// The logger used for connection and protocol events. - /// The optional handshake service used to establish secure sessions. - /// The optional telemetry service for network metrics. - public TcpPeerClient(string peerAddress, ILogger logger, IPeerHandshakeService? 
handshakeService = null, INetworkTelemetryService? telemetry = null) - { - _client = new TcpClient(); - _peerAddress = peerAddress; - _logger = logger; - _handshakeService = handshakeService; - _telemetry = telemetry; - _protocol = new ProtocolHandler(logger, telemetry); - } - - /// - /// Connects to the configured remote peer. - /// - /// A token used to cancel the connection attempt. - /// A task that represents the asynchronous connect operation. - public async Task ConnectAsync(CancellationToken token) - { + } + + /// + /// Gets a value indicating whether the handshake with the remote peer has completed successfully. + /// + public bool HasHandshaked { get; private set; } + + /// + /// Gets the list of collections the remote peer is interested in. + /// + public IReadOnlyList RemoteInterests => _remoteInterests.AsReadOnly(); + + /// + /// Releases resources used by the peer client. + /// + public void Dispose() + { lock (_connectionLock) { - if (_disposed) - { - throw new ObjectDisposedException(nameof(TcpPeerClient)); - } - + if (_disposed) return; + _disposed = true; + } + + try + { + _stream?.Dispose(); + } + catch (Exception ex) + { + _logger.LogWarning(ex, "Error disposing network stream"); + } + + try + { + _client?.Dispose(); + } + catch (Exception ex) + { + _logger.LogWarning(ex, "Error disposing TCP client"); + } + + _logger.LogDebug("Disposed connection to peer: {Address}", _peerAddress); + } + + /// + /// Connects to the configured remote peer. + /// + /// A token used to cancel the connection attempt. + /// A task that represents the asynchronous connect operation. + public async Task ConnectAsync(CancellationToken token) + { + lock (_connectionLock) + { + if (_disposed) throw new ObjectDisposedException(nameof(TcpPeerClient)); + if (IsConnected) return; } - var parts = _peerAddress.Split(':'); + string[] parts = _peerAddress.Split(':'); if (parts.Length != 2) - { throw new ArgumentException($"Invalid address format: {_peerAddress}. 
Expected format: host:port"); - } if (!int.TryParse(parts[1], out int port) || port <= 0 || port > 65535) - { throw new ArgumentException($"Invalid port number: {parts[1]}"); - } // Connect with timeout using var timeoutCts = new CancellationTokenSource(ConnectionTimeoutMs); - using var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutCts.Token); - + using var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutCts.Token); + try { - await _client.ConnectAsync(parts[0], port); - + await _client.ConnectAsync(parts[0], port); + lock (_connectionLock) { - if (_disposed) - { - throw new ObjectDisposedException(nameof(TcpPeerClient)); - } - - _stream = _client.GetStream(); - - // CRITICAL for Android: Disable Nagle's algorithm to prevent buffering delays - // This ensures immediate packet transmission for handshake data - _client.NoDelay = true; - - // Configure TCP keepalive - _client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); - - // Set read/write timeouts + if (_disposed) throw new ObjectDisposedException(nameof(TcpPeerClient)); + + _stream = _client.GetStream(); + + // CRITICAL for Android: Disable Nagle's algorithm to prevent buffering delays + // This ensures immediate packet transmission for handshake data + _client.NoDelay = true; + + // Configure TCP keepalive + _client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); + + // Set read/write timeouts _stream.ReadTimeout = OperationTimeoutMs; _stream.WriteTimeout = OperationTimeoutMs; - } - + } + _logger.LogDebug("Connected to peer: {Address} (NoDelay=true for immediate send)", _peerAddress); } catch (OperationCanceledException) when (timeoutCts.IsCancellationRequested) @@ -138,13 +162,7 @@ public class TcpPeerClient : IDisposable } /// - /// Gets the list of collections the remote peer is interested in. 
- /// - public System.Collections.Generic.IReadOnlyList RemoteInterests => _remoteInterests.AsReadOnly(); - private List _remoteInterests = new(); - - /// - /// Performs authentication handshake with the remote peer. + /// Performs authentication handshake with the remote peer. /// /// The local node identifier. /// The authentication token. @@ -155,46 +173,38 @@ public class TcpPeerClient : IDisposable return await HandshakeAsync(myNodeId, authToken, null, token); } - /// - /// Performs authentication handshake with the remote peer, including collection interests. - /// - /// The local node identifier. - /// The authentication token. - /// Optional collection names this node is interested in receiving. - /// Cancellation token. - /// if handshake was accepted; otherwise . - public async Task HandshakeAsync(string myNodeId, string authToken, IEnumerable? interestingCollections, CancellationToken token) - { + /// + /// Performs authentication handshake with the remote peer, including collection interests. + /// + /// The local node identifier. + /// The authentication token. + /// Optional collection names this node is interested in receiving. + /// Cancellation token. + /// if handshake was accepted; otherwise . + public async Task HandshakeAsync(string myNodeId, string authToken, + IEnumerable? interestingCollections, CancellationToken token) + { if (HasHandshaked) return true; if (_handshakeService != null) - { // Perform secure handshake if service is available // We assume we are initiator here _cipherState = await _handshakeService.HandshakeAsync(_stream!, true, myNodeId, token); - } var req = new HandshakeRequest { NodeId = myNodeId, AuthToken = authToken ?? 
"" }; if (interestingCollections != null) - { - foreach (var coll in interestingCollections) - { + foreach (string coll in interestingCollections) req.InterestingCollections.Add(coll); - } - } - if (CompressionHelper.IsBrotliSupported) - { - req.SupportedCompression.Add("brotli"); - } + if (CompressionHelper.IsBrotliSupported) req.SupportedCompression.Add("brotli"); _logger.LogDebug("Sending HandshakeReq to {Address}", _peerAddress); await _protocol.SendMessageAsync(_stream!, MessageType.HandshakeReq, req, false, _cipherState, token); - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); - _logger.LogDebug("Received Handshake response type: {Type}", type); - + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + _logger.LogDebug("Received Handshake response type: {Type}", type); + if (type != MessageType.HandshakeRes) return false; var res = HandshakeResponse.Parser.ParseFrom(payload); @@ -213,18 +223,19 @@ public class TcpPeerClient : IDisposable return res.Accepted; } - /// - /// Retrieves the remote peer's latest HLC timestamp. - /// - /// Cancellation token. - /// The latest remote hybrid logical clock timestamp. - public async Task GetClockAsync(CancellationToken token) - { + /// + /// Retrieves the remote peer's latest HLC timestamp. + /// + /// Cancellation token. + /// The latest remote hybrid logical clock timestamp. 
+ public async Task GetClockAsync(CancellationToken token) + { using (_telemetry?.StartMetric(MetricType.RoundTripTime)) { - await _protocol.SendMessageAsync(_stream!, MessageType.GetClockReq, new GetClockRequest(), _useCompression, _cipherState, token); + await _protocol.SendMessageAsync(_stream!, MessageType.GetClockReq, new GetClockRequest(), _useCompression, + _cipherState, token); - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); if (type != MessageType.ClockRes) throw new Exception("Unexpected response"); var res = ClockResponse.Parser.ParseFrom(payload); @@ -232,69 +243,67 @@ public class TcpPeerClient : IDisposable } } - /// - /// Retrieves the remote peer's vector clock (latest timestamp per node). - /// - /// Cancellation token. - /// The remote vector clock. - public async Task GetVectorClockAsync(CancellationToken token) - { + /// + /// Retrieves the remote peer's vector clock (latest timestamp per node). + /// + /// Cancellation token. + /// The remote vector clock. 
+ public async Task GetVectorClockAsync(CancellationToken token) + { using (_telemetry?.StartMetric(MetricType.RoundTripTime)) { - await _protocol.SendMessageAsync(_stream!, MessageType.GetVectorClockReq, new GetVectorClockRequest(), _useCompression, _cipherState, token); + await _protocol.SendMessageAsync(_stream!, MessageType.GetVectorClockReq, new GetVectorClockRequest(), + _useCompression, _cipherState, token); - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); if (type != MessageType.VectorClockRes) throw new Exception("Unexpected response"); var res = VectorClockResponse.Parser.ParseFrom(payload); var vectorClock = new VectorClock(); foreach (var entry in res.Entries) - { vectorClock.SetTimestamp(entry.NodeId, new HlcTimestamp(entry.HlcWall, entry.HlcLogic, entry.NodeId)); - } return vectorClock; } } - /// - /// Pulls oplog changes from the remote peer since the specified timestamp. - /// - /// The starting timestamp for requested changes. - /// Cancellation token. - /// The list of oplog entries returned by the remote peer. - public async Task> PullChangesAsync(HlcTimestamp since, CancellationToken token) - { - return await PullChangesAsync(since, null, token); - } - - /// - /// Pulls oplog changes from the remote peer since the specified timestamp, filtered by collections. - /// - /// The starting timestamp for requested changes. - /// Optional collection names used to filter the returned entries. - /// Cancellation token. - /// The list of oplog entries returned by the remote peer. - public async Task> PullChangesAsync(HlcTimestamp since, IEnumerable? collections, CancellationToken token) - { - var req = new PullChangesRequest - { - SinceWall = since.PhysicalTime, - SinceLogic = since.LogicalCounter, - // Empty SinceNode indicates a global pull (not source-node filtered). 
- SinceNode = string.Empty - }; - if (collections != null) - { - foreach (var coll in collections) - { - req.Collections.Add(coll); - } - } - await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, token); + /// + /// Pulls oplog changes from the remote peer since the specified timestamp. + /// + /// The starting timestamp for requested changes. + /// Cancellation token. + /// The list of oplog entries returned by the remote peer. + public async Task> PullChangesAsync(HlcTimestamp since, CancellationToken token) + { + return await PullChangesAsync(since, null, token); + } - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + /// + /// Pulls oplog changes from the remote peer since the specified timestamp, filtered by collections. + /// + /// The starting timestamp for requested changes. + /// Optional collection names used to filter the returned entries. + /// Cancellation token. + /// The list of oplog entries returned by the remote peer. + public async Task> PullChangesAsync(HlcTimestamp since, IEnumerable? collections, + CancellationToken token) + { + var req = new PullChangesRequest + { + SinceWall = since.PhysicalTime, + SinceLogic = since.LogicalCounter, + // Empty SinceNode indicates a global pull (not source-node filtered). + SinceNode = string.Empty + }; + if (collections != null) + foreach (string coll in collections) + req.Collections.Add(coll); + + await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, + token); + + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response"); var res = ChangeSetResponse.Parser.ParseFrom(payload); @@ -303,35 +312,38 @@ public class TcpPeerClient : IDisposable e.Collection, e.Key, ParseOp(e.Operation), - string.IsNullOrEmpty(e.JsonData) ? 
default : System.Text.Json.JsonSerializer.Deserialize(e.JsonData), + string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize(e.JsonData), new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode), e.PreviousHash, e.Hash // Pass the received hash to preserve integrity reference )).ToList(); } - /// - /// Pulls oplog changes for a specific node from the remote peer since the specified timestamp. - /// - /// The node identifier to filter changes by. - /// The starting timestamp for requested changes. - /// Cancellation token. - /// The list of oplog entries returned by the remote peer. - public async Task> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, CancellationToken token) - { - return await PullChangesFromNodeAsync(nodeId, since, null, token); - } - - /// - /// Pulls oplog changes for a specific node from the remote peer since the specified timestamp, filtered by collections. - /// - /// The node identifier to filter changes by. - /// The starting timestamp for requested changes. - /// Optional collection names used to filter the returned entries. - /// Cancellation token. - /// The list of oplog entries returned by the remote peer. - public async Task> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, IEnumerable? collections, CancellationToken token) - { + /// + /// Pulls oplog changes for a specific node from the remote peer since the specified timestamp. + /// + /// The node identifier to filter changes by. + /// The starting timestamp for requested changes. + /// Cancellation token. + /// The list of oplog entries returned by the remote peer. + public async Task> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, + CancellationToken token) + { + return await PullChangesFromNodeAsync(nodeId, since, null, token); + } + + /// + /// Pulls oplog changes for a specific node from the remote peer since the specified timestamp, filtered by + /// collections. + /// + /// The node identifier to filter changes by. 
+ /// The starting timestamp for requested changes. + /// Optional collection names used to filter the returned entries. + /// Cancellation token. + /// The list of oplog entries returned by the remote peer. + public async Task> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, + IEnumerable? collections, CancellationToken token) + { var req = new PullChangesRequest { SinceNode = nodeId, @@ -339,15 +351,13 @@ public class TcpPeerClient : IDisposable SinceLogic = since.LogicalCounter }; if (collections != null) - { - foreach (var coll in collections) - { + foreach (string coll in collections) req.Collections.Add(coll); - } - } - await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, token); - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, + token); + + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response"); var res = ChangeSetResponse.Parser.ParseFrom(payload); @@ -356,26 +366,28 @@ public class TcpPeerClient : IDisposable e.Collection, e.Key, ParseOp(e.Operation), - string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize(e.JsonData), + string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize(e.JsonData), new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode), e.PreviousHash, e.Hash )).ToList(); } - /// - /// Retrieves a range of oplog entries connecting two hashes (Gap Recovery). - /// - /// The starting hash in the chain. - /// The ending hash in the chain. - /// Cancellation token. - /// The chain entries connecting the requested hash range. 
- public virtual async Task> GetChainRangeAsync(string startHash, string endHash, CancellationToken token) - { + /// + /// Retrieves a range of oplog entries connecting two hashes (Gap Recovery). + /// + /// The starting hash in the chain. + /// The ending hash in the chain. + /// Cancellation token. + /// The chain entries connecting the requested hash range. + public virtual async Task> GetChainRangeAsync(string startHash, string endHash, + CancellationToken token) + { var req = new GetChainRangeRequest { StartHash = startHash, EndHash = endHash }; - await _protocol.SendMessageAsync(_stream!, MessageType.GetChainRangeReq, req, _useCompression, _cipherState, token); + await _protocol.SendMessageAsync(_stream!, MessageType.GetChainRangeReq, req, _useCompression, _cipherState, + token); - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); if (type != MessageType.ChainRangeRes) throw new Exception($"Unexpected response for ChainRange: {type}"); var res = ChainRangeResponse.Parser.ParseFrom(payload); @@ -386,27 +398,26 @@ public class TcpPeerClient : IDisposable e.Collection, e.Key, ParseOp(e.Operation), - string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize(e.JsonData), + string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize(e.JsonData), new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode), e.PreviousHash, e.Hash )).ToList(); } - /// - /// Pushes local oplog changes to the remote peer. - /// - /// The oplog entries to push. - /// Cancellation token. - /// A task that represents the asynchronous push operation. - public async Task PushChangesAsync(IEnumerable entries, CancellationToken token) - { + /// + /// Pushes local oplog changes to the remote peer. + /// + /// The oplog entries to push. + /// Cancellation token. + /// A task that represents the asynchronous push operation. 
+ public async Task PushChangesAsync(IEnumerable entries, CancellationToken token) + { var req = new PushChangesRequest(); var entryList = entries.ToList(); if (entryList.Count == 0) return; foreach (var e in entryList) - { req.Entries.Add(new ProtoOplogEntry { Collection = e.Collection, @@ -419,11 +430,11 @@ public class TcpPeerClient : IDisposable Hash = e.Hash, PreviousHash = e.PreviousHash }); - } - await _protocol.SendMessageAsync(_stream!, MessageType.PushChangesReq, req, _useCompression, _cipherState, token); + await _protocol.SendMessageAsync(_stream!, MessageType.PushChangesReq, req, _useCompression, _cipherState, + token); - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); if (type != MessageType.AckRes) throw new Exception("Push failed"); var res = AckResponse.Parser.ParseFrom(payload); @@ -431,72 +442,43 @@ public class TcpPeerClient : IDisposable if (!res.Success) throw new Exception("Push failed"); } - private bool _useCompression = false; // Negotiated after handshake - - private OperationType ParseOp(string op) => Enum.TryParse(op, out var val) ? val : OperationType.Put; - - /// - /// Downloads a full snapshot from the remote peer to the provided destination stream. - /// - /// The stream that receives snapshot bytes. - /// Cancellation token. - /// A task that represents the asynchronous snapshot transfer operation. - public async Task GetSnapshotAsync(Stream destination, CancellationToken token) - { - await _protocol.SendMessageAsync(_stream!, MessageType.GetSnapshotReq, new GetSnapshotRequest(), _useCompression, _cipherState, token); + private OperationType ParseOp(string op) + { + return Enum.TryParse(op, out var val) ? val : OperationType.Put; + } + + /// + /// Downloads a full snapshot from the remote peer to the provided destination stream. + /// + /// The stream that receives snapshot bytes. 
+ /// Cancellation token. + /// A task that represents the asynchronous snapshot transfer operation. + public async Task GetSnapshotAsync(Stream destination, CancellationToken token) + { + await _protocol.SendMessageAsync(_stream!, MessageType.GetSnapshotReq, new GetSnapshotRequest(), + _useCompression, _cipherState, token); while (true) - { - var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); - if (type != MessageType.SnapshotChunkMsg) throw new Exception($"Unexpected message type during snapshot: {type}"); - - var chunk = SnapshotChunk.Parser.ParseFrom(payload); - if (chunk.Data.Length > 0) - { - await destination.WriteAsync(chunk.Data.ToByteArray(), 0, chunk.Data.Length, token); - } - + { + (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token); + if (type != MessageType.SnapshotChunkMsg) + throw new Exception($"Unexpected message type during snapshot: {type}"); + + var chunk = SnapshotChunk.Parser.ParseFrom(payload); + if (chunk.Data.Length > 0) + await destination.WriteAsync(chunk.Data.ToByteArray(), 0, chunk.Data.Length, token); + if (chunk.IsLast) break; - } - } - - /// - /// Releases resources used by the peer client. - /// - public void Dispose() - { - lock (_connectionLock) - { - if (_disposed) return; - _disposed = true; - } - - try - { - _stream?.Dispose(); } - catch (Exception ex) - { - _logger.LogWarning(ex, "Error disposing network stream"); - } - - try - { - _client?.Dispose(); - } - catch (Exception ex) - { - _logger.LogWarning(ex, "Error disposing TCP client"); - } - - _logger.LogDebug("Disposed connection to peer: {Address}", _peerAddress); } } -public class SnapshotRequiredException : Exception -{ - /// - /// Initializes a new instance of the class. - /// - public SnapshotRequiredException() : base("Peer requires a full snapshot sync.") { } -} +public class SnapshotRequiredException : Exception +{ + /// + /// Initializes a new instance of the class. 
+ /// + public SnapshotRequiredException() : base("Peer requires a full snapshot sync.") + { + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/TcpSyncServer.cs b/src/ZB.MOM.WW.CBDDC.Network/TcpSyncServer.cs index c141fbc..7aa9aa0 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/TcpSyncServer.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/TcpSyncServer.cs @@ -1,69 +1,67 @@ +using System.Net; +using System.Net.Sockets; +using System.Text.Json; +using Google.Protobuf; +using Microsoft.Extensions.Logging; +using Serilog.Context; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Network.Proto; -using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Protocol; +using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Telemetry; -using Google.Protobuf; -using Microsoft.Extensions.Logging; -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Net; -using System.Net.Sockets; -using System.Threading; -using System.Threading.Tasks; -using Serilog.Context; namespace ZB.MOM.WW.CBDDC.Network; /// -/// TCP server that handles incoming synchronization requests from remote peers. +/// TCP server that handles incoming synchronization requests from remote peers. /// -internal class TcpSyncServer : ISyncServer -{ - private readonly IOplogStore _oplogStore; - private readonly IDocumentStore _documentStore; - private readonly ISnapshotService _snapshotStore; - private readonly ILogger _logger; - private readonly IPeerNodeConfigurationProvider _configProvider; - private CancellationTokenSource? _cts; - private TcpListener? 
_listener; - private readonly object _startStopLock = new object(); - private int _activeConnections = 0; - - internal int MaxConnections = 100; +internal class TcpSyncServer : ISyncServer +{ private const int ClientOperationTimeoutMs = 60000; private readonly IAuthenticator _authenticator; + private readonly IPeerNodeConfigurationProvider _configProvider; + private readonly IDocumentStore _documentStore; private readonly IPeerHandshakeService _handshakeService; + private readonly ILogger _logger; + private readonly IOplogStore _oplogStore; + private readonly ISnapshotService _snapshotStore; + private readonly object _startStopLock = new(); private readonly INetworkTelemetryService? _telemetry; + private int _activeConnections; + private CancellationTokenSource? _cts; + private TcpListener? _listener; + + internal int MaxConnections = 100; /// - /// Initializes a new instance of the TcpSyncServer class with the specified peer oplogStore, configuration provider, - /// logger, and authenticator. + /// Initializes a new instance of the TcpSyncServer class with the specified peer oplogStore, configuration provider, + /// logger, and authenticator. /// - /// The server automatically restarts when the configuration provided by - /// peerNodeConfigurationProvider changes. This ensures that configuration updates are applied without requiring - /// manual intervention. - /// The peer oplogStore used to manage and persist peer information for the server. - /// The document store used to read and apply synchronized documents. - /// The snapshot store used to create and manage database snapshots for synchronization. - /// The provider that supplies configuration settings for the peer node and notifies the server of configuration - /// changes. - /// The logger used to record informational and error messages for the server instance. - /// The authenticator responsible for validating peer connections to the server. - /// The service used to perform secure handshake (optional). 
- /// The optional telemetry service used to record network performance metrics. - public TcpSyncServer( - IOplogStore oplogStore, - IDocumentStore documentStore, + /// + /// The server automatically restarts when the configuration provided by + /// peerNodeConfigurationProvider changes. This ensures that configuration updates are applied without requiring + /// manual intervention. + /// + /// The peer oplogStore used to manage and persist peer information for the server. + /// The document store used to read and apply synchronized documents. + /// The snapshot store used to create and manage database snapshots for synchronization. + /// + /// The provider that supplies configuration settings for the peer node and notifies the server of configuration + /// changes. + /// + /// The logger used to record informational and error messages for the server instance. + /// The authenticator responsible for validating peer connections to the server. + /// The service used to perform secure handshake (optional). + /// The optional telemetry service used to record network performance metrics. + public TcpSyncServer( + IOplogStore oplogStore, + IDocumentStore documentStore, ISnapshotService snapshotStore, - IPeerNodeConfigurationProvider peerNodeConfigurationProvider, - ILogger logger, + IPeerNodeConfigurationProvider peerNodeConfigurationProvider, + ILogger logger, IAuthenticator authenticator, IPeerHandshakeService handshakeService, INetworkTelemetryService? telemetry = null) @@ -85,10 +83,17 @@ internal class TcpSyncServer : ISyncServer } /// - /// Starts the TCP synchronization server and begins listening for incoming connections asynchronously. + /// Gets the port on which the server is listening. /// - /// If the server is already running, this method returns immediately without starting a new - /// listener. The server will listen on the TCP port specified in the current configuration. + public int? 
ListeningPort => ListeningEndpoint?.Port; + + /// + /// Starts the TCP synchronization server and begins listening for incoming connections asynchronously. + /// + /// + /// If the server is already running, this method returns immediately without starting a new + /// listener. The server will listen on the TCP port specified in the current configuration. + /// /// A task that represents the asynchronous start operation. public async Task Start() { @@ -101,6 +106,7 @@ internal class TcpSyncServer : ISyncServer _logger.LogWarning("TCP Sync Server already started"); return; } + _cts = new CancellationTokenSource(); } @@ -126,31 +132,33 @@ internal class TcpSyncServer : ISyncServer } /// - /// Stops the listener and cancels any pending operations. + /// Stops the listener and cancels any pending operations. /// - /// After calling this method, the listener will no longer accept new connections or process - /// requests. This method is safe to call multiple times; subsequent calls have no effect if the listener is already - /// stopped. + /// + /// After calling this method, the listener will no longer accept new connections or process + /// requests. This method is safe to call multiple times; subsequent calls have no effect if the listener is already + /// stopped. + /// /// A task that represents the asynchronous stop operation. public async Task Stop() { CancellationTokenSource? ctsToDispose = null; - TcpListener? listenerToStop = null; - + TcpListener? 
listenerToStop = null; + lock (_startStopLock) { if (_cts == null) { _logger.LogWarning("TCP Sync Server already stopped or never started"); return; - } - + } + ctsToDispose = _cts; listenerToStop = _listener; _cts = null; _listener = null; - } - + } + try { ctsToDispose.Cancel(); @@ -162,32 +170,26 @@ internal class TcpSyncServer : ISyncServer finally { ctsToDispose.Dispose(); - } - - listenerToStop?.Stop(); - + } + + listenerToStop?.Stop(); + await Task.CompletedTask; } /// - /// Gets the full local endpoint on which the server is listening. + /// Gets the full local endpoint on which the server is listening. /// public IPEndPoint? ListeningEndpoint => _listener?.LocalEndpoint as IPEndPoint; - /// - /// Gets the port on which the server is listening. - /// - public int? ListeningPort => ListeningEndpoint?.Port; - private async Task ListenAsync(CancellationToken token) { while (!token.IsCancellationRequested) - { try { if (_listener == null) break; - var client = await _listener.AcceptTcpClientAsync(); - + var client = await _listener.AcceptTcpClientAsync(); + if (_activeConnections >= MaxConnections) { _logger.LogWarning("Max connections reached ({Max}). Rejecting client.", MaxConnections); @@ -197,7 +199,7 @@ internal class TcpSyncServer : ISyncServer Interlocked.Increment(ref _activeConnections); - _ = Task.Run(async () => + _ = Task.Run(async () => { try { @@ -209,46 +211,47 @@ internal class TcpSyncServer : ISyncServer } }, token); } - catch (ObjectDisposedException) { break; } + catch (ObjectDisposedException) + { + break; + } catch (Exception ex) { _logger.LogError(ex, "TCP Accept Error"); } - } } - private async Task HandleClientAsync(TcpClient client, CancellationToken token) - { - var remoteEp = client.Client.RemoteEndPoint; - using var operationContext = LogContext.PushProperty("OperationId", Guid.NewGuid().ToString("N")); - using var endpointContext = LogContext.PushProperty("RemoteEndpoint", remoteEp?.ToString() ?? 
"unknown"); - _logger.LogDebug("Client Connected: {Endpoint}", remoteEp); - + private async Task HandleClientAsync(TcpClient client, CancellationToken token) + { + var remoteEp = client.Client.RemoteEndPoint; + using var operationContext = LogContext.PushProperty("OperationId", Guid.NewGuid().ToString("N")); + using var endpointContext = LogContext.PushProperty("RemoteEndpoint", remoteEp?.ToString() ?? "unknown"); + _logger.LogDebug("Client Connected: {Endpoint}", remoteEp); + try { using (client) using (var stream = client.GetStream()) { // CRITICAL for Android: Disable Nagle's algorithm for immediate packet send - client.NoDelay = true; - - // Configure TCP keepalive - client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); - - // Set stream timeouts + client.NoDelay = true; + + // Configure TCP keepalive + client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true); + + // Set stream timeouts stream.ReadTimeout = ClientOperationTimeoutMs; - stream.WriteTimeout = ClientOperationTimeoutMs; - - var protocol = new ProtocolHandler(_logger, _telemetry); - - bool useCompression = false; + stream.WriteTimeout = ClientOperationTimeoutMs; + + var protocol = new ProtocolHandler(_logger, _telemetry); + + var useCompression = false; CipherState? 
cipherState = null; List remoteInterests = new(); // Perform Secure Handshake (if service is available) var config = await _configProvider.GetConfiguration(); if (_handshakeService != null) - { try { // We are NOT initiator @@ -261,40 +264,39 @@ internal class TcpSyncServer : ISyncServer _logger.LogError(ex, "Secure Handshake failed check logic"); return; } - } while (client.Connected && !token.IsCancellationRequested) { // Re-fetch config if needed, though usually stable config = await _configProvider.GetConfiguration(); - var (type, payload) = await protocol.ReadMessageAsync(stream, cipherState, token); + (var type, byte[] payload) = await protocol.ReadMessageAsync(stream, cipherState, token); if (type == MessageType.Unknown) break; // EOF or Error // Handshake Loop if (type == MessageType.HandshakeReq) { var hReq = HandshakeRequest.Parser.ParseFrom(payload); - _logger.LogDebug("Received HandshakeReq from Node {NodeId}", hReq.NodeId); - - // Track remote peer interests - remoteInterests = hReq.InterestingCollections.ToList(); - + _logger.LogDebug("Received HandshakeReq from Node {NodeId}", hReq.NodeId); + + // Track remote peer interests + remoteInterests = hReq.InterestingCollections.ToList(); + bool valid = await _authenticator.ValidateAsync(hReq.NodeId, hReq.AuthToken); if (!valid) { _logger.LogWarning("Authentication failed for Node {NodeId}", hReq.NodeId); - await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, new HandshakeResponse { NodeId = config.NodeId, Accepted = false }, false, cipherState, token); + await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, + new HandshakeResponse { NodeId = config.NodeId, Accepted = false }, false, cipherState, + token); return; } - var hRes = new HandshakeResponse { NodeId = config.NodeId, Accepted = true }; - - // Include local interests from IDocumentStore in response for push filtering - foreach (var coll in _documentStore.InterestedCollection) - { + var hRes = new HandshakeResponse { NodeId = 
config.NodeId, Accepted = true }; + + // Include local interests from IDocumentStore in response for push filtering + foreach (string coll in _documentStore.InterestedCollection) hRes.InterestingCollections.Add(coll); - } if (CompressionHelper.IsBrotliSupported && hReq.SupportedCompression.Contains("brotli")) { @@ -302,12 +304,13 @@ internal class TcpSyncServer : ISyncServer useCompression = true; } - await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, hRes, false, cipherState, token); + await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, hRes, false, cipherState, + token); continue; } IMessage? response = null; - MessageType resType = MessageType.Unknown; + var resType = MessageType.Unknown; switch (type) { @@ -325,7 +328,7 @@ internal class TcpSyncServer : ISyncServer case MessageType.GetVectorClockReq: var vectorClock = await _oplogStore.GetVectorClockAsync(token); var vcRes = new VectorClockResponse(); - foreach (var nodeId in vectorClock.NodeIds) + foreach (string nodeId in vectorClock.NodeIds) { var ts = vectorClock.GetTimestamp(nodeId); vcRes.Entries.Add(new VectorClockEntry @@ -335,23 +338,23 @@ internal class TcpSyncServer : ISyncServer HlcLogic = ts.LogicalCounter }); } + response = vcRes; resType = MessageType.VectorClockRes; break; - case MessageType.PullChangesReq: - var pReq = PullChangesRequest.Parser.ParseFrom(payload); - var since = new HlcTimestamp(pReq.SinceWall, pReq.SinceLogic, pReq.SinceNode); - - // Use collection filter from request - var filter = pReq.Collections.Any() ? pReq.Collections : null; - var oplog = string.IsNullOrWhiteSpace(pReq.SinceNode) - ? 
await _oplogStore.GetOplogAfterAsync(since, filter, token) - : await _oplogStore.GetOplogForNodeAfterAsync(pReq.SinceNode, since, filter, token); - + case MessageType.PullChangesReq: + var pReq = PullChangesRequest.Parser.ParseFrom(payload); + var since = new HlcTimestamp(pReq.SinceWall, pReq.SinceLogic, pReq.SinceNode); + + // Use collection filter from request + var filter = pReq.Collections.Any() ? pReq.Collections : null; + var oplog = string.IsNullOrWhiteSpace(pReq.SinceNode) + ? await _oplogStore.GetOplogAfterAsync(since, filter, token) + : await _oplogStore.GetOplogForNodeAfterAsync(pReq.SinceNode, since, filter, token); + var csRes = new ChangeSetResponse(); foreach (var e in oplog) - { csRes.Entries.Add(new ProtoOplogEntry { Collection = e.Collection, @@ -364,7 +367,6 @@ internal class TcpSyncServer : ISyncServer Hash = e.Hash, PreviousHash = e.PreviousHash }); - } response = csRes; resType = MessageType.ChangeSetRes; break; @@ -375,10 +377,12 @@ internal class TcpSyncServer : ISyncServer e.Collection, e.Key, (OperationType)Enum.Parse(typeof(OperationType), e.Operation), - string.IsNullOrEmpty(e.JsonData) ? (System.Text.Json.JsonElement?)null : System.Text.Json.JsonSerializer.Deserialize(e.JsonData), + string.IsNullOrEmpty(e.JsonData) + ? 
null + : JsonSerializer.Deserialize(e.JsonData), new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode), e.PreviousHash, // Restore PreviousHash - e.Hash // Restore Hash + e.Hash // Restore Hash )); await _oplogStore.ApplyBatchAsync(entries, token); @@ -389,18 +393,15 @@ internal class TcpSyncServer : ISyncServer case MessageType.GetChainRangeReq: var rangeReq = GetChainRangeRequest.Parser.ParseFrom(payload); - var rangeEntries = await _oplogStore.GetChainRangeAsync(rangeReq.StartHash, rangeReq.EndHash, token); - var rangeRes = new ChainRangeResponse(); - + var rangeEntries = + await _oplogStore.GetChainRangeAsync(rangeReq.StartHash, rangeReq.EndHash, token); + var rangeRes = new ChainRangeResponse(); + if (!rangeEntries.Any() && rangeReq.StartHash != rangeReq.EndHash) - { // Gap cannot be filled (likely pruned or unknown branch) rangeRes.SnapshotRequired = true; - } else - { foreach (var e in rangeEntries) - { rangeRes.Entries.Add(new ProtoOplogEntry { Collection = e.Collection, @@ -413,52 +414,52 @@ internal class TcpSyncServer : ISyncServer Hash = e.Hash, PreviousHash = e.PreviousHash }); - } - } + response = rangeRes; resType = MessageType.ChainRangeRes; break; case MessageType.GetSnapshotReq: _logger.LogInformation("Processing GetSnapshotReq from {Endpoint}", remoteEp); - var tempFile = Path.GetTempFileName(); - try + string tempFile = Path.GetTempFileName(); + try { // Create backup using (var fs = File.Create(tempFile)) { await _snapshotStore.CreateSnapshotAsync(fs, token); - } - + } + using (var fs = File.OpenRead(tempFile)) { - byte[] buffer = new byte[80 * 1024]; // 80KB chunks + var buffer = new byte[80 * 1024]; // 80KB chunks int bytesRead; while ((bytesRead = await fs.ReadAsync(buffer, 0, buffer.Length, token)) > 0) { - var chunk = new SnapshotChunk - { + var chunk = new SnapshotChunk + { Data = ByteString.CopyFrom(buffer, 0, bytesRead), - IsLast = false + IsLast = false }; - await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, chunk, 
false, cipherState, token); - } - - // Send End of Snapshot - await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, new SnapshotChunk { IsLast = true }, false, cipherState, token); + await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, chunk, + false, cipherState, token); + } + + // Send End of Snapshot + await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, + new SnapshotChunk { IsLast = true }, false, cipherState, token); } } finally { if (File.Exists(tempFile)) File.Delete(tempFile); } + break; } if (response != null) - { await protocol.SendMessageAsync(stream, resType, response, useCompression, cipherState, token); - } } } } @@ -471,4 +472,4 @@ internal class TcpSyncServer : ISyncServer _logger.LogDebug("Client Disconnected: {Endpoint}", remoteEp); } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Telemetry/INetworkTelemetryService.cs b/src/ZB.MOM.WW.CBDDC.Network/Telemetry/INetworkTelemetryService.cs index da4377f..23ef394 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Telemetry/INetworkTelemetryService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Telemetry/INetworkTelemetryService.cs @@ -1,39 +1,38 @@ -using System; -using System.Diagnostics; - -namespace ZB.MOM.WW.CBDDC.Network.Telemetry; - +using System.Diagnostics; + +namespace ZB.MOM.WW.CBDDC.Network.Telemetry; + public interface INetworkTelemetryService { /// - /// Records a metric value for the specified metric type. + /// Records a metric value for the specified metric type. /// /// The metric type to record. /// The metric value. void RecordValue(MetricType type, double value); /// - /// Starts timing a metric for the specified metric type. + /// Starts timing a metric for the specified metric type. /// /// The metric type to time. /// A timer that records elapsed time when disposed. MetricTimer StartMetric(MetricType type); /// - /// Gets a snapshot of all recorded metric values. + /// Gets a snapshot of all recorded metric values. 
/// /// A dictionary of metric values grouped by metric type and bucket. - System.Collections.Generic.Dictionary> GetSnapshot(); + Dictionary> GetSnapshot(); } - + public readonly struct MetricTimer : IDisposable { - private readonly INetworkTelemetryService _service; - private readonly MetricType _type; - private readonly long _startTimestamp; - + private readonly INetworkTelemetryService _service; + private readonly MetricType _type; + private readonly long _startTimestamp; + /// - /// Initializes a new metric timer. + /// Initializes a new metric timer. /// /// The telemetry service that receives the recorded value. /// The metric type being timed. @@ -45,16 +44,16 @@ public readonly struct MetricTimer : IDisposable } /// - /// Stops timing and records the elapsed duration. + /// Stops timing and records the elapsed duration. /// public void Dispose() { - var elapsed = Stopwatch.GetTimestamp() - _startTimestamp; - // Convert ticks to milliseconds? Or keep as ticks? - // Plan said "latency", usually ms. - // Stopwatch.Frequency depends on hardware. - // Let's store MS representation. - double ms = (double)elapsed * 1000 / Stopwatch.Frequency; - _service.RecordValue(_type, ms); - } -} + long elapsed = Stopwatch.GetTimestamp() - _startTimestamp; + // Convert ticks to milliseconds? Or keep as ticks? + // Plan said "latency", usually ms. + // Stopwatch.Frequency depends on hardware. + // Let's store MS representation. 
+ double ms = (double)elapsed * 1000 / Stopwatch.Frequency; + _service.RecordValue(_type, ms); + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Telemetry/MetricType.cs b/src/ZB.MOM.WW.CBDDC.Network/Telemetry/MetricType.cs index cf87ee4..6a700dc 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Telemetry/MetricType.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Telemetry/MetricType.cs @@ -6,4 +6,4 @@ public enum MetricType EncryptionTime = 1, DecryptionTime = 2, RoundTripTime = 3 -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/Telemetry/NetworkTelemetryService.cs b/src/ZB.MOM.WW.CBDDC.Network/Telemetry/NetworkTelemetryService.cs index b6a4763..bc3cda2 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/Telemetry/NetworkTelemetryService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/Telemetry/NetworkTelemetryService.cs @@ -1,101 +1,99 @@ -using System; -using System.Buffers; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Threading; using System.Threading.Channels; -using System.Threading.Tasks; using Microsoft.Extensions.Logging; namespace ZB.MOM.WW.CBDDC.Network.Telemetry; -public class NetworkTelemetryService : INetworkTelemetryService, IDisposable -{ - private readonly Channel<(MetricType Type, double Value)> _metricChannel; - private readonly CancellationTokenSource _cts; - private readonly ILogger _logger; - private readonly string _persistencePath; - - // Aggregation State - // We keep 30m of history with 1s resolution = 1800 buckets. - private const int MaxHistorySeconds = 1800; - - private readonly object _lock = new object(); - private readonly MetricBucket[] _history; - private int _headIndex = 0; // Points to current second - private long _currentSecondTimestamp; // Unix timestamp of current bucket +public class NetworkTelemetryService : INetworkTelemetryService, IDisposable +{ + // Aggregation State + // We keep 30m of history with 1s resolution = 1800 buckets. 
+ private const int MaxHistorySeconds = 1800; // Rolling Averages (Last calculated) - private readonly Dictionary _averages = new Dictionary(); + private readonly Dictionary _averages = new(); + private readonly CancellationTokenSource _cts; + private readonly MetricBucket[] _history; - /// - /// Initializes a new instance of the class. - /// - /// The logger used to report telemetry processing and persistence errors. - /// The file path where persisted telemetry snapshots are written. - public NetworkTelemetryService(ILogger logger, string persistencePath) - { + private readonly object _lock = new(); + private readonly ILogger _logger; + private readonly Channel<(MetricType Type, double Value)> _metricChannel; + private readonly string _persistencePath; + private long _currentSecondTimestamp; // Unix timestamp of current bucket + private int _headIndex; // Points to current second + + /// + /// Initializes a new instance of the class. + /// + /// The logger used to report telemetry processing and persistence errors. + /// The file path where persisted telemetry snapshots are written. 
+ public NetworkTelemetryService(ILogger logger, string persistencePath) + { _logger = logger; _persistencePath = persistencePath; _metricChannel = Channel.CreateUnbounded<(MetricType, double)>(new UnboundedChannelOptions { SingleReader = true, - SingleWriter = false + SingleWriter = false }); - _cts = new CancellationTokenSource(); - - _history = new MetricBucket[MaxHistorySeconds]; - for (int i = 0; i < MaxHistorySeconds; i++) _history[i] = new MetricBucket(); + _cts = new CancellationTokenSource(); + + _history = new MetricBucket[MaxHistorySeconds]; + for (var i = 0; i < MaxHistorySeconds; i++) _history[i] = new MetricBucket(); + + _currentSecondTimestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); - _currentSecondTimestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); - _ = Task.Run(ProcessMetricsLoop); _ = Task.Run(PersistenceLoop); } - /// - /// Records a metric value for the specified metric type. - /// - /// The metric category to update. - /// The metric value to record. - public void RecordValue(MetricType type, double value) - { - _metricChannel.Writer.TryWrite((type, value)); - } - - /// - /// Starts a timer for the specified metric type. - /// - /// The metric category to time. - /// A metric timer that records elapsed time when disposed. - public MetricTimer StartMetric(MetricType type) - { - return new MetricTimer(this, type); - } - - /// - /// Gets a point-in-time snapshot of rolling averages for each metric type. - /// - /// A dictionary keyed by metric type containing average values by window size in seconds. - public Dictionary> GetSnapshot() - { + /// + /// Releases resources used by the telemetry service. + /// + public void Dispose() + { + _cts.Cancel(); + _cts.Dispose(); + } + + /// + /// Records a metric value for the specified metric type. + /// + /// The metric category to update. + /// The metric value to record. 
+ public void RecordValue(MetricType type, double value) + { + _metricChannel.Writer.TryWrite((type, value)); + } + + /// + /// Starts a timer for the specified metric type. + /// + /// The metric category to time. + /// A metric timer that records elapsed time when disposed. + public MetricTimer StartMetric(MetricType type) + { + return new MetricTimer(this, type); + } + + /// + /// Gets a point-in-time snapshot of rolling averages for each metric type. + /// + /// A dictionary keyed by metric type containing average values by window size in seconds. + public Dictionary> GetSnapshot() + { var snapshot = new Dictionary>(); - var windows = new[] { 60, 300, 600, 1800 }; - + var windows = new[] { 60, 300, 600, 1800 }; + lock (_lock) { foreach (var type in Enum.GetValues(typeof(MetricType)).Cast()) { var typeDict = new Dictionary(); - foreach (var w in windows) - { - typeDict[w] = CalculateAverage(type, w); - } + foreach (int w in windows) typeDict[w] = CalculateAverage(type, w); snapshot[type] = typeDict; } } + return snapshot; } @@ -103,29 +101,26 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable { var reader = _metricChannel.Reader; while (!_cts.IsCancellationRequested) - { try { if (await reader.WaitToReadAsync(_cts.Token)) - { while (reader.TryRead(out var item)) - { AddMetricToCurrentBucket(item.Type, item.Value); - } - } } - catch (OperationCanceledException) { break; } + catch (OperationCanceledException) + { + break; + } catch (Exception ex) { _logger.LogError(ex, "Error processing metrics"); } - } } private void AddMetricToCurrentBucket(MetricType type, double value) { - long now = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); - + long now = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); + lock (_lock) { // Rotate bucket if second changed @@ -133,11 +128,12 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable { long diff = now - _currentSecondTimestamp; // Move head forward, clearing buckets in between if gap 
> 1s - for (int i = 0; i < diff && i < MaxHistorySeconds; i++) + for (var i = 0; i < diff && i < MaxHistorySeconds; i++) { _headIndex = (_headIndex + 1) % MaxHistorySeconds; _history[_headIndex].Reset(); } + _currentSecondTimestamp = now; } @@ -148,18 +144,19 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable private async Task PersistenceLoop() { while (!_cts.IsCancellationRequested) - { try { await Task.Delay(TimeSpan.FromMinutes(1), _cts.Token); CalculateAndPersist(); } - catch (OperationCanceledException) { break; } + catch (OperationCanceledException) + { + break; + } catch (Exception ex) { _logger.LogError(ex, "Error persisting metrics"); } - } } private void CalculateAndPersist() @@ -167,117 +164,116 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable lock (_lock) { // Calculate averages - var windows = new[] { 60, 300, 600, 1800 }; // 1m, 5m, 10m, 30m - + var windows = new[] { 60, 300, 600, 1800 }; // 1m, 5m, 10m, 30m + using var fs = new FileStream(_persistencePath, FileMode.Create, FileAccess.Write); - using var bw = new BinaryWriter(fs); - - // Header + using var bw = new BinaryWriter(fs); + + // Header bw.Write((byte)1); // Version - bw.Write(DateTimeOffset.UtcNow.ToUnixTimeSeconds()); // Timestamp - + bw.Write(DateTimeOffset.UtcNow.ToUnixTimeSeconds()); // Timestamp + foreach (var type in Enum.GetValues(typeof(MetricType)).Cast()) { bw.Write((int)type); - foreach (var w in windows) + foreach (int w in windows) { double avg = CalculateAverage(type, w); - bw.Write(w); // Window Seconds - bw.Write(avg); // Average Value + bw.Write(w); // Window Seconds + bw.Write(avg); // Average Value } } } } - /// - /// Forces immediate calculation and persistence of telemetry data. - /// - internal void ForcePersist() - { - CalculateAndPersist(); - } + /// + /// Forces immediate calculation and persistence of telemetry data. 
+ /// + internal void ForcePersist() + { + CalculateAndPersist(); + } private double CalculateAverage(MetricType type, int seconds) { // Go backwards from head double sum = 0; - int count = 0; - int scanned = 0; - - int idx = _headIndex; - + var count = 0; + var scanned = 0; + + int idx = _headIndex; + while (scanned < seconds && scanned < MaxHistorySeconds) { var bucket = _history[idx]; sum += bucket.GetSum(type); - count += bucket.GetCount(type); - + count += bucket.GetCount(type); + idx--; if (idx < 0) idx = MaxHistorySeconds - 1; scanned++; - } - - return count == 0 ? 0 : sum / count; - } + } - /// - /// Releases resources used by the telemetry service. - /// - public void Dispose() - { - _cts.Cancel(); - _cts.Dispose(); + return count == 0 ? 0 : sum / count; } } -internal class MetricBucket -{ +internal class MetricBucket +{ + private readonly int[] _counts; + // Simple lock-free or locked accumulation? Global lock handles it for now. // Storing Sum and Count for each type private readonly double[] _sums; - private readonly int[] _counts; - /// - /// Initializes a new instance of the class. - /// - public MetricBucket() - { - var typeCount = Enum.GetValues(typeof(MetricType)).Length; - _sums = new double[typeCount]; - _counts = new int[typeCount]; - } - - /// - /// Clears all accumulated metric sums and counts in this bucket. - /// - public void Reset() - { - Array.Clear(_sums, 0, _sums.Length); - Array.Clear(_counts, 0, _counts.Length); - } - - /// - /// Adds a metric value to the bucket. - /// - /// The metric category to update. - /// The value to accumulate. - public void Add(MetricType type, double value) - { - int idx = (int)type; - _sums[idx] += value; - _counts[idx]++; - } - - /// - /// Gets the accumulated sum for a metric type. - /// - /// The metric category to read. - /// The accumulated sum for the specified metric type. - public double GetSum(MetricType type) => _sums[(int)type]; - /// - /// Gets the accumulated count for a metric type. 
- /// - /// The metric category to read. - /// The accumulated sample count for the specified metric type. - public int GetCount(MetricType type) => _counts[(int)type]; -} + /// + /// Initializes a new instance of the class. + /// + public MetricBucket() + { + int typeCount = Enum.GetValues(typeof(MetricType)).Length; + _sums = new double[typeCount]; + _counts = new int[typeCount]; + } + + /// + /// Clears all accumulated metric sums and counts in this bucket. + /// + public void Reset() + { + Array.Clear(_sums, 0, _sums.Length); + Array.Clear(_counts, 0, _counts.Length); + } + + /// + /// Adds a metric value to the bucket. + /// + /// The metric category to update. + /// The value to accumulate. + public void Add(MetricType type, double value) + { + var idx = (int)type; + _sums[idx] += value; + _counts[idx]++; + } + + /// + /// Gets the accumulated sum for a metric type. + /// + /// The metric category to read. + /// The accumulated sum for the specified metric type. + public double GetSum(MetricType type) + { + return _sums[(int)type]; + } + + /// + /// Gets the accumulated count for a metric type. + /// + /// The metric category to read. + /// The accumulated sample count for the specified metric type. 
+ public int GetCount(MetricType type) + { + return _counts[(int)type]; + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/UdpDiscoveryService.cs b/src/ZB.MOM.WW.CBDDC.Network/UdpDiscoveryService.cs index 1940c22..6e12203 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/UdpDiscoveryService.cs +++ b/src/ZB.MOM.WW.CBDDC.Network/UdpDiscoveryService.cs @@ -1,52 +1,49 @@ -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Linq; -using System.Net; -using System.Net.Sockets; -using System.Text; -using System.Text.Json; -using System.Threading; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Core.Storage; +using System.Collections.Concurrent; +using System.Net; +using System.Net.Sockets; +using System.Security.Cryptography; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization; using Microsoft.Extensions.Logging; using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core; +using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Network; /// -/// Provides UDP-based peer discovery for the CBDDC network. -/// Broadcasts presence beacons and listens for other nodes on the local network. +/// Provides UDP-based peer discovery for the CBDDC network. +/// Broadcasts presence beacons and listens for other nodes on the local network. /// -internal class UdpDiscoveryService : IDiscoveryService -{ - private const int DiscoveryPort = 25000; - private readonly ILogger _logger; - private readonly IPeerNodeConfigurationProvider _configProvider; - private readonly IDocumentStore _documentStore; - private CancellationTokenSource? _cts; - private readonly ConcurrentDictionary _activePeers = new(); - private readonly object _startStopLock = new object(); - - /// - /// Initializes a new instance of the class. - /// - /// Provider for peer node configuration. - /// Document store used to obtain collection interests. - /// Logger for discovery service events. 
- public UdpDiscoveryService( - IPeerNodeConfigurationProvider peerNodeConfigurationProvider, - IDocumentStore documentStore, - ILogger logger) - { - _configProvider = peerNodeConfigurationProvider ?? throw new ArgumentNullException(nameof(peerNodeConfigurationProvider)); - _documentStore = documentStore ?? throw new ArgumentNullException(nameof(documentStore)); - _logger = logger; +internal class UdpDiscoveryService : IDiscoveryService +{ + private const int DiscoveryPort = 25000; + private readonly ConcurrentDictionary _activePeers = new(); + private readonly IPeerNodeConfigurationProvider _configProvider; + private readonly IDocumentStore _documentStore; + private readonly ILogger _logger; + private readonly object _startStopLock = new(); + private CancellationTokenSource? _cts; + + /// + /// Initializes a new instance of the class. + /// + /// Provider for peer node configuration. + /// Document store used to obtain collection interests. + /// Logger for discovery service events. + public UdpDiscoveryService( + IPeerNodeConfigurationProvider peerNodeConfigurationProvider, + IDocumentStore documentStore, + ILogger logger) + { + _configProvider = peerNodeConfigurationProvider ?? + throw new ArgumentNullException(nameof(peerNodeConfigurationProvider)); + _documentStore = documentStore ?? throw new ArgumentNullException(nameof(documentStore)); + _logger = logger; } /// - /// Starts the discovery service, initiating listener, broadcaster, and cleanup tasks. + /// Starts the discovery service, initiating listener, broadcaster, and cleanup tasks. 
/// public async Task Start() { @@ -57,11 +54,12 @@ internal class UdpDiscoveryService : IDiscoveryService _logger.LogWarning("UDP Discovery Service already started"); return; } + _cts = new CancellationTokenSource(); } - var token = _cts.Token; - + var token = _cts.Token; + _ = Task.Run(async () => { try @@ -72,8 +70,8 @@ internal class UdpDiscoveryService : IDiscoveryService { _logger.LogError(ex, "UDP Listen task failed"); } - }, token); - + }, token); + _ = Task.Run(async () => { try @@ -84,8 +82,8 @@ internal class UdpDiscoveryService : IDiscoveryService { _logger.LogError(ex, "UDP Broadcast task failed"); } - }, token); - + }, token); + _ = Task.Run(async () => { try @@ -101,75 +99,26 @@ internal class UdpDiscoveryService : IDiscoveryService await Task.CompletedTask; } - // ... Stop ... - - private async Task CleanupAsync(CancellationToken token) + /// + /// Stops the discovery service. + /// + /// A task that completes when stop processing has finished. + public async Task Stop() { - while (!token.IsCancellationRequested) - { - try - { - await Task.Delay(10000, token); // Check every 10s - var now = DateTimeOffset.UtcNow; - var expired = new List(); + CancellationTokenSource? ctsToDispose = null; - foreach (var pair in _activePeers) - { - // Expiry: 15 seconds (broadcast is every 5s, so 3 missed beats = dead) - if ((now - pair.Value.LastSeen).TotalSeconds > 15) - { - expired.Add(pair.Key); - } - } - - foreach (var id in expired) - { - if (_activePeers.TryRemove(id, out var removed)) - { - _logger.LogInformation("Peer Expired: {NodeId} at {Endpoint}", removed.NodeId, removed.Address); - } - } - } - catch (OperationCanceledException) { break; } - catch (Exception ex) - { - _logger.LogError(ex, "Cleanup Loop Error"); - } - } - } - - // ... Listen ... 
- - private void HandleBeacon(DiscoveryBeacon beacon, IPAddress address) - { - var peerId = beacon.NodeId; - var endpoint = $"{address}:{beacon.TcpPort}"; - - var peer = new PeerNode(peerId, endpoint, DateTimeOffset.UtcNow, interestingCollections: beacon.InterestingCollections); - - _activePeers.AddOrUpdate(peerId, peer, (key, old) => peer); - } - - /// - /// Stops the discovery service. - /// - /// A task that completes when stop processing has finished. - public async Task Stop() - { - CancellationTokenSource? ctsToDispose = null; - lock (_startStopLock) { if (_cts == null) { _logger.LogWarning("UDP Discovery Service already stopped or never started"); return; - } - + } + ctsToDispose = _cts; _cts = null; - } - + } + try { ctsToDispose.Cancel(); @@ -181,16 +130,62 @@ internal class UdpDiscoveryService : IDiscoveryService finally { ctsToDispose.Dispose(); - } - - await Task.CompletedTask; - } - - /// - /// Gets the currently active peers discovered on the network. - /// - /// The collection of active peers. - public IEnumerable GetActivePeers() => _activePeers.Values; + } + + await Task.CompletedTask; + } + + /// + /// Gets the currently active peers discovered on the network. + /// + /// The collection of active peers. + public IEnumerable GetActivePeers() + { + return _activePeers.Values; + } + + // ... Stop ... 
+ + private async Task CleanupAsync(CancellationToken token) + { + while (!token.IsCancellationRequested) + try + { + await Task.Delay(10000, token); // Check every 10s + var now = DateTimeOffset.UtcNow; + var expired = new List(); + + foreach (var pair in _activePeers) + // Expiry: 15 seconds (broadcast is every 5s, so 3 missed beats = dead) + if ((now - pair.Value.LastSeen).TotalSeconds > 15) + expired.Add(pair.Key); + + foreach (string id in expired) + if (_activePeers.TryRemove(id, out var removed)) + _logger.LogInformation("Peer Expired: {NodeId} at {Endpoint}", removed.NodeId, removed.Address); + } + catch (OperationCanceledException) + { + break; + } + catch (Exception ex) + { + _logger.LogError(ex, "Cleanup Loop Error"); + } + } + + // ... Listen ... + + private void HandleBeacon(DiscoveryBeacon beacon, IPAddress address) + { + string peerId = beacon.NodeId; + var endpoint = $"{address}:{beacon.TcpPort}"; + + var peer = new PeerNode(peerId, endpoint, DateTimeOffset.UtcNow, + interestingCollections: beacon.InterestingCollections); + + _activePeers.AddOrUpdate(peerId, peer, (key, old) => peer); + } private async Task ListenAsync(CancellationToken token) { @@ -201,28 +196,25 @@ internal class UdpDiscoveryService : IDiscoveryService _logger.LogInformation("UDP Discovery Listening on port {Port}", DiscoveryPort); while (!token.IsCancellationRequested) - { try { var result = await udp.ReceiveAsync(); - var json = Encoding.UTF8.GetString(result.Buffer); + string json = Encoding.UTF8.GetString(result.Buffer); try { var config = await _configProvider.GetConfiguration(); - var _nodeId = config.NodeId; - var localClusterHash = ComputeClusterHash(config.AuthToken); + string _nodeId = config.NodeId; + string localClusterHash = ComputeClusterHash(config.AuthToken); + + var beacon = JsonSerializer.Deserialize(json); - var beacon = JsonSerializer.Deserialize(json); - if (beacon != null && beacon.NodeId != _nodeId) { // Filter by ClusterHash to reduce congestion from 
different clusters if (!string.Equals(beacon.ClusterHash, localClusterHash, StringComparison.Ordinal)) - { // Optional: Log trace if needed, but keeping it silent avoids flooding logs during congestion - continue; - } + continue; HandleBeacon(beacon, result.RemoteEndPoint.Address); } @@ -232,12 +224,14 @@ internal class UdpDiscoveryService : IDiscoveryService _logger.LogWarning(ex, "Failed to parse beacon from {Address}", result.RemoteEndPoint.Address); } } - catch (ObjectDisposedException) { break; } + catch (ObjectDisposedException) + { + break; + } catch (Exception ex) { _logger.LogError(ex, "UDP Listener Error"); } - } } private async Task BroadcastAsync(CancellationToken token) @@ -252,18 +246,18 @@ internal class UdpDiscoveryService : IDiscoveryService try { // Re-fetch config each time in case it changes (though usually static) - var conf = await _configProvider.GetConfiguration(); - - var beacon = new DiscoveryBeacon - { - NodeId = conf.NodeId, + var conf = await _configProvider.GetConfiguration(); + + var beacon = new DiscoveryBeacon + { + NodeId = conf.NodeId, TcpPort = conf.TcpPort, ClusterHash = ComputeClusterHash(conf.AuthToken), InterestingCollections = _documentStore.InterestedCollection.ToList() }; - var json = JsonSerializer.Serialize(beacon); - var bytes = Encoding.UTF8.GetBytes(json); + string json = JsonSerializer.Serialize(beacon); + byte[] bytes = Encoding.UTF8.GetBytes(json); await udp.SendAsync(bytes, bytes.Length, endpoint); } @@ -279,39 +273,38 @@ internal class UdpDiscoveryService : IDiscoveryService private string ComputeClusterHash(string authToken) { if (string.IsNullOrEmpty(authToken)) return ""; - using var sha256 = System.Security.Cryptography.SHA256.Create(); - var bytes = Encoding.UTF8.GetBytes(authToken); - var hash = sha256.ComputeHash(bytes); + using var sha256 = SHA256.Create(); + byte[] bytes = Encoding.UTF8.GetBytes(authToken); + byte[] hash = sha256.ComputeHash(bytes); // Return first 8 chars (4 bytes hex) is enough for 
filtering return BitConverter.ToString(hash).Replace("-", "").Substring(0, 8); } + private class DiscoveryBeacon + { + /// + /// Gets or sets the broadcasting node identifier. + /// + [JsonPropertyName("node_id")] + public string NodeId { get; set; } = ""; - private class DiscoveryBeacon - { - /// - /// Gets or sets the broadcasting node identifier. - /// - [System.Text.Json.Serialization.JsonPropertyName("node_id")] - public string NodeId { get; set; } = ""; - - /// - /// Gets or sets the TCP port used by the broadcasting node. - /// - [System.Text.Json.Serialization.JsonPropertyName("tcp_port")] - public int TcpPort { get; set; } - - /// - /// Gets or sets the cluster hash used for discovery filtering. - /// - [System.Text.Json.Serialization.JsonPropertyName("cluster_hash")] - public string ClusterHash { get; set; } = ""; - - /// - /// Gets or sets the collections the node is interested in. - /// - [System.Text.Json.Serialization.JsonPropertyName("interests")] - public List InterestingCollections { get; set; } = new(); - } -} + /// + /// Gets or sets the TCP port used by the broadcasting node. + /// + [JsonPropertyName("tcp_port")] + public int TcpPort { get; set; } + + /// + /// Gets or sets the cluster hash used for discovery filtering. + /// + [JsonPropertyName("cluster_hash")] + public string ClusterHash { get; set; } = ""; + + /// + /// Gets or sets the collections the node is interested in. 
+ /// + [JsonPropertyName("interests")] + public List InterestingCollections { get; set; } = new(); + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj b/src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj index 3ee4a0c..182805a 100755 --- a/src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj +++ b/src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj @@ -1,52 +1,52 @@ - - - + + + - - - - runtime; build; native; contentfiles; analyzers; buildtransitive - all - - - - - - + + + + runtime; build; native; contentfiles; analyzers; buildtransitive + all + + + + + + - - - + + + - - ZB.MOM.WW.CBDDC.Network - ZB.MOM.WW.CBDDC.Network - ZB.MOM.WW.CBDDC.Network - net10.0 - latest - enable - enable - 1.0.3 - MrDevRobot - Networking layer (TCP/UDP/Gossip) for CBDDC. - MIT - p2p;mesh;network;gossip;lan;udp;tcp;discovery - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - git - README.md - + + ZB.MOM.WW.CBDDC.Network + ZB.MOM.WW.CBDDC.Network + ZB.MOM.WW.CBDDC.Network + net10.0 + latest + enable + enable + 1.0.3 + MrDevRobot + Networking layer (TCP/UDP/Gossip) for CBDDC. 
+ MIT + p2p;mesh;network;gossip;lan;udp;tcp;discovery + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + git + README.md + - - - + + + - - - <_Parameter1>ZB.MOM.WW.CBDDC.Network.Tests - - + + + <_Parameter1>ZB.MOM.WW.CBDDC.Network.Tests + + diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs index 4639547..901e179 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs @@ -1,13 +1,13 @@ +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; namespace ZB.MOM.WW.CBDDC.Persistence.BLite; /// -/// BLite implementation of document metadata storage for sync tracking. +/// BLite implementation of document metadata storage for sync tracking. /// /// The type of CBDDCDocumentDbContext. public class BLiteDocumentMetadataStore : DocumentMetadataStore where TDbContext : CBDDCDocumentDbContext @@ -16,18 +16,20 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher private readonly ILogger> _logger; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The BLite document database context. /// The optional logger instance. - public BLiteDocumentMetadataStore(TDbContext context, ILogger>? logger = null) + public BLiteDocumentMetadataStore(TDbContext context, + ILogger>? logger = null) { _context = context ?? throw new ArgumentNullException(nameof(context)); _logger = logger ?? 
NullLogger>.Instance; } /// - public override async Task GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default) + public override async Task GetMetadataAsync(string collection, string key, + CancellationToken cancellationToken = default) { var entity = _context.DocumentMetadatas .Find(m => m.Collection == collection && m.Key == key) @@ -37,7 +39,8 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } /// - public override async Task> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default) + public override async Task> GetMetadataByCollectionAsync(string collection, + CancellationToken cancellationToken = default) { return _context.DocumentMetadatas .Find(m => m.Collection == collection) @@ -46,7 +49,8 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } /// - public override async Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default) + public override async Task UpsertMetadataAsync(DocumentMetadata metadata, + CancellationToken cancellationToken = default) { var existing = _context.DocumentMetadatas .Find(m => m.Collection == metadata.Collection && m.Key == metadata.Key) @@ -69,7 +73,8 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } /// - public override async Task UpsertMetadataBatchAsync(IEnumerable metadatas, CancellationToken cancellationToken = default) + public override async Task UpsertMetadataBatchAsync(IEnumerable metadatas, + CancellationToken cancellationToken = default) { foreach (var metadata in metadatas) { @@ -95,7 +100,8 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } /// - public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default) + public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, + CancellationToken 
cancellationToken = default) { var existing = _context.DocumentMetadatas .Find(m => m.Collection == collection && m.Key == key) @@ -127,11 +133,12 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } /// - public override async Task> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable? collections = null, CancellationToken cancellationToken = default) + public override async Task> GetMetadataAfterAsync(HlcTimestamp since, + IEnumerable? collections = null, CancellationToken cancellationToken = default) { var query = _context.DocumentMetadatas.AsQueryable() - .Where(m => (m.HlcPhysicalTime > since.PhysicalTime) || - (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter)); + .Where(m => m.HlcPhysicalTime > since.PhysicalTime || + (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter)); if (collections != null) { @@ -161,17 +168,16 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } /// - public override async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) + public override async Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default) { - foreach (var item in items) - { - await _context.DocumentMetadatas.InsertAsync(ToEntity(item)); - } + foreach (var item in items) await _context.DocumentMetadatas.InsertAsync(ToEntity(item)); await _context.SaveChangesAsync(cancellationToken); } /// - public override async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) + public override async Task MergeAsync(IEnumerable items, + CancellationToken cancellationToken = default) { foreach (var item in items) { @@ -186,7 +192,8 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher else { // Update only if incoming is newer - var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter, existing.HlcNodeId); + var existingTs = new 
HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter, + existing.HlcNodeId); if (item.UpdatedAt.CompareTo(existingTs) > 0) { existing.HlcPhysicalTime = item.UpdatedAt.PhysicalTime; @@ -197,6 +204,7 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } } } + await _context.SaveChangesAsync(cancellationToken); } @@ -227,4 +235,4 @@ public class BLiteDocumentMetadataStore : DocumentMetadataStore wher } #endregion -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md index ae801db..b9fc335 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md @@ -2,7 +2,8 @@ ## Overview -`BLiteDocumentStore` is an abstract base class that simplifies creating document stores for CBDDC with BLite persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods. +`BLiteDocumentStore` is an abstract base class that simplifies creating document stores for CBDDC with BLite +persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods. ## Key Features @@ -11,23 +12,24 @@ - ? **No CDC Events Needed** - Direct Oplog management eliminates event loops - ? **Simple API** - Only 4 abstract methods to implement -## Architecture - -``` -User Code ? SampleDocumentStore (extends BLiteDocumentStore) - ? - BLiteDocumentStore - ??? _context.Users / TodoLists (read/write entities) - ??? _context.OplogEntries (write oplog directly) - -Remote Sync ? OplogStore.ApplyBatchAsync() - ? - BLiteDocumentStore.PutDocumentAsync(fromSync=true) - ??? _context.Users / TodoLists (write only) - ??? _context.OplogEntries (skip - already exists) -``` - -**Key Advantage**: No circular dependency! 
`BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries` collection. +## Architecture + +``` +User Code ? SampleDocumentStore (extends BLiteDocumentStore) + ? + BLiteDocumentStore + ??? _context.Users / TodoLists (read/write entities) + ??? _context.OplogEntries (write oplog directly) + +Remote Sync ? OplogStore.ApplyBatchAsync() + ? + BLiteDocumentStore.PutDocumentAsync(fromSync=true) + ??? _context.Users / TodoLists (write only) + ??? _context.OplogEntries (skip - already exists) +``` + +**Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries` +collection. ## Implementation Example @@ -129,15 +131,15 @@ public class SampleDocumentStore : BLiteDocumentStore ## Usage in Application -### Setup (DI Container) - -```csharp -services.AddSingleton(sp => - new SampleDbContext("data/sample.blite")); - -// No OplogStore dependency needed! -services.AddSingleton(); -services.AddSingleton>(); +### Setup (DI Container) + +```csharp +services.AddSingleton(sp => + new SampleDbContext("data/sample.blite")); + +// No OplogStore dependency needed! +services.AddSingleton(); +services.AddSingleton>(); ``` ### Local Changes (User operations) @@ -180,6 +182,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation ## Migration from Old CDC-based Approach ### Before (with CDC Events) + ```csharp // SampleDocumentStore subscribes to BLite CDC // CDC emits events ? OplogCoordinator creates Oplog @@ -187,6 +190,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation ``` ### After (with BLiteDocumentStore) + ```csharp // Direct Oplog management in DocumentStore // AsyncLocal flag prevents duplicates during sync @@ -203,6 +207,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation ## Next Steps After implementing your DocumentStore: + 1. Remove CDC subscriptions from your code 2. Remove `OplogCoordinator` from DI (no longer needed) 3. 
Test local operations create Oplog entries diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs index c88719b..8025d75 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs @@ -1,56 +1,50 @@ -using System; using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Linq; using System.Text.Json; -using System.Threading; -using System.Threading.Tasks; using BLite.Core.CDC; using BLite.Core.Collections; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Sync; using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; - using BLiteOperationType = BLite.Core.Transactions.OperationType; -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -/// -/// Abstract base class for BLite-based document stores. -/// Handles Oplog creation internally - subclasses only implement entity mapping. -/// -/// The BLite DbContext type. +namespace ZB.MOM.WW.CBDDC.Persistence.BLite; + +/// +/// Abstract base class for BLite-based document stores. +/// Handles Oplog creation internally - subclasses only implement entity mapping. +/// +/// The BLite DbContext type. 
public abstract class BLiteDocumentStore : IDocumentStore, IDisposable where TDbContext : CBDDCDocumentDbContext { - protected readonly TDbContext _context; + private readonly List _cdcWatchers = new(); + private readonly object _clockLock = new(); protected readonly IPeerNodeConfigurationProvider _configProvider; protected readonly IConflictResolver _conflictResolver; - protected readonly IVectorClockService _vectorClock; + protected readonly TDbContext _context; protected readonly ILogger> _logger; + private readonly HashSet _registeredCollections = new(); /// - /// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync. - /// CurrentCount == 0 ? sync in progress, CDC must skip. - /// CurrentCount == 1 ? no sync, CDC creates OplogEntry. + /// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync. + /// CurrentCount == 0 ? sync in progress, CDC must skip. + /// CurrentCount == 1 ? no sync, CDC creates OplogEntry. /// - private readonly SemaphoreSlim _remoteSyncGuard = new SemaphoreSlim(1, 1); - private readonly ConcurrentDictionary _suppressedCdcEvents = new(StringComparer.Ordinal); + private readonly SemaphoreSlim _remoteSyncGuard = new(1, 1); - private readonly List _cdcWatchers = new(); - private readonly HashSet _registeredCollections = new(); + private readonly ConcurrentDictionary _suppressedCdcEvents = new(StringComparer.Ordinal); + protected readonly IVectorClockService _vectorClock; // HLC state for generating timestamps for local changes private long _lastPhysicalTime; private int _logicalCounter; - private readonly object _clockLock = new object(); /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The BLite database context. /// The peer node configuration provider. @@ -74,17 +68,29 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab _logicalCounter = 0; } + /// + /// Releases managed resources used by this document store. 
+ /// + public virtual void Dispose() + { + foreach (var watcher in _cdcWatchers) + try + { + watcher.Dispose(); + } + catch + { + } + + _cdcWatchers.Clear(); + _remoteSyncGuard.Dispose(); + } + private static ILogger> CreateTypedLogger(ILogger? logger) { - if (logger is null) - { - return NullLogger>.Instance; - } + if (logger is null) return NullLogger>.Instance; - if (logger is ILogger> typedLogger) - { - return typedLogger; - } + if (logger is ILogger> typedLogger) return typedLogger; return new ForwardingLogger(logger); } @@ -94,7 +100,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab private readonly ILogger _inner; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The underlying logger instance. public ForwardingLogger(ILogger inner) @@ -135,35 +141,26 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType) { - var suppressionKey = BuildSuppressionKey(collection, key, operationType); + string suppressionKey = BuildSuppressionKey(collection, key, operationType); _suppressedCdcEvents.AddOrUpdate(suppressionKey, 1, (_, current) => current + 1); } private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType) { - var suppressionKey = BuildSuppressionKey(collection, key, operationType); + string suppressionKey = BuildSuppressionKey(collection, key, operationType); while (true) { - if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out var current)) - { - return false; - } + if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out int current)) return false; - if (current <= 1) - { - return _suppressedCdcEvents.TryRemove(suppressionKey, out _); - } + if (current <= 1) return _suppressedCdcEvents.TryRemove(suppressionKey, out _); - if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) - { - return true; - } + 
if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) return true; } } /// - /// Registers a BLite collection for CDC tracking. - /// Call in subclass constructor for each collection to sync. + /// Registers a BLite collection for CDC tracking. + /// Call in subclass constructor for each collection to sync. /// /// The entity type. /// The logical collection name used in Oplog. @@ -177,14 +174,14 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab { _registeredCollections.Add(collectionName); - var watcher = collection.Watch(capturePayload: true) + var watcher = collection.Watch(true) .Subscribe(new CdcObserver(collectionName, keySelector, this)); _cdcWatchers.Add(watcher); } /// - /// Generic CDC observer. Forwards BLite change events to OnLocalChangeDetectedAsync. - /// Automatically skips events when remote sync is in progress. + /// Generic CDC observer. Forwards BLite change events to OnLocalChangeDetectedAsync. + /// Automatically skips events when remote sync is in progress. /// private class CdcObserver : IObserver> where TEntity : class @@ -194,7 +191,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab private readonly BLiteDocumentStore _store; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The logical collection name. /// The key selector for observed entities. @@ -210,23 +207,20 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab } /// - /// Handles a change stream event from BLite CDC. + /// Handles a change stream event from BLite CDC. /// /// The change event payload. public void OnNext(ChangeStreamEvent changeEvent) { - var operationType = changeEvent.Type == BLiteOperationType.Delete ? OperationType.Delete : OperationType.Put; + var operationType = changeEvent.Type == BLiteOperationType.Delete + ? OperationType.Delete + : OperationType.Put; - var entityId = changeEvent.DocumentId?.ToString() ?? 
""; + string entityId = changeEvent.DocumentId ?? ""; if (operationType == OperationType.Put && changeEvent.Entity != null) - { entityId = _keySelector(changeEvent.Entity); - } - if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) - { - return; - } + if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) return; if (_store._remoteSyncGuard.CurrentCount == 0) return; @@ -238,22 +232,26 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab else if (changeEvent.Entity != null) { var content = JsonSerializer.SerializeToElement(changeEvent.Entity); - var key = _keySelector(changeEvent.Entity); + string key = _keySelector(changeEvent.Entity); _store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content) .GetAwaiter().GetResult(); } } /// - /// Handles CDC observer errors. + /// Handles CDC observer errors. /// /// The observed exception. - public void OnError(Exception error) { } + public void OnError(Exception error) + { + } /// - /// Handles completion of the CDC stream. + /// Handles completion of the CDC stream. /// - public void OnCompleted() { } + public void OnCompleted() + { + } } #endregion @@ -261,8 +259,8 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab #region Abstract Methods - Implemented by subclass /// - /// Applies JSON content to a single entity (insert or update) and commits changes. - /// Called for single-document operations. + /// Applies JSON content to a single entity (insert or update) and commits changes. + /// Called for single-document operations. /// /// The logical collection name. /// The document key. @@ -272,16 +270,17 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab string collection, string key, JsonElement content, CancellationToken cancellationToken); /// - /// Applies JSON content to multiple entities (insert or update) with a single commit. - /// Called for batch operations. 
Must commit all changes in a single SaveChanges. + /// Applies JSON content to multiple entities (insert or update) with a single commit. + /// Called for batch operations. Must commit all changes in a single SaveChanges. /// /// The documents to apply in one batch. /// The cancellation token. protected abstract Task ApplyContentToEntitiesBatchAsync( - IEnumerable<(string Collection, string Key, JsonElement Content)> documents, CancellationToken cancellationToken); + IEnumerable<(string Collection, string Key, JsonElement Content)> documents, + CancellationToken cancellationToken); /// - /// Reads an entity from the DbContext and returns it as JsonElement. + /// Reads an entity from the DbContext and returns it as JsonElement. /// /// The logical collection name. /// The document key. @@ -290,7 +289,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab string collection, string key, CancellationToken cancellationToken); /// - /// Removes a single entity from the DbContext and commits changes. + /// Removes a single entity from the DbContext and commits changes. /// /// The logical collection name. /// The document key. @@ -299,7 +298,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab string collection, string key, CancellationToken cancellationToken); /// - /// Removes multiple entities from the DbContext with a single commit. + /// Removes multiple entities from the DbContext with a single commit. /// /// The documents to remove in one batch. /// The cancellation token. @@ -307,45 +306,47 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken); /// - /// Reads all entities from a collection as JsonElements. + /// Reads all entities from a collection as JsonElements. /// /// The logical collection name. /// The cancellation token. 
protected abstract Task> GetAllEntitiesAsJsonAsync( string collection, CancellationToken cancellationToken); - #endregion - + #endregion + #region IDocumentStore Implementation /// - /// Returns the collections registered via WatchCollection. + /// Returns the collections registered via WatchCollection. /// public IEnumerable InterestedCollection => _registeredCollections; /// - /// Gets a document by collection and key. + /// Gets a document by collection and key. /// /// The logical collection name. /// The document key. /// The cancellation token. - /// The matching document, or when not found. - public async Task GetDocumentAsync(string collection, string key, CancellationToken cancellationToken = default) + /// The matching document, or when not found. + public async Task GetDocumentAsync(string collection, string key, + CancellationToken cancellationToken = default) { var content = await GetEntityAsJsonAsync(collection, key, cancellationToken); if (content == null) return null; - + var timestamp = new HlcTimestamp(0, 0, ""); // Will be populated from metadata if needed return new Document(collection, key, content.Value, timestamp, false); } /// - /// Gets all documents for a collection. + /// Gets all documents for a collection. /// /// The logical collection name. /// The cancellation token. /// The documents in the specified collection. - public async Task> GetDocumentsByCollectionAsync(string collection, CancellationToken cancellationToken = default) + public async Task> GetDocumentsByCollectionAsync(string collection, + CancellationToken cancellationToken = default) { var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); var timestamp = new HlcTimestamp(0, 0, ""); @@ -353,31 +354,30 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab } /// - /// Gets documents for the specified collection and key pairs. + /// Gets documents for the specified collection and key pairs. 
/// /// The collection and key pairs to resolve. /// The cancellation token. /// The documents that were found. - public async Task> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, CancellationToken cancellationToken) + public async Task> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, + CancellationToken cancellationToken) { var documents = new List(); - foreach (var (collection, key) in documentKeys) - { - var doc = await GetDocumentAsync(collection, key, cancellationToken); - if (doc != null) - { - documents.Add(doc); - } + foreach ((string collection, string key) in documentKeys) + { + var doc = await GetDocumentAsync(collection, key, cancellationToken); + if (doc != null) documents.Add(doc); } + return documents; } /// - /// Inserts or updates a single document. + /// Inserts or updates a single document. /// /// The document to persist. /// The cancellation token. - /// when the operation succeeds. + /// when the operation succeeds. public async Task PutDocumentAsync(Document document, CancellationToken cancellationToken = default) { await _remoteSyncGuard.WaitAsync(cancellationToken); @@ -389,6 +389,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab { _remoteSyncGuard.Release(); } + return true; } @@ -399,21 +400,20 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab } /// - /// Updates a batch of documents. + /// Updates a batch of documents. /// /// The documents to update. /// The cancellation token. - /// when the operation succeeds. - public async Task UpdateBatchDocumentsAsync(IEnumerable documents, CancellationToken cancellationToken = default) + /// when the operation succeeds. 
+ public async Task UpdateBatchDocumentsAsync(IEnumerable documents, + CancellationToken cancellationToken = default) { var documentList = documents.ToList(); await _remoteSyncGuard.WaitAsync(cancellationToken); try { foreach (var document in documentList) - { RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); - } await ApplyContentToEntitiesBatchAsync( documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); @@ -422,25 +422,25 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab { _remoteSyncGuard.Release(); } + return true; } /// - /// Inserts a batch of documents. + /// Inserts a batch of documents. /// /// The documents to insert. /// The cancellation token. - /// when the operation succeeds. - public async Task InsertBatchDocumentsAsync(IEnumerable documents, CancellationToken cancellationToken = default) + /// when the operation succeeds. + public async Task InsertBatchDocumentsAsync(IEnumerable documents, + CancellationToken cancellationToken = default) { var documentList = documents.ToList(); await _remoteSyncGuard.WaitAsync(cancellationToken); try { foreach (var document in documentList) - { RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); - } await ApplyContentToEntitiesBatchAsync( documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); @@ -449,17 +449,19 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab { _remoteSyncGuard.Release(); } + return true; } /// - /// Deletes a single document. + /// Deletes a single document. /// /// The logical collection name. /// The document key. /// The cancellation token. - /// when the operation succeeds. - public async Task DeleteDocumentAsync(string collection, string key, CancellationToken cancellationToken = default) + /// when the operation succeeds. 
+ public async Task DeleteDocumentAsync(string collection, string key, + CancellationToken cancellationToken = default) { await _remoteSyncGuard.WaitAsync(cancellationToken); try @@ -470,6 +472,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab { _remoteSyncGuard.Release(); } + return true; } @@ -480,25 +483,22 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab } /// - /// Deletes a batch of documents by composite keys. + /// Deletes a batch of documents by composite keys. /// /// The document keys in collection/key format. /// The cancellation token. - /// when the operation succeeds. - public async Task DeleteBatchDocumentsAsync(IEnumerable documentKeys, CancellationToken cancellationToken = default) + /// when the operation succeeds. + public async Task DeleteBatchDocumentsAsync(IEnumerable documentKeys, + CancellationToken cancellationToken = default) { var parsedKeys = new List<(string Collection, string Key)>(); - foreach (var key in documentKeys) + foreach (string key in documentKeys) { - var parts = key.Split('/'); + string[] parts = key.Split('/'); if (parts.Length == 2) - { parsedKeys.Add((parts[0], parts[1])); - } else - { _logger.LogWarning("Invalid document key format: {Key}", key); - } } if (parsedKeys.Count == 0) return true; @@ -506,10 +506,8 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab await _remoteSyncGuard.WaitAsync(cancellationToken); try { - foreach (var (collection, key) in parsedKeys) - { + foreach ((string collection, string key) in parsedKeys) RegisterSuppressedCdcEvent(collection, key, OperationType.Delete); - } await RemoveEntitiesBatchAsync(parsedKeys, cancellationToken); } @@ -517,11 +515,12 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab { _remoteSyncGuard.Release(); } + return true; } /// - /// Merges an incoming document with the current stored document. + /// Merges an incoming document with the current stored document. 
/// /// The incoming document. /// The cancellation token. @@ -553,46 +552,44 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab } return existing; - } - - #endregion + } + + #endregion #region ISnapshotable Implementation /// - /// Removes all tracked documents from registered collections. + /// Removes all tracked documents from registered collections. /// /// The cancellation token. public async Task DropAsync(CancellationToken cancellationToken = default) { - foreach (var collection in InterestedCollection) + foreach (string collection in InterestedCollection) { - var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); - foreach (var (key, _) in entities) - { - await RemoveEntityAsync(collection, key, cancellationToken); - } + var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); + foreach ((string key, var _) in entities) await RemoveEntityAsync(collection, key, cancellationToken); } } /// - /// Exports all tracked documents from registered collections. + /// Exports all tracked documents from registered collections. /// /// The cancellation token. /// The exported documents. public async Task> ExportAsync(CancellationToken cancellationToken = default) { var documents = new List(); - foreach (var collection in InterestedCollection) - { - var collectionDocs = await GetDocumentsByCollectionAsync(collection, cancellationToken); - documents.AddRange(collectionDocs); + foreach (string collection in InterestedCollection) + { + var collectionDocs = await GetDocumentsByCollectionAsync(collection, cancellationToken); + documents.AddRange(collectionDocs); } + return documents; } /// - /// Imports a batch of documents. + /// Imports a batch of documents. /// /// The documents to import. /// The cancellation token. 
@@ -603,9 +600,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab try { foreach (var document in documents) - { RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); - } await ApplyContentToEntitiesBatchAsync( documents.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); @@ -617,7 +612,7 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab } /// - /// Merges a batch of incoming documents. + /// Merges a batch of incoming documents. /// /// The incoming documents. /// The cancellation token. @@ -625,32 +620,29 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab { // Acquire guard to prevent Oplog creation during merge await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - foreach (var document in items) - { - await MergeAsync(document, cancellationToken); - } - } - finally - { - _remoteSyncGuard.Release(); - } - } - - #endregion - - #region Oplog Management - - /// - /// Returns true if a remote sync operation is in progress (guard acquired). - /// CDC listeners should check this before creating OplogEntry. - /// - protected bool IsRemoteSyncInProgress => _remoteSyncGuard.CurrentCount == 0; - + try + { + foreach (var document in items) await MergeAsync(document, cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + } + + #endregion + + #region Oplog Management + /// - /// Called by subclass CDC listeners when a local change is detected. - /// Creates OplogEntry + DocumentMetadata only if no remote sync is in progress. + /// Returns true if a remote sync operation is in progress (guard acquired). + /// CDC listeners should check this before creating OplogEntry. + /// + protected bool IsRemoteSyncInProgress => _remoteSyncGuard.CurrentCount == 0; + + /// + /// Called by subclass CDC listeners when a local change is detected. + /// Creates OplogEntry + DocumentMetadata only if no remote sync is in progress. 
/// /// The logical collection name. /// The document key. @@ -661,90 +653,90 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab string collection, string key, OperationType operationType, - JsonElement? content, - CancellationToken cancellationToken = default) - { - if (IsRemoteSyncInProgress) return; - - await CreateOplogEntryAsync(collection, key, operationType, content, cancellationToken); - } - - private HlcTimestamp GenerateTimestamp(string nodeId) - { - lock (_clockLock) - { - var now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + JsonElement? content, + CancellationToken cancellationToken = default) + { + if (IsRemoteSyncInProgress) return; - if (now > _lastPhysicalTime) - { - _lastPhysicalTime = now; - _logicalCounter = 0; - } - else - { - _logicalCounter++; + await CreateOplogEntryAsync(collection, key, operationType, content, cancellationToken); + } + + private HlcTimestamp GenerateTimestamp(string nodeId) + { + lock (_clockLock) + { + long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + + if (now > _lastPhysicalTime) + { + _lastPhysicalTime = now; + _logicalCounter = 0; + } + else + { + _logicalCounter++; } - return new HlcTimestamp(_lastPhysicalTime, _logicalCounter, nodeId); - } - } - - private async Task CreateOplogEntryAsync( + return new HlcTimestamp(_lastPhysicalTime, _logicalCounter, nodeId); + } + } + + private async Task CreateOplogEntryAsync( string collection, string key, OperationType operationType, - JsonElement? content, - CancellationToken cancellationToken) - { - var config = await _configProvider.GetConfiguration(); - var nodeId = config.NodeId; - - // Get last hash from OplogEntries collection directly - var lastEntry = _context.OplogEntries - .Find(e => e.TimestampNodeId == nodeId) - .OrderByDescending(e => e.TimestampPhysicalTime) - .ThenByDescending(e => e.TimestampLogicalCounter) - .FirstOrDefault(); - - var previousHash = lastEntry?.Hash ?? 
string.Empty; - var timestamp = GenerateTimestamp(nodeId); - - var oplogEntry = new OplogEntry( - collection, - key, - operationType, - content, - timestamp, - previousHash); - - // Write directly to OplogEntries collection - await _context.OplogEntries.InsertAsync(oplogEntry.ToEntity()); - - // Write DocumentMetadata for sync tracking - var docMetadata = EntityMappers.CreateDocumentMetadata( + JsonElement? content, + CancellationToken cancellationToken) + { + var config = await _configProvider.GetConfiguration(); + string nodeId = config.NodeId; + + // Get last hash from OplogEntries collection directly + var lastEntry = _context.OplogEntries + .Find(e => e.TimestampNodeId == nodeId) + .OrderByDescending(e => e.TimestampPhysicalTime) + .ThenByDescending(e => e.TimestampLogicalCounter) + .FirstOrDefault(); + + string previousHash = lastEntry?.Hash ?? string.Empty; + var timestamp = GenerateTimestamp(nodeId); + + var oplogEntry = new OplogEntry( + collection, + key, + operationType, + content, + timestamp, + previousHash); + + // Write directly to OplogEntries collection + await _context.OplogEntries.InsertAsync(oplogEntry.ToEntity()); + + // Write DocumentMetadata for sync tracking + var docMetadata = EntityMappers.CreateDocumentMetadata( collection, key, timestamp, - isDeleted: operationType == OperationType.Delete); - - var existingMetadata = _context.DocumentMetadatas - .Find(m => m.Collection == collection && m.Key == key) - .FirstOrDefault(); - - if (existingMetadata != null) - { - // Update existing metadata - existingMetadata.HlcPhysicalTime = timestamp.PhysicalTime; - existingMetadata.HlcLogicalCounter = timestamp.LogicalCounter; - existingMetadata.HlcNodeId = timestamp.NodeId; - existingMetadata.IsDeleted = operationType == OperationType.Delete; - await _context.DocumentMetadatas.UpdateAsync(existingMetadata); - } - else - { - await _context.DocumentMetadatas.InsertAsync(docMetadata); - } - + operationType == OperationType.Delete); + + var existingMetadata 
= _context.DocumentMetadatas + .Find(m => m.Collection == collection && m.Key == key) + .FirstOrDefault(); + + if (existingMetadata != null) + { + // Update existing metadata + existingMetadata.HlcPhysicalTime = timestamp.PhysicalTime; + existingMetadata.HlcLogicalCounter = timestamp.LogicalCounter; + existingMetadata.HlcNodeId = timestamp.NodeId; + existingMetadata.IsDeleted = operationType == OperationType.Delete; + await _context.DocumentMetadatas.UpdateAsync(existingMetadata); + } + else + { + await _context.DocumentMetadatas.InsertAsync(docMetadata); + } + await _context.SaveChangesAsync(cancellationToken); // Notify VectorClockService so sync sees local changes @@ -753,24 +745,24 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab _logger.LogDebug( "Created Oplog entry: {Operation} {Collection}/{Key} at {Timestamp} (hash: {Hash})", operationType, collection, key, timestamp, oplogEntry.Hash); - } - - /// - /// Marks the start of remote sync operations (suppresses CDC-triggered Oplog creation). - /// Use in using statement: using (store.BeginRemoteSync()) { ... } - /// - public IDisposable BeginRemoteSync() - { - _remoteSyncGuard.Wait(); - return new RemoteSyncScope(_remoteSyncGuard); - } - + } + + /// + /// Marks the start of remote sync operations (suppresses CDC-triggered Oplog creation). + /// Use in using statement: using (store.BeginRemoteSync()) { ... } + /// + public IDisposable BeginRemoteSync() + { + _remoteSyncGuard.Wait(); + return new RemoteSyncScope(_remoteSyncGuard); + } + private class RemoteSyncScope : IDisposable { private readonly SemaphoreSlim _guard; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The semaphore guarding remote sync operations. public RemoteSyncScope(SemaphoreSlim guard) @@ -779,26 +771,13 @@ public abstract class BLiteDocumentStore : IDocumentStore, IDisposab } /// - /// Releases the remote sync guard. + /// Releases the remote sync guard. 
/// public void Dispose() { _guard.Release(); } - } + } #endregion - - /// - /// Releases managed resources used by this document store. - /// - public virtual void Dispose() - { - foreach (var watcher in _cdcWatchers) - { - try { watcher.Dispose(); } catch { } - } - _cdcWatchers.Clear(); - _remoteSyncGuard.Dispose(); - } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs index 39a1292..e4e2012 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs @@ -1,9 +1,9 @@ -ο»Ώusing ZB.MOM.WW.CBDDC.Core; +ο»Ώusing Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Sync; using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; namespace ZB.MOM.WW.CBDDC.Persistence.BLite; @@ -13,7 +13,7 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo protected readonly ILogger> _logger; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The BLite database context. /// The document store used by the oplog store. @@ -27,14 +27,16 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo IConflictResolver conflictResolver, IVectorClockService vectorClockService, ISnapshotMetadataStore? snapshotMetadataStore = null, - ILogger>? logger = null) : base(documentStore, conflictResolver, vectorClockService, snapshotMetadataStore) + ILogger>? logger = null) : base(documentStore, conflictResolver, vectorClockService, + snapshotMetadataStore) { _context = dbContext ?? throw new ArgumentNullException(nameof(dbContext)); _logger = logger ?? 
NullLogger>.Instance; } /// - public override async Task ApplyBatchAsync(IEnumerable oplogEntries, CancellationToken cancellationToken = default) + public override async Task ApplyBatchAsync(IEnumerable oplogEntries, + CancellationToken cancellationToken = default) { // BLite transactions are committed by each SaveChangesAsync internally. // Wrapping in an explicit transaction causes "Cannot rollback committed transaction" @@ -58,22 +60,25 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo } /// - public override async Task> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default) + public override async Task> GetChainRangeAsync(string startHash, string endHash, + CancellationToken cancellationToken = default) { var startRow = _context.OplogEntries.Find(o => o.Hash == startHash).FirstOrDefault(); var endRow = _context.OplogEntries.Find(o => o.Hash == endHash).FirstOrDefault(); if (startRow == null || endRow == null) return []; - var nodeId = startRow.TimestampNodeId; + string nodeId = startRow.TimestampNodeId; // 2. 
Fetch range (Start < Entry <= End) var entities = _context.OplogEntries .Find(o => o.TimestampNodeId == nodeId && - ((o.TimestampPhysicalTime > startRow.TimestampPhysicalTime) || - (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime && o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) && - ((o.TimestampPhysicalTime < endRow.TimestampPhysicalTime) || - (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime && o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter))) + (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime || + (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime && + o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) && + (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime || + (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime && + o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter))) .OrderBy(o => o.TimestampPhysicalTime) .ThenBy(o => o.TimestampLogicalCounter) .ToList(); @@ -82,23 +87,27 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo } /// - public override async Task GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default) + public override async Task GetEntryByHashAsync(string hash, + CancellationToken cancellationToken = default) { // Hash is now a regular indexed property, not the Key return _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault()?.ToDomain(); } /// - public override async Task> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable? collections = null, CancellationToken cancellationToken = default) + public override async Task> GetOplogAfterAsync(HlcTimestamp timestamp, + IEnumerable? 
collections = null, CancellationToken cancellationToken = default) { var query = _context.OplogEntries - .Find(o => (o.TimestampPhysicalTime > timestamp.PhysicalTime) || - (o.TimestampPhysicalTime == timestamp.PhysicalTime && o.TimestampLogicalCounter > timestamp.LogicalCounter)); + .Find(o => o.TimestampPhysicalTime > timestamp.PhysicalTime || + (o.TimestampPhysicalTime == timestamp.PhysicalTime && + o.TimestampLogicalCounter > timestamp.LogicalCounter)); if (collections != null) { var collectionSet = new HashSet(collections); query = query.Where(o => collectionSet.Contains(o.Collection)); } + return query .OrderBy(o => o.TimestampPhysicalTime) .ThenBy(o => o.TimestampLogicalCounter) @@ -107,17 +116,20 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo } /// - public override async Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable? collections = null, CancellationToken cancellationToken = default) + public override async Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, + IEnumerable? 
collections = null, CancellationToken cancellationToken = default) { var query = _context.OplogEntries.AsQueryable() .Where(o => o.TimestampNodeId == nodeId && - ((o.TimestampPhysicalTime > since.PhysicalTime) || - (o.TimestampPhysicalTime == since.PhysicalTime && o.TimestampLogicalCounter > since.LogicalCounter))); + (o.TimestampPhysicalTime > since.PhysicalTime || + (o.TimestampPhysicalTime == since.PhysicalTime && + o.TimestampLogicalCounter > since.LogicalCounter))); if (collections != null) { var collectionSet = new HashSet(collections); query = query.Where(o => collectionSet.Contains(o.Collection)); } + return query .OrderBy(o => o.TimestampPhysicalTime) .ThenBy(o => o.TimestampLogicalCounter) @@ -128,10 +140,7 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo /// public override async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) { - foreach (var item in items) - { - await _context.OplogEntries.InsertAsync(item.ToEntity()); - } + foreach (var item in items) await _context.OplogEntries.InsertAsync(item.ToEntity()); await _context.SaveChangesAsync(cancellationToken); } @@ -142,11 +151,9 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo { // Hash is now a regular indexed property, not the Key var existing = _context.OplogEntries.Find(o => o.Hash == item.Hash).FirstOrDefault(); - if (existing == null) - { - await _context.OplogEntries.InsertAsync(item.ToEntity()); - } + if (existing == null) await _context.OplogEntries.InsertAsync(item.ToEntity()); } + await _context.SaveChangesAsync(cancellationToken); } @@ -154,8 +161,9 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default) { var toDelete = _context.OplogEntries.AsQueryable() - .Where(o => (o.TimestampPhysicalTime < cutoff.PhysicalTime) || - (o.TimestampPhysicalTime == cutoff.PhysicalTime && 
o.TimestampLogicalCounter <= cutoff.LogicalCounter)) + .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime || + (o.TimestampPhysicalTime == cutoff.PhysicalTime && + o.TimestampLogicalCounter <= cutoff.LogicalCounter)) .Select(o => o.Hash) .ToList(); await _context.OplogEntries.DeleteBulkAsync(toDelete); @@ -175,23 +183,20 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo // Step 1: Load from SnapshotMetadata FIRST (base state after prune) if (_snapshotMetadataStore != null) - { try { var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult(); foreach (var snapshot in snapshots) - { _vectorClock.UpdateNode( snapshot.NodeId, - new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter, snapshot.NodeId), + new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter, + snapshot.NodeId), snapshot.Hash ?? ""); - } } catch { // Ignore errors during initialization - oplog data will be used as fallback } - } // Step 2: Load from Oplog (Latest State - Overrides Snapshot if newer) var latestPerNode = _context.OplogEntries.AsQueryable() @@ -200,23 +205,20 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo { NodeId = g.Key, MaxEntry = g.OrderByDescending(o => o.TimestampPhysicalTime) - .ThenByDescending(o => o.TimestampLogicalCounter) - .FirstOrDefault() + .ThenByDescending(o => o.TimestampLogicalCounter) + .FirstOrDefault() }) .ToList() .Where(x => x.MaxEntry != null) .ToList(); foreach (var node in latestPerNode) - { if (node.MaxEntry != null) - { _vectorClock.UpdateNode( node.NodeId, - new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter, node.MaxEntry.TimestampNodeId), + new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter, + node.MaxEntry.TimestampNodeId), node.MaxEntry.Hash ?? 
""); - } - } _vectorClock.IsInitialized = true; } @@ -228,7 +230,8 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo } /// - protected override async Task QueryLastHashForNodeAsync(string nodeId, CancellationToken cancellationToken = default) + protected override async Task QueryLastHashForNodeAsync(string nodeId, + CancellationToken cancellationToken = default) { var lastEntry = _context.OplogEntries.AsQueryable() .Where(o => o.TimestampNodeId == nodeId) @@ -239,11 +242,12 @@ public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDo } /// - protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, CancellationToken cancellationToken = default) + protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, + CancellationToken cancellationToken = default) { // Hash is now a regular indexed property, not the Key var entry = _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault(); if (entry == null) return null; return (entry.TimestampPhysicalTime, entry.TimestampLogicalCounter); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs index 1778146..7850c94 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs @@ -1,39 +1,45 @@ -ο»Ώusing ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; +ο»Ώusing System.Text.Json; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; namespace ZB.MOM.WW.CBDDC.Persistence.BLite; /// -/// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence -/// operations. 
+/// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence +/// operations. /// -/// This class enables storage, retrieval, and management of remote peer configurations using the provided -/// database context. It is typically used in scenarios where peer configurations need to be persisted in a document -/// database. -/// The type of the document database context used for accessing and managing peer configurations. Must inherit from -/// CBDDCDocumentDbContext. +/// +/// This class enables storage, retrieval, and management of remote peer configurations using the provided +/// database context. It is typically used in scenarios where peer configurations need to be persisted in a document +/// database. +/// +/// +/// The type of the document database context used for accessing and managing peer configurations. Must inherit from +/// CBDDCDocumentDbContext. +/// public class BLitePeerConfigurationStore : PeerConfigurationStore where TDbContext : CBDDCDocumentDbContext { /// - /// Represents the database context used for data access operations within the derived class. + /// Represents the database context used for data access operations within the derived class. /// protected readonly TDbContext _context; /// - /// Provides logging capabilities for the BLitePeerConfigurationStore operations. + /// Provides logging capabilities for the BLitePeerConfigurationStore operations. /// protected readonly ILogger> _logger; /// - /// Initializes a new instance of the BLitePeerConfigurationStore class using the specified database context and - /// optional logger. + /// Initializes a new instance of the BLitePeerConfigurationStore class using the specified database context and + /// optional logger. /// /// The database context used to access and manage peer configuration data. Cannot be null. /// An optional logger for logging diagnostic messages. If null, a no-op logger is used. 
/// Thrown if the context parameter is null. - public BLitePeerConfigurationStore(TDbContext context, ILogger>? logger = null) + public BLitePeerConfigurationStore(TDbContext context, + ILogger>? logger = null) { _context = context ?? throw new ArgumentNullException(nameof(context)); _logger = logger ?? NullLogger>.Instance; @@ -42,29 +48,36 @@ public class BLitePeerConfigurationStore : PeerConfigurationStore wh /// public override async Task DropAsync(CancellationToken cancellationToken = default) { - _logger.LogWarning("Dropping peer configuration store - all remote peer configurations will be permanently deleted!"); + _logger.LogWarning( + "Dropping peer configuration store - all remote peer configurations will be permanently deleted!"); // Use Id (technical key) for deletion, not NodeId (business key) - var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(), cancellationToken); + var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(), + cancellationToken); await _context.RemotePeerConfigurations.DeleteBulkAsync(allIds); await _context.SaveChangesAsync(cancellationToken); _logger.LogInformation("Peer configuration store dropped successfully."); } /// - public override async Task> ExportAsync(CancellationToken cancellationToken = default) + public override async Task> ExportAsync( + CancellationToken cancellationToken = default) { return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken); } /// - public override async Task GetRemotePeerAsync(string nodeId, CancellationToken cancellationToken) + public override async Task GetRemotePeerAsync(string nodeId, + CancellationToken cancellationToken) { // NodeId is now a regular indexed property, not the Key - return await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(), cancellationToken); + return await 
Task.Run( + () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(), + cancellationToken); } /// - public override async Task> GetRemotePeersAsync(CancellationToken cancellationToken = default) + public override async Task> GetRemotePeersAsync( + CancellationToken cancellationToken = default) { return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken); } @@ -73,7 +86,8 @@ public class BLitePeerConfigurationStore : PeerConfigurationStore wh public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default) { // NodeId is now a regular indexed property, not the Key - var peer = await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken); + var peer = await Task.Run( + () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken); if (peer != null) { await _context.RemotePeerConfigurations.DeleteAsync(peer.Id); @@ -87,10 +101,13 @@ public class BLitePeerConfigurationStore : PeerConfigurationStore wh } /// - public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer, CancellationToken cancellationToken = default) + public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer, + CancellationToken cancellationToken = default) { // NodeId is now a regular indexed property, not the Key - var existing = await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(), cancellationToken); + var existing = + await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(), + cancellationToken); if (existing == null) { @@ -103,7 +120,7 @@ public class BLitePeerConfigurationStore : PeerConfigurationStore wh existing.Type = (int)peer.Type; existing.IsEnabled = peer.IsEnabled; existing.InterestsJson = 
peer.InterestingCollections.Count > 0 - ? System.Text.Json.JsonSerializer.Serialize(peer.InterestingCollections) + ? JsonSerializer.Serialize(peer.InterestingCollections) : ""; await _context.RemotePeerConfigurations.UpdateAsync(existing); } @@ -111,4 +128,4 @@ public class BLitePeerConfigurationStore : PeerConfigurationStore wh await _context.SaveChangesAsync(cancellationToken); _logger.LogInformation("Saved remote peer configuration: {NodeId} ({Type})", peer.NodeId, peer.Type); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs index 3f7bbf9..74183c1 100644 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs @@ -7,10 +7,11 @@ using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; namespace ZB.MOM.WW.CBDDC.Persistence.BLite; /// -/// BLite-backed peer oplog confirmation store. +/// BLite-backed peer oplog confirmation store. /// /// The BLite context type. -public class BLitePeerOplogConfirmationStore : PeerOplogConfirmationStore where TDbContext : CBDDCDocumentDbContext +public class BLitePeerOplogConfirmationStore : PeerOplogConfirmationStore + where TDbContext : CBDDCDocumentDbContext { internal const string RegistrationSourceNodeId = "__peer_registration__"; @@ -18,7 +19,7 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation private readonly ILogger> _logger; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The BLite context. /// An optional logger. 
@@ -38,9 +39,7 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation CancellationToken cancellationToken = default) { if (string.IsNullOrWhiteSpace(peerNodeId)) - { throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - } var existing = _context.PeerOplogConfirmations .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == RegistrationSourceNodeId) @@ -61,7 +60,8 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation }); await _context.SaveChangesAsync(cancellationToken); - _logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId, address, type); + _logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId, + address, type); return; } @@ -83,20 +83,16 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation CancellationToken cancellationToken = default) { if (string.IsNullOrWhiteSpace(peerNodeId)) - { throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - } if (string.IsNullOrWhiteSpace(sourceNodeId)) - { throw new ArgumentException("Source node id is required.", nameof(sourceNodeId)); - } var existing = _context.PeerOplogConfirmations .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == sourceNodeId) .FirstOrDefault(); - var nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); if (existing == null) { @@ -115,15 +111,12 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation return; } - var isNewer = IsIncomingTimestampNewer(timestamp, existing); - var samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall && - timestamp.LogicalCounter == existing.ConfirmedLogic && - !string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal); + bool isNewer = IsIncomingTimestampNewer(timestamp, existing); + bool samePointHashChanged = timestamp.PhysicalTime == 
existing.ConfirmedWall && + timestamp.LogicalCounter == existing.ConfirmedLogic && + !string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal); - if (!isNewer && !samePointHashChanged && existing.IsActive) - { - return; - } + if (!isNewer && !samePointHashChanged && existing.IsActive) return; existing.ConfirmedWall = timestamp.PhysicalTime; existing.ConfirmedLogic = timestamp.LogicalCounter; @@ -136,7 +129,8 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation } /// - public override Task> GetConfirmationsAsync(CancellationToken cancellationToken = default) + public override Task> GetConfirmationsAsync( + CancellationToken cancellationToken = default) { var confirmations = _context.PeerOplogConfirmations .Find(c => c.SourceNodeId != RegistrationSourceNodeId) @@ -152,9 +146,7 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation CancellationToken cancellationToken = default) { if (string.IsNullOrWhiteSpace(peerNodeId)) - { throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - } var confirmations = _context.PeerOplogConfirmations .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId != RegistrationSourceNodeId) @@ -168,26 +160,18 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default) { if (string.IsNullOrWhiteSpace(peerNodeId)) - { throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - } var matches = _context.PeerOplogConfirmations .Find(c => c.PeerNodeId == peerNodeId) .ToList(); - if (matches.Count == 0) - { - return; - } + if (matches.Count == 0) return; - var nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); foreach (var match in matches) { - if (!match.IsActive) - { - continue; - } + if (!match.IsActive) continue; match.IsActive = false; 
match.LastConfirmedUtcMs = nowMs; @@ -229,7 +213,8 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation } /// - public override async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) + public override async Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default) { foreach (var item in items) { @@ -255,7 +240,8 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation } /// - public override async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) + public override async Task MergeAsync(IEnumerable items, + CancellationToken cancellationToken = default) { foreach (var item in items) { @@ -271,7 +257,8 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation var changed = false; var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId); - var existingTimestamp = new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId); + var existingTimestamp = + new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId); if (incomingTimestamp > existingTimestamp) { @@ -281,7 +268,7 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation changed = true; } - var incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds(); + long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds(); if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs) { existing.LastConfirmedUtcMs = incomingLastConfirmedMs; @@ -294,10 +281,7 @@ public class BLitePeerOplogConfirmationStore : PeerOplogConfirmation changed = true; } - if (changed) - { - await _context.PeerOplogConfirmations.UpdateAsync(existing); - } + if (changed) await _context.PeerOplogConfirmations.UpdateAsync(existing); } await _context.SaveChangesAsync(cancellationToken); @@ -305,17 +289,12 @@ public class BLitePeerOplogConfirmationStore : 
PeerOplogConfirmation private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, PeerOplogConfirmationEntity existing) { - if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) - { - return true; - } + if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true; if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall && incomingTimestamp.LogicalCounter > existing.ConfirmedLogic) - { return true; - } return false; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs index 1ee7879..d7649e0 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs @@ -1,44 +1,53 @@ -ο»Ώusing ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; -using Microsoft.Extensions.Logging; +ο»Ώusing Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core; +using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; namespace ZB.MOM.WW.CBDDC.Persistence.BLite; /// -/// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence -/// operations. +/// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence +/// operations. /// -/// This class enables storage, retrieval, and management of snapshot metadata using the provided -/// database context. It is typically used in scenarios where snapshot metadata needs to be persisted in a document -/// database. The class supports bulk operations and incremental updates, and can be extended for custom database -/// contexts. Thread safety depends on the underlying context implementation. -/// The type of the document database context used for accessing and managing snapshot metadata. 
Must inherit from -/// CBDDCDocumentDbContext. +/// +/// This class enables storage, retrieval, and management of snapshot metadata using the provided +/// database context. It is typically used in scenarios where snapshot metadata needs to be persisted in a document +/// database. The class supports bulk operations and incremental updates, and can be extended for custom database +/// contexts. Thread safety depends on the underlying context implementation. +/// +/// +/// The type of the document database context used for accessing and managing snapshot metadata. Must inherit from +/// CBDDCDocumentDbContext. +/// public class BLiteSnapshotMetadataStore : SnapshotMetadataStore where TDbContext : CBDDCDocumentDbContext { /// - /// Represents the database context used for data access operations within the derived class. + /// Represents the database context used for data access operations within the derived class. /// - /// Intended for use by derived classes to interact with the underlying database. The context - /// should be properly disposed of according to the application's lifetime management strategy. + /// + /// Intended for use by derived classes to interact with the underlying database. The context + /// should be properly disposed of according to the application's lifetime management strategy. + /// protected readonly TDbContext _context; /// - /// Provides logging capabilities for the BLiteSnapshotMetadataStore operations. + /// Provides logging capabilities for the BLiteSnapshotMetadataStore operations. /// - /// Intended for use by derived classes to record diagnostic and operational information. The - /// logger instance is specific to the BLiteSnapshotMetadataStore type. + /// + /// Intended for use by derived classes to record diagnostic and operational information. The + /// logger instance is specific to the BLiteSnapshotMetadataStore type. 
+ /// protected readonly ILogger> _logger; /// - /// Initializes a new instance of the BLiteSnapshotMetadataStore class using the specified database context and - /// optional logger. + /// Initializes a new instance of the BLiteSnapshotMetadataStore class using the specified database context and + /// optional logger. /// /// The database context to be used for accessing snapshot metadata. Cannot be null. /// An optional logger for logging diagnostic messages. If null, a no-op logger is used. /// Thrown if the context parameter is null. - public BLiteSnapshotMetadataStore(TDbContext context, ILogger>? logger = null) + public BLiteSnapshotMetadataStore(TDbContext context, + ILogger>? logger = null) { _context = context ?? throw new ArgumentNullException(nameof(context)); _logger = logger ?? NullLogger>.Instance; @@ -48,7 +57,8 @@ public class BLiteSnapshotMetadataStore : SnapshotMetadataStore wher public override async Task DropAsync(CancellationToken cancellationToken = default) { // Use Id (technical key) for deletion, not NodeId (business key) - var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(), cancellationToken); + var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(), + cancellationToken); await _context.SnapshotMetadatas.DeleteBulkAsync(allIds); await _context.SaveChangesAsync(cancellationToken); } @@ -60,37 +70,41 @@ public class BLiteSnapshotMetadataStore : SnapshotMetadataStore wher } /// - public override async Task GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default) + public override async Task GetSnapshotHashAsync(string nodeId, + CancellationToken cancellationToken = default) { // NodeId is now a regular indexed property, not the Key - var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(), cancellationToken); + var snapshot = await Task.Run(() => 
_context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(), + cancellationToken); return snapshot?.Hash; } /// - public override async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) + public override async Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default) { - foreach (var metadata in items) - { - await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity()); - } + foreach (var metadata in items) await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity()); await _context.SaveChangesAsync(cancellationToken); } /// - public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, CancellationToken cancellationToken = default) + public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, + CancellationToken cancellationToken = default) { await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity()); await _context.SaveChangesAsync(cancellationToken); } /// - public override async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) + public override async Task MergeAsync(IEnumerable items, + CancellationToken cancellationToken = default) { foreach (var metadata in items) { // NodeId is now a regular indexed property, not the Key - var existing = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(), cancellationToken); + var existing = + await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(), + cancellationToken); if (existing == null) { @@ -111,14 +125,18 @@ public class BLiteSnapshotMetadataStore : SnapshotMetadataStore wher } } } + await _context.SaveChangesAsync(cancellationToken); } /// - public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, CancellationToken cancellationToken) + public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, + 
CancellationToken cancellationToken) { // NodeId is now a regular indexed property, not the Key - find existing by NodeId - var existing = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(), cancellationToken); + var existing = + await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(), + cancellationToken); if (existing != null) { existing.NodeId = existingMeta.NodeId; @@ -131,14 +149,18 @@ public class BLiteSnapshotMetadataStore : SnapshotMetadataStore wher } /// - public override async Task GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default) + public override async Task GetSnapshotMetadataAsync(string nodeId, + CancellationToken cancellationToken = default) { // NodeId is now a regular indexed property, not the Key - return await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(), cancellationToken); + return await Task.Run( + () => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(), + cancellationToken); } /// - public override async Task> GetAllSnapshotMetadataAsync(CancellationToken cancellationToken = default) + public override async Task> GetAllSnapshotMetadataAsync( + CancellationToken cancellationToken = default) { return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken); } diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs index 40a9021..8b77ac7 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs @@ -1,17 +1,17 @@ -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Core.Sync; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; +using 
ZB.MOM.WW.CBDDC.Core.Storage; +using ZB.MOM.WW.CBDDC.Core.Sync; namespace ZB.MOM.WW.CBDDC.Persistence.BLite; /// -/// Extension methods for configuring BLite persistence for ZB.MOM.WW.CBDDC. +/// Extension methods for configuring BLite persistence for ZB.MOM.WW.CBDDC. /// public static class CBDDCBLiteExtensions { /// - /// Adds BLite persistence to CBDDC using a custom DbContext and DocumentStore implementation. + /// Adds BLite persistence to CBDDC using a custom DbContext and DocumentStore implementation. /// /// The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext. /// The type of the document store implementation. Must implement IDocumentStore. @@ -54,7 +54,7 @@ public static class CBDDCBLiteExtensions } /// - /// Adds BLite persistence to CBDDC using a custom DbContext (without explicit DocumentStore type). + /// Adds BLite persistence to CBDDC using a custom DbContext (without explicit DocumentStore type). /// /// The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext. /// The service collection to add the services to. @@ -91,12 +91,12 @@ public static class CBDDCBLiteExtensions } /// -/// Options for configuring BLite persistence. +/// Options for configuring BLite persistence. /// public class BLiteOptions { /// - /// Gets or sets the file path to the BLite database file. + /// Gets or sets the file path to the BLite database file. 
/// public string DatabasePath { get; set; } = ""; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs index bda98b9..bf4b31e 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs @@ -9,56 +9,68 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite; public partial class CBDDCDocumentDbContext : DocumentDbContext { /// - /// Gets the collection of operation log entries associated with this instance. + /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path. /// - /// The collection provides access to all recorded operation log (oplog) entries, which can be - /// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed - /// directly through this property. - public DocumentCollection OplogEntries { get; private set; } = null!; - - /// - /// Gets the collection of snapshot metadata associated with the document. - /// - public DocumentCollection SnapshotMetadatas { get; private set; } = null!; - - /// - /// Gets the collection of remote peer configurations associated with this instance. - /// - /// Use this collection to access or enumerate the configuration settings for each remote peer. - /// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the - /// containing class. - public DocumentCollection RemotePeerConfigurations { get; private set; } = null!; - - /// - /// Gets the collection of document metadata for sync tracking. - /// - /// Stores HLC timestamps and deleted state for each document without modifying application entities. - /// Used to track document versions for incremental sync instead of full snapshots. 
- public DocumentCollection DocumentMetadatas { get; private set; } = null!; - - /// - /// Gets the collection of peer oplog confirmation records for pruning safety tracking. - /// - public DocumentCollection PeerOplogConfirmations { get; private set; } = null!; - - /// - /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path. - /// - /// The file system path to the database file to be used by the context. Cannot be null or empty. + /// + /// The file system path to the database file to be used by the context. Cannot be null or + /// empty. + /// public CBDDCDocumentDbContext(string databasePath) : base(databasePath) { } /// - /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file - /// configuration. + /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file + /// configuration. /// /// The file system path to the database file. This value cannot be null or empty. - /// The configuration settings for the page file. Specifies options that control how the database pages are managed. + /// + /// The configuration settings for the page file. Specifies options that control how the database + /// pages are managed. + /// public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config) { } + /// + /// Gets the collection of operation log entries associated with this instance. + /// + /// + /// The collection provides access to all recorded operation log (oplog) entries, which can be + /// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed + /// directly through this property. + /// + public DocumentCollection OplogEntries { get; private set; } = null!; + + /// + /// Gets the collection of snapshot metadata associated with the document. 
+ /// + public DocumentCollection SnapshotMetadatas { get; private set; } = null!; + + /// + /// Gets the collection of remote peer configurations associated with this instance. + /// + /// + /// Use this collection to access or enumerate the configuration settings for each remote peer. + /// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the + /// containing class. + /// + public DocumentCollection RemotePeerConfigurations { get; private set; } = null!; + + /// + /// Gets the collection of document metadata for sync tracking. + /// + /// + /// Stores HLC timestamps and deleted state for each document without modifying application entities. + /// Used to track document versions for incremental sync instead of full snapshots. + /// + public DocumentCollection DocumentMetadatas { get; private set; } = null!; + + /// + /// Gets the collection of peer oplog confirmation records for pruning safety tracking. + /// + public DocumentCollection PeerOplogConfirmations { get; private set; } = null!; + /// protected override void OnModelCreating(ModelBuilder modelBuilder) { @@ -102,4 +114,4 @@ public partial class CBDDCDocumentDbContext : DocumentDbContext .HasIndex(e => e.IsActive) .HasIndex(e => new { e.SourceNodeId, e.ConfirmedWall, e.ConfirmedLogic }); } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs index 9a23802..5444394 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs @@ -3,45 +3,45 @@ using System.ComponentModel.DataAnnotations; namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; /// -/// BLite entity representing document metadata for sync tracking. -/// Stores HLC timestamp and deleted state for each document without modifying application entities. 
+/// BLite entity representing document metadata for sync tracking. +/// Stores HLC timestamp and deleted state for each document without modifying application entities. /// public class DocumentMetadataEntity { /// - /// Gets or sets the unique identifier for this entity (technical key). - /// Auto-generated GUID string. + /// Gets or sets the unique identifier for this entity (technical key). + /// Auto-generated GUID string. /// [Key] public string Id { get; set; } = ""; /// - /// Gets or sets the collection name (business key part 1). + /// Gets or sets the collection name (business key part 1). /// public string Collection { get; set; } = ""; /// - /// Gets or sets the document key within the collection (business key part 2). + /// Gets or sets the document key within the collection (business key part 2). /// public string Key { get; set; } = ""; /// - /// Gets or sets the physical time component of the HLC timestamp. + /// Gets or sets the physical time component of the HLC timestamp. /// public long HlcPhysicalTime { get; set; } /// - /// Gets or sets the logical counter component of the HLC timestamp. + /// Gets or sets the logical counter component of the HLC timestamp. /// public int HlcLogicalCounter { get; set; } /// - /// Gets or sets the node ID that last modified this document. + /// Gets or sets the node ID that last modified this document. /// public string HlcNodeId { get; set; } = ""; /// - /// Gets or sets whether this document is marked as deleted (tombstone). + /// Gets or sets whether this document is marked as deleted (tombstone). 
/// public bool IsDeleted { get; set; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs index 04bf8bf..19340d2 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs @@ -1,120 +1,145 @@ -using System.Text.Json; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -/// -/// Provides extension methods for mapping between BLite entities and domain models. -/// -public static class EntityMappers -{ - #region OplogEntity Mappers - +using System.Text.Json; +using ZB.MOM.WW.CBDDC.Core; +using ZB.MOM.WW.CBDDC.Core.Network; + +namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; + +/// +/// Provides extension methods for mapping between BLite entities and domain models. +/// +public static class EntityMappers +{ + #region DocumentMetadataEntity Helpers + /// - /// Converts an OplogEntry domain model to an OplogEntity for persistence. + /// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state. + /// Used for tracking document sync state. + /// + /// The collection name that owns the document. + /// The document key within the collection. + /// The hybrid logical clock timestamp for the document state. + /// Indicates whether the document is marked as deleted. 
+ public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp, + bool isDeleted = false) + { + return new DocumentMetadataEntity + { + Id = Guid.NewGuid().ToString(), + Collection = collection, + Key = key, + HlcPhysicalTime = timestamp.PhysicalTime, + HlcLogicalCounter = timestamp.LogicalCounter, + HlcNodeId = timestamp.NodeId, + IsDeleted = isDeleted + }; + } + + #endregion + + #region OplogEntity Mappers + + /// + /// Converts an OplogEntry domain model to an OplogEntity for persistence. /// /// The oplog entry to convert. public static OplogEntity ToEntity(this OplogEntry entry) { return new OplogEntity - { - Id = Guid.NewGuid().ToString(), // Auto-generate technical key - Collection = entry.Collection, - Key = entry.Key, - Operation = (int)entry.Operation, - // Use empty string instead of null to avoid BLite BSON serialization issues - PayloadJson = entry.Payload?.GetRawText() ?? "", - TimestampPhysicalTime = entry.Timestamp.PhysicalTime, - TimestampLogicalCounter = entry.Timestamp.LogicalCounter, - TimestampNodeId = entry.Timestamp.NodeId, - Hash = entry.Hash, - PreviousHash = entry.PreviousHash - }; - } - + { + Id = Guid.NewGuid().ToString(), // Auto-generate technical key + Collection = entry.Collection, + Key = entry.Key, + Operation = (int)entry.Operation, + // Use empty string instead of null to avoid BLite BSON serialization issues + PayloadJson = entry.Payload?.GetRawText() ?? "", + TimestampPhysicalTime = entry.Timestamp.PhysicalTime, + TimestampLogicalCounter = entry.Timestamp.LogicalCounter, + TimestampNodeId = entry.Timestamp.NodeId, + Hash = entry.Hash, + PreviousHash = entry.PreviousHash + }; + } + /// - /// Converts an OplogEntity to an OplogEntry domain model. + /// Converts an OplogEntity to an OplogEntry domain model. /// /// The persisted oplog entity to convert. public static OplogEntry ToDomain(this OplogEntity entity) { JsonElement? 
payload = null; - // Treat empty string as null payload (Delete operations) - if (!string.IsNullOrEmpty(entity.PayloadJson)) - { - payload = JsonSerializer.Deserialize(entity.PayloadJson); - } - - return new OplogEntry( - entity.Collection, - entity.Key, - (OperationType)entity.Operation, - payload, - new HlcTimestamp(entity.TimestampPhysicalTime, entity.TimestampLogicalCounter, entity.TimestampNodeId), - entity.PreviousHash, - entity.Hash); - } - + // Treat empty string as null payload (Delete operations) + if (!string.IsNullOrEmpty(entity.PayloadJson)) + payload = JsonSerializer.Deserialize(entity.PayloadJson); + + return new OplogEntry( + entity.Collection, + entity.Key, + (OperationType)entity.Operation, + payload, + new HlcTimestamp(entity.TimestampPhysicalTime, entity.TimestampLogicalCounter, entity.TimestampNodeId), + entity.PreviousHash, + entity.Hash); + } + /// - /// Converts a collection of OplogEntity to OplogEntry domain models. + /// Converts a collection of OplogEntity to OplogEntry domain models. /// /// The oplog entities to convert. public static IEnumerable ToDomain(this IEnumerable entities) { return entities.Select(e => e.ToDomain()); - } - - #endregion - - #region SnapshotMetadataEntity Mappers - + } + + #endregion + + #region SnapshotMetadataEntity Mappers + /// - /// Converts a SnapshotMetadata domain model to a SnapshotMetadataEntity for persistence. + /// Converts a SnapshotMetadata domain model to a SnapshotMetadataEntity for persistence. /// /// The snapshot metadata to convert. 
public static SnapshotMetadataEntity ToEntity(this SnapshotMetadata metadata) { return new SnapshotMetadataEntity - { - Id = Guid.NewGuid().ToString(), // Auto-generate technical key - NodeId = metadata.NodeId, - TimestampPhysicalTime = metadata.TimestampPhysicalTime, - TimestampLogicalCounter = metadata.TimestampLogicalCounter, - Hash = metadata.Hash - }; - } - + { + Id = Guid.NewGuid().ToString(), // Auto-generate technical key + NodeId = metadata.NodeId, + TimestampPhysicalTime = metadata.TimestampPhysicalTime, + TimestampLogicalCounter = metadata.TimestampLogicalCounter, + Hash = metadata.Hash + }; + } + /// - /// Converts a SnapshotMetadataEntity to a SnapshotMetadata domain model. + /// Converts a SnapshotMetadataEntity to a SnapshotMetadata domain model. /// /// The persisted snapshot metadata entity to convert. public static SnapshotMetadata ToDomain(this SnapshotMetadataEntity entity) { return new SnapshotMetadata - { - NodeId = entity.NodeId, - TimestampPhysicalTime = entity.TimestampPhysicalTime, - TimestampLogicalCounter = entity.TimestampLogicalCounter, - Hash = entity.Hash - }; - } - + { + NodeId = entity.NodeId, + TimestampPhysicalTime = entity.TimestampPhysicalTime, + TimestampLogicalCounter = entity.TimestampLogicalCounter, + Hash = entity.Hash + }; + } + /// - /// Converts a collection of SnapshotMetadataEntity to SnapshotMetadata domain models. + /// Converts a collection of SnapshotMetadataEntity to SnapshotMetadata domain models. /// /// The snapshot metadata entities to convert. public static IEnumerable ToDomain(this IEnumerable entities) { return entities.Select(e => e.ToDomain()); - } - - #endregion - - #region RemotePeerEntity Mappers - + } + + #endregion + + #region RemotePeerEntity Mappers + /// - /// Converts a RemotePeerConfiguration domain model to a RemotePeerEntity for persistence. + /// Converts a RemotePeerConfiguration domain model to a RemotePeerEntity for persistence. /// /// The remote peer configuration to convert. 
public static RemotePeerEntity ToEntity(this RemotePeerConfiguration config) @@ -129,11 +154,11 @@ public static class EntityMappers InterestsJson = config.InterestingCollections.Count > 0 ? JsonSerializer.Serialize(config.InterestingCollections) : "" - }; - } - + }; + } + /// - /// Converts a RemotePeerEntity to a RemotePeerConfiguration domain model. + /// Converts a RemotePeerEntity to a RemotePeerConfiguration domain model. /// /// The persisted remote peer entity to convert. public static RemotePeerConfiguration ToDomain(this RemotePeerEntity entity) @@ -145,30 +170,28 @@ public static class EntityMappers Type = (PeerType)entity.Type, IsEnabled = entity.IsEnabled }; - - if (!string.IsNullOrEmpty(entity.InterestsJson)) - { - config.InterestingCollections = JsonSerializer.Deserialize>(entity.InterestsJson) ?? []; - } - - return config; - } - + + if (!string.IsNullOrEmpty(entity.InterestsJson)) + config.InterestingCollections = JsonSerializer.Deserialize>(entity.InterestsJson) ?? []; + + return config; + } + /// - /// Converts a collection of RemotePeerEntity to RemotePeerConfiguration domain models. + /// Converts a collection of RemotePeerEntity to RemotePeerConfiguration domain models. /// /// The remote peer entities to convert. public static IEnumerable ToDomain(this IEnumerable entities) { return entities.Select(e => e.ToDomain()); - } - + } + #endregion #region PeerOplogConfirmationEntity Mappers /// - /// Converts a peer oplog confirmation domain model to a BLite entity. + /// Converts a peer oplog confirmation domain model to a BLite entity. /// /// The confirmation to convert. public static PeerOplogConfirmationEntity ToEntity(this PeerOplogConfirmation confirmation) @@ -187,7 +210,7 @@ public static class EntityMappers } /// - /// Converts a peer oplog confirmation entity to a domain model. + /// Converts a peer oplog confirmation entity to a domain model. /// /// The entity to convert. 
public static PeerOplogConfirmation ToDomain(this PeerOplogConfirmationEntity entity) @@ -205,7 +228,7 @@ public static class EntityMappers } /// - /// Converts a collection of peer oplog confirmation entities to domain models. + /// Converts a collection of peer oplog confirmation entities to domain models. /// /// The entities to convert. public static IEnumerable ToDomain(this IEnumerable entities) @@ -214,30 +237,4 @@ public static class EntityMappers } #endregion - - #region DocumentMetadataEntity Helpers - - /// - /// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state. - /// Used for tracking document sync state. - /// - /// The collection name that owns the document. - /// The document key within the collection. - /// The hybrid logical clock timestamp for the document state. - /// Indicates whether the document is marked as deleted. - public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp, bool isDeleted = false) - { - return new DocumentMetadataEntity - { - Id = Guid.NewGuid().ToString(), - Collection = collection, - Key = key, - HlcPhysicalTime = timestamp.PhysicalTime, - HlcLogicalCounter = timestamp.LogicalCounter, - HlcNodeId = timestamp.NodeId, - IsDeleted = isDeleted - }; - } - - #endregion -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs index 35dd544..2a958c6 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs @@ -3,59 +3,59 @@ using System.ComponentModel.DataAnnotations; namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; /// -/// BLite entity representing an operation log entry. +/// BLite entity representing an operation log entry. /// public class OplogEntity { /// - /// Gets or sets the unique identifier for this entity (technical key). 
- /// Auto-generated GUID string. + /// Gets or sets the unique identifier for this entity (technical key). + /// Auto-generated GUID string. /// [Key] public string Id { get; set; } = ""; /// - /// Gets or sets the collection name. + /// Gets or sets the collection name. /// public string Collection { get; set; } = ""; /// - /// Gets or sets the document key. + /// Gets or sets the document key. /// public string Key { get; set; } = ""; /// - /// Gets or sets the operation type (0 = Put, 1 = Delete). + /// Gets or sets the operation type (0 = Put, 1 = Delete). /// public int Operation { get; set; } /// - /// Gets or sets the payload JSON (empty string for Delete operations). + /// Gets or sets the payload JSON (empty string for Delete operations). /// public string PayloadJson { get; set; } = ""; /// - /// Gets or sets the physical time component of the HLC timestamp. + /// Gets or sets the physical time component of the HLC timestamp. /// public long TimestampPhysicalTime { get; set; } /// - /// Gets or sets the logical counter component of the HLC timestamp. + /// Gets or sets the logical counter component of the HLC timestamp. /// public int TimestampLogicalCounter { get; set; } /// - /// Gets or sets the node ID component of the HLC timestamp. + /// Gets or sets the node ID component of the HLC timestamp. /// public string TimestampNodeId { get; set; } = ""; /// - /// Gets or sets the cryptographic hash of this entry (business key). + /// Gets or sets the cryptographic hash of this entry (business key). /// public string Hash { get; set; } = ""; /// - /// Gets or sets the hash of the previous entry in the chain. + /// Gets or sets the hash of the previous entry in the chain. 
/// public string PreviousHash { get; set; } = ""; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs index 7cd17da..b65c546 100644 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs @@ -3,48 +3,48 @@ using System.ComponentModel.DataAnnotations; namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; /// -/// BLite entity representing a peer oplog confirmation watermark. +/// BLite entity representing a peer oplog confirmation watermark. /// public class PeerOplogConfirmationEntity { /// - /// Gets or sets the unique technical identifier for this entity. + /// Gets or sets the unique technical identifier for this entity. /// [Key] public string Id { get; set; } = ""; /// - /// Gets or sets the tracked peer node identifier. + /// Gets or sets the tracked peer node identifier. /// public string PeerNodeId { get; set; } = ""; /// - /// Gets or sets the source node identifier for this confirmation. + /// Gets or sets the source node identifier for this confirmation. /// public string SourceNodeId { get; set; } = ""; /// - /// Gets or sets the physical wall-clock component of the confirmed HLC timestamp. + /// Gets or sets the physical wall-clock component of the confirmed HLC timestamp. /// public long ConfirmedWall { get; set; } /// - /// Gets or sets the logical component of the confirmed HLC timestamp. + /// Gets or sets the logical component of the confirmed HLC timestamp. /// public int ConfirmedLogic { get; set; } /// - /// Gets or sets the confirmed hash value. + /// Gets or sets the confirmed hash value. /// public string ConfirmedHash { get; set; } = ""; /// - /// Gets or sets the UTC instant of the last update as unix milliseconds. 
+ /// Gets or sets the UTC instant of the last update as unix milliseconds. /// public long LastConfirmedUtcMs { get; set; } /// - /// Gets or sets whether the tracked peer remains active. + /// Gets or sets whether the tracked peer remains active. /// public bool IsActive { get; set; } = true; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs index 0914c03..1626dd8 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs @@ -3,40 +3,40 @@ using System.ComponentModel.DataAnnotations; namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; /// -/// BLite entity representing a remote peer configuration. +/// BLite entity representing a remote peer configuration. /// public class RemotePeerEntity { /// - /// Gets or sets the unique identifier for this entity (technical key). - /// Auto-generated GUID string. + /// Gets or sets the unique identifier for this entity (technical key). + /// Auto-generated GUID string. /// [Key] public string Id { get; set; } = ""; /// - /// Gets or sets the unique identifier for the remote peer node (business key). + /// Gets or sets the unique identifier for the remote peer node (business key). /// public string NodeId { get; set; } = ""; /// - /// Gets or sets the network address of the remote peer (hostname:port). + /// Gets or sets the network address of the remote peer (hostname:port). /// public string Address { get; set; } = ""; /// - /// Gets or sets the type of the peer (0=LanDiscovered, 1=StaticRemote, 2=CloudRemote). + /// Gets or sets the type of the peer (0=LanDiscovered, 1=StaticRemote, 2=CloudRemote). /// public int Type { get; set; } /// - /// Gets or sets whether this peer is enabled for synchronization. + /// Gets or sets whether this peer is enabled for synchronization. 
/// public bool IsEnabled { get; set; } = true; /// - /// Gets or sets the collection interests as a JSON string. - /// Use empty string instead of null for BLite compatibility. + /// Gets or sets the collection interests as a JSON string. + /// Use empty string instead of null for BLite compatibility. /// public string InterestsJson { get; set; } = ""; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs index f4234bb..facf2d7 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs @@ -3,34 +3,34 @@ using System.ComponentModel.DataAnnotations; namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; /// -/// BLite entity representing snapshot metadata (oplog pruning checkpoint). +/// BLite entity representing snapshot metadata (oplog pruning checkpoint). /// public class SnapshotMetadataEntity { /// - /// Gets or sets the unique identifier for this entity (technical key). - /// Auto-generated GUID string. + /// Gets or sets the unique identifier for this entity (technical key). + /// Auto-generated GUID string. /// [Key] public string Id { get; set; } = ""; /// - /// Gets or sets the node identifier (business key). + /// Gets or sets the node identifier (business key). /// public string NodeId { get; set; } = ""; /// - /// Gets or sets the physical time component of the timestamp. + /// Gets or sets the physical time component of the timestamp. /// public long TimestampPhysicalTime { get; set; } /// - /// Gets or sets the logical counter component of the timestamp. + /// Gets or sets the logical counter component of the timestamp. /// public int TimestampLogicalCounter { get; set; } /// - /// Gets or sets the hash of the snapshot. + /// Gets or sets the hash of the snapshot. 
/// public string Hash { get; set; } = ""; -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/DocumentMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/DocumentMetadataStore.cs index ee7ff66..c997d7d 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/DocumentMetadataStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/DocumentMetadataStore.cs @@ -4,28 +4,33 @@ using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Persistence; /// -/// Abstract base class for document metadata storage implementations. -/// Provides common functionality for tracking document HLC timestamps for sync. +/// Abstract base class for document metadata storage implementations. +/// Provides common functionality for tracking document HLC timestamps for sync. /// public abstract class DocumentMetadataStore : IDocumentMetadataStore { /// - public abstract Task GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default); + public abstract Task GetMetadataAsync(string collection, string key, + CancellationToken cancellationToken = default); /// - public abstract Task> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default); + public abstract Task> GetMetadataByCollectionAsync(string collection, + CancellationToken cancellationToken = default); /// public abstract Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default); /// - public abstract Task UpsertMetadataBatchAsync(IEnumerable metadatas, CancellationToken cancellationToken = default); + public abstract Task UpsertMetadataBatchAsync(IEnumerable metadatas, + CancellationToken cancellationToken = default); /// - public abstract Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default); + public abstract Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, + CancellationToken cancellationToken = default); /// - 
public abstract Task> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable? collections = null, CancellationToken cancellationToken = default); + public abstract Task> GetMetadataAfterAsync(HlcTimestamp since, + IEnumerable? collections = null, CancellationToken cancellationToken = default); /// public abstract Task DropAsync(CancellationToken cancellationToken = default); @@ -34,8 +39,9 @@ public abstract class DocumentMetadataStore : IDocumentMetadataStore public abstract Task> ExportAsync(CancellationToken cancellationToken = default); /// - public abstract Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default); + public abstract Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default); /// public abstract Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default); -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/NodeCacheEntry.cs b/src/ZB.MOM.WW.CBDDC.Persistence/NodeCacheEntry.cs index 4367ec2..67983a7 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/NodeCacheEntry.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/NodeCacheEntry.cs @@ -5,12 +5,12 @@ namespace ZB.MOM.WW.CBDDC.Persistence; public class NodeCacheEntry { /// - /// Gets or sets the latest known timestamp for the node. + /// Gets or sets the latest known timestamp for the node. /// public HlcTimestamp Timestamp { get; set; } + /// - /// Gets or sets the latest known hash for the node. + /// Gets or sets the latest known hash for the node. 
/// public string Hash { get; set; } = ""; -} - +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/OplogStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/OplogStore.cs index 07d5ddf..ad889b4 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/OplogStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/OplogStore.cs @@ -6,27 +6,13 @@ namespace ZB.MOM.WW.CBDDC.Persistence; public abstract class OplogStore : IOplogStore { - protected readonly IDocumentStore _documentStore; protected readonly IConflictResolver _conflictResolver; + protected readonly IDocumentStore _documentStore; protected readonly ISnapshotMetadataStore? _snapshotMetadataStore; protected readonly IVectorClockService _vectorClock; /// - /// Occurs after a set of oplog entries has been applied. - /// - public event EventHandler? ChangesApplied; - - /// - /// Raises the event. - /// - /// The entries that were applied. - public virtual void OnChangesApplied(IEnumerable appliedEntries) - { - ChangesApplied?.Invoke(this, new ChangesAppliedEventArgs(appliedEntries)); - } - - /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The backing document store. /// The conflict resolver used during merges. @@ -46,20 +32,9 @@ public abstract class OplogStore : IOplogStore } /// - /// Initializes the VectorClockService with existing oplog/snapshot data. - /// Called once at construction time. + /// Occurs after a set of oplog entries has been applied. /// - protected abstract void InitializeVectorClock(); - - /// - /// Asynchronously inserts an operation log entry into the underlying data store. - /// - /// Implementations should ensure that the entry is persisted reliably. If the operation is - /// cancelled, the entry may not be inserted. - /// The operation log entry to insert. Cannot be null. - /// A cancellation token that can be used to cancel the insert operation. - /// A task that represents the asynchronous insert operation. 
- protected abstract Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default); + public event EventHandler? ChangesApplied; /// public async Task AppendOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default) @@ -69,13 +44,11 @@ public abstract class OplogStore : IOplogStore } /// - public async virtual Task ApplyBatchAsync(IEnumerable oplogEntries, CancellationToken cancellationToken = default) + public virtual async Task ApplyBatchAsync(IEnumerable oplogEntries, + CancellationToken cancellationToken = default) { var entries = oplogEntries.ToList(); - if (entries.Count == 0) - { - return; - } + if (entries.Count == 0) return; var documentKeys = entries.Select(e => (e.Collection, e.Key)).Distinct().ToList(); var documentsToFetch = await _documentStore.GetDocumentsAsync(documentKeys, cancellationToken); @@ -83,12 +56,13 @@ public abstract class OplogStore : IOplogStore var orderedEntriesPerCollectionKey = entries .GroupBy(e => (e.Collection, e.Key)) .ToDictionary(g => g.Key, g => g.OrderBy(e => e.Timestamp.PhysicalTime) - .ThenBy(e => e.Timestamp.LogicalCounter) - .ToList()); + .ThenBy(e => e.Timestamp.LogicalCounter) + .ToList()); foreach (var entry in orderedEntriesPerCollectionKey) { - var existingDocument = documentsToFetch.FirstOrDefault(d => d.Collection == entry.Key.Collection && d.Key == entry.Key.Key); + var existingDocument = + documentsToFetch.FirstOrDefault(d => d.Collection == entry.Key.Collection && d.Key == entry.Key.Key); var document = existingDocument; var sawDelete = false; var sawPut = false; @@ -106,34 +80,25 @@ public abstract class OplogStore : IOplogStore { sawPut = true; if (document == null) - { document = new Document( oplogEntry.Collection, oplogEntry.Key, oplogEntry.Payload.Value, oplogEntry.Timestamp, - isDeleted: false); - } + false); else - { document.Merge(oplogEntry, _conflictResolver); - } } } if (document == null) { if (sawDelete && existingDocument != null) - { await 
_documentStore.DeleteDocumentAsync(entry.Key.Collection, entry.Key.Key, cancellationToken); - } continue; } - if (sawPut || existingDocument == null) - { - await _documentStore.PutDocumentAsync(document, cancellationToken); - } + if (sawPut || existingDocument == null) await _documentStore.PutDocumentAsync(document, cancellationToken); } //insert all oplog entries after processing documents to ensure oplog reflects the actual state of documents @@ -145,41 +110,21 @@ public abstract class OplogStore : IOplogStore } /// - public abstract Task> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default); + public abstract Task> GetChainRangeAsync(string startHash, string endHash, + CancellationToken cancellationToken = default); /// public abstract Task GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default); - /// - /// Asynchronously retrieves the most recent hash value associated with the specified node. - /// - /// The unique identifier of the node for which to query the last hash. Cannot be null or empty. - /// A cancellation token that can be used to cancel the operation. - /// A task that represents the asynchronous operation. The task result contains the last hash value for the node, or - /// null if no hash is available. - protected abstract Task QueryLastHashForNodeAsync(string nodeId, CancellationToken cancellationToken = default); - - /// - /// Asynchronously queries the oplog for the most recent timestamp associated with the specified hash. - /// - /// This method is intended to be implemented by derived classes to provide access to the oplog. - /// The returned timestamps can be used to track the last occurrence of a hash in the oplog for synchronization or - /// auditing purposes. - /// The hash value to search for in the oplog. Cannot be null. - /// A cancellation token that can be used to cancel the asynchronous operation. - /// A task that represents the asynchronous operation. 
The task result contains a tuple with the wall clock - /// timestamp and logical timestamp if the hash is found; otherwise, null. - protected abstract Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, CancellationToken cancellationToken = default); - /// public async Task GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default) { // Try cache first - var cachedHash = _vectorClock.GetLastHash(nodeId); + string? cachedHash = _vectorClock.GetLastHash(nodeId); if (cachedHash != null) return cachedHash; // Cache miss - query database (Oplog first) - var hash = await QueryLastHashForNodeAsync(nodeId, cancellationToken); + string? hash = await QueryLastHashForNodeAsync(nodeId, cancellationToken); // FALLBACK: If not in oplog, check SnapshotMetadata (important after prune!) if (hash == null && _snapshotMetadataStore != null) @@ -190,11 +135,10 @@ public abstract class OplogStore : IOplogStore { var snapshotMeta = await _snapshotMetadataStore.GetSnapshotMetadataAsync(nodeId, cancellationToken); if (snapshotMeta != null) - { _vectorClock.UpdateNode(nodeId, - new HlcTimestamp(snapshotMeta.TimestampPhysicalTime, snapshotMeta.TimestampLogicalCounter, nodeId), + new HlcTimestamp(snapshotMeta.TimestampPhysicalTime, snapshotMeta.TimestampLogicalCounter, + nodeId), hash); - } return hash; } } @@ -204,11 +148,9 @@ public abstract class OplogStore : IOplogStore { var row = await QueryLastHashTimestampFromOplogAsync(hash, cancellationToken); if (row.HasValue) - { _vectorClock.UpdateNode(nodeId, new HlcTimestamp(row.Value.Wall, row.Value.Logic, nodeId), hash); - } } return hash; @@ -221,10 +163,12 @@ public abstract class OplogStore : IOplogStore } /// - public abstract Task> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable? collections = null, CancellationToken cancellationToken = default); + public abstract Task> GetOplogAfterAsync(HlcTimestamp timestamp, + IEnumerable? 
collections = null, CancellationToken cancellationToken = default); /// - public abstract Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable? collections = null, CancellationToken cancellationToken = default); + public abstract Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, + IEnumerable? collections = null, CancellationToken cancellationToken = default); /// public Task GetVectorClockAsync(CancellationToken cancellationToken = default) @@ -236,21 +180,21 @@ public abstract class OplogStore : IOplogStore public abstract Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default); /// - /// Drops all oplog data from the underlying store. + /// Drops all oplog data from the underlying store. /// /// A token used to cancel the operation. /// A task that represents the asynchronous operation. public abstract Task DropAsync(CancellationToken cancellationToken = default); /// - /// Exports all oplog entries from the underlying store. + /// Exports all oplog entries from the underlying store. /// /// A token used to cancel the operation. /// A task that returns the exported oplog entries. public abstract Task> ExportAsync(CancellationToken cancellationToken = default); /// - /// Imports oplog entries into the underlying store. + /// Imports oplog entries into the underlying store. /// /// The entries to import. /// A token used to cancel the operation. @@ -258,10 +202,66 @@ public abstract class OplogStore : IOplogStore public abstract Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default); /// - /// Merges oplog entries into the underlying store. + /// Merges oplog entries into the underlying store. /// /// The entries to merge. /// A token used to cancel the operation. /// A task that represents the asynchronous operation. public abstract Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default); -} + + /// + /// Raises the event. 
+ /// + /// The entries that were applied. + public virtual void OnChangesApplied(IEnumerable appliedEntries) + { + ChangesApplied?.Invoke(this, new ChangesAppliedEventArgs(appliedEntries)); + } + + /// + /// Initializes the VectorClockService with existing oplog/snapshot data. + /// Called once at construction time. + /// + protected abstract void InitializeVectorClock(); + + /// + /// Asynchronously inserts an operation log entry into the underlying data store. + /// + /// + /// Implementations should ensure that the entry is persisted reliably. If the operation is + /// cancelled, the entry may not be inserted. + /// + /// The operation log entry to insert. Cannot be null. + /// A cancellation token that can be used to cancel the insert operation. + /// A task that represents the asynchronous insert operation. + protected abstract Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default); + + /// + /// Asynchronously retrieves the most recent hash value associated with the specified node. + /// + /// The unique identifier of the node for which to query the last hash. Cannot be null or empty. + /// A cancellation token that can be used to cancel the operation. + /// + /// A task that represents the asynchronous operation. The task result contains the last hash value for the node, or + /// null if no hash is available. + /// + protected abstract Task QueryLastHashForNodeAsync(string nodeId, + CancellationToken cancellationToken = default); + + /// + /// Asynchronously queries the oplog for the most recent timestamp associated with the specified hash. + /// + /// + /// This method is intended to be implemented by derived classes to provide access to the oplog. + /// The returned timestamps can be used to track the last occurrence of a hash in the oplog for synchronization or + /// auditing purposes. + /// + /// The hash value to search for in the oplog. Cannot be null. 
+ /// A cancellation token that can be used to cancel the asynchronous operation. + /// + /// A task that represents the asynchronous operation. The task result contains a tuple with the wall clock + /// timestamp and logical timestamp if the hash is found; otherwise, null. + /// + protected abstract Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, + CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/PeerConfigurationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/PeerConfigurationStore.cs index 8143c22..bcb2508 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/PeerConfigurationStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/PeerConfigurationStore.cs @@ -6,44 +6,43 @@ namespace ZB.MOM.WW.CBDDC.Persistence; public abstract class PeerConfigurationStore : IPeerConfigurationStore { /// - public abstract Task> GetRemotePeersAsync(CancellationToken cancellationToken = default); + public abstract Task> GetRemotePeersAsync( + CancellationToken cancellationToken = default); /// - public abstract Task GetRemotePeerAsync(string nodeId, CancellationToken cancellationToken); + public abstract Task GetRemotePeerAsync(string nodeId, + CancellationToken cancellationToken); /// public abstract Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default); /// - public abstract Task SaveRemotePeerAsync(RemotePeerConfiguration peer, CancellationToken cancellationToken = default); + public abstract Task SaveRemotePeerAsync(RemotePeerConfiguration peer, + CancellationToken cancellationToken = default); /// public abstract Task DropAsync(CancellationToken cancellationToken = default); /// - public abstract Task> ExportAsync(CancellationToken cancellationToken = default); + public abstract Task> ExportAsync( + CancellationToken cancellationToken = default); /// - public virtual async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) + 
public virtual async Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default) { - foreach (var item in items) - { - await SaveRemotePeerAsync(item, cancellationToken); - } + foreach (var item in items) await SaveRemotePeerAsync(item, cancellationToken); } /// - public virtual async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) + public virtual async Task MergeAsync(IEnumerable items, + CancellationToken cancellationToken = default) { foreach (var item in items) { var existing = await GetRemotePeerAsync(item.NodeId, cancellationToken); - if (existing == null) - { - await SaveRemotePeerAsync(item, cancellationToken); - } + if (existing == null) await SaveRemotePeerAsync(item, cancellationToken); // If exists, keep existing (simple merge strategy) } } -} - +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/PeerOplogConfirmationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/PeerOplogConfirmationStore.cs index 55b543f..c751890 100644 --- a/src/ZB.MOM.WW.CBDDC.Persistence/PeerOplogConfirmationStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/PeerOplogConfirmationStore.cs @@ -5,7 +5,7 @@ using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Persistence; /// -/// Abstract base class for peer oplog confirmation store implementations. +/// Abstract base class for peer oplog confirmation store implementations. 
/// public abstract class PeerOplogConfirmationStore : IPeerOplogConfirmationStore { @@ -49,8 +49,10 @@ public abstract class PeerOplogConfirmationStore : IPeerOplogConfirmationStore public abstract Task> ExportAsync(CancellationToken cancellationToken = default); /// - public abstract Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default); + public abstract Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default); /// - public abstract Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default); -} + public abstract Task MergeAsync(IEnumerable items, + CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/README.md b/src/ZB.MOM.WW.CBDDC.Persistence/README.md index c45addc..d03cc08 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/README.md +++ b/src/ZB.MOM.WW.CBDDC.Persistence/README.md @@ -15,25 +15,32 @@ This package provides both BLite provider types and core persistence services: ## When To Use This Package - **As a Library User**: Install this package to use CBDDC with BLite persistence. 
-- **As a Provider Developer**: Reference this package to build custom persistence providers by extending the base classes +- **As a Provider Developer**: Reference this package to build custom persistence providers by extending the base + classes ## Key Components ### OplogStore + Base implementation for operation log storage with: + - Hash-chain verification - Batch application - Conflict resolution integration - Change event notifications ### VectorClockService + Thread-safe vector clock management: + - In-memory caching for fast lookups - Atomic updates - Causal ordering tracking ### SnapshotStore + Snapshot lifecycle management: + - Creation and compression - Restoration logic - Metadata tracking diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Snapshot/SnapshotDto.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Snapshot/SnapshotDto.cs index aaf7c0f..3b04f4b 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/Snapshot/SnapshotDto.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Snapshot/SnapshotDto.cs @@ -1,49 +1,47 @@ -using System.Collections.Generic; - -namespace ZB.MOM.WW.CBDDC.Persistence.Snapshot; - -/// -/// Root DTO for CBDDC snapshots (JSON format). -/// +namespace ZB.MOM.WW.CBDDC.Persistence.Snapshot; + +/// +/// Root DTO for CBDDC snapshots (JSON format). +/// public class SnapshotDto { /// - /// Gets or sets the snapshot format version. + /// Gets or sets the snapshot format version. /// public string Version { get; set; } = "1.0"; /// - /// Gets or sets the snapshot creation timestamp. + /// Gets or sets the snapshot creation timestamp. /// public string CreatedAt { get; set; } = ""; /// - /// Gets or sets the source node identifier. + /// Gets or sets the source node identifier. /// public string NodeId { get; set; } = ""; /// - /// Gets or sets the serialized document records. + /// Gets or sets the serialized document records. /// public List Documents { get; set; } = new(); /// - /// Gets or sets the serialized oplog entries. 
+ /// Gets or sets the serialized oplog entries. /// public List Oplog { get; set; } = new(); /// - /// Gets or sets the snapshot metadata entries. + /// Gets or sets the snapshot metadata entries. /// public List SnapshotMetadata { get; set; } = new(); /// - /// Gets or sets the remote peer configurations in the snapshot. + /// Gets or sets the remote peer configurations in the snapshot. /// public List RemotePeers { get; set; } = new(); /// - /// Gets or sets peer oplog confirmation records in the snapshot. + /// Gets or sets peer oplog confirmation records in the snapshot. /// public List PeerConfirmations { get; set; } = new(); } @@ -51,37 +49,37 @@ public class SnapshotDto public class DocumentDto { /// - /// Gets or sets the document collection name. + /// Gets or sets the document collection name. /// public string Collection { get; set; } = ""; /// - /// Gets or sets the document key. + /// Gets or sets the document key. /// public string Key { get; set; } = ""; /// - /// Gets or sets the serialized document payload. + /// Gets or sets the serialized document payload. /// public string? JsonData { get; set; } /// - /// Gets or sets a value indicating whether the document is deleted. + /// Gets or sets a value indicating whether the document is deleted. /// public bool IsDeleted { get; set; } /// - /// Gets or sets the HLC wall-clock component. + /// Gets or sets the HLC wall-clock component. /// public long HlcWall { get; set; } /// - /// Gets or sets the HLC logical counter component. + /// Gets or sets the HLC logical counter component. /// public int HlcLogic { get; set; } /// - /// Gets or sets the HLC node component. + /// Gets or sets the HLC node component. /// public string HlcNode { get; set; } = ""; } @@ -89,47 +87,47 @@ public class DocumentDto public class OplogDto { /// - /// Gets or sets the collection associated with the operation. + /// Gets or sets the collection associated with the operation. 
/// public string Collection { get; set; } = ""; /// - /// Gets or sets the key associated with the operation. + /// Gets or sets the key associated with the operation. /// public string Key { get; set; } = ""; /// - /// Gets or sets the operation code. + /// Gets or sets the operation code. /// public int Operation { get; set; } /// - /// Gets or sets the serialized operation payload. + /// Gets or sets the serialized operation payload. /// public string? JsonData { get; set; } /// - /// Gets or sets the HLC wall-clock component. + /// Gets or sets the HLC wall-clock component. /// public long HlcWall { get; set; } /// - /// Gets or sets the HLC logical counter component. + /// Gets or sets the HLC logical counter component. /// public int HlcLogic { get; set; } /// - /// Gets or sets the HLC node component. + /// Gets or sets the HLC node component. /// public string HlcNode { get; set; } = ""; /// - /// Gets or sets the current entry hash. + /// Gets or sets the current entry hash. /// public string Hash { get; set; } = ""; /// - /// Gets or sets the previous entry hash. + /// Gets or sets the previous entry hash. /// public string? PreviousHash { get; set; } } @@ -137,22 +135,22 @@ public class OplogDto public class SnapshotMetadataDto { /// - /// Gets or sets the node identifier. + /// Gets or sets the node identifier. /// public string NodeId { get; set; } = ""; /// - /// Gets or sets the HLC wall-clock component. + /// Gets or sets the HLC wall-clock component. /// public long HlcWall { get; set; } /// - /// Gets or sets the HLC logical counter component. + /// Gets or sets the HLC logical counter component. /// public int HlcLogic { get; set; } /// - /// Gets or sets the metadata hash. + /// Gets or sets the metadata hash. /// public string Hash { get; set; } = ""; } @@ -160,22 +158,22 @@ public class SnapshotMetadataDto public class RemotePeerDto { /// - /// Gets or sets the remote node identifier. + /// Gets or sets the remote node identifier. 
/// public string NodeId { get; set; } = ""; /// - /// Gets or sets the remote peer address. + /// Gets or sets the remote peer address. /// public string Address { get; set; } = ""; /// - /// Gets or sets the peer type. + /// Gets or sets the peer type. /// public int Type { get; set; } /// - /// Gets or sets a value indicating whether the peer is enabled. + /// Gets or sets a value indicating whether the peer is enabled. /// public bool IsEnabled { get; set; } } @@ -183,37 +181,37 @@ public class RemotePeerDto public class PeerOplogConfirmationDto { /// - /// Gets or sets the tracked peer node identifier. + /// Gets or sets the tracked peer node identifier. /// public string PeerNodeId { get; set; } = ""; /// - /// Gets or sets the source node identifier. + /// Gets or sets the source node identifier. /// public string SourceNodeId { get; set; } = ""; /// - /// Gets or sets the confirmed HLC wall-clock component. + /// Gets or sets the confirmed HLC wall-clock component. /// public long ConfirmedWall { get; set; } /// - /// Gets or sets the confirmed HLC logical counter component. + /// Gets or sets the confirmed HLC logical counter component. /// public int ConfirmedLogic { get; set; } /// - /// Gets or sets the confirmed oplog hash. + /// Gets or sets the confirmed oplog hash. /// public string ConfirmedHash { get; set; } = ""; /// - /// Gets or sets the last-confirmed timestamp in Unix milliseconds. + /// Gets or sets the last-confirmed timestamp in Unix milliseconds. /// public long LastConfirmedUtcMs { get; set; } /// - /// Gets or sets a value indicating whether the tracked peer is active. + /// Gets or sets a value indicating whether the tracked peer is active. 
/// public bool IsActive { get; set; } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotMetadataStore.cs index d5f410b..c5e80e0 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotMetadataStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotMetadataStore.cs @@ -6,28 +6,29 @@ namespace ZB.MOM.WW.CBDDC.Persistence; public abstract class SnapshotMetadataStore : ISnapshotMetadataStore { /// - /// Drops all snapshot metadata. + /// Drops all snapshot metadata. /// /// A token used to cancel the operation. public abstract Task DropAsync(CancellationToken cancellationToken = default); /// - /// Exports all snapshot metadata items. + /// Exports all snapshot metadata items. /// /// A token used to cancel the operation. /// All snapshot metadata items. public abstract Task> ExportAsync(CancellationToken cancellationToken = default); /// - /// Gets snapshot metadata for the specified node. + /// Gets snapshot metadata for the specified node. /// /// The node identifier. /// A token used to cancel the operation. /// The snapshot metadata, or null if none exists. - public abstract Task GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default); + public abstract Task GetSnapshotMetadataAsync(string nodeId, + CancellationToken cancellationToken = default); /// - /// Gets the snapshot hash for the specified node. + /// Gets the snapshot hash for the specified node. /// /// The node identifier. /// A token used to cancel the operation. @@ -35,38 +36,41 @@ public abstract class SnapshotMetadataStore : ISnapshotMetadataStore public abstract Task GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default); /// - /// Imports snapshot metadata items. + /// Imports snapshot metadata items. /// /// The items to import. /// A token used to cancel the operation. 
- public abstract Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default); + public abstract Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default); /// - /// Inserts a snapshot metadata item. + /// Inserts a snapshot metadata item. /// /// The metadata item to insert. /// A token used to cancel the operation. - public abstract Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, CancellationToken cancellationToken = default); + public abstract Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, + CancellationToken cancellationToken = default); /// - /// Merges snapshot metadata items into the store. + /// Merges snapshot metadata items into the store. /// /// The items to merge. /// A token used to cancel the operation. public abstract Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default); /// - /// Updates an existing snapshot metadata item. + /// Updates an existing snapshot metadata item. /// /// The metadata item to update. /// A token used to cancel the operation. - public abstract Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, CancellationToken cancellationToken); + public abstract Task + UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, CancellationToken cancellationToken); /// - /// Gets all snapshot metadata items. + /// Gets all snapshot metadata items. /// /// A token used to cancel the operation. /// All snapshot metadata items. 
- public abstract Task> GetAllSnapshotMetadataAsync(CancellationToken cancellationToken = default); -} - + public abstract Task> GetAllSnapshotMetadataAsync( + CancellationToken cancellationToken = default); +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotStore.cs index 99097f7..379bf4c 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotStore.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/SnapshotStore.cs @@ -1,54 +1,77 @@ -using System.Text.Json; -using Microsoft.Extensions.Logging; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence.Snapshot; - -namespace ZB.MOM.WW.CBDDC.Persistence; - -public class SnapshotStore : ISnapshotService -{ - /// - /// Represents the document store used for data persistence and retrieval operations. - /// - protected readonly IDocumentStore _documentStore; - /// - /// Provides access to the peer configuration store used for retrieving and managing peer configuration data. - /// - protected readonly IPeerConfigurationStore _peerConfigurationStore; - /// - /// Provides access to the underlying oplog store used for persisting and retrieving operation logs. - /// - protected readonly IOplogStore _oplogStore; +using System.Text.Json; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Storage; +using ZB.MOM.WW.CBDDC.Core.Sync; +using ZB.MOM.WW.CBDDC.Persistence.Snapshot; + +namespace ZB.MOM.WW.CBDDC.Persistence; + +public class SnapshotStore : ISnapshotService +{ /// - /// Provides access to the conflict resolution strategy used by the containing class. + /// Provides access to the conflict resolution strategy used by the containing class. 
/// - /// This field is intended for use by derived classes to resolve conflicts according to the logic - /// defined by the associated implementation. The specific behavior depends on the - /// implementation provided. - protected readonly IConflictResolver _conflictResolver; - /// - /// The logger instance used for logging + /// + /// This field is intended for use by derived classes to resolve conflicts according to the logic + /// defined by the associated implementation. The specific behavior depends on the + /// implementation provided. + /// + protected readonly IConflictResolver _conflictResolver; + + /// + /// Represents the document store used for data persistence and retrieval operations. + /// + protected readonly IDocumentStore _documentStore; + + /// + /// The logger instance used for logging /// protected readonly ILogger _logger; + /// - /// Provides access to persisted peer oplog confirmation state used for sync/pruning coordination. + /// Provides access to the underlying oplog store used for persisting and retrieving operation logs. + /// + protected readonly IOplogStore _oplogStore; + + /// + /// Provides access to the peer configuration store used for retrieving and managing peer configuration data. + /// + protected readonly IPeerConfigurationStore _peerConfigurationStore; + + /// + /// Provides access to persisted peer oplog confirmation state used for sync/pruning coordination. /// protected readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore; - - /// - /// Initializes a new instance of the SnapshotStore class using the specified document, peer configuration, and - /// oplog stores, conflict resolver, and optional logger. - /// - /// The document store used to persist and retrieve documents for snapshots. Cannot be null. - /// The peer configuration store used to manage peer settings and metadata. Cannot be null. - /// The oplog store used to track and apply operation logs for snapshot consistency. Cannot be null. 
- /// The conflict resolver used to handle conflicts during snapshot creation and restoration. Cannot be null. - /// The optional logger used for logging diagnostic and operational information. If null, a default logger is used. + + /// + /// Initializes a new instance of the SnapshotStore class using the specified document, peer configuration, and + /// oplog stores, conflict resolver, and optional logger. + /// + /// The document store used to persist and retrieve documents for snapshots. Cannot be null. + /// + /// The peer configuration store used to manage peer settings and metadata. Cannot be + /// null. + /// + /// + /// The oplog store used to track and apply operation logs for snapshot consistency. Cannot be + /// null. + /// + /// + /// The conflict resolver used to handle conflicts during snapshot creation and restoration. + /// Cannot be null. + /// + /// + /// The optional logger used for logging diagnostic and operational information. If null, a default + /// logger is used. + /// /// The optional store used to persist peer oplog confirmation watermarks. - /// Thrown if any of the parameters documentStore, peerConfigurationStore, oplogStore, or conflictResolver is null. + /// + /// Thrown if any of the parameters documentStore, peerConfigurationStore, + /// oplogStore, or conflictResolver is null. + /// public SnapshotStore( IDocumentStore documentStore, IPeerConfigurationStore peerConfigurationStore, @@ -58,122 +81,125 @@ public class SnapshotStore : ISnapshotService IPeerOplogConfirmationStore? peerOplogConfirmationStore = null) { _documentStore = documentStore ?? throw new ArgumentNullException(nameof(documentStore)); - _peerConfigurationStore = peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore)); + _peerConfigurationStore = + peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore)); _oplogStore = oplogStore ?? 
throw new ArgumentNullException(nameof(oplogStore)); _conflictResolver = conflictResolver ?? throw new ArgumentNullException(nameof(conflictResolver)); - _logger = logger ?? Microsoft.Extensions.Logging.Abstractions.NullLogger.Instance; + _logger = logger ?? NullLogger.Instance; _peerOplogConfirmationStore = peerOplogConfirmationStore; } - - private async Task ClearAllDataAsync(CancellationToken cancellationToken = default) + + /// + public async Task CreateSnapshotAsync(Stream destination, CancellationToken cancellationToken = default) { - await _documentStore.DropAsync(cancellationToken); - await _peerConfigurationStore.DropAsync(cancellationToken); - await _oplogStore.DropAsync(cancellationToken); - if (_peerOplogConfirmationStore != null) - { - await _peerOplogConfirmationStore.DropAsync(cancellationToken); - } - } - - /// - public async Task CreateSnapshotAsync(Stream destination, CancellationToken cancellationToken = default) - { - _logger.LogInformation("Creating snapshot..."); - + _logger.LogInformation("Creating snapshot..."); + var documents = await _documentStore.ExportAsync(cancellationToken); var remotePeers = await _peerConfigurationStore.ExportAsync(cancellationToken); var oplogEntries = await _oplogStore.ExportAsync(cancellationToken); var peerConfirmations = _peerOplogConfirmationStore == null ? [] : await _peerOplogConfirmationStore.ExportAsync(cancellationToken); - - var snapshot = new SnapshotDto - { - Version = "1.0", - CreatedAt = DateTime.UtcNow.ToString("O"), - NodeId = "", // Will be set by caller if needed - Documents = [.. documents.Select(d => new DocumentDto - { - Collection = d.Collection, - Key = d.Key, - JsonData = d.Content.GetRawText(), - IsDeleted = d.IsDeleted, - HlcWall = d.UpdatedAt.PhysicalTime, - HlcLogic = d.UpdatedAt.LogicalCounter, - HlcNode = d.UpdatedAt.NodeId - })], - Oplog = [.. 
oplogEntries.Select(o => new OplogDto - { - Collection = o.Collection, - Key = o.Key, - Operation = (int)o.Operation, - JsonData = o.Payload?.GetRawText() ?? "", - HlcWall = o.Timestamp.PhysicalTime, - HlcLogic = o.Timestamp.LogicalCounter, - HlcNode = o.Timestamp.NodeId, - Hash = o.Hash ?? "", - PreviousHash = o.PreviousHash - })], - SnapshotMetadata = [], // Can be filled in by caller if needed - RemotePeers = [.. remotePeers.Select(p => new RemotePeerDto - { - NodeId = p.NodeId, - Address = p.Address, - Type = (int)p.Type, - IsEnabled = p.IsEnabled - })], - PeerConfirmations = [.. peerConfirmations.Select(c => new PeerOplogConfirmationDto - { - PeerNodeId = c.PeerNodeId, - SourceNodeId = c.SourceNodeId, - ConfirmedWall = c.ConfirmedWall, - ConfirmedLogic = c.ConfirmedLogic, - ConfirmedHash = c.ConfirmedHash, - LastConfirmedUtcMs = c.LastConfirmedUtc.ToUnixTimeMilliseconds(), - IsActive = c.IsActive - })] + + var snapshot = new SnapshotDto + { + Version = "1.0", + CreatedAt = DateTime.UtcNow.ToString("O"), + NodeId = "", // Will be set by caller if needed + Documents = + [ + .. documents.Select(d => new DocumentDto + { + Collection = d.Collection, + Key = d.Key, + JsonData = d.Content.GetRawText(), + IsDeleted = d.IsDeleted, + HlcWall = d.UpdatedAt.PhysicalTime, + HlcLogic = d.UpdatedAt.LogicalCounter, + HlcNode = d.UpdatedAt.NodeId + }) + ], + Oplog = + [ + .. oplogEntries.Select(o => new OplogDto + { + Collection = o.Collection, + Key = o.Key, + Operation = (int)o.Operation, + JsonData = o.Payload?.GetRawText() ?? "", + HlcWall = o.Timestamp.PhysicalTime, + HlcLogic = o.Timestamp.LogicalCounter, + HlcNode = o.Timestamp.NodeId, + Hash = o.Hash ?? "", + PreviousHash = o.PreviousHash + }) + ], + SnapshotMetadata = [], // Can be filled in by caller if needed + RemotePeers = + [ + .. remotePeers.Select(p => new RemotePeerDto + { + NodeId = p.NodeId, + Address = p.Address, + Type = (int)p.Type, + IsEnabled = p.IsEnabled + }) + ], + PeerConfirmations = + [ + .. 
peerConfirmations.Select(c => new PeerOplogConfirmationDto + { + PeerNodeId = c.PeerNodeId, + SourceNodeId = c.SourceNodeId, + ConfirmedWall = c.ConfirmedWall, + ConfirmedLogic = c.ConfirmedLogic, + ConfirmedHash = c.ConfirmedHash, + LastConfirmedUtcMs = c.LastConfirmedUtc.ToUnixTimeMilliseconds(), + IsActive = c.IsActive + }) + ] }; - - // Serialize snapshot to the destination stream - await JsonSerializer.SerializeAsync(destination, snapshot, cancellationToken: cancellationToken); - await destination.FlushAsync(cancellationToken); - + + // Serialize snapshot to the destination stream + await JsonSerializer.SerializeAsync(destination, snapshot, cancellationToken: cancellationToken); + await destination.FlushAsync(cancellationToken); + _logger.LogInformation( "Snapshot created: {DocumentCount} documents, {OplogCount} oplog entries, {PeerConfirmationCount} peer confirmations", snapshot.Documents.Count, snapshot.Oplog.Count, snapshot.PeerConfirmations.Count); } - - /// - public async Task ReplaceDatabaseAsync(Stream databaseStream, CancellationToken cancellationToken = default) - { - _logger.LogWarning("Replacing data from snapshot stream..."); - - await ClearAllDataAsync(cancellationToken); - - var snapshot = await JsonSerializer.DeserializeAsync(databaseStream, cancellationToken: cancellationToken); - if (snapshot == null) throw new InvalidOperationException("Failed to deserialize snapshot"); - - var documents = snapshot.Documents.Select(d => new Document( - d.Collection, - d.Key, - JsonDocument.Parse(d.JsonData ?? 
"{}").RootElement, - new HlcTimestamp(d.HlcWall, d.HlcLogic, d.HlcNode), - d.IsDeleted)).ToList(); - + + /// + public async Task ReplaceDatabaseAsync(Stream databaseStream, CancellationToken cancellationToken = default) + { + _logger.LogWarning("Replacing data from snapshot stream..."); + + await ClearAllDataAsync(cancellationToken); + + var snapshot = + await JsonSerializer.DeserializeAsync(databaseStream, cancellationToken: cancellationToken); + if (snapshot == null) throw new InvalidOperationException("Failed to deserialize snapshot"); + + var documents = snapshot.Documents.Select(d => new Document( + d.Collection, + d.Key, + JsonDocument.Parse(d.JsonData ?? "{}").RootElement, + new HlcTimestamp(d.HlcWall, d.HlcLogic, d.HlcNode), + d.IsDeleted)).ToList(); + var oplogEntries = snapshot.Oplog.Select(o => new OplogEntry( o.Collection, o.Key, (OperationType)o.Operation, string.IsNullOrWhiteSpace(o.JsonData) - ? (JsonElement?)null + ? null : JsonSerializer.Deserialize(o.JsonData), new HlcTimestamp(o.HlcWall, o.HlcLogic, o.HlcNode), o.PreviousHash ?? string.Empty, string.IsNullOrWhiteSpace(o.Hash) ? 
null : o.Hash)).ToList(); - + var remotePeers = snapshot.RemotePeers.Select(p => new RemotePeerConfiguration { NodeId = p.NodeId, @@ -197,31 +223,30 @@ public class SnapshotStore : ISnapshotService await _oplogStore.ImportAsync(oplogEntries, cancellationToken); await _peerConfigurationStore.ImportAsync(remotePeers, cancellationToken); if (_peerOplogConfirmationStore != null) - { await _peerOplogConfirmationStore.ImportAsync(peerConfirmations, cancellationToken); - } _logger.LogInformation("Database replaced successfully."); - } - - /// - public async Task MergeSnapshotAsync(Stream snapshotStream, CancellationToken cancellationToken = default) - { - _logger.LogInformation("Merging snapshot from stream..."); - var snapshot = await JsonSerializer.DeserializeAsync(snapshotStream, cancellationToken: cancellationToken); - if (snapshot == null) throw new InvalidOperationException("Failed to deserialize snapshot"); - var documents = snapshot.Documents.Select(d => new Document( - d.Collection, - d.Key, - JsonDocument.Parse(d.JsonData ?? "{}").RootElement, - new HlcTimestamp(d.HlcWall, d.HlcLogic, d.HlcNode), - d.IsDeleted)).ToList(); + } + + /// + public async Task MergeSnapshotAsync(Stream snapshotStream, CancellationToken cancellationToken = default) + { + _logger.LogInformation("Merging snapshot from stream..."); + var snapshot = + await JsonSerializer.DeserializeAsync(snapshotStream, cancellationToken: cancellationToken); + if (snapshot == null) throw new InvalidOperationException("Failed to deserialize snapshot"); + var documents = snapshot.Documents.Select(d => new Document( + d.Collection, + d.Key, + JsonDocument.Parse(d.JsonData ?? "{}").RootElement, + new HlcTimestamp(d.HlcWall, d.HlcLogic, d.HlcNode), + d.IsDeleted)).ToList(); var oplogEntries = snapshot.Oplog.Select(o => new OplogEntry( o.Collection, o.Key, (OperationType)o.Operation, string.IsNullOrWhiteSpace(o.JsonData) - ? (JsonElement?)null + ? 
null : JsonSerializer.Deserialize(o.JsonData), new HlcTimestamp(o.HlcWall, o.HlcLogic, o.HlcNode), o.PreviousHash ?? string.Empty, @@ -249,10 +274,16 @@ public class SnapshotStore : ISnapshotService await _oplogStore.MergeAsync(oplogEntries, cancellationToken); await _peerConfigurationStore.MergeAsync(remotePeers, cancellationToken); if (_peerOplogConfirmationStore != null) - { await _peerOplogConfirmationStore.MergeAsync(peerConfirmations, cancellationToken); - } _logger.LogInformation("Snapshot merged successfully."); } -} + + private async Task ClearAllDataAsync(CancellationToken cancellationToken = default) + { + await _documentStore.DropAsync(cancellationToken); + await _peerConfigurationStore.DropAsync(cancellationToken); + await _oplogStore.DropAsync(cancellationToken); + if (_peerOplogConfirmationStore != null) await _peerOplogConfirmationStore.DropAsync(cancellationToken); + } +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/VectorClockService.cs b/src/ZB.MOM.WW.CBDDC.Persistence/VectorClockService.cs index e4ee351..9cbcc3d 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/VectorClockService.cs +++ b/src/ZB.MOM.WW.CBDDC.Persistence/VectorClockService.cs @@ -4,13 +4,13 @@ using ZB.MOM.WW.CBDDC.Core.Storage; namespace ZB.MOM.WW.CBDDC.Persistence; /// -/// Thread-safe in-memory cache for Vector Clock state. -/// Updated by DocumentStore (local CDC) and OplogStore (remote sync). +/// Thread-safe in-memory cache for Vector Clock state. +/// Updated by DocumentStore (local CDC) and OplogStore (remote sync). 
/// public class VectorClockService : IVectorClockService { - private readonly SemaphoreSlim _lock = new SemaphoreSlim(1, 1); - private readonly Dictionary _cache = new Dictionary(StringComparer.Ordinal); + private readonly Dictionary _cache = new(StringComparer.Ordinal); + private readonly SemaphoreSlim _lock = new(1, 1); /// public bool IsInitialized { get; set; } @@ -21,15 +21,13 @@ public class VectorClockService : IVectorClockService _lock.Wait(); try { - var nodeId = entry.Timestamp.NodeId; + string nodeId = entry.Timestamp.NodeId; if (!_cache.TryGetValue(nodeId, out var existing) || entry.Timestamp.CompareTo(existing.Timestamp) > 0) - { _cache[nodeId] = new NodeCacheEntry { Timestamp = entry.Timestamp, Hash = entry.Hash ?? "" }; - } } finally { @@ -44,13 +42,11 @@ public class VectorClockService : IVectorClockService try { if (!_cache.TryGetValue(nodeId, out var existing) || timestamp.CompareTo(existing.Timestamp) > 0) - { _cache[nodeId] = new NodeCacheEntry { Timestamp = timestamp, Hash = hash }; - } } finally { @@ -65,10 +61,7 @@ public class VectorClockService : IVectorClockService try { var vectorClock = new VectorClock(); - foreach (var kvp in _cache) - { - vectorClock.SetTimestamp(kvp.Key, kvp.Value.Timestamp); - } + foreach (var kvp in _cache) vectorClock.SetTimestamp(kvp.Key, kvp.Value.Timestamp); return Task.FromResult(vectorClock); } finally @@ -83,10 +76,7 @@ public class VectorClockService : IVectorClockService _lock.Wait(); try { - if (_cache.Count == 0) - { - return Task.FromResult(new HlcTimestamp(0, 0, "")); - } + if (_cache.Count == 0) return Task.FromResult(new HlcTimestamp(0, 0, "")); var maxTimestamp = _cache.Values .Select(e => e.Timestamp) @@ -129,4 +119,4 @@ public class VectorClockService : IVectorClockService _lock.Release(); } } -} +} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj b/src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj index 7a7a9b6..f4f8d2c 
100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj +++ b/src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj @@ -1,40 +1,40 @@ - - ZB.MOM.WW.CBDDC.Persistence - ZB.MOM.WW.CBDDC.Persistence - ZB.MOM.WW.CBDDC.Persistence - net10.0 - latest - enable - enable - 1.0.3 - MrDevRobot - Persistence provider for CBDDC. - MIT - p2p;database;sqlite;persistence;storage;wal - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net - git - README.md - + + ZB.MOM.WW.CBDDC.Persistence + ZB.MOM.WW.CBDDC.Persistence + ZB.MOM.WW.CBDDC.Persistence + net10.0 + latest + enable + enable + 1.0.3 + MrDevRobot + Persistence provider for CBDDC. + MIT + p2p;database;sqlite;persistence;storage;wal + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net + git + README.md + - - - - all - runtime; build; native; contentfiles; analyzers; buildtransitive - - - - + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + + + + - - - + + + - - - + + + diff --git a/tests/ZB.MOM.WW.CBDDC.Core.Tests/ArchitectureFitnessTests.cs b/tests/ZB.MOM.WW.CBDDC.Core.Tests/ArchitectureFitnessTests.cs index cb74091..2eda1a9 100644 --- a/tests/ZB.MOM.WW.CBDDC.Core.Tests/ArchitectureFitnessTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Core.Tests/ArchitectureFitnessTests.cs @@ -1,14 +1,12 @@ -using System.Reflection; using System.Text.RegularExpressions; using System.Xml.Linq; -using ZB.MOM.WW.CBDDC.Core; namespace ZB.MOM.WW.CBDDC.Core.Tests; public class ArchitectureFitnessTests { /// - /// Verifies that the core assembly does not reference outer-layer assemblies. + /// Verifies that the core assembly does not reference outer-layer assemblies. /// [Fact] public void CoreAssembly_ShouldNotReferenceOuterAssemblies() @@ -25,71 +23,71 @@ public class ArchitectureFitnessTests } /// - /// Verifies that project references under src form an acyclic graph. 
+ /// Verifies that project references under src form an acyclic graph. /// [Fact] public void SourceProjectGraph_ShouldBeAcyclic() { - var repoRoot = FindRepoRoot(); - var srcRoot = Path.Combine(repoRoot, "src"); + string repoRoot = FindRepoRoot(); + string srcRoot = Path.Combine(repoRoot, "src"); var projectFiles = Directory .EnumerateFiles(srcRoot, "*.csproj", SearchOption.AllDirectories) - .Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal) - && !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal)) + .Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", + StringComparison.Ordinal) + && !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", + StringComparison.Ordinal)) .ToList(); var nodes = projectFiles.ToDictionary( p => Path.GetFileNameWithoutExtension(p), p => new HashSet(StringComparer.Ordinal)); - foreach (var projectFile in projectFiles) + foreach (string projectFile in projectFiles) { - var projectName = Path.GetFileNameWithoutExtension(projectFile); + string projectName = Path.GetFileNameWithoutExtension(projectFile); var doc = XDocument.Load(projectFile); var refs = doc.Descendants("ProjectReference") .Select(x => x.Attribute("Include")?.Value) .Where(v => !string.IsNullOrWhiteSpace(v)) .Select(v => Path.GetFileNameWithoutExtension(v!.Replace('\\', '/'))); - foreach (var reference in refs) - { + foreach (string reference in refs) if (nodes.ContainsKey(reference)) - { nodes[projectName].Add(reference); - } - } } HasCycle(nodes).ShouldBeFalse(); } /// - /// Verifies the allowed dependency graph between source projects. + /// Verifies the allowed dependency graph between source projects. 
/// [Fact] public void SourceProjectReferences_ShouldMatchAllowedDependencyGraph() { - var repoRoot = FindRepoRoot(); - var srcRoot = Path.Combine(repoRoot, "src"); + string repoRoot = FindRepoRoot(); + string srcRoot = Path.Combine(repoRoot, "src"); var projectFiles = Directory .EnumerateFiles(srcRoot, "*.csproj", SearchOption.AllDirectories) - .Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal) - && !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal)) + .Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", + StringComparison.Ordinal) + && !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", + StringComparison.Ordinal)) .ToList(); var allowedDependencies = new Dictionary>(StringComparer.Ordinal) { - ["ZB.MOM.WW.CBDDC.Core"] = new HashSet(StringComparer.Ordinal), - ["ZB.MOM.WW.CBDDC.Network"] = new HashSet(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" }, - ["ZB.MOM.WW.CBDDC.Persistence"] = new HashSet(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" }, - ["ZB.MOM.WW.CBDDC.Hosting"] = new HashSet(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Network" } + ["ZB.MOM.WW.CBDDC.Core"] = new(StringComparer.Ordinal), + ["ZB.MOM.WW.CBDDC.Network"] = new(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" }, + ["ZB.MOM.WW.CBDDC.Persistence"] = new(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Core" }, + ["ZB.MOM.WW.CBDDC.Hosting"] = new(StringComparer.Ordinal) { "ZB.MOM.WW.CBDDC.Network" } }; - foreach (var projectFile in projectFiles) + foreach (string projectFile in projectFiles) { - var projectName = Path.GetFileNameWithoutExtension(projectFile); + string projectName = Path.GetFileNameWithoutExtension(projectFile); allowedDependencies.ContainsKey(projectName) .ShouldBeTrue($"Unexpected source project found: {projectName}"); @@ -105,18 +103,19 @@ public class ArchitectureFitnessTests var missing = 
expected.Where(e => !references.Contains(e)).ToList(); extra.ShouldBeEmpty($"Project {projectName} has disallowed references: {string.Join(", ", extra)}"); - missing.ShouldBeEmpty($"Project {projectName} is missing required references: {string.Join(", ", missing)}"); + missing.ShouldBeEmpty( + $"Project {projectName} is missing required references: {string.Join(", ", missing)}"); } } /// - /// Verifies non-generic ILogger usage is restricted to explicit compatibility shims. + /// Verifies non-generic ILogger usage is restricted to explicit compatibility shims. /// [Fact] public void SourceCode_ShouldRestrictNonGenericILoggerUsage() { - var repoRoot = FindRepoRoot(); - var srcRoot = Path.Combine(repoRoot, "src"); + string repoRoot = FindRepoRoot(); + string srcRoot = Path.Combine(repoRoot, "src"); var loggerPattern = new Regex(@"\bILogger\b(?!\s*<|\s*Factory\b)", RegexOptions.Compiled); var allowedSnippets = new[] @@ -130,45 +129,39 @@ public class ArchitectureFitnessTests var violations = new List(); var sourceFiles = Directory.EnumerateFiles(srcRoot, "*.cs", SearchOption.AllDirectories) - .Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", StringComparison.Ordinal) - && !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", StringComparison.Ordinal)); + .Where(p => !p.Contains($"{Path.DirectorySeparatorChar}obj{Path.DirectorySeparatorChar}", + StringComparison.Ordinal) + && !p.Contains($"{Path.DirectorySeparatorChar}bin{Path.DirectorySeparatorChar}", + StringComparison.Ordinal)); - foreach (var file in sourceFiles) + foreach (string file in sourceFiles) { - var lines = File.ReadAllLines(file); + string[] lines = File.ReadAllLines(file); for (var i = 0; i < lines.Length; i++) { - var line = lines[i].Trim(); - if (string.IsNullOrWhiteSpace(line) || line.StartsWith("//", StringComparison.Ordinal)) - { - continue; - } + string line = lines[i].Trim(); + if (string.IsNullOrWhiteSpace(line) || 
line.StartsWith("//", StringComparison.Ordinal)) continue; - if (!loggerPattern.IsMatch(line)) - { - continue; - } + if (!loggerPattern.IsMatch(line)) continue; - if (allowedSnippets.Any(line.Contains)) - { - continue; - } + if (allowedSnippets.Any(line.Contains)) continue; - var relativePath = Path.GetRelativePath(repoRoot, file).Replace('\\', '/'); + string relativePath = Path.GetRelativePath(repoRoot, file).Replace('\\', '/'); violations.Add($"{relativePath}:{i + 1} -> {line}"); } } - violations.ShouldBeEmpty($"Unexpected non-generic ILogger usage:{Environment.NewLine}{string.Join(Environment.NewLine, violations)}"); + violations.ShouldBeEmpty( + $"Unexpected non-generic ILogger usage:{Environment.NewLine}{string.Join(Environment.NewLine, violations)}"); } /// - /// Verifies log boundaries push operation context for hosted/background entry points. + /// Verifies log boundaries push operation context for hosted/background entry points. /// [Fact] public void BoundaryServices_ShouldPushOperationLogContext() { - var repoRoot = FindRepoRoot(); + string repoRoot = FindRepoRoot(); var boundaryFiles = new[] { "src/ZB.MOM.WW.CBDDC.Network/CBDDCNodeService.cs", @@ -180,24 +173,24 @@ public class ArchitectureFitnessTests "src/ZB.MOM.WW.CBDDC.Hosting/Services/NoOpSyncOrchestrator.cs" }; - foreach (var relativePath in boundaryFiles) + foreach (string relativePath in boundaryFiles) { - var filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar)); + string filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar)); File.Exists(filePath).ShouldBeTrue($"Missing expected boundary file: {relativePath}"); - var contents = File.ReadAllText(filePath); + string contents = File.ReadAllText(filePath); contents.Contains("LogContext.PushProperty(\"OperationId\"", StringComparison.Ordinal) .ShouldBeTrue($"Boundary file is missing OperationId log enrichment: {relativePath}"); } } /// - /// Verifies boundary projects include 
Serilog for LogContext support. + /// Verifies boundary projects include Serilog for LogContext support. /// [Fact] public void BoundaryProjects_ShouldReferenceSerilog() { - var repoRoot = FindRepoRoot(); + string repoRoot = FindRepoRoot(); var projects = new[] { "src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj", @@ -205,12 +198,12 @@ public class ArchitectureFitnessTests "samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj" }; - foreach (var relativePath in projects) + foreach (string relativePath in projects) { - var filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar)); + string filePath = Path.Combine(repoRoot, relativePath.Replace('/', Path.DirectorySeparatorChar)); File.Exists(filePath).ShouldBeTrue($"Missing project file: {relativePath}"); - var contents = File.ReadAllText(filePath); + string contents = File.ReadAllText(filePath); contents.Contains(" - /// Verifies that hash computation is deterministic even when payload content differs. - /// - [Fact] - public void ComputeHash_ShouldBeDeterministic_RegardlessOfPayload() - { - // Arrange - var collection = "test-collection"; - var key = "test-key"; - var op = OperationType.Put; - var timestamp = new HlcTimestamp(100, 0, "node-1"); - var prevHash = "prev-hash"; + /// + /// Verifies that hash computation is deterministic even when payload content differs. 
+ /// + [Fact] + public void ComputeHash_ShouldBeDeterministic_RegardlessOfPayload() + { + // Arrange + var collection = "test-collection"; + var key = "test-key"; + var op = OperationType.Put; + var timestamp = new HlcTimestamp(100, 0, "node-1"); + var prevHash = "prev-hash"; - var payload1 = JsonDocument.Parse("{\"prop\": 1}").RootElement; - var payload2 = JsonDocument.Parse("{\"prop\": 2, \"extra\": \"whitespace\"}").RootElement; + var payload1 = JsonDocument.Parse("{\"prop\": 1}").RootElement; + var payload2 = JsonDocument.Parse("{\"prop\": 2, \"extra\": \"whitespace\"}").RootElement; + + // Act + var entry1 = new OplogEntry(collection, key, op, payload1, timestamp, prevHash); + var entry2 = new OplogEntry(collection, key, op, payload2, timestamp, prevHash); + + // Assert + entry2.Hash.ShouldBe(entry1.Hash); + } + + /// + /// Verifies that hash computation uses invariant culture formatting for timestamp values. + /// + [Fact] + public void ComputeHash_ShouldUseInvariantCulture_ForTimestamp() + { + // Arrange + var originalCulture = CultureInfo.CurrentCulture; + try + { + var culture = CultureInfo.GetCultureInfo("de-DE"); + CultureInfo.CurrentCulture = culture; + + var timestamp = new HlcTimestamp(123456789, 1, "node"); + var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev"); // Act - var entry1 = new OplogEntry(collection, key, op, payload1, timestamp, prevHash); - var entry2 = new OplogEntry(collection, key, op, payload2, timestamp, prevHash); + string hash = entry.ComputeHash(); // Assert - entry2.Hash.ShouldBe(entry1.Hash); - } - - /// - /// Verifies that hash computation uses invariant culture formatting for timestamp values. 
- /// - [Fact] - public void ComputeHash_ShouldUseInvariantCulture_ForTimestamp() - { - // Arrange - var originalCulture = CultureInfo.CurrentCulture; - try - { - var culture = CultureInfo.GetCultureInfo("de-DE"); - CultureInfo.CurrentCulture = culture; - - var timestamp = new HlcTimestamp(123456789, 1, "node"); - var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev"); - - // Act - var hash = entry.ComputeHash(); - - // Assert - CultureInfo.CurrentCulture = CultureInfo.InvariantCulture; - var expectedEntry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev"); - hash.ShouldBe(expectedEntry.Hash); - } - finally - { - CultureInfo.CurrentCulture = originalCulture; - } - } - - /// - /// Verifies that an entry is valid when its stored hash matches computed content. - /// - [Fact] - public void IsValid_ShouldReturnTrue_WhenHashMatches() - { - var timestamp = new HlcTimestamp(100, 0, "node-1"); - var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev"); - - entry.IsValid().ShouldBeTrue(); + CultureInfo.CurrentCulture = CultureInfo.InvariantCulture; + var expectedEntry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev"); + hash.ShouldBe(expectedEntry.Hash); + } + finally + { + CultureInfo.CurrentCulture = originalCulture; } } -} + + /// + /// Verifies that an entry is valid when its stored hash matches computed content. 
+ /// + [Fact] + public void IsValid_ShouldReturnTrue_WhenHashMatches() + { + var timestamp = new HlcTimestamp(100, 0, "node-1"); + var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev"); + + entry.IsValid().ShouldBeTrue(); + } +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Core.Tests/PeerManagementServiceTests.cs b/tests/ZB.MOM.WW.CBDDC.Core.Tests/PeerManagementServiceTests.cs index bd980ae..350880a 100644 --- a/tests/ZB.MOM.WW.CBDDC.Core.Tests/PeerManagementServiceTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Core.Tests/PeerManagementServiceTests.cs @@ -6,7 +6,8 @@ namespace ZB.MOM.WW.CBDDC.Core.Tests; public class PeerManagementServiceTests { /// - /// Verifies that removing peer tracking with remote removal enabled removes both tracking and remote peer configuration. + /// Verifies that removing peer tracking with remote removal enabled removes both tracking and remote peer + /// configuration. /// [Fact] public async Task RemovePeerTrackingAsync_WhenRemoveRemoteConfigTrue_RemovesTrackingAndRemoteConfig() @@ -16,14 +17,14 @@ public class PeerManagementServiceTests var service = new PeerManagementService(configStore, confirmationStore); var token = new CancellationTokenSource().Token; - await service.RemovePeerTrackingAsync("peer-1", removeRemoteConfig: true, token); + await service.RemovePeerTrackingAsync("peer-1", true, token); await confirmationStore.Received(1).RemovePeerTrackingAsync("peer-1", token); await configStore.Received(1).RemoveRemotePeerAsync("peer-1", token); } /// - /// Verifies that removing peer tracking with remote removal disabled removes only tracking data. + /// Verifies that removing peer tracking with remote removal disabled removes only tracking data. 
/// [Fact] public async Task RemovePeerTrackingAsync_WhenRemoveRemoteConfigFalse_RemovesTrackingOnly() @@ -32,14 +33,14 @@ public class PeerManagementServiceTests var confirmationStore = Substitute.For(); var service = new PeerManagementService(configStore, confirmationStore); - await service.RemovePeerTrackingAsync("peer-1", removeRemoteConfig: false); + await service.RemovePeerTrackingAsync("peer-1", false); await confirmationStore.Received(1).RemovePeerTrackingAsync("peer-1", Arg.Any()); await configStore.DidNotReceive().RemoveRemotePeerAsync(Arg.Any(), Arg.Any()); } /// - /// Verifies that removing a remote peer delegates to tracking removal with remote configuration cleanup enabled. + /// Verifies that removing a remote peer delegates to tracking removal with remote configuration cleanup enabled. /// [Fact] public async Task RemoveRemotePeerAsync_DelegatesToTrackingRemovalWithRemoteConfig() @@ -56,7 +57,7 @@ public class PeerManagementServiceTests } /// - /// Verifies that removing peer tracking with an invalid node identifier throws an . + /// Verifies that removing peer tracking with an invalid node identifier throws an . 
/// [Fact] public async Task RemovePeerTrackingAsync_WhenNodeIdInvalid_ThrowsArgumentException() @@ -65,9 +66,10 @@ public class PeerManagementServiceTests var confirmationStore = Substitute.For(); var service = new PeerManagementService(configStore, confirmationStore); - await Should.ThrowAsync(() => service.RemovePeerTrackingAsync(" ", removeRemoteConfig: true)); + await Should.ThrowAsync(() => service.RemovePeerTrackingAsync(" ", true)); - await confirmationStore.DidNotReceive().RemovePeerTrackingAsync(Arg.Any(), Arg.Any()); + await confirmationStore.DidNotReceive() + .RemovePeerTrackingAsync(Arg.Any(), Arg.Any()); await configStore.DidNotReceive().RemoveRemotePeerAsync(Arg.Any(), Arg.Any()); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Core.Tests/PerformanceRegressionTests.cs b/tests/ZB.MOM.WW.CBDDC.Core.Tests/PerformanceRegressionTests.cs index 6c4f38f..a9dc395 100755 --- a/tests/ZB.MOM.WW.CBDDC.Core.Tests/PerformanceRegressionTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Core.Tests/PerformanceRegressionTests.cs @@ -6,43 +6,43 @@ namespace ZB.MOM.WW.CBDDC.Core.Tests; public class PerformanceRegressionTests { - private readonly RecursiveNodeMergeConflictResolver _resolver; private readonly Dictionary _limits; + private readonly RecursiveNodeMergeConflictResolver _resolver; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// public PerformanceRegressionTests() { _resolver = new RecursiveNodeMergeConflictResolver(); // Load limits - var json = File.ReadAllText("benchmark_limits.json"); + string json = File.ReadAllText("benchmark_limits.json"); _limits = JsonSerializer.Deserialize>(json) ?? 
new Dictionary(); } private Document CreateDoc(string key, object data, HlcTimestamp ts) { - var json = JsonSerializer.Serialize(data); + string json = JsonSerializer.Serialize(data); var element = JsonDocument.Parse(json).RootElement; return new Document("test", key, element, ts, false); } private OplogEntry CreateOp(string key, object data, HlcTimestamp ts) { - var json = JsonSerializer.Serialize(data); + string json = JsonSerializer.Serialize(data); var element = JsonDocument.Parse(json).RootElement; return new OplogEntry("test", key, OperationType.Put, element, ts, string.Empty); } /// - /// Verifies simple recursive merge operations stay within configured performance limits. + /// Verifies simple recursive merge operations stay within configured performance limits. /// [Fact] public void RecursiveMerge_Simple_ShouldBeWithinLimits() { - int iterations = 10000; - string limitKey = "RecursiveMerge_Simple_10k_Ops_MaxMs"; + var iterations = 10000; + var limitKey = "RecursiveMerge_Simple_10k_Ops_MaxMs"; var ts1 = new HlcTimestamp(100, 0, "n1"); var ts2 = new HlcTimestamp(200, 0, "n2"); @@ -50,72 +50,61 @@ public class PerformanceRegressionTests var op = CreateOp("k1", new { name = "Bob", age = 31 }, ts2); // Warmup - for (int i = 0; i < 100; i++) _resolver.Resolve(doc, op); + for (var i = 0; i < 100; i++) _resolver.Resolve(doc, op); // Run var sw = Stopwatch.StartNew(); - for (int i = 0; i < iterations; i++) - { - _resolver.Resolve(doc, op); - } + for (var i = 0; i < iterations; i++) _resolver.Resolve(doc, op); sw.Stop(); long elapsed = sw.ElapsedMilliseconds; Console.WriteLine($"Executed {iterations} merges in {elapsed}ms"); if (_limits.TryGetValue(limitKey, out int maxMs)) - { elapsed.ShouldBeLessThan(maxMs, $"Performance regression! Expected < {maxMs}ms but took {elapsed}ms"); - } else - { Console.WriteLine($"Warning: No limit found for key '{limitKey}'"); - } } /// - /// Verifies deep array recursive merge operations stay within configured performance limits. 
+ /// Verifies deep array recursive merge operations stay within configured performance limits. /// [Fact] public void RecursiveMerge_DeepArray_ShouldBeWithinLimits() { - int iterations = 1000; // Lower iterations for heavier op - string limitKey = "RecursiveMerge_Array_1k_Ops_MaxMs"; + var iterations = 1000; // Lower iterations for heavier op + var limitKey = "RecursiveMerge_Array_1k_Ops_MaxMs"; var ts1 = new HlcTimestamp(100, 0, "n1"); var ts2 = new HlcTimestamp(200, 0, "n2"); var items = new List(); - for (int i = 0; i < 100; i++) items.Add(new { id = i.ToString(), val = i }); + for (var i = 0; i < 100; i++) items.Add(new { id = i.ToString(), val = i }); - var doc = CreateDoc("k1", new { items = items }, ts1); - var op = CreateDoc("k1", new { items = items }, ts2).ToOplogEntry(OperationType.Put); // Same content to force id check traversal + var doc = CreateDoc("k1", new { items }, ts1); + var op = CreateDoc("k1", new { items }, ts2) + .ToOplogEntry(OperationType.Put); // Same content to force id check traversal // Warmup _resolver.Resolve(doc, op); // Run var sw = Stopwatch.StartNew(); - for (int i = 0; i < iterations; i++) - { - _resolver.Resolve(doc, op); - } + for (var i = 0; i < iterations; i++) _resolver.Resolve(doc, op); sw.Stop(); long elapsed = sw.ElapsedMilliseconds; Console.WriteLine($"Executed {iterations} array merges in {elapsed}ms"); if (_limits.TryGetValue(limitKey, out int maxMs)) - { elapsed.ShouldBeLessThan(maxMs, $"Performance regression! Expected < {maxMs}ms but took {elapsed}ms"); - } } } public static class DocExt { /// - /// Creates an operation log entry from a document instance. + /// Creates an operation log entry from a document instance. /// /// The source document. /// The operation type to apply to the created entry. 
@@ -124,4 +113,4 @@ public static class DocExt { return new OplogEntry(d.Collection, d.Key, t, d.Content, d.UpdatedAt, string.Empty); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Core.Tests/RecursiveNodeMergeConflictResolverTests.cs b/tests/ZB.MOM.WW.CBDDC.Core.Tests/RecursiveNodeMergeConflictResolverTests.cs index 4d7407e..e7a4bef 100755 --- a/tests/ZB.MOM.WW.CBDDC.Core.Tests/RecursiveNodeMergeConflictResolverTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Core.Tests/RecursiveNodeMergeConflictResolverTests.cs @@ -8,7 +8,7 @@ public class RecursiveNodeMergeConflictResolverTests private readonly RecursiveNodeMergeConflictResolver _resolver; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// public RecursiveNodeMergeConflictResolverTests() { @@ -17,20 +17,20 @@ public class RecursiveNodeMergeConflictResolverTests private Document CreateDoc(string key, object data, HlcTimestamp ts) { - var json = JsonSerializer.Serialize(data); + string json = JsonSerializer.Serialize(data); var element = JsonDocument.Parse(json).RootElement; return new Document("test", key, element, ts, false); } private OplogEntry CreateOp(string key, object data, HlcTimestamp ts) { - var json = JsonSerializer.Serialize(data); + string json = JsonSerializer.Serialize(data); var element = JsonDocument.Parse(json).RootElement; return new OplogEntry("test", key, OperationType.Put, element, ts, string.Empty); } /// - /// Verifies that disjoint fields are merged into a single document. + /// Verifies that disjoint fields are merged into a single document. /// [Fact] public void Resolve_ShouldMergeDisjointFields() @@ -56,7 +56,7 @@ public class RecursiveNodeMergeConflictResolverTests } /// - /// Verifies that primitive collisions are resolved using the higher timestamp value. + /// Verifies that primitive collisions are resolved using the higher timestamp value. 
/// [Fact] public void Resolve_ShouldPrioritizeHigherTimestamp_PrimitiveCollision() @@ -81,7 +81,7 @@ public class RecursiveNodeMergeConflictResolverTests } /// - /// Verifies that nested object content is merged recursively. + /// Verifies that nested object content is merged recursively. /// [Fact] public void Resolve_ShouldRecursivelyMergeObjects() @@ -104,7 +104,7 @@ public class RecursiveNodeMergeConflictResolverTests } /// - /// Verifies that arrays containing object identifiers are merged by item identity. + /// Verifies that arrays containing object identifiers are merged by item identity. /// [Fact] public void Resolve_ShouldMergeArraysById() @@ -115,7 +115,8 @@ public class RecursiveNodeMergeConflictResolverTests var doc = CreateDoc("k1", new { - items = new[] { + items = new[] + { new { id = "1", val = "A" }, new { id = "2", val = "B" } } @@ -123,9 +124,10 @@ public class RecursiveNodeMergeConflictResolverTests var op = CreateOp("k1", new { - items = new[] { + items = new[] + { new { id = "1", val = "A-Updated" }, // Update - new { id = "3", val = "C" } // Insert + new { id = "3", val = "C" } // Insert } }, ts2); @@ -133,14 +135,14 @@ public class RecursiveNodeMergeConflictResolverTests var result = _resolver.Resolve(doc, op); // Assert - Action validate = (root) => + Action validate = root => { var items = root.GetProperty("items"); items.GetArrayLength().ShouldBe(3); // Order is not guaranteed, so find by id // But simplified test checking content exists - var text = items.GetRawText(); + string text = items.GetRawText(); text.ShouldContain("A-Updated"); text.ShouldContain("B"); text.ShouldContain("C"); @@ -150,7 +152,7 @@ public class RecursiveNodeMergeConflictResolverTests } /// - /// Verifies that primitive arrays fall back to last-write-wins behavior. + /// Verifies that primitive arrays fall back to last-write-wins behavior. 
/// [Fact] public void Resolve_ShouldFallbackToLWW_ForPrimitiveArrays() @@ -170,4 +172,4 @@ public class RecursiveNodeMergeConflictResolverTests tags.GetArrayLength().ShouldBe(1); tags[0].GetString().ShouldBe("c"); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Core.Tests/VectorClockTests.cs b/tests/ZB.MOM.WW.CBDDC.Core.Tests/VectorClockTests.cs index afbcdf7..6d39285 100755 --- a/tests/ZB.MOM.WW.CBDDC.Core.Tests/VectorClockTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Core.Tests/VectorClockTests.cs @@ -1,17 +1,13 @@ -using ZB.MOM.WW.CBDDC.Core; -using System.Linq; -using Xunit; - namespace ZB.MOM.WW.CBDDC.Core.Tests; -public class VectorClockTests -{ - /// - /// Verifies an empty vector clock returns the default timestamp for unknown nodes. - /// - [Fact] - public void EmptyVectorClock_ShouldReturnDefaultTimestamp() - { +public class VectorClockTests +{ + /// + /// Verifies an empty vector clock returns the default timestamp for unknown nodes. + /// + [Fact] + public void EmptyVectorClock_ShouldReturnDefaultTimestamp() + { // Arrange var vc = new VectorClock(); @@ -19,15 +15,15 @@ public class VectorClockTests var ts = vc.GetTimestamp("node1"); // Assert - ts.ShouldBe(default(HlcTimestamp)); - } - - /// - /// Verifies setting a timestamp stores it for the specified node. - /// - [Fact] - public void SetTimestamp_ShouldStoreTimestamp() - { + ts.ShouldBe(default); + } + + /// + /// Verifies setting a timestamp stores it for the specified node. + /// + [Fact] + public void SetTimestamp_ShouldStoreTimestamp() + { // Arrange var vc = new VectorClock(); var ts = new HlcTimestamp(100, 1, "node1"); @@ -36,15 +32,15 @@ public class VectorClockTests vc.SetTimestamp("node1", ts); // Assert - vc.GetTimestamp("node1").ShouldBe(ts); - } - - /// - /// Verifies node identifiers are returned for all known nodes. 
- /// - [Fact] - public void NodeIds_ShouldReturnAllNodes() - { + vc.GetTimestamp("node1").ShouldBe(ts); + } + + /// + /// Verifies node identifiers are returned for all known nodes. + /// + [Fact] + public void NodeIds_ShouldReturnAllNodes() + { // Arrange var vc = new VectorClock(); vc.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); @@ -56,15 +52,15 @@ public class VectorClockTests // Assert nodeIds.Count.ShouldBe(2); nodeIds.ShouldContain("node1"); - nodeIds.ShouldContain("node2"); - } - - /// - /// Verifies equal vector clocks are compared as equal. - /// - [Fact] - public void CompareTo_EqualClocks_ShouldReturnEqual() - { + nodeIds.ShouldContain("node2"); + } + + /// + /// Verifies equal vector clocks are compared as equal. + /// + [Fact] + public void CompareTo_EqualClocks_ShouldReturnEqual() + { // Arrange var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); @@ -78,15 +74,15 @@ public class VectorClockTests var result = vc1.CompareTo(vc2); // Assert - result.ShouldBe(CausalityRelation.Equal); - } - - /// - /// Verifies a clock strictly ahead of another is reported as strictly ahead. - /// - [Fact] - public void CompareTo_StrictlyAhead_ShouldReturnStrictlyAhead() - { + result.ShouldBe(CausalityRelation.Equal); + } + + /// + /// Verifies a clock strictly ahead of another is reported as strictly ahead. + /// + [Fact] + public void CompareTo_StrictlyAhead_ShouldReturnStrictlyAhead() + { // Arrange var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Ahead @@ -100,15 +96,15 @@ public class VectorClockTests var result = vc1.CompareTo(vc2); // Assert - result.ShouldBe(CausalityRelation.StrictlyAhead); - } - - /// - /// Verifies a clock strictly behind another is reported as strictly behind. 
- /// - [Fact] - public void CompareTo_StrictlyBehind_ShouldReturnStrictlyBehind() - { + result.ShouldBe(CausalityRelation.StrictlyAhead); + } + + /// + /// Verifies a clock strictly behind another is reported as strictly behind. + /// + [Fact] + public void CompareTo_StrictlyBehind_ShouldReturnStrictlyBehind() + { // Arrange var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); // Behind @@ -122,15 +118,15 @@ public class VectorClockTests var result = vc1.CompareTo(vc2); // Assert - result.ShouldBe(CausalityRelation.StrictlyBehind); - } - - /// - /// Verifies divergent per-node progress is reported as concurrent. - /// - [Fact] - public void CompareTo_Concurrent_ShouldReturnConcurrent() - { + result.ShouldBe(CausalityRelation.StrictlyBehind); + } + + /// + /// Verifies divergent per-node progress is reported as concurrent. + /// + [Fact] + public void CompareTo_Concurrent_ShouldReturnConcurrent() + { // Arrange - Split brain scenario var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Node1 ahead @@ -144,15 +140,15 @@ public class VectorClockTests var result = vc1.CompareTo(vc2); // Assert - result.ShouldBe(CausalityRelation.Concurrent); - } - - /// - /// Verifies pull candidates include nodes where the other clock is ahead. - /// - [Fact] - public void GetNodesWithUpdates_ShouldReturnNodesWhereOtherIsAhead() - { + result.ShouldBe(CausalityRelation.Concurrent); + } + + /// + /// Verifies pull candidates include nodes where the other clock is ahead. + /// + [Fact] + public void GetNodesWithUpdates_ShouldReturnNodesWhereOtherIsAhead() + { // Arrange var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); @@ -167,15 +163,15 @@ public class VectorClockTests // Assert nodesToPull.Count().ShouldBe(1); - nodesToPull.ShouldContain("node1"); - } - - /// - /// Verifies push candidates include nodes where this clock is ahead. 
- /// - [Fact] - public void GetNodesToPush_ShouldReturnNodesWhereThisIsAhead() - { + nodesToPull.ShouldContain("node1"); + } + + /// + /// Verifies push candidates include nodes where this clock is ahead. + /// + [Fact] + public void GetNodesToPush_ShouldReturnNodesWhereThisIsAhead() + { // Arrange var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); // Ahead @@ -190,15 +186,15 @@ public class VectorClockTests // Assert nodesToPush.Count().ShouldBe(1); - nodesToPush.ShouldContain("node1"); - } - - /// - /// Verifies a newly introduced remote node is included in pull candidates. - /// - [Fact] - public void GetNodesWithUpdates_WhenNewNodeAppearsInOther_ShouldReturnIt() - { + nodesToPush.ShouldContain("node1"); + } + + /// + /// Verifies a newly introduced remote node is included in pull candidates. + /// + [Fact] + public void GetNodesWithUpdates_WhenNewNodeAppearsInOther_ShouldReturnIt() + { // Arrange - Simulates a new node joining the cluster var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); @@ -212,15 +208,15 @@ public class VectorClockTests // Assert nodesToPull.Count().ShouldBe(1); - nodesToPull.ShouldContain("node3"); - } - - /// - /// Verifies merge keeps the maximum timestamp per node. - /// - [Fact] - public void Merge_ShouldTakeMaximumForEachNode() - { + nodesToPull.ShouldContain("node3"); + } + + /// + /// Verifies merge keeps the maximum timestamp per node. 
+ /// + [Fact] + public void Merge_ShouldTakeMaximumForEachNode() + { // Arrange var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(200, 1, "node1")); @@ -234,18 +230,18 @@ public class VectorClockTests // Act vc1.Merge(vc2); - // Assert - vc1.GetTimestamp("node1").ShouldBe(new HlcTimestamp(200, 1, "node1")); // Kept max - vc1.GetTimestamp("node2").ShouldBe(new HlcTimestamp(200, 2, "node2")); // Merged max - vc1.GetTimestamp("node3").ShouldBe(new HlcTimestamp(150, 1, "node3")); // Added new - } - - /// - /// Verifies cloning creates an independent copy of the vector clock. - /// - [Fact] - public void Clone_ShouldCreateIndependentCopy() - { + // Assert + vc1.GetTimestamp("node1").ShouldBe(new HlcTimestamp(200, 1, "node1")); // Kept max + vc1.GetTimestamp("node2").ShouldBe(new HlcTimestamp(200, 2, "node2")); // Merged max + vc1.GetTimestamp("node3").ShouldBe(new HlcTimestamp(150, 1, "node3")); // Added new + } + + /// + /// Verifies cloning creates an independent copy of the vector clock. + /// + [Fact] + public void Clone_ShouldCreateIndependentCopy() + { // Arrange var vc1 = new VectorClock(); vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); @@ -256,15 +252,15 @@ public class VectorClockTests // Assert vc1.NodeIds.Count().ShouldBe(1); - vc2.NodeIds.Count().ShouldBe(2); - } - - /// - /// Verifies the string representation includes serialized node timestamps. - /// - [Fact] - public void ToString_ShouldReturnReadableFormat() - { + vc2.NodeIds.Count().ShouldBe(2); + } + + /// + /// Verifies the string representation includes serialized node timestamps. 
+ /// + [Fact] + public void ToString_ShouldReturnReadableFormat() + { // Arrange var vc = new VectorClock(); vc.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); @@ -275,15 +271,15 @@ public class VectorClockTests // Assert str.ShouldContain("node1:100:1:node1"); - str.ShouldContain("node2:200:2:node2"); - } - - /// - /// Verifies split-brain updates are detected as concurrent. - /// - [Fact] - public void SplitBrainScenario_ShouldDetectConcurrency() - { + str.ShouldContain("node2:200:2:node2"); + } + + /// + /// Verifies split-brain updates are detected as concurrent. + /// + [Fact] + public void SplitBrainScenario_ShouldDetectConcurrency() + { // Arrange - Simulating a network partition scenario // Partition 1: node1 and node2 are alive var vcPartition1 = new VectorClock(); @@ -310,4 +306,4 @@ public class VectorClockTests partition1NeedsToPush.ShouldContain("node1"); partition1NeedsToPush.ShouldContain("node2"); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj b/tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj index 98d29d2..48ae3d6 100755 --- a/tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj +++ b/tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj @@ -1,37 +1,37 @@ ο»Ώ - - ZB.MOM.WW.CBDDC.Core.Tests - ZB.MOM.WW.CBDDC.Core.Tests - ZB.MOM.WW.CBDDC.Core.Tests - net10.0 - enable - enable - $(NoWarn);xUnit1031;xUnit1051 - false - + + ZB.MOM.WW.CBDDC.Core.Tests + ZB.MOM.WW.CBDDC.Core.Tests + ZB.MOM.WW.CBDDC.Core.Tests + net10.0 + enable + enable + $(NoWarn);xUnit1031;xUnit1051 + false + - - - - - - - - + + + + + + + + - - - + + + - - - PreserveNewest - - + + + PreserveNewest + + - - - + + + - + diff --git a/tests/ZB.MOM.WW.CBDDC.Core.Tests/benchmark_limits.json b/tests/ZB.MOM.WW.CBDDC.Core.Tests/benchmark_limits.json index b93910b..0652949 100755 --- a/tests/ZB.MOM.WW.CBDDC.Core.Tests/benchmark_limits.json +++ 
b/tests/ZB.MOM.WW.CBDDC.Core.Tests/benchmark_limits.json @@ -1,4 +1,4 @@ { - "RecursiveMerge_Simple_10k_Ops_MaxMs": 500, - "RecursiveMerge_Array_1k_Ops_MaxMs": 1500 + "RecursiveMerge_Simple_10k_Ops_MaxMs": 500, + "RecursiveMerge_Array_1k_Ops_MaxMs": 1500 } \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ClusterCrudSyncE2ETests.cs b/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ClusterCrudSyncE2ETests.cs index e3233ff..cb4eb19 100644 --- a/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ClusterCrudSyncE2ETests.cs +++ b/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ClusterCrudSyncE2ETests.cs @@ -1,6 +1,6 @@ +using System.Collections.Concurrent; using System.Net; using System.Net.Sockets; -using System.Collections.Concurrent; using System.Text.Json; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Logging; @@ -16,24 +16,20 @@ namespace ZB.MOM.WW.CBDDC.E2E.Tests; public class ClusterCrudSyncE2ETests { /// - /// Verifies two real peers replicate create, update, and delete operations in both directions. + /// Verifies two real peers replicate create, update, and delete operations in both directions. 
/// [Fact] public async Task TwoPeers_ShouldReplicateCrudBidirectionally() { var clusterToken = Guid.NewGuid().ToString("N"); - var nodeAPort = GetAvailableTcpPort(); - var nodeBPort = GetAvailableTcpPort(); - while (nodeBPort == nodeAPort) - { - nodeBPort = GetAvailableTcpPort(); - } + int nodeAPort = GetAvailableTcpPort(); + int nodeBPort = GetAvailableTcpPort(); + while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort(); await using var nodeA = TestPeerNode.Create( - nodeId: "node-a", - tcpPort: nodeAPort, - authToken: clusterToken, - knownPeers: + "node-a", + nodeAPort, + clusterToken, [ new KnownPeerConfiguration { @@ -44,10 +40,9 @@ public class ClusterCrudSyncE2ETests ]); await using var nodeB = TestPeerNode.Create( - nodeId: "node-b", - tcpPort: nodeBPort, - authToken: clusterToken, - knownPeers: + "node-b", + nodeBPort, + clusterToken, [ new KnownPeerConfiguration { @@ -75,9 +70,9 @@ public class ClusterCrudSyncE2ETests { var replicated = nodeB.ReadUser(nodeAUserId); return replicated is not null - && replicated.Name == "Alice" - && replicated.Age == 30 - && replicated.Address?.City == "Austin"; + && replicated.Name == "Alice" + && replicated.Age == 30 + && replicated.Address?.City == "Austin"; }, timeoutSeconds, "Node B did not receive create from node A.", () => BuildDiagnostics(nodeA, nodeB)); await AssertEventuallyAsync( @@ -100,9 +95,9 @@ public class ClusterCrudSyncE2ETests { var replicated = nodeB.ReadUser(nodeAUserId); return replicated is not null - && replicated.Name == "Alice Updated" - && replicated.Age == 31 - && replicated.Address?.City == "Dallas"; + && replicated.Name == "Alice Updated" + && replicated.Age == 31 + && replicated.Address?.City == "Dallas"; }, timeoutSeconds, "Node B did not receive update from node A.", () => BuildDiagnostics(nodeA, nodeB)); await nodeA.DeleteUserAsync(nodeAUserId); @@ -126,9 +121,9 @@ public class ClusterCrudSyncE2ETests { var replicated = nodeA.ReadUser(nodeBUserId); return replicated is not null - 
&& replicated.Name == "Bob" - && replicated.Age == 40 - && replicated.Address?.City == "Boston"; + && replicated.Name == "Bob" + && replicated.Age == 40 + && replicated.Address?.City == "Boston"; }, timeoutSeconds, "Node A did not receive create from node B.", () => BuildDiagnostics(nodeA, nodeB)); await AssertEventuallyAsync( @@ -151,9 +146,9 @@ public class ClusterCrudSyncE2ETests { var replicated = nodeA.ReadUser(nodeBUserId); return replicated is not null - && replicated.Name == "Bob Updated" - && replicated.Age == 41 - && replicated.Address?.City == "Denver"; + && replicated.Name == "Bob Updated" + && replicated.Age == 41 + && replicated.Address?.City == "Denver"; }, timeoutSeconds, "Node A did not receive update from node B.", () => BuildDiagnostics(nodeA, nodeB)); await nodeB.DeleteUserAsync(nodeBUserId); @@ -175,36 +170,35 @@ public class ClusterCrudSyncE2ETests var startedAt = DateTime.UtcNow; while (DateTime.UtcNow - startedAt < timeout) { - if (predicate()) - { - return; - } + if (predicate()) return; await Task.Delay(250); } - var suffix = diagnostics is null ? string.Empty : $"{Environment.NewLine}{diagnostics()}"; - throw new Shouldly.ShouldAssertException($"{failureMessage}{suffix}"); + string suffix = diagnostics is null ? 
string.Empty : $"{Environment.NewLine}{diagnostics()}"; + throw new ShouldAssertException($"{failureMessage}{suffix}"); } private static string BuildDiagnostics(TestPeerNode nodeA, TestPeerNode nodeB) { - var nodeAUserCount = nodeA.Context.Users.FindAll().Count(); - var nodeBUserCount = nodeB.Context.Users.FindAll().Count(); - var nodeAOplogCount = nodeA.Context.OplogEntries.FindAll().Count(); - var nodeBOplogCount = nodeB.Context.OplogEntries.FindAll().Count(); - var nodeAOplogByAuthor = string.Join( + int nodeAUserCount = nodeA.Context.Users.FindAll().Count(); + int nodeBUserCount = nodeB.Context.Users.FindAll().Count(); + int nodeAOplogCount = nodeA.Context.OplogEntries.FindAll().Count(); + int nodeBOplogCount = nodeB.Context.OplogEntries.FindAll().Count(); + string nodeAOplogByAuthor = string.Join( ", ", nodeA.Context.OplogEntries.FindAll() .GroupBy(e => e.TimestampNodeId) .Select(g => $"{g.Key}:{g.Count()}")); - var nodeBOplogByAuthor = string.Join( + string nodeBOplogByAuthor = string.Join( ", ", nodeB.Context.OplogEntries.FindAll() .GroupBy(e => e.TimestampNodeId) .Select(g => $"{g.Key}:{g.Count()}")); - var nodeAUsers = string.Join(", ", nodeA.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}")); - var nodeBUsers = string.Join(", ", nodeB.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}")); + string nodeAUsers = string.Join(", ", + nodeA.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}")); + string nodeBUsers = string.Join(", ", + nodeB.Context.Users.FindAll().Select(u => $"{u.Id}:{u.Name}:{u.Age}:{u.Address?.City}")); return string.Join( Environment.NewLine, @@ -230,20 +224,15 @@ public class ClusterCrudSyncE2ETests private sealed class TestPeerNode : IAsyncDisposable { - private readonly ServiceProvider _services; - private readonly ICBDDCNode _node; - private readonly IOplogStore _oplogStore; - private readonly string _nodeId; - private readonly string _workDir; 
private readonly InMemoryLogSink _logSink; - private bool _started; + private readonly ICBDDCNode _node; + private readonly string _nodeId; + private readonly IOplogStore _oplogStore; + private readonly ServiceProvider _services; + private readonly string _workDir; private long _lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); private int _logicalCounter; - - /// - /// Gets the LiteDB-backed context used by this test peer. - /// - public SampleDbContext Context { get; } + private bool _started; private TestPeerNode( ServiceProvider services, @@ -264,82 +253,9 @@ public class ClusterCrudSyncE2ETests } /// - /// Creates a test peer node and wires all required services. + /// Gets the LiteDB-backed context used by this test peer. /// - /// The unique node identifier. - /// The TCP port used by the node listener. - /// The cluster authentication token. - /// The known peers this node can connect to. - /// A configured instance. - public static TestPeerNode Create( - string nodeId, - int tcpPort, - string authToken, - IReadOnlyList knownPeers) - { - var workDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}"); - Directory.CreateDirectory(workDir); - var dbPath = Path.Combine(workDir, "node.blite"); - - var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration - { - NodeId = nodeId, - TcpPort = tcpPort, - AuthToken = authToken, - KnownPeers = knownPeers.ToList() - }); - - var services = new ServiceCollection(); - services.AddSingleton(new InMemoryLogSink(nodeId)); - services.AddSingleton(); - services.AddLogging(builder => builder.SetMinimumLevel(LogLevel.Debug)); - services.AddSingleton(configProvider); - services.AddSingleton(configProvider); - services.AddCBDDCCore() - .AddCBDDCBLite(_ => new SampleDbContext(dbPath)) - .AddCBDDCNetwork(useHostedService: false); - - // Deterministic tests: sync uses explicit known peers, so disable UDP discovery. 
- services.AddSingleton(); - services.AddSingleton(); - - var provider = services.BuildServiceProvider(); - var node = provider.GetRequiredService(); - var oplogStore = provider.GetRequiredService(); - var context = provider.GetRequiredService(); - var logSink = provider.GetRequiredService(); - return new TestPeerNode(provider, node, oplogStore, context, logSink, workDir, nodeId); - } - - /// - /// Starts the underlying node when it has not been started yet. - /// - /// A task that represents the asynchronous operation. - public async Task StartAsync() - { - if (_started) - { - return; - } - - await _node.Start(); - _started = true; - } - - /// - /// Stops the underlying node when it is currently running. - /// - /// A task that represents the asynchronous operation. - public async Task StopAsync() - { - if (!_started) - { - return; - } - - await _node.Stop(); - _started = false; - } + public SampleDbContext Context { get; } /// public async ValueTask DisposeAsync() @@ -357,17 +273,89 @@ public class ClusterCrudSyncE2ETests } /// - /// Reads a user document by identifier. + /// Creates a test peer node and wires all required services. + /// + /// The unique node identifier. + /// The TCP port used by the node listener. + /// The cluster authentication token. + /// The known peers this node can connect to. + /// A configured instance. 
+ public static TestPeerNode Create( + string nodeId, + int tcpPort, + string authToken, + IReadOnlyList knownPeers) + { + string workDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}"); + Directory.CreateDirectory(workDir); + string dbPath = Path.Combine(workDir, "node.blite"); + + var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration + { + NodeId = nodeId, + TcpPort = tcpPort, + AuthToken = authToken, + KnownPeers = knownPeers.ToList() + }); + + var services = new ServiceCollection(); + services.AddSingleton(new InMemoryLogSink(nodeId)); + services.AddSingleton(); + services.AddLogging(builder => builder.SetMinimumLevel(LogLevel.Debug)); + services.AddSingleton(configProvider); + services.AddSingleton(configProvider); + services.AddCBDDCCore() + .AddCBDDCBLite(_ => new SampleDbContext(dbPath)) + .AddCBDDCNetwork(false); + + // Deterministic tests: sync uses explicit known peers, so disable UDP discovery. + services.AddSingleton(); + services.AddSingleton(); + + var provider = services.BuildServiceProvider(); + var node = provider.GetRequiredService(); + var oplogStore = provider.GetRequiredService(); + var context = provider.GetRequiredService(); + var logSink = provider.GetRequiredService(); + return new TestPeerNode(provider, node, oplogStore, context, logSink, workDir, nodeId); + } + + /// + /// Starts the underlying node when it has not been started yet. + /// + /// A task that represents the asynchronous operation. + public async Task StartAsync() + { + if (_started) return; + + await _node.Start(); + _started = true; + } + + /// + /// Stops the underlying node when it is currently running. + /// + /// A task that represents the asynchronous operation. + public async Task StopAsync() + { + if (!_started) return; + + await _node.Stop(); + _started = false; + } + + /// + /// Reads a user document by identifier. /// /// The identifier of the user to read. 
- /// The matching user when found; otherwise . + /// The matching user when found; otherwise . public User? ReadUser(string userId) { return Context.Users.Find(u => u.Id == userId).FirstOrDefault(); } /// - /// Inserts or updates a user and persists the matching oplog entry. + /// Inserts or updates a user and persists the matching oplog entry. /// /// The user payload to upsert. /// A task that represents the asynchronous operation. @@ -381,20 +369,16 @@ public class ClusterCrudSyncE2ETests { var existing = Context.Users.Find(u => u.Id == user.Id).FirstOrDefault(); if (existing == null) - { await Context.Users.InsertAsync(user); - } else - { await Context.Users.UpdateAsync(user); - } await Context.SaveChangesAsync(); }); } /// - /// Deletes a user and persists the matching oplog entry. + /// Deletes a user and persists the matching oplog entry. /// /// The identifier of the user to delete. /// A task that represents the asynchronous operation. @@ -403,7 +387,7 @@ public class ClusterCrudSyncE2ETests await PersistUserMutationWithOplogFallbackAsync( userId, OperationType.Delete, - payload: null, + null, async () => { await Context.Users.DeleteAsync(userId); @@ -412,7 +396,7 @@ public class ClusterCrudSyncE2ETests } /// - /// Gets recent in-memory logs captured for this node. + /// Gets recent in-memory logs captured for this node. /// /// The maximum number of log entries to return. /// A newline-delimited string of recent log entries. @@ -427,29 +411,26 @@ public class ClusterCrudSyncE2ETests JsonElement? payload, Func mutation) { - var oplogCountBefore = Context.OplogEntries.FindAll().Count(); + int oplogCountBefore = Context.OplogEntries.FindAll().Count(); await mutation(); // Prefer native CDC path; fallback only when CDC fails to emit. 
var deadline = DateTime.UtcNow.AddSeconds(3); while (DateTime.UtcNow < deadline) { - if (Context.OplogEntries.FindAll().Count() > oplogCountBefore) - { - return; - } + if (Context.OplogEntries.FindAll().Count() > oplogCountBefore) return; await Task.Delay(50); } - var previousHash = await _oplogStore.GetLastEntryHashAsync(_nodeId) ?? string.Empty; + string previousHash = await _oplogStore.GetLastEntryHashAsync(_nodeId) ?? string.Empty; var fallbackEntry = new OplogEntry( - collection: "Users", - key: userId, - operation: operationType, - payload: payload, - timestamp: NextTimestamp(), - previousHash: previousHash); + "Users", + userId, + operationType, + payload, + NextTimestamp(), + previousHash); await _oplogStore.AppendOplogEntryAsync(fallbackEntry); await Context.SaveChangesAsync(); @@ -457,7 +438,7 @@ public class ClusterCrudSyncE2ETests private HlcTimestamp NextTimestamp() { - var now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); if (now > _lastPhysicalTime) { _lastPhysicalTime = now; @@ -473,14 +454,11 @@ public class ClusterCrudSyncE2ETests private static void TryDeleteDirectory(string path) { - if (!Directory.Exists(path)) - { - return; - } + if (!Directory.Exists(path)) return; try { - Directory.Delete(path, recursive: true); + Directory.Delete(path, true); } catch { @@ -514,7 +492,7 @@ public class ClusterCrudSyncE2ETests private PeerNodeConfiguration _configuration; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The initial peer node configuration. public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration) @@ -545,7 +523,7 @@ public class ClusterCrudSyncE2ETests private readonly string _nodeId; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The node identifier associated with emitted logs. 
public InMemoryLogSink(string nodeId) @@ -554,7 +532,7 @@ public class ClusterCrudSyncE2ETests } /// - /// Adds a log entry to the in-memory sink. + /// Adds a log entry to the in-memory sink. /// /// The log category. /// The log level. @@ -563,10 +541,7 @@ public class ClusterCrudSyncE2ETests public void Add(string category, LogLevel level, string message, Exception? exception) { var text = $"[{DateTime.UtcNow:O}] {_nodeId} {level} {category}: {message}"; - if (exception is not null) - { - text = $"{text}{Environment.NewLine}{exception}"; - } + if (exception is not null) text = $"{text}{Environment.NewLine}{exception}"; _entries.Enqueue(text); while (_entries.Count > 500 && _entries.TryDequeue(out _)) @@ -575,17 +550,14 @@ public class ClusterCrudSyncE2ETests } /// - /// Gets the most recent log entries from the sink. + /// Gets the most recent log entries from the sink. /// /// The maximum number of entries to return. /// A newline-delimited string of recent log entries, or a placeholder when none exist. public string GetRecent(int max) { - var entries = _entries.ToArray(); - if (entries.Length == 0) - { - return ""; - } + string[] entries = _entries.ToArray(); + if (entries.Length == 0) return ""; return string.Join(Environment.NewLine, entries.TakeLast(max)); } @@ -596,7 +568,7 @@ public class ClusterCrudSyncE2ETests private readonly InMemoryLogSink _sink; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The shared sink used to capture log messages. public InMemoryLoggerProvider(InMemoryLogSink sink) @@ -622,7 +594,7 @@ public class ClusterCrudSyncE2ETests private readonly InMemoryLogSink _sink; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// The logger category name. /// The sink that stores emitted log messages. 
@@ -665,4 +637,4 @@ public class ClusterCrudSyncE2ETests { } } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.E2E.Tests/GlobalUsings.cs b/tests/ZB.MOM.WW.CBDDC.E2E.Tests/GlobalUsings.cs index 71bbd5d..0b76f3d 100644 --- a/tests/ZB.MOM.WW.CBDDC.E2E.Tests/GlobalUsings.cs +++ b/tests/ZB.MOM.WW.CBDDC.E2E.Tests/GlobalUsings.cs @@ -1,2 +1,2 @@ global using Shouldly; -global using ZB.MOM.WW.CBDDC.Sample.Console; +global using ZB.MOM.WW.CBDDC.Sample.Console; \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj b/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj index a3a3892..ee08832 100644 --- a/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj +++ b/tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj @@ -1,33 +1,33 @@ - - ZB.MOM.WW.CBDDC.E2E.Tests - ZB.MOM.WW.CBDDC.E2E.Tests - ZB.MOM.WW.CBDDC.E2E.Tests - net10.0 - enable - enable - $(NoWarn);xUnit1031;xUnit1051 - false - + + ZB.MOM.WW.CBDDC.E2E.Tests + ZB.MOM.WW.CBDDC.E2E.Tests + ZB.MOM.WW.CBDDC.E2E.Tests + net10.0 + enable + enable + $(NoWarn);xUnit1031;xUnit1051 + false + - - - - - - - + + + + + + + - - - + + + - - - - - - + + + + + + diff --git a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHealthCheckTests.cs b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHealthCheckTests.cs index d55662a..9db233e 100644 --- a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHealthCheckTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHealthCheckTests.cs @@ -9,7 +9,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests; public class CBDDCHealthCheckTests { /// - /// Verifies that health is reported as healthy when persistence is available and all peers are within lag thresholds. + /// Verifies that health is reported as healthy when persistence is available and all peers are within lag thresholds. 
/// [Fact] public async Task CheckHealthAsync_WhenPersistenceOkAndPeersWithinLagThreshold_ReturnsHealthyWithPayload() @@ -52,7 +52,7 @@ public class CBDDCHealthCheckTests var healthCheck = new CBDDCHealthCheck( store, confirmationStore, - CreateOptions(lagThresholdMs: 20, criticalLagThresholdMs: 50)); + CreateOptions(20, 50)); var result = await healthCheck.CheckHealthAsync(new HealthCheckContext()); @@ -69,7 +69,7 @@ public class CBDDCHealthCheckTests } /// - /// Verifies that health is reported as degraded when at least one peer is lagging or has no confirmation. + /// Verifies that health is reported as degraded when at least one peer is lagging or has no confirmation. /// [Fact] public async Task CheckHealthAsync_WhenPeersLaggingOrUnconfirmed_ReturnsDegradedWithPayload() @@ -113,7 +113,7 @@ public class CBDDCHealthCheckTests var healthCheck = new CBDDCHealthCheck( store, confirmationStore, - CreateOptions(lagThresholdMs: 30, criticalLagThresholdMs: 100)); + CreateOptions(30, 100)); var result = await healthCheck.CheckHealthAsync(new HealthCheckContext()); @@ -130,7 +130,7 @@ public class CBDDCHealthCheckTests } /// - /// Verifies that health is reported as unhealthy when critical lag threshold is exceeded. + /// Verifies that health is reported as unhealthy when critical lag threshold is exceeded. /// [Fact] public async Task CheckHealthAsync_WhenCriticalLagBreached_ReturnsUnhealthyWithPayload() @@ -158,7 +158,7 @@ public class CBDDCHealthCheckTests var healthCheck = new CBDDCHealthCheck( store, confirmationStore, - CreateOptions(lagThresholdMs: 30, criticalLagThresholdMs: 80)); + CreateOptions(30, 80)); var result = await healthCheck.CheckHealthAsync(new HealthCheckContext()); @@ -168,7 +168,7 @@ public class CBDDCHealthCheckTests } /// - /// Verifies that worst-case lag is used when a peer has multiple source confirmations. + /// Verifies that worst-case lag is used when a peer has multiple source confirmations. 
/// [Fact] public async Task CheckHealthAsync_WhenPeerHasMultipleSourceConfirmations_UsesWorstCaseLag() @@ -205,7 +205,7 @@ public class CBDDCHealthCheckTests var healthCheck = new CBDDCHealthCheck( store, confirmationStore, - CreateOptions(lagThresholdMs: 80, criticalLagThresholdMs: 150)); + CreateOptions(80, 150)); var result = await healthCheck.CheckHealthAsync(new HealthCheckContext()); @@ -215,7 +215,7 @@ public class CBDDCHealthCheckTests } /// - /// Verifies that health is reported as unhealthy when the persistence store throws. + /// Verifies that health is reported as unhealthy when the persistence store throws. /// [Fact] public async Task CheckHealthAsync_WhenStoreThrows_ReturnsUnhealthy() @@ -253,4 +253,4 @@ public class CBDDCHealthCheckTests } }; } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHostingExtensionsTests.cs b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHostingExtensionsTests.cs index d7d4d96..9159e18 100644 --- a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHostingExtensionsTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/CBDDCHostingExtensionsTests.cs @@ -12,7 +12,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests; public class CBDDCHostingExtensionsTests { /// - /// Verifies that adding CBDDC hosting throws when the service collection is null. + /// Verifies that adding CBDDC hosting throws when the service collection is null. /// [Fact] public void AddCBDDCHosting_WithNullServices_ThrowsArgumentNullException() @@ -22,7 +22,7 @@ public class CBDDCHostingExtensionsTests } /// - /// Verifies that adding CBDDC hosting throws when the configuration delegate is null. + /// Verifies that adding CBDDC hosting throws when the configuration delegate is null. /// [Fact] public void AddCBDDCHosting_WithNullConfigure_ThrowsArgumentNullException() @@ -33,7 +33,7 @@ public class CBDDCHostingExtensionsTests } /// - /// Verifies that single-cluster hosting registers expected services and configured options. 
+ /// Verifies that single-cluster hosting registers expected services and configured options. /// [Fact] public void AddCBDDCHostingSingleCluster_RegistersExpectedServicesAndOptions() @@ -73,7 +73,7 @@ public class CBDDCHostingExtensionsTests } /// - /// Verifies that single-cluster hosting uses default options when no configuration delegate is provided. + /// Verifies that single-cluster hosting uses default options when no configuration delegate is provided. /// [Fact] public void AddCBDDCHostingSingleCluster_WithNullConfigure_UsesDefaults() @@ -90,17 +90,14 @@ public class CBDDCHostingExtensionsTests } /// - /// Verifies that health check registration is skipped when health checks are disabled. + /// Verifies that health check registration is skipped when health checks are disabled. /// [Fact] public void AddCBDDCHosting_WithHealthChecksDisabled_DoesNotRegisterCBDDCHealthCheck() { var services = new ServiceCollection(); - services.AddCBDDCHosting(options => - { - options.EnableHealthChecks = false; - }); + services.AddCBDDCHosting(options => { options.EnableHealthChecks = false; }); services.Any(d => d.ServiceType == typeof(IConfigureOptions)) .ShouldBeFalse(); @@ -121,4 +118,4 @@ public class CBDDCHostingExtensionsTests d.ImplementationType == typeof(THostedService)) .ShouldBeTrue(); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/GlobalUsings.cs b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/GlobalUsings.cs index 223b6a4..5529f83 100644 --- a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/GlobalUsings.cs +++ b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/GlobalUsings.cs @@ -1,2 +1,2 @@ global using NSubstitute; -global using Shouldly; +global using Shouldly; \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/HostedServicesTests.cs b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/HostedServicesTests.cs index f04ff45..4b13a49 100644 --- a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/HostedServicesTests.cs +++ 
b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/HostedServicesTests.cs @@ -7,7 +7,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests; public class HostedServicesTests { /// - /// Verifies that the TCP sync server hosted service starts and stops the server lifecycle. + /// Verifies that the TCP sync server hosted service starts and stops the server lifecycle. /// [Fact] public async Task TcpSyncServerHostedService_StartAndStop_CallsServerLifecycle() @@ -24,7 +24,7 @@ public class HostedServicesTests } /// - /// Verifies that the discovery hosted service starts and stops the discovery lifecycle. + /// Verifies that the discovery hosted service starts and stops the discovery lifecycle. /// [Fact] public async Task DiscoveryServiceHostedService_StartAndStop_CallsDiscoveryLifecycle() @@ -39,4 +39,4 @@ public class HostedServicesTests await discoveryService.Received(1).Start(); await discoveryService.Received(1).Stop(); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/NoOpServicesTests.cs b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/NoOpServicesTests.cs index ded7e5b..8b77765 100644 --- a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/NoOpServicesTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/NoOpServicesTests.cs @@ -5,7 +5,7 @@ namespace ZB.MOM.WW.CBDDC.Hosting.Tests; public class NoOpServicesTests { /// - /// Verifies that no-op discovery service lifecycle calls complete and no peers are returned. + /// Verifies that no-op discovery service lifecycle calls complete and no peers are returned. /// [Fact] public async Task NoOpDiscoveryService_ReturnsNoPeers_AndCompletesLifecycleCalls() @@ -20,7 +20,7 @@ public class NoOpServicesTests } /// - /// Verifies that no-op sync orchestrator lifecycle calls complete without exceptions. + /// Verifies that no-op sync orchestrator lifecycle calls complete without exceptions. 
/// [Fact] public async Task NoOpSyncOrchestrator_CompletesLifecycleCalls() @@ -32,4 +32,4 @@ public class NoOpServicesTests orchestrator.Dispose(); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj index 311f45a..a7f8f1b 100644 --- a/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj +++ b/tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj @@ -1,32 +1,32 @@ - - ZB.MOM.WW.CBDDC.Hosting.Tests - ZB.MOM.WW.CBDDC.Hosting.Tests - ZB.MOM.WW.CBDDC.Hosting.Tests - net10.0 - enable - enable - $(NoWarn);xUnit1031;xUnit1051 - false - + + ZB.MOM.WW.CBDDC.Hosting.Tests + ZB.MOM.WW.CBDDC.Hosting.Tests + ZB.MOM.WW.CBDDC.Hosting.Tests + net10.0 + enable + enable + $(NoWarn);xUnit1031;xUnit1051 + false + - - - - - - - - - + + + + + + + + + - - - + + + - - - + + + diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/BullyLeaderElectionServiceTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/BullyLeaderElectionServiceTests.cs index c2b0895..3247f2d 100644 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/BullyLeaderElectionServiceTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/BullyLeaderElectionServiceTests.cs @@ -1,4 +1,3 @@ -using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Network.Leadership; @@ -21,7 +20,7 @@ public class BullyLeaderElectionServiceTests } /// - /// Verifies that a single node elects itself as leader. + /// Verifies that a single node elects itself as leader. /// [Fact] public async Task SingleNode_ShouldBecomeLeader() @@ -48,15 +47,15 @@ public class BullyLeaderElectionServiceTests } /// - /// Verifies that the smallest node ID is elected as leader among LAN peers. + /// Verifies that the smallest node ID is elected as leader among LAN peers. 
/// [Fact] public async Task MultipleNodes_SmallestNodeIdShouldBeLeader() { var peers = new List { - new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered), - new("node-C", "192.168.1.3:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered) + new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow), + new("node-C", "192.168.1.3:9000", DateTimeOffset.UtcNow) }; var electionService = new BullyLeaderElectionService( @@ -74,15 +73,15 @@ public class BullyLeaderElectionServiceTests } /// - /// Verifies that the local node is not elected when it is not the smallest node ID. + /// Verifies that the local node is not elected when it is not the smallest node ID. /// [Fact] public async Task LocalNodeNotSmallest_ShouldNotBeLeader() { var peers = new List { - new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered), - new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered) + new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow), + new("node-B", "192.168.1.2:9000", DateTimeOffset.UtcNow) }; var electionService = new BullyLeaderElectionService( @@ -100,14 +99,14 @@ public class BullyLeaderElectionServiceTests } /// - /// Verifies that leadership is re-elected when the current leader fails. + /// Verifies that leadership is re-elected when the current leader fails. /// [Fact] public async Task LeaderFailure_ShouldReelect() { var peers = new List { - new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered) + new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow) }; var electionService = new BullyLeaderElectionService( @@ -136,14 +135,14 @@ public class BullyLeaderElectionServiceTests } /// - /// Verifies that cloud peers are excluded from LAN gateway election. + /// Verifies that cloud peers are excluded from LAN gateway election. 
/// [Fact] public async Task CloudPeersExcludedFromElection() { var peers = new List { - new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow, PeerType.LanDiscovered), + new("node-A", "192.168.1.1:9000", DateTimeOffset.UtcNow), new("cloud-node-Z", "cloud.example.com:9000", DateTimeOffset.UtcNow, PeerType.CloudRemote) }; @@ -159,4 +158,4 @@ public class BullyLeaderElectionServiceTests await electionService.Stop(); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/ConnectionTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/ConnectionTests.cs index 8d63ad8..dc14746 100644 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/ConnectionTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/ConnectionTests.cs @@ -1,17 +1,16 @@ -using System.IO; using System.Net.Sockets; +using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Network.Security; -using Microsoft.Extensions.Logging.Abstractions; namespace ZB.MOM.WW.CBDDC.Network.Tests; public class ConnectionTests { /// - /// Verifies that the server rejects new clients when the configured connection limit is reached. + /// Verifies that the server rejects new clients when the configured connection limit is reached. 
/// [Fact] public async Task Server_Should_Reject_Clients_When_Limit_Reached() @@ -22,9 +21,11 @@ public class ConnectionTests .Returns(new HlcTimestamp(0, 0, "node")); oplogStore.GetVectorClockAsync(Arg.Any()) .Returns(new VectorClock()); - oplogStore.GetOplogAfterAsync(Arg.Any(), Arg.Any?>(), Arg.Any()) + oplogStore.GetOplogAfterAsync(Arg.Any(), Arg.Any?>(), + Arg.Any()) .Returns(Array.Empty()); - oplogStore.GetOplogForNodeAfterAsync(Arg.Any(), Arg.Any(), Arg.Any?>(), Arg.Any()) + oplogStore.GetOplogForNodeAfterAsync(Arg.Any(), Arg.Any(), + Arg.Any?>(), Arg.Any()) .Returns(Array.Empty()); var configProvider = Substitute.For(); @@ -44,7 +45,8 @@ public class ConnectionTests authenticator.ValidateAsync(Arg.Any(), Arg.Any()).Returns(true); var handshakeService = Substitute.For(); - handshakeService.HandshakeAsync(Arg.Any(), Arg.Any(), Arg.Any(), Arg.Any()) + handshakeService.HandshakeAsync(Arg.Any(), Arg.Any(), Arg.Any(), + Arg.Any()) .Returns((CipherState?)null); var server = new TcpSyncServer( @@ -59,7 +61,7 @@ public class ConnectionTests server.MaxConnections = 2; await server.Start(); - var port = server.ListeningPort ?? throw new Exception("Server not started"); + int port = server.ListeningPort ?? 
throw new Exception("Server not started"); using var client1 = new TcpClient(); using var client2 = new TcpClient(); @@ -76,7 +78,7 @@ public class ConnectionTests // Assert var stream3 = client3.GetStream(); var buffer = new byte[10]; - var read = await stream3.ReadAsync(buffer, 0, 10); + int read = await stream3.ReadAsync(buffer, 0, 10); read.ShouldBe(0, "Server should close connection immediately for client 3"); client1.Connected.ShouldBeTrue(); @@ -87,4 +89,4 @@ public class ConnectionTests await server.Stop(); } } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/CryptoHelperTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/CryptoHelperTests.cs index dd3f4e5..166a14b 100755 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/CryptoHelperTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/CryptoHelperTests.cs @@ -1,42 +1,41 @@ using System.Security.Cryptography; using ZB.MOM.WW.CBDDC.Network.Security; -using Xunit; namespace ZB.MOM.WW.CBDDC.Network.Tests; -public class CryptoHelperTests -{ - /// - /// Verifies that encrypted data can be decrypted back to the original payload. - /// - [Fact] - public void EncryptDecrypt_ShouldPreserveData() - { +public class CryptoHelperTests +{ + /// + /// Verifies that encrypted data can be decrypted back to the original payload. + /// + [Fact] + public void EncryptDecrypt_ShouldPreserveData() + { // Arrange var key = new byte[32]; // 256 bits - RandomNumberGenerator.Fill(key); - + RandomNumberGenerator.Fill(key); + var original = new byte[] { 1, 2, 3, 4, 5, 255, 0, 10 }; // Act - var (ciphertext, iv, tag) = CryptoHelper.Encrypt(original, key); - var decrypted = CryptoHelper.Decrypt(ciphertext, iv, tag, key); + (byte[] ciphertext, byte[] iv, byte[] tag) = CryptoHelper.Encrypt(original, key); + byte[] decrypted = CryptoHelper.Decrypt(ciphertext, iv, tag, key); // Assert decrypted.ShouldBe(original); } - /// - /// Verifies that decryption fails when ciphertext is tampered with. 
- /// - [Fact] - public void Decrypt_ShouldFail_IfTampered() - { + /// + /// Verifies that decryption fails when ciphertext is tampered with. + /// + [Fact] + public void Decrypt_ShouldFail_IfTampered() + { // Arrange var key = new byte[32]; RandomNumberGenerator.Fill(key); var original = new byte[] { 1, 2, 3 }; - var (ciphertext, iv, tag) = CryptoHelper.Encrypt(original, key); + (byte[] ciphertext, byte[] iv, byte[] tag) = CryptoHelper.Encrypt(original, key); // Tamper ciphertext ciphertext[0] ^= 0xFF; @@ -45,6 +44,6 @@ public class CryptoHelperTests Action act = () => CryptoHelper.Decrypt(ciphertext, iv, tag, key); // Assert - Should.Throw(act); + Should.Throw(act); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/GlobalUsings.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/GlobalUsings.cs index 223b6a4..5529f83 100644 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/GlobalUsings.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/GlobalUsings.cs @@ -1,2 +1,2 @@ global using NSubstitute; -global using Shouldly; +global using Shouldly; \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/HandshakeRegressionTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/HandshakeRegressionTests.cs index d60af9a..026704c 100644 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/HandshakeRegressionTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/HandshakeRegressionTests.cs @@ -1,17 +1,16 @@ -using System.IO; using System.Net.Sockets; +using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Network.Security; -using Microsoft.Extensions.Logging.Abstractions; namespace ZB.MOM.WW.CBDDC.Network.Tests; public class HandshakeRegressionTests { /// - /// Verifies that the server invokes the handshake service when a client connects. + /// Verifies that the server invokes the handshake service when a client connects. 
/// [Fact] public async Task Server_Should_Call_HandshakeService_On_Client_Connection() @@ -22,9 +21,11 @@ public class HandshakeRegressionTests .Returns(new HlcTimestamp(0, 0, "node")); oplogStore.GetVectorClockAsync(Arg.Any()) .Returns(new VectorClock()); - oplogStore.GetOplogAfterAsync(Arg.Any(), Arg.Any?>(), Arg.Any()) + oplogStore.GetOplogAfterAsync(Arg.Any(), Arg.Any?>(), + Arg.Any()) .Returns(Array.Empty()); - oplogStore.GetOplogForNodeAfterAsync(Arg.Any(), Arg.Any(), Arg.Any?>(), Arg.Any()) + oplogStore.GetOplogForNodeAfterAsync(Arg.Any(), Arg.Any(), + Arg.Any?>(), Arg.Any()) .Returns(Array.Empty()); var configProvider = Substitute.For(); @@ -44,7 +45,8 @@ public class HandshakeRegressionTests authenticator.ValidateAsync(Arg.Any(), Arg.Any()).Returns(true); var handshakeService = Substitute.For(); - handshakeService.HandshakeAsync(Arg.Any(), Arg.Any(), Arg.Any(), Arg.Any()) + handshakeService.HandshakeAsync(Arg.Any(), Arg.Any(), Arg.Any(), + Arg.Any()) .Returns((CipherState?)null); var server = new TcpSyncServer( @@ -57,7 +59,7 @@ public class HandshakeRegressionTests handshakeService); await server.Start(); - var port = server.ListeningPort ?? throw new Exception("Server did not start or report port"); + int port = server.ListeningPort ?? 
throw new Exception("Server did not start or report port"); // Act using (var client = new TcpClient()) @@ -72,4 +74,4 @@ public class HandshakeRegressionTests await handshakeService.Received(1) .HandshakeAsync(Arg.Any(), false, "server-node", Arg.Any()); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/ProtocolTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/ProtocolTests.cs index cd15c99..0724fdd 100755 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/ProtocolTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/ProtocolTests.cs @@ -1,178 +1,172 @@ -using System; -using System.IO; -using System.Linq; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Network.Proto; -using ZB.MOM.WW.CBDDC.Network.Protocol; -using ZB.MOM.WW.CBDDC.Network.Security; -using Google.Protobuf; -using Microsoft.Extensions.Logging.Abstractions; -using Xunit; - -namespace ZB.MOM.WW.CBDDC.Network.Tests -{ - public class ProtocolTests +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Network.Proto; +using ZB.MOM.WW.CBDDC.Network.Protocol; +using ZB.MOM.WW.CBDDC.Network.Security; + +namespace ZB.MOM.WW.CBDDC.Network.Tests; + +public class ProtocolTests +{ + private readonly ProtocolHandler _handler; + + /// + /// Initializes a new instance of the class. + /// + public ProtocolTests() { - private readonly ProtocolHandler _handler; + _handler = new ProtocolHandler(NullLogger.Instance); + } + + /// + /// Verifies a plain message can be written and read without transformation. 
+ /// + [Fact] + public async Task RoundTrip_ShouldWorks_WithPlainMessage() + { + // Arrange + var stream = new MemoryStream(); + var message = new HandshakeRequest { NodeId = "node-1", AuthToken = "token" }; + + // Act + await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, null); + + stream.Position = 0; // Reset for reading + (var type, byte[] payload) = await _handler.ReadMessageAsync(stream, null); + + // Assert + type.ShouldBe(MessageType.HandshakeReq); + var decoded = HandshakeRequest.Parser.ParseFrom(payload); + decoded.NodeId.ShouldBe("node-1"); + decoded.AuthToken.ShouldBe("token"); + } + + /// + /// Verifies a compressed message can be written and read successfully. + /// + [Fact] + public async Task RoundTrip_ShouldWork_WithCompression() + { + // Arrange + var stream = new MemoryStream(); + // Create a large message to trigger compression logic (threshold is small but let's be safe) + string largeData = string.Join("", Enumerable.Repeat("ABCDEF0123456789", 100)); + var message = new HandshakeRequest { NodeId = largeData, AuthToken = "token" }; + + // Act + await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, null); + + stream.Position = 0; + (var type, byte[] payload) = await _handler.ReadMessageAsync(stream, null); + + // Assert + type.ShouldBe(MessageType.HandshakeReq); + var decoded = HandshakeRequest.Parser.ParseFrom(payload); + decoded.NodeId.ShouldBe(largeData); + } + + /// + /// Verifies an encrypted message can be written and read successfully. 
+ /// + [Fact] + public async Task RoundTrip_ShouldWork_WithEncryption() + { + // Arrange + var stream = new MemoryStream(); + var message = new HandshakeRequest { NodeId = "secure-node", AuthToken = "secure-token" }; + + // Mock CipherState + var key = new byte[32]; // 256-bit key + new Random().NextBytes(key); + var cipherState = new CipherState(key, key); // Encrypt and Decrypt with same key for loopback + + // Act + await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, cipherState); + + stream.Position = 0; + (var type, byte[] payload) = await _handler.ReadMessageAsync(stream, cipherState); + + // Assert + type.ShouldBe(MessageType.HandshakeReq); + var decoded = HandshakeRequest.Parser.ParseFrom(payload); + decoded.NodeId.ShouldBe("secure-node"); + } + + /// + /// Verifies a message can be round-tripped when both compression and encryption are enabled. + /// + [Fact] + public async Task RoundTrip_ShouldWork_WithEncryption_And_Compression() + { + // Arrange + var stream = new MemoryStream(); + string largeData = string.Join("", Enumerable.Repeat("SECURECOMPRESSION", 100)); + var message = new HandshakeRequest { NodeId = largeData }; + + var key = new byte[32]; + new Random().NextBytes(key); + var cipherState = new CipherState(key, key); + + // Act: Compress THEN Encrypt + await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, cipherState); + + stream.Position = 0; + // Verify wire encryption (should be MessageType.SecureEnv) + // But ReadMessageAsync abstracts this away. + // We can peek at the stream if we want, but let's trust ReadMessageAsync handles it. + + (var type, byte[] payload) = await _handler.ReadMessageAsync(stream, cipherState); + + // Assert + type.ShouldBe(MessageType.HandshakeReq); + var decoded = HandshakeRequest.Parser.ParseFrom(payload); + decoded.NodeId.ShouldBe(largeData); + } + + /// + /// Verifies that message reads succeed when bytes arrive in small fragments. 
+ /// + [Fact] + public async Task ReadMessage_ShouldHandle_Fragmentation() + { + // Arrange + var fullStream = new MemoryStream(); + var message = new HandshakeRequest { NodeId = "fragmented" }; + await _handler.SendMessageAsync(fullStream, MessageType.HandshakeReq, message, false, null); + + byte[] completeBytes = fullStream.ToArray(); + var fragmentedStream = new FragmentedMemoryStream(completeBytes, 2); // Read 2 bytes at a time + + // Act + (var type, byte[] payload) = await _handler.ReadMessageAsync(fragmentedStream, null); + + // Assert + type.ShouldBe(MessageType.HandshakeReq); + var decoded = HandshakeRequest.Parser.ParseFrom(payload); + decoded.NodeId.ShouldBe("fragmented"); + } + + // Helper Stream for fragmentation test + private class FragmentedMemoryStream : MemoryStream + { + private readonly int _chunkSize; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// - public ProtocolTests() + /// The backing stream buffer. + /// The maximum bytes returned per read. + public FragmentedMemoryStream(byte[] buffer, int chunkSize) : base(buffer) { - _handler = new ProtocolHandler(NullLogger.Instance); + _chunkSize = chunkSize; } - /// - /// Verifies a plain message can be written and read without transformation. 
- /// - [Fact] - public async Task RoundTrip_ShouldWorks_WithPlainMessage() + /// + public override async Task ReadAsync(byte[] buffer, int offset, int count, + CancellationToken cancellationToken) { - // Arrange - var stream = new MemoryStream(); - var message = new HandshakeRequest { NodeId = "node-1", AuthToken = "token" }; - - // Act - await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, null); - - stream.Position = 0; // Reset for reading - var (type, payload) = await _handler.ReadMessageAsync(stream, null); - - // Assert - type.ShouldBe(MessageType.HandshakeReq); - var decoded = HandshakeRequest.Parser.ParseFrom(payload); - decoded.NodeId.ShouldBe("node-1"); - decoded.AuthToken.ShouldBe("token"); + // Force read to be max _chunkSize, even if more is requested + int toRead = Math.Min(count, _chunkSize); + return await base.ReadAsync(buffer, offset, toRead, cancellationToken); } - - /// - /// Verifies a compressed message can be written and read successfully. - /// - [Fact] - public async Task RoundTrip_ShouldWork_WithCompression() - { - // Arrange - var stream = new MemoryStream(); - // Create a large message to trigger compression logic (threshold is small but let's be safe) - var largeData = string.Join("", Enumerable.Repeat("ABCDEF0123456789", 100)); - var message = new HandshakeRequest { NodeId = largeData, AuthToken = "token" }; - - // Act - await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, null); - - stream.Position = 0; - var (type, payload) = await _handler.ReadMessageAsync(stream, null); - - // Assert - type.ShouldBe(MessageType.HandshakeReq); - var decoded = HandshakeRequest.Parser.ParseFrom(payload); - decoded.NodeId.ShouldBe(largeData); - } - - /// - /// Verifies an encrypted message can be written and read successfully. 
- /// - [Fact] - public async Task RoundTrip_ShouldWork_WithEncryption() - { - // Arrange - var stream = new MemoryStream(); - var message = new HandshakeRequest { NodeId = "secure-node", AuthToken = "secure-token" }; - - // Mock CipherState - var key = new byte[32]; // 256-bit key - new Random().NextBytes(key); - var cipherState = new CipherState(key, key); // Encrypt and Decrypt with same key for loopback - - // Act - await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, false, cipherState); - - stream.Position = 0; - var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState); - - // Assert - type.ShouldBe(MessageType.HandshakeReq); - var decoded = HandshakeRequest.Parser.ParseFrom(payload); - decoded.NodeId.ShouldBe("secure-node"); - } - - /// - /// Verifies a message can be round-tripped when both compression and encryption are enabled. - /// - [Fact] - public async Task RoundTrip_ShouldWork_WithEncryption_And_Compression() - { - // Arrange - var stream = new MemoryStream(); - var largeData = string.Join("", Enumerable.Repeat("SECURECOMPRESSION", 100)); - var message = new HandshakeRequest { NodeId = largeData }; - - var key = new byte[32]; - new Random().NextBytes(key); - var cipherState = new CipherState(key, key); - - // Act: Compress THEN Encrypt - await _handler.SendMessageAsync(stream, MessageType.HandshakeReq, message, true, cipherState); - - stream.Position = 0; - // Verify wire encryption (should be MessageType.SecureEnv) - // But ReadMessageAsync abstracts this away. - // We can peek at the stream if we want, but let's trust ReadMessageAsync handles it. - - var (type, payload) = await _handler.ReadMessageAsync(stream, cipherState); - - // Assert - type.ShouldBe(MessageType.HandshakeReq); - var decoded = HandshakeRequest.Parser.ParseFrom(payload); - decoded.NodeId.ShouldBe(largeData); - } - - /// - /// Verifies that message reads succeed when bytes arrive in small fragments. 
- /// - [Fact] - public async Task ReadMessage_ShouldHandle_Fragmentation() - { - // Arrange - var fullStream = new MemoryStream(); - var message = new HandshakeRequest { NodeId = "fragmented" }; - await _handler.SendMessageAsync(fullStream, MessageType.HandshakeReq, message, false, null); - - byte[] completeBytes = fullStream.ToArray(); - var fragmentedStream = new FragmentedMemoryStream(completeBytes, chunkSize: 2); // Read 2 bytes at a time - - // Act - var (type, payload) = await _handler.ReadMessageAsync(fragmentedStream, null); - - // Assert - type.ShouldBe(MessageType.HandshakeReq); - var decoded = HandshakeRequest.Parser.ParseFrom(payload); - decoded.NodeId.ShouldBe("fragmented"); - } - - // Helper Stream for fragmentation test - private class FragmentedMemoryStream : MemoryStream - { - private readonly int _chunkSize; - - /// - /// Initializes a new instance of the class. - /// - /// The backing stream buffer. - /// The maximum bytes returned per read. - public FragmentedMemoryStream(byte[] buffer, int chunkSize) : base(buffer) - { - _chunkSize = chunkSize; - } - - /// - public override async Task ReadAsync(byte[] buffer, int offset, int count, System.Threading.CancellationToken cancellationToken) - { - // Force read to be max _chunkSize, even if more is requested - int toRead = Math.Min(count, _chunkSize); - return await base.ReadAsync(buffer, offset, toRead, cancellationToken); - } - } - } -} + } +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SecureHandshakeTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SecureHandshakeTests.cs index 41eb65b..2d0235d 100755 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SecureHandshakeTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SecureHandshakeTests.cs @@ -1,177 +1,219 @@ -using System; -using System.IO; -using System.Threading; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Network.Security; -using Microsoft.Extensions.Logging.Abstractions; -using Xunit; - -namespace 
ZB.MOM.WW.CBDDC.Network.Tests -{ - public class SecureHandshakeTests +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Network.Security; + +namespace ZB.MOM.WW.CBDDC.Network.Tests; + +public class SecureHandshakeTests +{ + /// + /// Verifies handshake negotiation succeeds between initiator and responder services. + /// + [Fact] + public async Task Handshake_Should_Succeed_Between_Two_Services() { - /// - /// Verifies handshake negotiation succeeds between initiator and responder services. - /// - [Fact] - public async Task Handshake_Should_Succeed_Between_Two_Services() + // Arrange + var clientStream = new PipeStream(); + var serverStream = new PipeStream(); + + // Client writes to clientStream, server reads from clientStream + // Server writes to serverStream, client reads from serverStream + + var clientSocket = new DuplexStream(serverStream, clientStream); // Read from server, Write to client + var serverSocket = new DuplexStream(clientStream, serverStream); // Read from client, Write to server + + var clientService = new SecureHandshakeService(NullLogger.Instance); + var serverService = new SecureHandshakeService(NullLogger.Instance); + + var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5)); + + // Act + var clientTask = clientService.HandshakeAsync(clientSocket, true, "client", cts.Token); + var serverTask = serverService.HandshakeAsync(serverSocket, false, "server", cts.Token); + + await Task.WhenAll(clientTask, serverTask); + + // Assert + var clientState = clientTask.Result; + var serverState = serverTask.Result; + + clientState.ShouldNotBeNull(); + serverState.ShouldNotBeNull(); + + // Keys should match (Symmetric) + clientState!.EncryptKey.ShouldBe(serverState!.DecryptKey); + clientState.DecryptKey.ShouldBe(serverState.EncryptKey); + } + + // Simulates a pipe. Writes go to buffer, Reads drain buffer. + private class SimplexStream : MemoryStream + { + // Simple approach: Use one MemoryStream as a shared buffer? 
+ // No, MemoryStream is not thread safe for concurrent Read/Write in this pipe manner really. + // Better to use a producer/consumer stream but for simplicity let's use a basic blocking queue logic or just wait. + // Actually, for unit tests, strictly ordered operations are better. But handshake is interactive. + // We need a proper pipe. + } + + // Let's use a simple PipeStream implementation using SemaphoreSlim for sync + private class PipeStream : Stream + { + private readonly MemoryStream _buffer = new(); + private readonly object _lock = new(); + private readonly SemaphoreSlim _readSemaphore = new(0); + + /// + public override bool CanRead => true; + + /// + public override bool CanSeek => false; + + /// + public override bool CanWrite => true; + + /// + public override long Length => _buffer.Length; + + /// + public override long Position { - // Arrange - var clientStream = new PipeStream(); - var serverStream = new PipeStream(); + get => _buffer.Position; + set => throw new NotSupportedException(); + } - // Client writes to clientStream, server reads from clientStream - // Server writes to serverStream, client reads from serverStream - - var clientSocket = new DuplexStream(serverStream, clientStream); // Read from server, Write to client - var serverSocket = new DuplexStream(clientStream, serverStream); // Read from client, Write to server - - var clientService = new SecureHandshakeService(NullLogger.Instance); - var serverService = new SecureHandshakeService(NullLogger.Instance); - - var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5)); - - // Act - var clientTask = clientService.HandshakeAsync(clientSocket, isInitiator: true, myNodeId: "client", token: cts.Token); - var serverTask = serverService.HandshakeAsync(serverSocket, isInitiator: false, myNodeId: "server", token: cts.Token); - - await Task.WhenAll(clientTask, serverTask); - - // Assert - var clientState = clientTask.Result; - var serverState = serverTask.Result; - - 
clientState.ShouldNotBeNull(); - serverState.ShouldNotBeNull(); - - // Keys should match (Symmetric) - clientState!.EncryptKey.ShouldBe(serverState!.DecryptKey); - clientState.DecryptKey.ShouldBe(serverState.EncryptKey); - } - - // Simulates a pipe. Writes go to buffer, Reads drain buffer. - class SimplexStream : MemoryStream - { - // Simple approach: Use one MemoryStream as a shared buffer? - // No, MemoryStream is not thread safe for concurrent Read/Write in this pipe manner really. - // Better to use a producer/consumer stream but for simplicity let's use a basic blocking queue logic or just wait. - // Actually, for unit tests, strictly ordered operations are better. But handshake is interactive. - // We need a proper pipe. - } - - // Let's use a simple PipeStream implementation using SemaphoreSlim for sync - class PipeStream : Stream + /// + public override void Flush() { - private readonly MemoryStream _buffer = new MemoryStream(); - private readonly SemaphoreSlim _readSemaphore = new SemaphoreSlim(0); - private readonly object _lock = new object(); + } - /// - public override bool CanRead => true; - /// - public override bool CanSeek => false; - /// - public override bool CanWrite => true; - /// - public override long Length => _buffer.Length; - /// - public override long Position { get => _buffer.Position; set => throw new NotSupportedException(); } + /// + public override int Read(byte[] buffer, int offset, int count) + { + throw new NotImplementedException("Use Async"); + } - /// - public override void Flush() { } - - /// - public override int Read(byte[] buffer, int offset, int count) => throw new NotImplementedException("Use Async"); - - /// - public override async Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + /// + public override async Task ReadAsync(byte[] buffer, int offset, int count, + CancellationToken cancellationToken) + { + await _readSemaphore.WaitAsync(cancellationToken); + lock (_lock) { - await 
_readSemaphore.WaitAsync(cancellationToken); - lock (_lock) - { - _buffer.Position = 0; - int read = _buffer.Read(buffer, offset, count); + _buffer.Position = 0; + int read = _buffer.Read(buffer, offset, count); - // Compact buffer (inefficient but works for unit tests) - byte[] remaining = _buffer.ToArray().Skip(read).ToArray(); - _buffer.SetLength(0); - _buffer.Write(remaining, 0, remaining.Length); + // Compact buffer (inefficient but works for unit tests) + byte[] remaining = _buffer.ToArray().Skip(read).ToArray(); + _buffer.SetLength(0); + _buffer.Write(remaining, 0, remaining.Length); - if (_buffer.Length > 0) _readSemaphore.Release(); // Signal if data remains + if (_buffer.Length > 0) _readSemaphore.Release(); // Signal if data remains - return read; - } + return read; + } + } + + /// + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + /// + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + + /// + public override void Write(byte[] buffer, int offset, int count) + { + lock (_lock) + { + long pos = _buffer.Position; + _buffer.Seek(0, SeekOrigin.End); + _buffer.Write(buffer, offset, count); + _buffer.Position = pos; } - /// - public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); - - /// - public override void SetLength(long value) => throw new NotSupportedException(); - - /// - public override void Write(byte[] buffer, int offset, int count) - { - lock (_lock) - { - long pos = _buffer.Position; - _buffer.Seek(0, SeekOrigin.End); - _buffer.Write(buffer, offset, count); - _buffer.Position = pos; - } - _readSemaphore.Release(); - } - } - - class DuplexStream : Stream - { - private readonly Stream _readSource; - private readonly Stream _writeTarget; - - /// - /// Initializes a new instance of the class. - /// - /// The underlying stream used for read operations. - /// The underlying stream used for write operations. 
- public DuplexStream(Stream readSource, Stream writeTarget) - { - _readSource = readSource; - _writeTarget = writeTarget; - } - - /// - public override bool CanRead => true; - /// - public override bool CanSeek => false; - /// - public override bool CanWrite => true; - /// - public override long Length => 0; - /// - public override long Position { get => 0; set { } } - - /// - public override void Flush() => _writeTarget.Flush(); - - /// - public override int Read(byte[] buffer, int offset, int count) => _readSource.Read(buffer, offset, count); - - /// - public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => _readSource.ReadAsync(buffer, offset, count, cancellationToken); - - /// - public override void Write(byte[] buffer, int offset, int count) => _writeTarget.Write(buffer, offset, count); - - /// - public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) - => _writeTarget.WriteAsync(buffer, offset, count, cancellationToken); - - /// - public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException(); - - /// - public override void SetLength(long value) => throw new NotSupportedException(); + _readSemaphore.Release(); } } -} + + private class DuplexStream : Stream + { + private readonly Stream _readSource; + private readonly Stream _writeTarget; + + /// + /// Initializes a new instance of the class. + /// + /// The underlying stream used for read operations. + /// The underlying stream used for write operations. 
+ public DuplexStream(Stream readSource, Stream writeTarget) + { + _readSource = readSource; + _writeTarget = writeTarget; + } + + /// + public override bool CanRead => true; + + /// + public override bool CanSeek => false; + + /// + public override bool CanWrite => true; + + /// + public override long Length => 0; + + /// + public override long Position + { + get => 0; + set { } + } + + /// + public override void Flush() + { + _writeTarget.Flush(); + } + + /// + public override int Read(byte[] buffer, int offset, int count) + { + return _readSource.Read(buffer, offset, count); + } + + /// + public override Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + return _readSource.ReadAsync(buffer, offset, count, cancellationToken); + } + + /// + public override void Write(byte[] buffer, int offset, int count) + { + _writeTarget.Write(buffer, offset, count); + } + + /// + public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) + { + return _writeTarget.WriteAsync(buffer, offset, count, cancellationToken); + } + + /// + public override long Seek(long offset, SeekOrigin origin) + { + throw new NotSupportedException(); + } + + /// + public override void SetLength(long value) + { + throw new NotSupportedException(); + } + } +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SnapshotReconnectRegressionTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SnapshotReconnectRegressionTests.cs index 8368ffe..13723d2 100755 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SnapshotReconnectRegressionTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SnapshotReconnectRegressionTests.cs @@ -1,287 +1,286 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Threading; -using System.Threading.Tasks; +using System.Reflection; using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using 
ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Network; -using ZB.MOM.WW.CBDDC.Network.Security; -using ZB.MOM.WW.CBDDC.Network.Telemetry; -using Microsoft.Extensions.Logging.Abstractions; -using Xunit; - -namespace ZB.MOM.WW.CBDDC.Network.Tests -{ - public class SnapshotReconnectRegressionTests - { - // Subclass to expose private method - private class TestableSyncOrchestrator : SyncOrchestrator + +namespace ZB.MOM.WW.CBDDC.Network.Tests; + +public class SnapshotReconnectRegressionTests +{ + private static ISnapshotMetadataStore CreateSnapshotMetadataStore() + { + var snapshotMetadataStore = Substitute.For(); + snapshotMetadataStore.GetSnapshotMetadataAsync(Arg.Any(), Arg.Any()) + .Returns((SnapshotMetadata?)null); + snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any(), Arg.Any()) + .Returns((string?)null); + snapshotMetadataStore.GetAllSnapshotMetadataAsync(Arg.Any()) + .Returns(Array.Empty()); + return snapshotMetadataStore; + } + + private static ISnapshotService CreateSnapshotService() + { + var snapshotService = Substitute.For(); + snapshotService.CreateSnapshotAsync(Arg.Any(), Arg.Any()) + .Returns(Task.CompletedTask); + snapshotService.ReplaceDatabaseAsync(Arg.Any(), Arg.Any()) + .Returns(Task.CompletedTask); + snapshotService.MergeSnapshotAsync(Arg.Any(), Arg.Any()) + .Returns(Task.CompletedTask); + return snapshotService; + } + + private static IDocumentStore CreateDocumentStore() + { + var documentStore = Substitute.For(); + documentStore.InterestedCollection.Returns(["Users", "TodoLists"]); + documentStore.GetDocumentAsync(Arg.Any(), Arg.Any(), Arg.Any()) + .Returns((Document?)null); + documentStore.GetDocumentsByCollectionAsync(Arg.Any(), Arg.Any()) + .Returns(Array.Empty()); + documentStore.GetDocumentsAsync(Arg.Any>(), Arg.Any()) + .Returns(Array.Empty()); + documentStore.PutDocumentAsync(Arg.Any(), Arg.Any()) + .Returns(true); + documentStore.InsertBatchDocumentsAsync(Arg.Any>(), Arg.Any()) + .Returns(true); 
+ documentStore.UpdateBatchDocumentsAsync(Arg.Any>(), Arg.Any()) + .Returns(true); + documentStore.DeleteDocumentAsync(Arg.Any(), Arg.Any(), Arg.Any()) + .Returns(true); + documentStore.DeleteBatchDocumentsAsync(Arg.Any>(), Arg.Any()) + .Returns(true); + documentStore.MergeAsync(Arg.Any(), Arg.Any()) + .Returns(ci => ci.ArgAt(0)); + documentStore.DropAsync(Arg.Any()).Returns(Task.CompletedTask); + documentStore.ExportAsync(Arg.Any()).Returns(Array.Empty()); + documentStore.ImportAsync(Arg.Any>(), Arg.Any()) + .Returns(Task.CompletedTask); + documentStore.MergeAsync(Arg.Any>(), Arg.Any()) + .Returns(Task.CompletedTask); + return documentStore; + } + + private static IOplogStore CreateOplogStore(string? localHeadHash) + { + var oplogStore = Substitute.For(); + oplogStore.GetLastEntryHashAsync(Arg.Any(), Arg.Any()) + .Returns(localHeadHash); + oplogStore.ApplyBatchAsync(Arg.Any>(), Arg.Any()) + .Returns(Task.CompletedTask); + return oplogStore; + } + + private static TcpPeerClient CreateSnapshotRequiredClient() + { + var logger = Substitute.For>(); + var client = Substitute.For( + "127.0.0.1:0", + logger, + null, + null); + client.GetChainRangeAsync(Arg.Any(), Arg.Any(), Arg.Any()) + .Returns(_ => Task.FromException>(new SnapshotRequiredException())); + return client; + } + + private static IDiscoveryService CreateDiscovery() + { + var discovery = Substitute.For(); + discovery.GetActivePeers().Returns(Array.Empty()); + discovery.Start().Returns(Task.CompletedTask); + discovery.Stop().Returns(Task.CompletedTask); + return discovery; + } + + private static IPeerNodeConfigurationProvider CreateConfig() + { + var configProvider = Substitute.For(); + configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "local" }); + return configProvider; + } + + private static IPeerOplogConfirmationStore CreatePeerOplogConfirmationStore() + { + var store = Substitute.For(); + store.EnsurePeerRegisteredAsync(Arg.Any(), Arg.Any(), Arg.Any(), + Arg.Any()) + 
.Returns(Task.CompletedTask); + store.UpdateConfirmationAsync(Arg.Any(), Arg.Any(), Arg.Any(), Arg.Any(), + Arg.Any()) + .Returns(Task.CompletedTask); + store.GetConfirmationsAsync(Arg.Any()).Returns(Array.Empty()); + store.GetConfirmationsForPeerAsync(Arg.Any(), Arg.Any()) + .Returns(Array.Empty()); + store.RemovePeerTrackingAsync(Arg.Any(), Arg.Any()).Returns(Task.CompletedTask); + store.GetActiveTrackedPeersAsync(Arg.Any()).Returns(Array.Empty()); + store.ExportAsync(Arg.Any()).Returns(Array.Empty()); + store.ImportAsync(Arg.Any>(), Arg.Any()) + .Returns(Task.CompletedTask); + store.MergeAsync(Arg.Any>(), Arg.Any()) + .Returns(Task.CompletedTask); + return store; + } + + /// + /// Verifies that gap recovery is skipped when an inbound entry already matches the snapshot boundary hash. + /// + [Fact] + public async Task ProcessInboundBatch_ShouldSkipGapRecovery_WhenEntryMatchesSnapshotBoundary() + { + // Arrange + var oplogStore = CreateOplogStore("snapshot-boundary-hash"); + var snapshotMetadataStore = CreateSnapshotMetadataStore(); + snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any(), Arg.Any()) + .Returns("snapshot-boundary-hash"); + var snapshotService = CreateSnapshotService(); + + var orch = new TestableSyncOrchestrator( + CreateDiscovery(), + oplogStore, + CreateDocumentStore(), + snapshotMetadataStore, + snapshotService, + CreateConfig(), + CreatePeerOplogConfirmationStore()); + + using var client = CreateSnapshotRequiredClient(); + + // Incoming entry that connects to snapshot boundary + var entries = new List { - /// - /// Initializes a new instance of the class. - /// - /// The discovery service. - /// The oplog store. - /// The document store. - /// The snapshot metadata store. - /// The snapshot service. - /// The peer node configuration provider. - /// The peer oplog confirmation store. 
- public TestableSyncOrchestrator( - IDiscoveryService discovery, - IOplogStore oplogStore, - IDocumentStore documentStore, - ISnapshotMetadataStore snapshotMetadataStore, - ISnapshotService snapshotService, - IPeerNodeConfigurationProvider peerNodeConfigurationProvider, - IPeerOplogConfirmationStore peerOplogConfirmationStore) - : base( - discovery, - oplogStore, - documentStore, - snapshotMetadataStore, - snapshotService, - peerNodeConfigurationProvider, - NullLoggerFactory.Instance, - peerOplogConfirmationStore) + new( + "col", "key", OperationType.Put, null, + new HlcTimestamp(100, 1, "remote-node"), + "snapshot-boundary-hash" // PreviousHash matches SnapshotHash! + ) + }; + + // Act + string result = await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None); + + // Assert + result.ShouldBe("Success"); + await client.DidNotReceive() + .GetChainRangeAsync(Arg.Any(), Arg.Any(), Arg.Any()); + } + + /// + /// Verifies that gap recovery is attempted when the inbound entry does not match the snapshot boundary hash. + /// + [Fact] + public async Task ProcessInboundBatch_ShouldTryRecovery_WhenSnapshotMismatch() + { + // Arrange + var oplogStore = CreateOplogStore("some-old-hash"); + var snapshotMetadataStore = CreateSnapshotMetadataStore(); + snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any(), Arg.Any()) + .Returns("snapshot-boundary-hash"); + var snapshotService = CreateSnapshotService(); + + var orch = new TestableSyncOrchestrator( + CreateDiscovery(), + oplogStore, + CreateDocumentStore(), + snapshotMetadataStore, + snapshotService, + CreateConfig(), + CreatePeerOplogConfirmationStore()); + using var client = CreateSnapshotRequiredClient(); + + var entries = new List + { + new( + "col", "key", OperationType.Put, null, + new HlcTimestamp(100, 1, "remote-node"), + "different-hash" // Mismatch! + ) + }; + + // Act & Assert + // When gap recovery triggers, the client throws SnapshotRequiredException. 
+ // SyncOrchestrator catches SnapshotRequiredException and re-throws it to trigger full sync + // So we expect SnapshotRequiredException to bubble up (wrapped in TargetInvocationException/AggregateException if not unwrapped by helper) + + await Should.ThrowAsync(async () => + await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None)); + + await client.Received(1).GetChainRangeAsync(Arg.Any(), Arg.Any(), Arg.Any()); + } + + // Subclass to expose private method + private class TestableSyncOrchestrator : SyncOrchestrator + { + /// + /// Initializes a new instance of the class. + /// + /// The discovery service. + /// The oplog store. + /// The document store. + /// The snapshot metadata store. + /// The snapshot service. + /// The peer node configuration provider. + /// The peer oplog confirmation store. + public TestableSyncOrchestrator( + IDiscoveryService discovery, + IOplogStore oplogStore, + IDocumentStore documentStore, + ISnapshotMetadataStore snapshotMetadataStore, + ISnapshotService snapshotService, + IPeerNodeConfigurationProvider peerNodeConfigurationProvider, + IPeerOplogConfirmationStore peerOplogConfirmationStore) + : base( + discovery, + oplogStore, + documentStore, + snapshotMetadataStore, + snapshotService, + peerNodeConfigurationProvider, + NullLoggerFactory.Instance, + peerOplogConfirmationStore) + { + } + + /// + /// Invokes the inbound batch processing path through reflection for regression testing. + /// + /// The peer client. + /// The peer node identifier. + /// The incoming oplog changes. + /// The cancellation token. 
+ public async Task TestProcessInboundBatchAsync( + TcpPeerClient client, + string peerNodeId, + IList changes, + CancellationToken token) + { + // Reflection to invoke private method since it's private not protected + var method = typeof(SyncOrchestrator).GetMethod( + "ProcessInboundBatchAsync", + BindingFlags.NonPublic | BindingFlags.Instance); + + if (method == null) + throw new InvalidOperationException("ProcessInboundBatchAsync method not found."); + + try { + var task = (Task)method.Invoke(this, new object[] { client, peerNodeId, changes, token })!; + await task.ConfigureAwait(false); + + // Access .Result via reflection because generic type is private + var resultProp = task.GetType().GetProperty("Result"); + object? result = resultProp?.GetValue(task); + + return result?.ToString() ?? "null"; + } + catch (TargetInvocationException ex) + { + if (ex.InnerException != null) throw ex.InnerException; + throw; } - - /// - /// Invokes the inbound batch processing path through reflection for regression testing. - /// - /// The peer client. - /// The peer node identifier. - /// The incoming oplog changes. - /// The cancellation token. - public async Task TestProcessInboundBatchAsync( - TcpPeerClient client, - string peerNodeId, - IList changes, - CancellationToken token) - { - // Reflection to invoke private method since it's private not protected - var method = typeof(SyncOrchestrator).GetMethod( - "ProcessInboundBatchAsync", - System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance); - - if (method == null) - throw new InvalidOperationException("ProcessInboundBatchAsync method not found."); - - try - { - var task = (Task)method.Invoke(this, new object[] { client, peerNodeId, changes, token })!; - await task.ConfigureAwait(false); - - // Access .Result via reflection because generic type is private - var resultProp = task.GetType().GetProperty("Result"); - var result = resultProp?.GetValue(task); - - return result?.ToString() ?? 
"null"; - } - catch (System.Reflection.TargetInvocationException ex) - { - if (ex.InnerException != null) throw ex.InnerException; - throw; - } - } - } - - private static ISnapshotMetadataStore CreateSnapshotMetadataStore() - { - var snapshotMetadataStore = Substitute.For(); - snapshotMetadataStore.GetSnapshotMetadataAsync(Arg.Any(), Arg.Any()) - .Returns((SnapshotMetadata?)null); - snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any(), Arg.Any()) - .Returns((string?)null); - snapshotMetadataStore.GetAllSnapshotMetadataAsync(Arg.Any()) - .Returns(Array.Empty()); - return snapshotMetadataStore; } - - private static ISnapshotService CreateSnapshotService() - { - var snapshotService = Substitute.For(); - snapshotService.CreateSnapshotAsync(Arg.Any(), Arg.Any()) - .Returns(Task.CompletedTask); - snapshotService.ReplaceDatabaseAsync(Arg.Any(), Arg.Any()) - .Returns(Task.CompletedTask); - snapshotService.MergeSnapshotAsync(Arg.Any(), Arg.Any()) - .Returns(Task.CompletedTask); - return snapshotService; - } - - private static IDocumentStore CreateDocumentStore() - { - var documentStore = Substitute.For(); - documentStore.InterestedCollection.Returns(["Users", "TodoLists"]); - documentStore.GetDocumentAsync(Arg.Any(), Arg.Any(), Arg.Any()) - .Returns((Document?)null); - documentStore.GetDocumentsByCollectionAsync(Arg.Any(), Arg.Any()) - .Returns(Array.Empty()); - documentStore.GetDocumentsAsync(Arg.Any>(), Arg.Any()) - .Returns(Array.Empty()); - documentStore.PutDocumentAsync(Arg.Any(), Arg.Any()) - .Returns(true); - documentStore.InsertBatchDocumentsAsync(Arg.Any>(), Arg.Any()) - .Returns(true); - documentStore.UpdateBatchDocumentsAsync(Arg.Any>(), Arg.Any()) - .Returns(true); - documentStore.DeleteDocumentAsync(Arg.Any(), Arg.Any(), Arg.Any()) - .Returns(true); - documentStore.DeleteBatchDocumentsAsync(Arg.Any>(), Arg.Any()) - .Returns(true); - documentStore.MergeAsync(Arg.Any(), Arg.Any()) - .Returns(ci => ci.ArgAt(0)); - 
documentStore.DropAsync(Arg.Any()).Returns(Task.CompletedTask); - documentStore.ExportAsync(Arg.Any()).Returns(Array.Empty()); - documentStore.ImportAsync(Arg.Any>(), Arg.Any()).Returns(Task.CompletedTask); - documentStore.MergeAsync(Arg.Any>(), Arg.Any()).Returns(Task.CompletedTask); - return documentStore; - } - - private static IOplogStore CreateOplogStore(string? localHeadHash) - { - var oplogStore = Substitute.For(); - oplogStore.GetLastEntryHashAsync(Arg.Any(), Arg.Any()) - .Returns(localHeadHash); - oplogStore.ApplyBatchAsync(Arg.Any>(), Arg.Any()) - .Returns(Task.CompletedTask); - return oplogStore; - } - - private static TcpPeerClient CreateSnapshotRequiredClient() - { - var logger = Substitute.For>(); - var client = Substitute.For( - "127.0.0.1:0", - logger, - (IPeerHandshakeService?)null, - (INetworkTelemetryService?)null); - client.GetChainRangeAsync(Arg.Any(), Arg.Any(), Arg.Any()) - .Returns(_ => Task.FromException>(new SnapshotRequiredException())); - return client; - } - - private static IDiscoveryService CreateDiscovery() - { - var discovery = Substitute.For(); - discovery.GetActivePeers().Returns(Array.Empty()); - discovery.Start().Returns(Task.CompletedTask); - discovery.Stop().Returns(Task.CompletedTask); - return discovery; - } - - private static IPeerNodeConfigurationProvider CreateConfig() - { - var configProvider = Substitute.For(); - configProvider.GetConfiguration().Returns(new PeerNodeConfiguration { NodeId = "local" }); - return configProvider; - } - - private static IPeerOplogConfirmationStore CreatePeerOplogConfirmationStore() - { - var store = Substitute.For(); - store.EnsurePeerRegisteredAsync(Arg.Any(), Arg.Any(), Arg.Any(), Arg.Any()) - .Returns(Task.CompletedTask); - store.UpdateConfirmationAsync(Arg.Any(), Arg.Any(), Arg.Any(), Arg.Any(), Arg.Any()) - .Returns(Task.CompletedTask); - store.GetConfirmationsAsync(Arg.Any()).Returns(Array.Empty()); - store.GetConfirmationsForPeerAsync(Arg.Any(), Arg.Any()).Returns(Array.Empty()); - 
store.RemovePeerTrackingAsync(Arg.Any(), Arg.Any()).Returns(Task.CompletedTask); - store.GetActiveTrackedPeersAsync(Arg.Any()).Returns(Array.Empty()); - store.ExportAsync(Arg.Any()).Returns(Array.Empty()); - store.ImportAsync(Arg.Any>(), Arg.Any()).Returns(Task.CompletedTask); - store.MergeAsync(Arg.Any>(), Arg.Any()).Returns(Task.CompletedTask); - return store; - } - - /// - /// Verifies that gap recovery is skipped when an inbound entry already matches the snapshot boundary hash. - /// - [Fact] - public async Task ProcessInboundBatch_ShouldSkipGapRecovery_WhenEntryMatchesSnapshotBoundary() - { - // Arrange - var oplogStore = CreateOplogStore("snapshot-boundary-hash"); - var snapshotMetadataStore = CreateSnapshotMetadataStore(); - snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any(), Arg.Any()) - .Returns("snapshot-boundary-hash"); - var snapshotService = CreateSnapshotService(); - - var orch = new TestableSyncOrchestrator( - CreateDiscovery(), - oplogStore, - CreateDocumentStore(), - snapshotMetadataStore, - snapshotService, - CreateConfig(), - CreatePeerOplogConfirmationStore()); - - using var client = CreateSnapshotRequiredClient(); - - // Incoming entry that connects to snapshot boundary - var entries = new List - { - new OplogEntry( - "col", "key", OperationType.Put, null, - new HlcTimestamp(100, 1, "remote-node"), - "snapshot-boundary-hash" // PreviousHash matches SnapshotHash! - ) - }; - - // Act - var result = await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None); - - // Assert - result.ShouldBe("Success"); - await client.DidNotReceive().GetChainRangeAsync(Arg.Any(), Arg.Any(), Arg.Any()); - } - - /// - /// Verifies that gap recovery is attempted when the inbound entry does not match the snapshot boundary hash. 
- /// - [Fact] - public async Task ProcessInboundBatch_ShouldTryRecovery_WhenSnapshotMismatch() - { - // Arrange - var oplogStore = CreateOplogStore("some-old-hash"); - var snapshotMetadataStore = CreateSnapshotMetadataStore(); - snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any(), Arg.Any()) - .Returns("snapshot-boundary-hash"); - var snapshotService = CreateSnapshotService(); - - var orch = new TestableSyncOrchestrator( - CreateDiscovery(), - oplogStore, - CreateDocumentStore(), - snapshotMetadataStore, - snapshotService, - CreateConfig(), - CreatePeerOplogConfirmationStore()); - using var client = CreateSnapshotRequiredClient(); - - var entries = new List - { - new OplogEntry( - "col", "key", OperationType.Put, null, - new HlcTimestamp(100, 1, "remote-node"), - "different-hash" // Mismatch! - ) - }; - - // Act & Assert - // When gap recovery triggers, the client throws SnapshotRequiredException. - // SyncOrchestrator catches SnapshotRequiredException and re-throws it to trigger full sync - // So we expect SnapshotRequiredException to bubble up (wrapped in TargetInvocationException/AggregateException if not unwrapped by helper) - - await Should.ThrowAsync(async () => - await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None)); - - await client.Received(1).GetChainRangeAsync(Arg.Any(), Arg.Any(), Arg.Any()); - } - } -} + } +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorConfirmationTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorConfirmationTests.cs index bf53a0d..114ed76 100644 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorConfirmationTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorConfirmationTests.cs @@ -1,9 +1,4 @@ -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; using Microsoft.Extensions.Logging.Abstractions; -using Xunit; using ZB.MOM.WW.CBDDC.Core; using 
ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; @@ -13,7 +8,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Tests; public class SyncOrchestratorConfirmationTests { /// - /// Verifies that merged peers are registered and the local node is skipped. + /// Verifies that merged peers are registered and the local node is skipped. /// [Fact] public async Task EnsurePeersRegisteredAsync_ShouldRegisterMergedPeers_AndSkipLocalNode() @@ -25,8 +20,8 @@ public class SyncOrchestratorConfirmationTests var now = DateTimeOffset.UtcNow; var discoveredPeers = new List { - new("local", "127.0.0.1:9000", now, PeerType.LanDiscovered), - new("peer-a", "10.0.0.1:9000", now, PeerType.LanDiscovered) + new("local", "127.0.0.1:9000", now), + new("peer-a", "10.0.0.1:9000", now) }; var knownPeers = new List @@ -60,7 +55,7 @@ public class SyncOrchestratorConfirmationTests } /// - /// Verifies that a newly discovered node is auto-registered when peer lists are refreshed. + /// Verifies that a newly discovered node is auto-registered when peer lists are refreshed. /// [Fact] public async Task EnsurePeersRegisteredAsync_WhenNewNodeJoins_ShouldAutoRegisterJoinedNode() @@ -85,7 +80,7 @@ public class SyncOrchestratorConfirmationTests var secondDiscovered = new List { new("peer-static", "10.0.0.10:9000", now, PeerType.StaticRemote), - new("peer-new", "10.0.0.25:9010", now, PeerType.LanDiscovered) + new("peer-new", "10.0.0.25:9010", now) }; var secondMerged = SyncOrchestrator.BuildMergedPeerList(secondDiscovered, knownPeers, "local"); await orchestrator.EnsurePeersRegisteredAsync(secondMerged, "local", CancellationToken.None); @@ -98,7 +93,7 @@ public class SyncOrchestratorConfirmationTests } /// - /// Verifies that confirmations advance only for nodes where remote vector-clock entries are at or ahead. + /// Verifies that confirmations advance only for nodes where remote vector-clock entries are at or ahead. 
/// [Fact] public async Task AdvanceConfirmationsFromVectorClockAsync_ShouldAdvanceOnlyForRemoteAtOrAhead() @@ -163,7 +158,7 @@ public class SyncOrchestratorConfirmationTests } /// - /// Verifies that pushed-batch confirmation uses the maximum timestamp and its matching hash. + /// Verifies that pushed-batch confirmation uses the maximum timestamp and its matching hash. /// [Fact] public async Task AdvanceConfirmationForPushedBatchAsync_ShouldUseMaxTimestampAndHash() @@ -179,7 +174,8 @@ public class SyncOrchestratorConfirmationTests CreateEntry("source-1", 110, 5, "hash-110") }; - await orchestrator.AdvanceConfirmationForPushedBatchAsync("peer-1", "source-1", pushedChanges, CancellationToken.None); + await orchestrator.AdvanceConfirmationForPushedBatchAsync("peer-1", "source-1", pushedChanges, + CancellationToken.None); await confirmationStore.Received(1).UpdateConfirmationAsync( "peer-1", @@ -190,7 +186,7 @@ public class SyncOrchestratorConfirmationTests } /// - /// Verifies that no confirmation update occurs when a pushed batch is empty. + /// Verifies that no confirmation update occurs when a pushed batch is empty. 
/// [Fact] public async Task AdvanceConfirmationForPushedBatchAsync_ShouldSkipEmptyBatch() @@ -213,7 +209,8 @@ public class SyncOrchestratorConfirmationTests Arg.Any()); } - private static SyncOrchestrator CreateOrchestrator(IOplogStore oplogStore, IPeerOplogConfirmationStore confirmationStore) + private static SyncOrchestrator CreateOrchestrator(IOplogStore oplogStore, + IPeerOplogConfirmationStore confirmationStore) { var discovery = Substitute.For(); discovery.GetActivePeers().Returns(Array.Empty()); @@ -243,9 +240,9 @@ public class SyncOrchestratorConfirmationTests "users", $"{nodeId}-{wall}-{logic}", OperationType.Put, - payload: null, - timestamp: new HlcTimestamp(wall, logic, nodeId), - previousHash: string.Empty, - hash: hash); + null, + new HlcTimestamp(wall, logic, nodeId), + string.Empty, + hash); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorMaintenancePruningTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorMaintenancePruningTests.cs index 8d91b47..cc6e36e 100644 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorMaintenancePruningTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/SyncOrchestratorMaintenancePruningTests.cs @@ -1,8 +1,4 @@ -using System; -using System.Threading; -using System.Threading.Tasks; using Microsoft.Extensions.Logging.Abstractions; -using Xunit; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; @@ -12,7 +8,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Tests; public class SyncOrchestratorMaintenancePruningTests { /// - /// Verifies that mixed peer confirmations produce the safest effective cutoff across peers and sources. + /// Verifies that mixed peer confirmations produce the safest effective cutoff across peers and sources. 
/// [Fact] public async Task CalculateEffectiveCutoffAsync_MixedPeerStates_ShouldUseSafestConfirmationAcrossPeers() @@ -33,16 +29,16 @@ public class SyncOrchestratorMaintenancePruningTests confirmationStore.GetConfirmationsForPeerAsync("peer-a", Arg.Any()) .Returns(new[] { - CreateConfirmation("peer-a", "node-local", wall: 300, logic: 0, isActive: true), - CreateConfirmation("peer-a", "node-secondary", wall: 120, logic: 1, isActive: true), - CreateConfirmation("peer-a", "node-secondary", wall: 500, logic: 0, isActive: false) + CreateConfirmation("peer-a", "node-local", 300, 0, true), + CreateConfirmation("peer-a", "node-secondary", 120, 1, true), + CreateConfirmation("peer-a", "node-secondary", 500, 0, false) }); confirmationStore.GetConfirmationsForPeerAsync("peer-b", Arg.Any()) .Returns(new[] { - CreateConfirmation("peer-b", "node-local", wall: 250, logic: 0, isActive: true), - CreateConfirmation("peer-b", "node-secondary", wall: 180, logic: 0, isActive: true) + CreateConfirmation("peer-b", "node-local", 250, 0, true), + CreateConfirmation("peer-b", "node-secondary", 180, 0, true) }); var decision = await calculator.CalculateEffectiveCutoffAsync( @@ -63,7 +59,7 @@ public class SyncOrchestratorMaintenancePruningTests } /// - /// Verifies that removing a peer from tracking immediately restores pruning eligibility. + /// Verifies that removing a peer from tracking immediately restores pruning eligibility. 
/// [Fact] public async Task CalculateEffectiveCutoffAsync_RemovingPeerFromTracking_ShouldImmediatelyRestoreEligibility() @@ -85,7 +81,7 @@ public class SyncOrchestratorMaintenancePruningTests confirmationStore.GetConfirmationsForPeerAsync("peer-active", Arg.Any()) .Returns(new[] { - CreateConfirmation("peer-active", "node-local", wall: 150, logic: 0, isActive: true) + CreateConfirmation("peer-active", "node-local", 150, 0, true) }); confirmationStore.GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any()) .Returns(Array.Empty()); @@ -108,11 +104,12 @@ public class SyncOrchestratorMaintenancePruningTests unblockedDecision.EffectiveCutoff.Value.NodeId.ShouldBe("node-local"); await confirmationStore.Received(1).GetConfirmationsForPeerAsync("peer-active", Arg.Any()); - await confirmationStore.DidNotReceive().GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any()); + await confirmationStore.DidNotReceive() + .GetConfirmationsForPeerAsync("peer-deprecated", Arg.Any()); } /// - /// Verifies that maintenance does not prune when peer confirmation is missing in a two-node topology. + /// Verifies that maintenance does not prune when peer confirmation is missing in a two-node topology. /// [Fact] public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldNotPruneBeforePeerConfirmation() @@ -145,7 +142,7 @@ public class SyncOrchestratorMaintenancePruningTests } /// - /// Verifies that maintenance prunes after peer confirmation is available in a two-node topology. + /// Verifies that maintenance prunes after peer confirmation is available in a two-node topology. /// [Fact] public async Task RunMaintenanceIfDueAsync_TwoNode_ShouldPruneAfterPeerConfirmation() @@ -194,7 +191,7 @@ public class SyncOrchestratorMaintenancePruningTests } /// - /// Verifies that deprecated-node removal unblocks pruning on a subsequent maintenance run. + /// Verifies that deprecated-node removal unblocks pruning on a subsequent maintenance run. 
/// [Fact] public async Task RunMaintenanceIfDueAsync_DeprecatedNodeRemoval_ShouldUnblockPruning() @@ -217,7 +214,7 @@ public class SyncOrchestratorMaintenancePruningTests confirmationStore.GetConfirmationsForPeerAsync("node-active", Arg.Any()) .Returns(new[] { - CreateConfirmation("node-active", "node-local", wall: 100, logic: 0, isActive: true) + CreateConfirmation("node-active", "node-local", 100, 0, true) }); confirmationStore.GetConfirmationsForPeerAsync("node-deprecated", Arg.Any()) .Returns(Array.Empty()); @@ -289,4 +286,4 @@ public class SyncOrchestratorMaintenancePruningTests IsActive = isActive }; } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/TelemetryTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/TelemetryTests.cs index cc8bb19..1783215 100755 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/TelemetryTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/TelemetryTests.cs @@ -1,108 +1,103 @@ -using System; -using System.IO; -using System.Threading.Tasks; -using ZB.MOM.WW.CBDDC.Network.Telemetry; using Microsoft.Extensions.Logging.Abstractions; -using Xunit; +using ZB.MOM.WW.CBDDC.Network.Telemetry; -namespace ZB.MOM.WW.CBDDC.Network.Tests +namespace ZB.MOM.WW.CBDDC.Network.Tests; + +public class TelemetryTests : IDisposable { - public class TelemetryTests : IDisposable - { - private readonly string _tempFile; - - /// - /// Initializes a new instance of the class. - /// - public TelemetryTests() - { - _tempFile = Path.GetTempFileName(); - } - - /// - /// Cleans up temporary test artifacts created for telemetry persistence validation. - /// - public void Dispose() - { - if (File.Exists(_tempFile)) File.Delete(_tempFile); - } - - /// - /// Verifies that telemetry metrics are recorded and persisted to disk. 
- /// - [Fact] - public async Task Should_Record_And_Persist_Metrics() - { - // Arrange - using var service = new NetworkTelemetryService(NullLogger.Instance, _tempFile); + private readonly string _tempFile; - // Act - // Record some values for CompressionRatio - service.RecordValue(MetricType.CompressionRatio, 0.5); - service.RecordValue(MetricType.CompressionRatio, 0.7); - - // Record time metric - using (var timer = service.StartMetric(MetricType.EncryptionTime)) + /// + /// Initializes a new instance of the class. + /// + public TelemetryTests() + { + _tempFile = Path.GetTempFileName(); + } + + /// + /// Cleans up temporary test artifacts created for telemetry persistence validation. + /// + public void Dispose() + { + if (File.Exists(_tempFile)) File.Delete(_tempFile); + } + + /// + /// Verifies that telemetry metrics are recorded and persisted to disk. + /// + [Fact] + public async Task Should_Record_And_Persist_Metrics() + { + // Arrange + using var service = new NetworkTelemetryService(NullLogger.Instance, _tempFile); + + // Act + // Record some values for CompressionRatio + service.RecordValue(MetricType.CompressionRatio, 0.5); + service.RecordValue(MetricType.CompressionRatio, 0.7); + + // Record time metric + using (var timer = service.StartMetric(MetricType.EncryptionTime)) + { + await Task.Delay(10); // Should be > 0 ms + } + + // Allow channel to process + await Task.Delay(500); + + // Force persist to file + service.ForcePersist(); + + // Assert + File.Exists(_tempFile).ShouldBeTrue(); + var fileInfo = new FileInfo(_tempFile); + fileInfo.Length.ShouldBeGreaterThan(0); + + using var fs = File.OpenRead(_tempFile); + using var br = new BinaryReader(fs); + + // Header + byte version = br.ReadByte(); + version.ShouldBe((byte)1); + long timestamp = br.ReadInt64(); + long now = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); + timestamp.ShouldBeInRange(now - 5, now + 5); + + // Metrics + // We expect all MetricTypes + int typeCount = 
Enum.GetValues(typeof(MetricType)).Length; + + var foundCompression = false; + var foundEncryption = false; + + for (var i = 0; i < typeCount; i++) + { + int typeInt = br.ReadInt32(); + var type = (MetricType)typeInt; + + // 4 Windows per type + for (var w = 0; w < 4; w++) { - await Task.Delay(10); // Should be > 0 ms - } + int window = br.ReadInt32(); // 60, 300, 600, 1800 + double avg = br.ReadDouble(); - // Allow channel to process - await Task.Delay(500); - - // Force persist to file - service.ForcePersist(); - - // Assert - File.Exists(_tempFile).ShouldBeTrue(); - var fileInfo = new FileInfo(_tempFile); - fileInfo.Length.ShouldBeGreaterThan(0); - - using var fs = File.OpenRead(_tempFile); - using var br = new BinaryReader(fs); - - // Header - byte version = br.ReadByte(); - version.ShouldBe((byte)1); - long timestamp = br.ReadInt64(); - var now = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); - timestamp.ShouldBeInRange(now - 5, now + 5); - - // Metrics - // We expect all MetricTypes - int typeCount = Enum.GetValues(typeof(MetricType)).Length; - - bool foundCompression = false; - bool foundEncryption = false; - - for (int i = 0; i < typeCount; i++) - { - int typeInt = br.ReadInt32(); - var type = (MetricType)typeInt; - - // 4 Windows per type - for (int w = 0; w < 4; w++) + if (type == MetricType.CompressionRatio && window == 60) { - int window = br.ReadInt32(); // 60, 300, 600, 1800 - double avg = br.ReadDouble(); + // Avg of 0.5 and 0.7 is 0.6 + avg.ShouldBe(0.6, 0.001); + foundCompression = true; + } - if (type == MetricType.CompressionRatio && window == 60) - { - // Avg of 0.5 and 0.7 is 0.6 - avg.ShouldBe(0.6, 0.001); - foundCompression = true; - } - - if (type == MetricType.EncryptionTime && window == 60) - { - avg.ShouldBeGreaterThan(0); - foundEncryption = true; - } + if (type == MetricType.EncryptionTime && window == 60) + { + avg.ShouldBeGreaterThan(0); + foundEncryption = true; } } - - foundCompression.ShouldBeTrue(); - foundEncryption.ShouldBeTrue(); } 
+ + foundCompression.ShouldBeTrue(); + foundEncryption.ShouldBeTrue(); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/VectorClockSyncTests.cs b/tests/ZB.MOM.WW.CBDDC.Network.Tests/VectorClockSyncTests.cs index f5a434d..936d77c 100755 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/VectorClockSyncTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/VectorClockSyncTests.cs @@ -1,18 +1,13 @@ -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; +using System.Text.Json; using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; -using Xunit; - -namespace ZB.MOM.WW.CBDDC.Network.Tests; - + +namespace ZB.MOM.WW.CBDDC.Network.Tests; + public class VectorClockSyncTests { /// - /// Verifies sync pull selection includes only nodes where the remote clock is ahead. + /// Verifies sync pull selection includes only nodes where the remote clock is ahead. /// [Fact] public async Task VectorClockSync_ShouldPullOnlyNodesWithUpdates() @@ -32,37 +27,37 @@ public class VectorClockSyncTests // Add oplog entries for node1 in remote remoteOplogEntries.Add(new OplogEntry( "users", "user1", OperationType.Put, - System.Text.Json.JsonSerializer.Deserialize("{\"name\":\"Alice\"}"), + JsonSerializer.Deserialize("{\"name\":\"Alice\"}"), new HlcTimestamp(150, 2, "node1"), "", "hash1" )); remoteOplogEntries.Add(new OplogEntry( "users", "user2", OperationType.Put, - System.Text.Json.JsonSerializer.Deserialize("{\"name\":\"Bob\"}"), + JsonSerializer.Deserialize("{\"name\":\"Bob\"}"), new HlcTimestamp(200, 5, "node1"), "hash1", "hash2" )); // Act - var localVC = await localStore.GetVectorClockAsync(default); + var localVC = await localStore.GetVectorClockAsync(); var remoteVC = remoteVectorClock; - - var nodesToPull = localVC.GetNodesWithUpdates(remoteVC).ToList(); - - // Assert - nodesToPull.Count().ShouldBe(1); - nodesToPull.ShouldContain("node1"); - - // Simulate pull - 
foreach (var nodeId in nodesToPull) - { - var localTs = localVC.GetTimestamp(nodeId); - var changes = await remoteStore.GetOplogForNodeAfterAsync(nodeId, localTs, default); + + var nodesToPull = localVC.GetNodesWithUpdates(remoteVC).ToList(); + + // Assert + nodesToPull.Count().ShouldBe(1); + nodesToPull.ShouldContain("node1"); + + // Simulate pull + foreach (string nodeId in nodesToPull) + { + var localTs = localVC.GetTimestamp(nodeId); + var changes = await remoteStore.GetOplogForNodeAfterAsync(nodeId, localTs); changes.Count().ShouldBe(2); } } /// - /// Verifies sync push selection includes only nodes where the local clock is ahead. + /// Verifies sync push selection includes only nodes where the local clock is ahead. /// [Fact] public async Task VectorClockSync_ShouldPushOnlyNodesWithLocalUpdates() @@ -82,32 +77,32 @@ public class VectorClockSyncTests // Add oplog entries for node1 in local localOplogEntries.Add(new OplogEntry( "users", "user1", OperationType.Put, - System.Text.Json.JsonSerializer.Deserialize("{\"name\":\"Charlie\"}"), + JsonSerializer.Deserialize("{\"name\":\"Charlie\"}"), new HlcTimestamp(150, 2, "node1"), "", "hash1" )); // Act var localVC = localVectorClock; var remoteVC = remoteVectorClock; - - var nodesToPush = localVC.GetNodesToPush(remoteVC).ToList(); - - // Assert - nodesToPush.Count().ShouldBe(1); - nodesToPush.ShouldContain("node1"); - - // Simulate push - foreach (var nodeId in nodesToPush) - { - var remoteTs = remoteVC.GetTimestamp(nodeId); - var changes = await localStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, default); + + var nodesToPush = localVC.GetNodesToPush(remoteVC).ToList(); + + // Assert + nodesToPush.Count().ShouldBe(1); + nodesToPush.ShouldContain("node1"); + + // Simulate push + foreach (string nodeId in nodesToPush) + { + var remoteTs = remoteVC.GetTimestamp(nodeId); + var changes = await localStore.GetOplogForNodeAfterAsync(nodeId, remoteTs); changes.Count().ShouldBe(1); } } /// - /// Verifies split-brain 
clocks result in bidirectional synchronization requirements. + /// Verifies split-brain clocks result in bidirectional synchronization requirements. /// [Fact] public async Task VectorClockSync_SplitBrain_ShouldSyncBothDirections() @@ -128,72 +123,72 @@ public class VectorClockSyncTests partition1OplogEntries.Add(new OplogEntry( "users", "user1", OperationType.Put, - System.Text.Json.JsonSerializer.Deserialize("{\"name\":\"P1User\"}"), + JsonSerializer.Deserialize("{\"name\":\"P1User\"}"), new HlcTimestamp(300, 5, "node1"), "", "hash_p1" )); partition2OplogEntries.Add(new OplogEntry( "users", "user2", OperationType.Put, - System.Text.Json.JsonSerializer.Deserialize("{\"name\":\"P2User\"}"), + JsonSerializer.Deserialize("{\"name\":\"P2User\"}"), new HlcTimestamp(400, 8, "node3"), "", "hash_p2" )); // Act var vc1 = partition1VectorClock; var vc2 = partition2VectorClock; - - var relation = vc1.CompareTo(vc2); - var partition1NeedsToPull = vc1.GetNodesWithUpdates(vc2).ToList(); - var partition1NeedsToPush = vc1.GetNodesToPush(vc2).ToList(); - - // Assert - relation.ShouldBe(CausalityRelation.Concurrent); - - // Partition 1 needs to pull node3 - partition1NeedsToPull.Count().ShouldBe(1); - partition1NeedsToPull.ShouldContain("node3"); - - // Partition 1 needs to push node1 and node2 - partition1NeedsToPush.Count.ShouldBe(2); - partition1NeedsToPush.ShouldContain("node1"); - partition1NeedsToPush.ShouldContain("node2"); - - // Verify data can be synced - var changesToPull = await partition2Store.GetOplogForNodeAfterAsync("node3", vc1.GetTimestamp("node3"), default); - changesToPull.Count().ShouldBe(1); - - var changesToPush = await partition1Store.GetOplogForNodeAfterAsync("node1", vc2.GetTimestamp("node1"), default); + + var relation = vc1.CompareTo(vc2); + var partition1NeedsToPull = vc1.GetNodesWithUpdates(vc2).ToList(); + var partition1NeedsToPush = vc1.GetNodesToPush(vc2).ToList(); + + // Assert + relation.ShouldBe(CausalityRelation.Concurrent); + + // Partition 1 
needs to pull node3 + partition1NeedsToPull.Count().ShouldBe(1); + partition1NeedsToPull.ShouldContain("node3"); + + // Partition 1 needs to push node1 and node2 + partition1NeedsToPush.Count.ShouldBe(2); + partition1NeedsToPush.ShouldContain("node1"); + partition1NeedsToPush.ShouldContain("node2"); + + // Verify data can be synced + var changesToPull = await partition2Store.GetOplogForNodeAfterAsync("node3", vc1.GetTimestamp("node3")); + changesToPull.Count().ShouldBe(1); + + var changesToPush = await partition1Store.GetOplogForNodeAfterAsync("node1", vc2.GetTimestamp("node1")); changesToPush.Count().ShouldBe(1); } /// - /// Verifies no pull or push is required when vector clocks are equal. + /// Verifies no pull or push is required when vector clocks are equal. /// [Fact] public void VectorClockSync_EqualClocks_ShouldNotSync() { - // Arrange - var vc1 = new VectorClock(); - vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); - vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2")); - - var vc2 = new VectorClock(); - vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); - vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2")); - - // Act - var relation = vc1.CompareTo(vc2); - var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList(); - var nodesToPush = vc1.GetNodesToPush(vc2).ToList(); - - // Assert - relation.ShouldBe(CausalityRelation.Equal); + // Arrange + var vc1 = new VectorClock(); + vc1.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); + vc1.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2")); + + var vc2 = new VectorClock(); + vc2.SetTimestamp("node1", new HlcTimestamp(100, 1, "node1")); + vc2.SetTimestamp("node2", new HlcTimestamp(200, 2, "node2")); + + // Act + var relation = vc1.CompareTo(vc2); + var nodesToPull = vc1.GetNodesWithUpdates(vc2).ToList(); + var nodesToPush = vc1.GetNodesToPush(vc2).ToList(); + + // Assert + relation.ShouldBe(CausalityRelation.Equal); nodesToPull.ShouldBeEmpty(); 
nodesToPush.ShouldBeEmpty(); } /// - /// Verifies a newly observed node is detected as a required pull source. + /// Verifies a newly observed node is detected as a required pull source. /// [Fact] public async Task VectorClockSync_NewNodeJoins_ShouldBeDetected() @@ -210,24 +205,24 @@ public class VectorClockSyncTests newNodeOplogEntries.Add(new OplogEntry( "users", "user3", OperationType.Put, - System.Text.Json.JsonSerializer.Deserialize("{\"name\":\"NewNode\"}"), + JsonSerializer.Deserialize("{\"name\":\"NewNode\"}"), new HlcTimestamp(50, 1, "node3"), "", "hash_new" )); // Act var existingVC = existingNodeVectorClock; var newNodeVC = newNodeVectorClock; - - var nodesToPull = existingVC.GetNodesWithUpdates(newNodeVC).ToList(); - - // Assert - nodesToPull.Count().ShouldBe(1); - nodesToPull.ShouldContain("node3"); - - var changes = await newNodeStore.GetOplogForNodeAfterAsync("node3", existingVC.GetTimestamp("node3"), default); - changes.Count().ShouldBe(1); - } - + + var nodesToPull = existingVC.GetNodesWithUpdates(newNodeVC).ToList(); + + // Assert + nodesToPull.Count().ShouldBe(1); + nodesToPull.ShouldContain("node3"); + + var changes = await newNodeStore.GetOplogForNodeAfterAsync("node3", existingVC.GetTimestamp("node3")); + changes.Count().ShouldBe(1); + } + private static (IOplogStore Store, VectorClock VectorClock, List OplogEntries) CreatePeerStore() { var vectorClock = new VectorClock(); @@ -248,17 +243,14 @@ public class VectorClockSyncTests var since = callInfo.ArgAt(1); var collections = callInfo.ArgAt?>(2)?.ToList(); - IEnumerable query = oplogEntries + var query = oplogEntries .Where(e => e.Timestamp.NodeId == nodeId && e.Timestamp.CompareTo(since) > 0); - if (collections is { Count: > 0 }) - { - query = query.Where(e => collections.Contains(e.Collection)); - } + if (collections is { Count: > 0 }) query = query.Where(e => collections.Contains(e.Collection)); return Task.FromResult>(query.OrderBy(e => e.Timestamp).ToList()); }); return (store, 
vectorClock, oplogEntries); } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj b/tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj index 8e0332c..c9d8386 100755 --- a/tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj +++ b/tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj @@ -1,31 +1,31 @@ -ο»Ώ - - - ZB.MOM.WW.CBDDC.Network.Tests - ZB.MOM.WW.CBDDC.Network.Tests - ZB.MOM.WW.CBDDC.Network.Tests - net10.0 - enable - enable - $(NoWarn);xUnit1031;xUnit1051 - false - - - - - - - - - - - - - - - - - - - +ο»Ώ + + + ZB.MOM.WW.CBDDC.Network.Tests + ZB.MOM.WW.CBDDC.Network.Tests + ZB.MOM.WW.CBDDC.Network.Tests + net10.0 + enable + enable + $(NoWarn);xUnit1031;xUnit1051 + false + + + + + + + + + + + + + + + + + + + diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs index 258b331..4de6cdf 100755 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs @@ -1,30 +1,28 @@ +using System.Text.Json; +using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence.BLite; -using Microsoft.Extensions.Logging.Abstractions; -using System.Text.Json; -using Xunit; using ZB.MOM.WW.CBDDC.Persistence; +using ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests; /// -/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations. +/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations. 
/// public class BLiteStoreExportImportTests : IDisposable { - private readonly string _testDbPath; + private readonly IPeerNodeConfigurationProvider _configProvider; private readonly SampleDbContext _context; private readonly SampleDocumentStore _documentStore; private readonly BLiteOplogStore _oplogStore; private readonly BLitePeerConfigurationStore _peerConfigStore; private readonly BLiteSnapshotMetadataStore _snapshotMetadataStore; - private readonly IPeerNodeConfigurationProvider _configProvider; + private readonly string _testDbPath; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// public BLiteStoreExportImportTests() { @@ -33,7 +31,8 @@ public class BLiteStoreExportImportTests : IDisposable _configProvider = CreateConfigProvider("test-node"); var vectorClock = new VectorClockService(); - _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger.Instance); + _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, + NullLogger.Instance); _snapshotMetadataStore = new BLiteSnapshotMetadataStore( _context, NullLogger>.Instance); _oplogStore = new BLiteOplogStore( @@ -45,10 +44,42 @@ public class BLiteStoreExportImportTests : IDisposable _context, NullLogger>.Instance); } + /// + /// Disposes test resources and removes the temporary database file. 
+ /// + public void Dispose() + { + _documentStore?.Dispose(); + _context?.Dispose(); + + if (File.Exists(_testDbPath)) + try + { + File.Delete(_testDbPath); + } + catch + { + } + } + + private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId) + { + var configProvider = Substitute.For(); + configProvider.GetConfiguration().Returns(new PeerNodeConfiguration + { + NodeId = nodeId, + TcpPort = 5000, + AuthToken = "test-token", + OplogRetentionHours = 24, + MaintenanceIntervalMinutes = 60 + }); + return configProvider; + } + #region OplogStore Tests /// - /// Verifies that exporting oplog entries returns all persisted records. + /// Verifies that exporting oplog entries returns all persisted records. /// [Fact] public async Task OplogStore_ExportAsync_ReturnsAllEntries() @@ -69,7 +100,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that importing oplog entries adds them to the store. + /// Verifies that importing oplog entries adds them to the store. /// [Fact] public async Task OplogStore_ImportAsync_AddsEntries() @@ -92,7 +123,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that merging oplog entries adds only entries that are not already present. + /// Verifies that merging oplog entries adds only entries that are not already present. /// [Fact] public async Task OplogStore_MergeAsync_OnlyAddsNewEntries() @@ -117,7 +148,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that chain range lookup resolves entries by hash and returns the expected range. + /// Verifies that chain range lookup resolves entries by hash and returns the expected range. 
/// [Fact] public async Task OplogStore_GetChainRangeAsync_UsesHashLookup() @@ -126,7 +157,8 @@ public class BLiteStoreExportImportTests : IDisposable var payload1 = JsonDocument.Parse("{\"test\":\"k1\"}").RootElement; var payload2 = JsonDocument.Parse("{\"test\":\"k2\"}").RootElement; var entry1 = new OplogEntry("col1", "k1", OperationType.Put, payload1, new HlcTimestamp(1000, 0, "node1"), ""); - var entry2 = new OplogEntry("col1", "k2", OperationType.Put, payload2, new HlcTimestamp(2000, 0, "node1"), entry1.Hash); + var entry2 = new OplogEntry("col1", "k2", OperationType.Put, payload2, new HlcTimestamp(2000, 0, "node1"), + entry1.Hash); await _oplogStore.AppendOplogEntryAsync(entry1); await _oplogStore.AppendOplogEntryAsync(entry2); @@ -141,7 +173,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that dropping the oplog store removes all entries. + /// Verifies that dropping the oplog store removes all entries. /// [Fact] public async Task OplogStore_DropAsync_ClearsAllEntries() @@ -164,7 +196,7 @@ public class BLiteStoreExportImportTests : IDisposable #region PeerConfigurationStore Tests /// - /// Verifies that exporting peer configurations returns all persisted peers. + /// Verifies that exporting peer configurations returns all persisted peers. /// [Fact] public async Task PeerConfigStore_ExportAsync_ReturnsAllPeers() @@ -183,7 +215,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that importing peer configurations adds peers to the store. + /// Verifies that importing peer configurations adds peers to the store. /// [Fact] public async Task PeerConfigStore_ImportAsync_AddsPeers() @@ -204,7 +236,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that merging peer configurations adds only new peers. + /// Verifies that merging peer configurations adds only new peers. 
/// [Fact] public async Task PeerConfigStore_MergeAsync_OnlyAddsNewPeers() @@ -229,7 +261,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that dropping peer configurations removes all peers. + /// Verifies that dropping peer configurations removes all peers. /// [Fact] public async Task PeerConfigStore_DropAsync_ClearsAllPeers() @@ -252,7 +284,7 @@ public class BLiteStoreExportImportTests : IDisposable #region SnapshotMetadataStore Tests /// - /// Verifies that exporting snapshot metadata returns all persisted metadata entries. + /// Verifies that exporting snapshot metadata returns all persisted metadata entries. /// [Fact] public async Task SnapshotMetadataStore_ExportAsync_ReturnsAllMetadata() @@ -273,7 +305,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that importing snapshot metadata adds metadata entries to the store. + /// Verifies that importing snapshot metadata adds metadata entries to the store. /// [Fact] public async Task SnapshotMetadataStore_ImportAsync_AddsMetadata() @@ -294,7 +326,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that merging snapshot metadata adds only entries with new node identifiers. + /// Verifies that merging snapshot metadata adds only entries with new node identifiers. /// [Fact] public async Task SnapshotMetadataStore_MergeAsync_OnlyAddsNewMetadata() @@ -318,7 +350,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that dropping snapshot metadata removes all metadata entries. + /// Verifies that dropping snapshot metadata removes all metadata entries. /// [Fact] public async Task SnapshotMetadataStore_DropAsync_ClearsAllMetadata() @@ -340,7 +372,7 @@ public class BLiteStoreExportImportTests : IDisposable #region DocumentStore Tests /// - /// Verifies that exporting documents returns all persisted documents. + /// Verifies that exporting documents returns all persisted documents. 
/// [Fact] public async Task DocumentStore_ExportAsync_ReturnsAllDocuments() @@ -360,7 +392,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that importing documents adds them to the underlying store. + /// Verifies that importing documents adds them to the underlying store. /// [Fact] public async Task DocumentStore_ImportAsync_AddsDocuments() @@ -385,7 +417,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that document merge behavior honors conflict resolution. + /// Verifies that document merge behavior honors conflict resolution. /// [Fact] public async Task DocumentStore_MergeAsync_UsesConflictResolution() @@ -414,7 +446,7 @@ public class BLiteStoreExportImportTests : IDisposable } /// - /// Verifies that dropping documents removes all persisted documents. + /// Verifies that dropping documents removes all persisted documents. /// [Fact] public async Task DocumentStore_DropAsync_ClearsAllDocuments() @@ -468,38 +500,10 @@ public class BLiteStoreExportImportTests : IDisposable private static Document CreateDocument(string collection, string key, T entity) where T : class { - var json = JsonSerializer.Serialize(entity); + string json = JsonSerializer.Serialize(entity); var content = JsonDocument.Parse(json).RootElement; return new Document(collection, key, content, new HlcTimestamp(0, 0, ""), false); } #endregion - - /// - /// Disposes test resources and removes the temporary database file. 
- /// - public void Dispose() - { - _documentStore?.Dispose(); - _context?.Dispose(); - - if (File.Exists(_testDbPath)) - { - try { File.Delete(_testDbPath); } catch { } - } - } - - private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId) - { - var configProvider = Substitute.For(); - configProvider.GetConfiguration().Returns(new PeerNodeConfiguration - { - NodeId = nodeId, - TcpPort = 5000, - AuthToken = "test-token", - OplogRetentionHours = 24, - MaintenanceIntervalMinutes = 60 - }); - return configProvider; - } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/GlobalUsings.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/GlobalUsings.cs index 569ac36..7014df2 100644 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/GlobalUsings.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/GlobalUsings.cs @@ -1,3 +1,3 @@ global using ZB.MOM.WW.CBDDC.Sample.Console; global using NSubstitute; -global using Shouldly; +global using Shouldly; \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs index 9e6ecf5..56aeb3f 100644 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs @@ -7,12 +7,12 @@ namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests; public class PeerOplogConfirmationStoreTests : IDisposable { - private readonly string _testDbPath; private readonly SampleDbContext _context; private readonly BLitePeerOplogConfirmationStore _store; + private readonly string _testDbPath; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. 
/// public PeerOplogConfirmationStoreTests() { @@ -23,8 +23,22 @@ public class PeerOplogConfirmationStoreTests : IDisposable NullLogger>.Instance); } + /// + public void Dispose() + { + _context?.Dispose(); + if (File.Exists(_testDbPath)) + try + { + File.Delete(_testDbPath); + } + catch + { + } + } + /// - /// Verifies that ensuring peer registration multiple times remains idempotent. + /// Verifies that ensuring peer registration multiple times remains idempotent. /// [Fact] public async Task EnsurePeerRegisteredAsync_IsIdempotent() @@ -41,7 +55,7 @@ public class PeerOplogConfirmationStoreTests : IDisposable } /// - /// Verifies create, update, and read flows for peer oplog confirmations. + /// Verifies create, update, and read flows for peer oplog confirmations. /// [Fact] public async Task ConfirmationStore_CrudFlow_Works() @@ -74,7 +88,7 @@ public class PeerOplogConfirmationStoreTests : IDisposable } /// - /// Verifies that removing peer tracking deactivates tracking records for that peer. + /// Verifies that removing peer tracking deactivates tracking records for that peer. 
/// [Fact] public async Task RemovePeerTrackingAsync_DeactivatesPeerTracking() @@ -95,14 +109,4 @@ public class PeerOplogConfirmationStoreTests : IDisposable peerARows.ShouldNotBeEmpty(); peerARows.All(x => !x.IsActive).ShouldBeTrue(); } - - /// - public void Dispose() - { - _context?.Dispose(); - if (File.Exists(_testDbPath)) - { - try { File.Delete(_testDbPath); } catch { } - } - } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs index cf4c944..e719797 100755 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs @@ -1,19 +1,12 @@ -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence.BLite; -using Microsoft.Extensions.Logging.Abstractions; -using System.Text.Json; - namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests; public class SampleDbContextTests : IDisposable { - private readonly string _dbPath; private readonly SampleDbContext _context; + private readonly string _dbPath; /// - /// Initializes a new test context backed by a temporary database file. + /// Initializes a new test context backed by a temporary database file. /// public SampleDbContextTests() { @@ -22,19 +15,23 @@ public class SampleDbContextTests : IDisposable } /// - /// Releases test resources and removes the temporary database file. + /// Releases test resources and removes the temporary database file. /// public void Dispose() { _context?.Dispose(); if (File.Exists(_dbPath)) - { - try { File.Delete(_dbPath); } catch { } - } + try + { + File.Delete(_dbPath); + } + catch + { + } } /// - /// Verifies that required collections are initialized in the context. + /// Verifies that required collections are initialized in the context. 
/// [Fact] public void Context_ShouldInitializeCollections() @@ -47,7 +44,7 @@ public class SampleDbContextTests : IDisposable } /// - /// Verifies that inserting a user persists the document. + /// Verifies that inserting a user persists the document. /// [Fact] public async Task Users_Insert_ShouldPersist() @@ -74,153 +71,153 @@ public class SampleDbContextTests : IDisposable } /// - /// Verifies that updating a user modifies the existing document. + /// Verifies that updating a user modifies the existing document. /// [Fact] public async Task Users_Update_ShouldModifyExisting() { - // Arrange - var user = new User { Id = "user2", Name = "Bob", Age = 25 }; - await _context.Users.InsertAsync(user); - await _context.SaveChangesAsync(); - - // Act - user.Age = 26; - user.Address = new Address { City = "Milan" }; - await _context.Users.UpdateAsync(user); - await _context.SaveChangesAsync(); - - // Assert - var retrieved = _context.Users.FindById("user2"); - retrieved.ShouldNotBeNull(); - retrieved!.Age.ShouldBe(26); - retrieved.Address?.City.ShouldBe("Milan"); - } + // Arrange + var user = new User { Id = "user2", Name = "Bob", Age = 25 }; + await _context.Users.InsertAsync(user); + await _context.SaveChangesAsync(); + + // Act + user.Age = 26; + user.Address = new Address { City = "Milan" }; + await _context.Users.UpdateAsync(user); + await _context.SaveChangesAsync(); + + // Assert + var retrieved = _context.Users.FindById("user2"); + retrieved.ShouldNotBeNull(); + retrieved!.Age.ShouldBe(26); + retrieved.Address?.City.ShouldBe("Milan"); + } /// - /// Verifies that deleting a user removes the document. + /// Verifies that deleting a user removes the document. 
/// [Fact] public async Task Users_Delete_ShouldRemove() { - // Arrange - var user = new User { Id = "user3", Name = "Charlie", Age = 35 }; - await _context.Users.InsertAsync(user); - await _context.SaveChangesAsync(); - - // Act - await _context.Users.DeleteAsync("user3"); - await _context.SaveChangesAsync(); - - // Assert - var retrieved = _context.Users.FindById("user3"); - retrieved.ShouldBeNull(); - } + // Arrange + var user = new User { Id = "user3", Name = "Charlie", Age = 35 }; + await _context.Users.InsertAsync(user); + await _context.SaveChangesAsync(); + + // Act + await _context.Users.DeleteAsync("user3"); + await _context.SaveChangesAsync(); + + // Assert + var retrieved = _context.Users.FindById("user3"); + retrieved.ShouldBeNull(); + } /// - /// Verifies that inserting a todo list with items persists nested data. + /// Verifies that inserting a todo list with items persists nested data. /// [Fact] public async Task TodoLists_InsertWithItems_ShouldPersist() { - // Arrange - var todoList = new TodoList - { - Id = "list1", - Name = "Shopping", - Items = new List - { - new() { Task = "Buy milk", Completed = false }, - new() { Task = "Buy bread", Completed = true } - } - }; - - // Act - await _context.TodoLists.InsertAsync(todoList); - await _context.SaveChangesAsync(); - - // Assert - var retrieved = _context.TodoLists.FindById("list1"); - retrieved.ShouldNotBeNull(); - retrieved!.Name.ShouldBe("Shopping"); + // Arrange + var todoList = new TodoList + { + Id = "list1", + Name = "Shopping", + Items = new List + { + new() { Task = "Buy milk", Completed = false }, + new() { Task = "Buy bread", Completed = true } + } + }; + + // Act + await _context.TodoLists.InsertAsync(todoList); + await _context.SaveChangesAsync(); + + // Assert + var retrieved = _context.TodoLists.FindById("list1"); + retrieved.ShouldNotBeNull(); + retrieved!.Name.ShouldBe("Shopping"); retrieved.Items.Count.ShouldBe(2); - retrieved.Items.ShouldContain(i => i.Task == "Buy milk" && 
!i.Completed); - retrieved.Items.ShouldContain(i => i.Task == "Buy bread" && i.Completed); - } + retrieved.Items.ShouldContain(i => i.Task == "Buy milk" && !i.Completed); + retrieved.Items.ShouldContain(i => i.Task == "Buy bread" && i.Completed); + } /// - /// Verifies that updating todo items modifies the nested collection. + /// Verifies that updating todo items modifies the nested collection. /// [Fact] public async Task TodoLists_UpdateItems_ShouldModifyNestedCollection() { - // Arrange - var todoList = new TodoList - { - Id = "list2", - Name = "Work Tasks", - Items = new List - { - new() { Task = "Write report", Completed = false } - } - }; - await _context.TodoLists.InsertAsync(todoList); - await _context.SaveChangesAsync(); - - // Act - Mark task as completed and add new task - todoList.Items[0].Completed = true; - todoList.Items.Add(new TodoItem { Task = "Review report", Completed = false }); - await _context.TodoLists.UpdateAsync(todoList); - await _context.SaveChangesAsync(); - - // Assert - var retrieved = _context.TodoLists.FindById("list2"); - retrieved.ShouldNotBeNull(); + // Arrange + var todoList = new TodoList + { + Id = "list2", + Name = "Work Tasks", + Items = new List + { + new() { Task = "Write report", Completed = false } + } + }; + await _context.TodoLists.InsertAsync(todoList); + await _context.SaveChangesAsync(); + + // Act - Mark task as completed and add new task + todoList.Items[0].Completed = true; + todoList.Items.Add(new TodoItem { Task = "Review report", Completed = false }); + await _context.TodoLists.UpdateAsync(todoList); + await _context.SaveChangesAsync(); + + // Assert + var retrieved = _context.TodoLists.FindById("list2"); + retrieved.ShouldNotBeNull(); retrieved!.Items.Count.ShouldBe(2); - retrieved.Items.First().Completed.ShouldBe(true); - retrieved.Items.Last().Completed.ShouldBe(false); - } + retrieved.Items.First().Completed.ShouldBe(true); + retrieved.Items.Last().Completed.ShouldBe(false); + } /// - /// Verifies that 
querying all users returns all inserted users. + /// Verifies that querying all users returns all inserted users. /// [Fact] public void Users_FindAll_ShouldReturnAllUsers() { - // Arrange - _context.Users.InsertAsync(new User { Id = "u1", Name = "User1", Age = 20 }).Wait(); - _context.Users.InsertAsync(new User { Id = "u2", Name = "User2", Age = 30 }).Wait(); - _context.Users.InsertAsync(new User { Id = "u3", Name = "User3", Age = 40 }).Wait(); - _context.SaveChangesAsync().Wait(); - - // Act - var allUsers = _context.Users.FindAll().ToList(); - - // Assert + // Arrange + _context.Users.InsertAsync(new User { Id = "u1", Name = "User1", Age = 20 }).Wait(); + _context.Users.InsertAsync(new User { Id = "u2", Name = "User2", Age = 30 }).Wait(); + _context.Users.InsertAsync(new User { Id = "u3", Name = "User3", Age = 40 }).Wait(); + _context.SaveChangesAsync().Wait(); + + // Act + var allUsers = _context.Users.FindAll().ToList(); + + // Assert allUsers.Count.ShouldBe(3); allUsers.Select(u => u.Name).ShouldContain("User1"); allUsers.Select(u => u.Name).ShouldContain("User2"); allUsers.Select(u => u.Name).ShouldContain("User3"); - } + } /// - /// Verifies that predicate-based queries return only matching users. + /// Verifies that predicate-based queries return only matching users. 
/// [Fact] public void Users_Find_WithPredicate_ShouldFilterCorrectly() { - // Arrange - _context.Users.InsertAsync(new User { Id = "f1", Name = "Young", Age = 18 }).Wait(); - _context.Users.InsertAsync(new User { Id = "f2", Name = "Adult", Age = 30 }).Wait(); - _context.Users.InsertAsync(new User { Id = "f3", Name = "Senior", Age = 65 }).Wait(); - _context.SaveChangesAsync().Wait(); - - // Act - var adults = _context.Users.Find(u => u.Age >= 30).ToList(); - - // Assert + // Arrange + _context.Users.InsertAsync(new User { Id = "f1", Name = "Young", Age = 18 }).Wait(); + _context.Users.InsertAsync(new User { Id = "f2", Name = "Adult", Age = 30 }).Wait(); + _context.Users.InsertAsync(new User { Id = "f3", Name = "Senior", Age = 65 }).Wait(); + _context.SaveChangesAsync().Wait(); + + // Act + var adults = _context.Users.Find(u => u.Age >= 30).ToList(); + + // Assert adults.Count.ShouldBe(2); adults.Select(u => u.Name).ShouldContain("Adult"); adults.Select(u => u.Name).ShouldContain("Senior"); - } -} + } +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs index 93f5096..788d039 100755 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs @@ -1,29 +1,27 @@ +using System.Text.Json; +using System.Text.Json.Nodes; +using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence.BLite; -using Microsoft.Extensions.Logging.Abstractions; -using System.Text.Json; -using System.Text.Json.Nodes; -using Xunit; -using ZB.MOM.WW.CBDDC.Persistence; +using ZB.MOM.WW.CBDDC.Persistence; +using ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests; public class SnapshotStoreTests : IDisposable { - 
private readonly string _testDbPath; + private readonly IPeerNodeConfigurationProvider _configProvider; private readonly SampleDbContext _context; - private readonly SampleDocumentStore _documentStore; - private readonly BLiteOplogStore _oplogStore; - private readonly BLitePeerConfigurationStore _peerConfigStore; - private readonly BLitePeerOplogConfirmationStore _peerConfirmationStore; - private readonly SnapshotStore _snapshotStore; - private readonly IPeerNodeConfigurationProvider _configProvider; + private readonly SampleDocumentStore _documentStore; + private readonly BLiteOplogStore _oplogStore; + private readonly BLitePeerConfigurationStore _peerConfigStore; + private readonly BLitePeerOplogConfirmationStore _peerConfirmationStore; + private readonly SnapshotStore _snapshotStore; + private readonly string _testDbPath; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// public SnapshotStoreTests() { @@ -32,7 +30,8 @@ public class SnapshotStoreTests : IDisposable _configProvider = CreateConfigProvider("test-node"); var vectorClock = new VectorClockService(); - _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger.Instance); + _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, + NullLogger.Instance); var snapshotMetadataStore = new BLiteSnapshotMetadataStore( _context, NullLogger>.Instance); @@ -43,25 +42,43 @@ public class SnapshotStoreTests : IDisposable vectorClock, snapshotMetadataStore, NullLogger>.Instance); - _peerConfigStore = new BLitePeerConfigurationStore( - _context, - NullLogger>.Instance); - _peerConfirmationStore = new BLitePeerOplogConfirmationStore( - _context, - NullLogger>.Instance); - - _snapshotStore = new SnapshotStore( - _documentStore, - _peerConfigStore, - _oplogStore, - new LastWriteWinsConflictResolver(), - NullLogger.Instance, - _peerConfirmationStore); + _peerConfigStore = new BLitePeerConfigurationStore( + _context, 
+ NullLogger>.Instance); + _peerConfirmationStore = new BLitePeerOplogConfirmationStore( + _context, + NullLogger>.Instance); + + _snapshotStore = new SnapshotStore( + _documentStore, + _peerConfigStore, + _oplogStore, + new LastWriteWinsConflictResolver(), + NullLogger.Instance, + _peerConfirmationStore); + } + + /// + /// Releases resources created for test execution. + /// + public void Dispose() + { + _documentStore?.Dispose(); + _context?.Dispose(); + + if (File.Exists(_testDbPath)) + try + { + File.Delete(_testDbPath); + } + catch + { + } } /// - /// Verifies that creating a snapshot writes valid JSON to the output stream. + /// Verifies that creating a snapshot writes valid JSON to the output stream. /// [Fact] public async Task CreateSnapshotAsync_WritesValidJsonToStream() @@ -80,7 +97,7 @@ public class SnapshotStoreTests : IDisposable // Reset stream position and verify JSON is valid stream.Position = 0; - var json = await new StreamReader(stream).ReadToEndAsync(); + string json = await new StreamReader(stream).ReadToEndAsync(); string.IsNullOrWhiteSpace(json).ShouldBeFalse("Snapshot JSON should not be empty"); json.Trim().ShouldStartWith("{"); @@ -90,14 +107,15 @@ public class SnapshotStoreTests : IDisposable doc.ShouldNotBeNull(); // Verify structure - doc.RootElement.TryGetProperty("Version", out _).ShouldBeTrue("Should have Version property"); - doc.RootElement.TryGetProperty("Documents", out _).ShouldBeTrue("Should have Documents property"); - doc.RootElement.TryGetProperty("Oplog", out _).ShouldBeTrue("Should have Oplog property"); - doc.RootElement.TryGetProperty("PeerConfirmations", out _).ShouldBeTrue("Should have PeerConfirmations property"); + doc.RootElement.TryGetProperty("Version", out _).ShouldBeTrue("Should have Version property"); + doc.RootElement.TryGetProperty("Documents", out _).ShouldBeTrue("Should have Documents property"); + doc.RootElement.TryGetProperty("Oplog", out _).ShouldBeTrue("Should have Oplog property"); + 
doc.RootElement.TryGetProperty("PeerConfirmations", out _) + .ShouldBeTrue("Should have PeerConfirmations property"); } /// - /// Verifies that snapshot creation includes all persisted documents. + /// Verifies that snapshot creation includes all persisted documents. /// [Fact] public async Task CreateSnapshotAsync_IncludesAllDocuments() @@ -119,7 +137,7 @@ public class SnapshotStoreTests : IDisposable // Assert stream.Position = 0; - var json = await new StreamReader(stream).ReadToEndAsync(); + string json = await new StreamReader(stream).ReadToEndAsync(); var doc = JsonDocument.Parse(json); var documents = doc.RootElement.GetProperty("Documents"); @@ -127,38 +145,39 @@ public class SnapshotStoreTests : IDisposable } /// - /// Verifies that creating and replacing a snapshot preserves document data. + /// Verifies that creating and replacing a snapshot preserves document data. /// [Fact] - public async Task RoundTrip_CreateAndReplace_PreservesData() - { - // Arrange - Add data to source - var originalUser = new User { Id = "user-rt", Name = "RoundTrip User", Age = 42 }; - await _context.Users.InsertAsync(originalUser); - await _peerConfirmationStore.UpdateConfirmationAsync( - "peer-rt", - "source-rt", - new HlcTimestamp(500, 2, "source-rt"), - "hash-rt"); - await _context.SaveChangesAsync(); + public async Task RoundTrip_CreateAndReplace_PreservesData() + { + // Arrange - Add data to source + var originalUser = new User { Id = "user-rt", Name = "RoundTrip User", Age = 42 }; + await _context.Users.InsertAsync(originalUser); + await _peerConfirmationStore.UpdateConfirmationAsync( + "peer-rt", + "source-rt", + new HlcTimestamp(500, 2, "source-rt"), + "hash-rt"); + await _context.SaveChangesAsync(); - // Create snapshot - using var snapshotStream = new MemoryStream(); - await _snapshotStore.CreateSnapshotAsync(snapshotStream); - snapshotStream.Position = 0; - var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync(); - var snapshotDoc = 
JsonDocument.Parse(snapshotJson); - snapshotDoc.RootElement.GetProperty("PeerConfirmations").GetArrayLength().ShouldBe(1); - snapshotStream.Position = 0; + // Create snapshot + using var snapshotStream = new MemoryStream(); + await _snapshotStore.CreateSnapshotAsync(snapshotStream); + snapshotStream.Position = 0; + string snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync(); + var snapshotDoc = JsonDocument.Parse(snapshotJson); + snapshotDoc.RootElement.GetProperty("PeerConfirmations").GetArrayLength().ShouldBe(1); + snapshotStream.Position = 0; // Create a new context/stores (simulating a different node) - var newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite"); + string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite"); try { using var newContext = new SampleDbContext(newDbPath); var newConfigProvider = CreateConfigProvider("test-new-node"); var newVectorClock = new VectorClockService(); - var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock, NullLogger.Instance); + var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock, + NullLogger.Instance); var newSnapshotMetaStore = new BLiteSnapshotMetadataStore( newContext, NullLogger>.Instance); var newOplogStore = new BLiteOplogStore( @@ -166,66 +185,72 @@ public class SnapshotStoreTests : IDisposable newVectorClock, newSnapshotMetaStore, NullLogger>.Instance); - var newPeerStore = new BLitePeerConfigurationStore( - newContext, NullLogger>.Instance); - var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore( - newContext, - NullLogger>.Instance); - - var newSnapshotStore = new SnapshotStore( - newDocStore, - newPeerStore, - newOplogStore, - new LastWriteWinsConflictResolver(), - NullLogger.Instance, - newPeerConfirmationStore); + var newPeerStore = new BLitePeerConfigurationStore( + newContext, NullLogger>.Instance); + var 
newPeerConfirmationStore = new BLitePeerOplogConfirmationStore( + newContext, + NullLogger>.Instance); + + var newSnapshotStore = new SnapshotStore( + newDocStore, + newPeerStore, + newOplogStore, + new LastWriteWinsConflictResolver(), + NullLogger.Instance, + newPeerConfirmationStore); // Act - Replace database with snapshot await newSnapshotStore.ReplaceDatabaseAsync(snapshotStream); // Assert - Data should be restored var restoredUser = newContext.Users.FindById("user-rt"); - restoredUser.ShouldNotBeNull(); - restoredUser.Name.ShouldBe("RoundTrip User"); - restoredUser.Age.ShouldBe(42); - - var restoredConfirmations = (await newPeerConfirmationStore.GetConfirmationsAsync()).ToList(); - restoredConfirmations.Count.ShouldBe(1); - restoredConfirmations[0].PeerNodeId.ShouldBe("peer-rt"); - restoredConfirmations[0].SourceNodeId.ShouldBe("source-rt"); - restoredConfirmations[0].ConfirmedWall.ShouldBe(500); - restoredConfirmations[0].ConfirmedLogic.ShouldBe(2); - restoredConfirmations[0].ConfirmedHash.ShouldBe("hash-rt"); - } - finally - { + restoredUser.ShouldNotBeNull(); + restoredUser.Name.ShouldBe("RoundTrip User"); + restoredUser.Age.ShouldBe(42); + + var restoredConfirmations = (await newPeerConfirmationStore.GetConfirmationsAsync()).ToList(); + restoredConfirmations.Count.ShouldBe(1); + restoredConfirmations[0].PeerNodeId.ShouldBe("peer-rt"); + restoredConfirmations[0].SourceNodeId.ShouldBe("source-rt"); + restoredConfirmations[0].ConfirmedWall.ShouldBe(500); + restoredConfirmations[0].ConfirmedLogic.ShouldBe(2); + restoredConfirmations[0].ConfirmedHash.ShouldBe("hash-rt"); + } + finally + { if (File.Exists(newDbPath)) - try { File.Delete(newDbPath); } catch { } + try + { + File.Delete(newDbPath); + } + catch + { + } } } /// - /// Verifies that merging a snapshot preserves existing data and adds new data. + /// Verifies that merging a snapshot preserves existing data and adds new data. 
/// [Fact] - public async Task MergeSnapshotAsync_MergesWithExistingData() - { - // Arrange - Add initial data - await _context.Users.InsertAsync(new User { Id = "existing", Name = "Existing User", Age = 30 }); - await _peerConfirmationStore.UpdateConfirmationAsync( - "peer-merge", - "source-a", - new HlcTimestamp(100, 0, "source-a"), - "target-hash-old"); - await _peerConfirmationStore.UpdateConfirmationAsync( - "peer-local-only", - "source-local", - new HlcTimestamp(50, 0, "source-local"), - "target-local-hash"); - await _context.SaveChangesAsync(); + public async Task MergeSnapshotAsync_MergesWithExistingData() + { + // Arrange - Add initial data + await _context.Users.InsertAsync(new User { Id = "existing", Name = "Existing User", Age = 30 }); + await _peerConfirmationStore.UpdateConfirmationAsync( + "peer-merge", + "source-a", + new HlcTimestamp(100, 0, "source-a"), + "target-hash-old"); + await _peerConfirmationStore.UpdateConfirmationAsync( + "peer-local-only", + "source-local", + new HlcTimestamp(50, 0, "source-local"), + "target-local-hash"); + await _context.SaveChangesAsync(); // Create snapshot with different data - var sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite"); + string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite"); MemoryStream snapshotStream; try @@ -236,7 +261,8 @@ public class SnapshotStoreTests : IDisposable var sourceConfigProvider = CreateConfigProvider("test-source-node"); var sourceVectorClock = new VectorClockService(); - var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock, NullLogger.Instance); + var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock, + NullLogger.Instance); var sourceSnapshotMetaStore = new BLiteSnapshotMetadataStore( sourceContext, NullLogger>.Instance); var sourceOplogStore = new BLiteOplogStore( @@ -244,29 +270,29 @@ public 
class SnapshotStoreTests : IDisposable sourceVectorClock, sourceSnapshotMetaStore, NullLogger>.Instance); - var sourcePeerStore = new BLitePeerConfigurationStore( - sourceContext, NullLogger>.Instance); - var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore( - sourceContext, - NullLogger>.Instance); - await sourcePeerConfirmationStore.UpdateConfirmationAsync( - "peer-merge", - "source-a", - new HlcTimestamp(200, 1, "source-a"), - "source-hash-new"); - await sourcePeerConfirmationStore.UpdateConfirmationAsync( - "peer-merge", - "source-b", - new HlcTimestamp(300, 0, "source-b"), - "source-hash-b"); - - var sourceSnapshotStore = new SnapshotStore( - sourceDocStore, - sourcePeerStore, - sourceOplogStore, - new LastWriteWinsConflictResolver(), - NullLogger.Instance, - sourcePeerConfirmationStore); + var sourcePeerStore = new BLitePeerConfigurationStore( + sourceContext, NullLogger>.Instance); + var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore( + sourceContext, + NullLogger>.Instance); + await sourcePeerConfirmationStore.UpdateConfirmationAsync( + "peer-merge", + "source-a", + new HlcTimestamp(200, 1, "source-a"), + "source-hash-new"); + await sourcePeerConfirmationStore.UpdateConfirmationAsync( + "peer-merge", + "source-b", + new HlcTimestamp(300, 0, "source-b"), + "source-hash-b"); + + var sourceSnapshotStore = new SnapshotStore( + sourceDocStore, + sourcePeerStore, + sourceOplogStore, + new LastWriteWinsConflictResolver(), + NullLogger.Instance, + sourcePeerConfirmationStore); snapshotStream = new MemoryStream(); await sourceSnapshotStore.CreateSnapshotAsync(snapshotStream); @@ -275,7 +301,13 @@ public class SnapshotStoreTests : IDisposable finally { if (File.Exists(sourceDbPath)) - try { File.Delete(sourceDbPath); } catch { } + try + { + File.Delete(sourceDbPath); + } + catch + { + } } // Act - Merge snapshot into existing data @@ -285,70 +317,71 @@ public class SnapshotStoreTests : IDisposable var existingUser = 
_context.Users.FindById("existing"); var newUser = _context.Users.FindById("new-user"); - existingUser.ShouldNotBeNull(); - newUser.ShouldNotBeNull(); - existingUser.Name.ShouldBe("Existing User"); - newUser.Name.ShouldBe("New User"); - - var confirmations = (await _peerConfirmationStore.GetConfirmationsAsync()) - .OrderBy(c => c.PeerNodeId) - .ThenBy(c => c.SourceNodeId) - .ToList(); - - confirmations.Count.ShouldBe(3); - confirmations[0].PeerNodeId.ShouldBe("peer-local-only"); - confirmations[0].SourceNodeId.ShouldBe("source-local"); - confirmations[0].ConfirmedWall.ShouldBe(50); - confirmations[0].ConfirmedHash.ShouldBe("target-local-hash"); - - confirmations[1].PeerNodeId.ShouldBe("peer-merge"); - confirmations[1].SourceNodeId.ShouldBe("source-a"); - confirmations[1].ConfirmedWall.ShouldBe(200); - confirmations[1].ConfirmedLogic.ShouldBe(1); - confirmations[1].ConfirmedHash.ShouldBe("source-hash-new"); - - confirmations[2].PeerNodeId.ShouldBe("peer-merge"); - confirmations[2].SourceNodeId.ShouldBe("source-b"); - confirmations[2].ConfirmedWall.ShouldBe(300); - confirmations[2].ConfirmedHash.ShouldBe("source-hash-b"); - } - - /// - /// Verifies that replace can consume legacy snapshots that do not include peer confirmations. 
- /// - [Fact] - public async Task ReplaceDatabaseAsync_LegacySnapshotWithoutPeerConfirmations_IsSupported() - { - // Arrange - await _context.Users.InsertAsync(new User { Id = "legacy-user", Name = "Legacy User", Age = 33 }); - await _context.SaveChangesAsync(); - - using var snapshotStream = new MemoryStream(); - await _snapshotStore.CreateSnapshotAsync(snapshotStream); - snapshotStream.Position = 0; - var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync(); - - var legacySnapshot = JsonNode.Parse(snapshotJson)!.AsObject(); - legacySnapshot.Remove("PeerConfirmations"); - - using var legacyStream = new MemoryStream(); - await using (var writer = new Utf8JsonWriter(legacyStream)) - { - legacySnapshot.WriteTo(writer); - } - legacyStream.Position = 0; - - // Act - await _snapshotStore.ReplaceDatabaseAsync(legacyStream); - - // Assert - _context.Users.FindById("legacy-user").ShouldNotBeNull(); - (await _peerConfirmationStore.GetConfirmationsAsync()).Count().ShouldBe(0); - } - - /// - /// Verifies that snapshot creation succeeds for an empty database. 
- /// + existingUser.ShouldNotBeNull(); + newUser.ShouldNotBeNull(); + existingUser.Name.ShouldBe("Existing User"); + newUser.Name.ShouldBe("New User"); + + var confirmations = (await _peerConfirmationStore.GetConfirmationsAsync()) + .OrderBy(c => c.PeerNodeId) + .ThenBy(c => c.SourceNodeId) + .ToList(); + + confirmations.Count.ShouldBe(3); + confirmations[0].PeerNodeId.ShouldBe("peer-local-only"); + confirmations[0].SourceNodeId.ShouldBe("source-local"); + confirmations[0].ConfirmedWall.ShouldBe(50); + confirmations[0].ConfirmedHash.ShouldBe("target-local-hash"); + + confirmations[1].PeerNodeId.ShouldBe("peer-merge"); + confirmations[1].SourceNodeId.ShouldBe("source-a"); + confirmations[1].ConfirmedWall.ShouldBe(200); + confirmations[1].ConfirmedLogic.ShouldBe(1); + confirmations[1].ConfirmedHash.ShouldBe("source-hash-new"); + + confirmations[2].PeerNodeId.ShouldBe("peer-merge"); + confirmations[2].SourceNodeId.ShouldBe("source-b"); + confirmations[2].ConfirmedWall.ShouldBe(300); + confirmations[2].ConfirmedHash.ShouldBe("source-hash-b"); + } + + /// + /// Verifies that replace can consume legacy snapshots that do not include peer confirmations. 
+ /// + [Fact] + public async Task ReplaceDatabaseAsync_LegacySnapshotWithoutPeerConfirmations_IsSupported() + { + // Arrange + await _context.Users.InsertAsync(new User { Id = "legacy-user", Name = "Legacy User", Age = 33 }); + await _context.SaveChangesAsync(); + + using var snapshotStream = new MemoryStream(); + await _snapshotStore.CreateSnapshotAsync(snapshotStream); + snapshotStream.Position = 0; + string snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync(); + + var legacySnapshot = JsonNode.Parse(snapshotJson)!.AsObject(); + legacySnapshot.Remove("PeerConfirmations"); + + using var legacyStream = new MemoryStream(); + await using (var writer = new Utf8JsonWriter(legacyStream)) + { + legacySnapshot.WriteTo(writer); + } + + legacyStream.Position = 0; + + // Act + await _snapshotStore.ReplaceDatabaseAsync(legacyStream); + + // Assert + _context.Users.FindById("legacy-user").ShouldNotBeNull(); + (await _peerConfirmationStore.GetConfirmationsAsync()).Count().ShouldBe(0); + } + + /// + /// Verifies that snapshot creation succeeds for an empty database. + /// [Fact] public async Task CreateSnapshotAsync_HandlesEmptyDatabase() { @@ -360,7 +393,7 @@ public class SnapshotStoreTests : IDisposable (stream.Length > 0).ShouldBeTrue(); stream.Position = 0; - var json = await new StreamReader(stream).ReadToEndAsync(); + string json = await new StreamReader(stream).ReadToEndAsync(); var doc = JsonDocument.Parse(json); var documents = doc.RootElement.GetProperty("Documents"); @@ -368,7 +401,7 @@ public class SnapshotStoreTests : IDisposable } /// - /// Verifies that snapshot creation includes oplog entries. + /// Verifies that snapshot creation includes oplog entries. 
/// [Fact] public async Task CreateSnapshotAsync_IncludesOplogEntries() @@ -394,27 +427,13 @@ public class SnapshotStoreTests : IDisposable // Assert stream.Position = 0; - var json = await new StreamReader(stream).ReadToEndAsync(); + string json = await new StreamReader(stream).ReadToEndAsync(); var doc = JsonDocument.Parse(json); var oplog = doc.RootElement.GetProperty("Oplog"); (oplog.GetArrayLength() >= 1).ShouldBeTrue("Should have at least one oplog entry"); } - /// - /// Releases resources created for test execution. - /// - public void Dispose() - { - _documentStore?.Dispose(); - _context?.Dispose(); - - if (File.Exists(_testDbPath)) - { - try { File.Delete(_testDbPath); } catch { } - } - } - private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId) { var configProvider = Substitute.For(); @@ -428,4 +447,4 @@ public class SnapshotStoreTests : IDisposable }); return configProvider; } -} +} \ No newline at end of file diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj index 50dd091..bf471d1 100755 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj @@ -1,32 +1,32 @@ - - - - ZB.MOM.WW.CBDDC.Sample.Console.Tests - ZB.MOM.WW.CBDDC.Sample.Console.Tests - ZB.MOM.WW.CBDDC.Sample.Console.Tests - net10.0 - enable - enable - $(NoWarn);xUnit1031;xUnit1051 - false - - - - - - - - - - - - - - - - - - - - - + + + + ZB.MOM.WW.CBDDC.Sample.Console.Tests + ZB.MOM.WW.CBDDC.Sample.Console.Tests + ZB.MOM.WW.CBDDC.Sample.Console.Tests + net10.0 + enable + enable + $(NoWarn);xUnit1031;xUnit1051 + false + + + + + + + + + + + + + + + + + + + + +