Reformat/cleanup
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m10s

This commit is contained in:
Joseph Doherty
2026-02-21 07:53:53 -05:00
parent c6f6d9329a
commit 7ebc2cb567
160 changed files with 7258 additions and 7262 deletions

View File

@@ -1,23 +1,23 @@
<Solution> <Solution>
<Configurations> <Configurations>
<Platform Name="Any CPU" /> <Platform Name="Any CPU"/>
<Platform Name="x64" /> <Platform Name="x64"/>
<Platform Name="x86" /> <Platform Name="x86"/>
</Configurations> </Configurations>
<Folder Name="/samples/"> <Folder Name="/samples/">
<Project Path="samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj" /> <Project Path="samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj"/>
</Folder> </Folder>
<Folder Name="/src/"> <Folder Name="/src/">
<Project Path="src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj" /> <Project Path="src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj"/>
<Project Path="src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj" /> <Project Path="src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj"/>
<Project Path="src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj" /> <Project Path="src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj"/>
<Project Path="src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj" /> <Project Path="src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj"/>
</Folder> </Folder>
<Folder Name="/tests/"> <Folder Name="/tests/">
<Project Path="tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj" /> <Project Path="tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj" /> <Project Path="tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj" /> <Project Path="tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj" /> <Project Path="tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj" /> <Project Path="tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj"/>
</Folder> </Folder>
</Solution> </Solution>

View File

@@ -1,37 +1,33 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core; using Serilog.Context;
using ZB.MOM.WW.CBDDC.Core.Cache; using ZB.MOM.WW.CBDDC.Core.Cache;
using ZB.MOM.WW.CBDDC.Core.Diagnostics; using ZB.MOM.WW.CBDDC.Core.Diagnostics;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.DependencyInjection; // For IServiceProvider if needed
using Serilog.Context;
using ZB.MOM.WW.CBDDC.Sample.Console;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
// For IServiceProvider if needed
namespace ZB.MOM.WW.CBDDC.Sample.Console; namespace ZB.MOM.WW.CBDDC.Sample.Console;
public class ConsoleInteractiveService : BackgroundService public class ConsoleInteractiveService : BackgroundService
{ {
private readonly ILogger<ConsoleInteractiveService> _logger;
private readonly SampleDbContext _db;
private readonly ICBDDCNode _node;
private readonly IHostApplicationLifetime _lifetime;
// Auxiliary services for status/commands // Auxiliary services for status/commands
private readonly IDocumentCache _cache; private readonly IDocumentCache _cache;
private readonly IOfflineQueue _queue;
private readonly ICBDDCHealthCheck _healthCheck;
private readonly ISyncStatusTracker _syncTracker;
private readonly IServiceProvider _serviceProvider;
private readonly IPeerNodeConfigurationProvider _configProvider; private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _db;
private readonly ICBDDCHealthCheck _healthCheck;
private readonly IHostApplicationLifetime _lifetime;
private readonly ILogger<ConsoleInteractiveService> _logger;
private readonly ICBDDCNode _node;
private readonly IOfflineQueue _queue;
private readonly IServiceProvider _serviceProvider;
private readonly ISyncStatusTracker _syncTracker;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="ConsoleInteractiveService"/> class. /// Initializes a new instance of the <see cref="ConsoleInteractiveService" /> class.
/// </summary> /// </summary>
/// <param name="logger">The logger used by the interactive service.</param> /// <param name="logger">The logger used by the interactive service.</param>
/// <param name="db">The sample database context.</param> /// <param name="db">The sample database context.</param>
@@ -72,7 +68,7 @@ public class ConsoleInteractiveService : BackgroundService
{ {
var config = await _configProvider.GetConfiguration(); var config = await _configProvider.GetConfiguration();
System.Console.WriteLine($"--- Interactive Console ---"); System.Console.WriteLine("--- Interactive Console ---");
System.Console.WriteLine($"Node ID: {config.NodeId}"); System.Console.WriteLine($"Node ID: {config.NodeId}");
PrintHelp(); PrintHelp();
@@ -85,7 +81,7 @@ public class ConsoleInteractiveService : BackgroundService
continue; continue;
} }
var input = System.Console.ReadLine(); string? input = System.Console.ReadLine();
if (string.IsNullOrEmpty(input)) continue; if (string.IsNullOrEmpty(input)) continue;
try try
@@ -126,34 +122,45 @@ public class ConsoleInteractiveService : BackgroundService
if (input.StartsWith("n")) if (input.StartsWith("n"))
{ {
var ts = DateTime.Now.ToString("HH:mm:ss.fff"); var ts = DateTime.Now.ToString("HH:mm:ss.fff");
var user = new User { Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), Address = new Address { City = "AutoCity" } }; var user = new User
{
Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90),
Address = new Address { City = "AutoCity" }
};
await _db.Users.InsertAsync(user); await _db.Users.InsertAsync(user);
await _db.SaveChangesAsync(); await _db.SaveChangesAsync();
System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}..."); System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}...");
} }
else if (input.StartsWith("s")) else if (input.StartsWith("s"))
{ {
for (int i = 0; i < 5; i++) for (var i = 0; i < 5; i++)
{ {
var ts = DateTime.Now.ToString("HH:mm:ss.fff"); var ts = DateTime.Now.ToString("HH:mm:ss.fff");
var user = new User { Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), Address = new Address { City = "SpamCity" } }; var user = new User
{
Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90),
Address = new Address { City = "SpamCity" }
};
await _db.Users.InsertAsync(user); await _db.Users.InsertAsync(user);
System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}..."); System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}...");
await Task.Delay(100); await Task.Delay(100);
} }
await _db.SaveChangesAsync(); await _db.SaveChangesAsync();
} }
else if (input.StartsWith("c")) else if (input.StartsWith("c"))
{ {
var userCount = _db.Users.FindAll().Count(); int userCount = _db.Users.FindAll().Count();
var todoCount = _db.TodoLists.FindAll().Count(); int todoCount = _db.TodoLists.FindAll().Count();
System.Console.WriteLine($"Collection 'Users': {userCount} documents"); System.Console.WriteLine($"Collection 'Users': {userCount} documents");
System.Console.WriteLine($"Collection 'TodoLists': {todoCount} documents"); System.Console.WriteLine($"Collection 'TodoLists': {todoCount} documents");
} }
else if (input.StartsWith("p")) else if (input.StartsWith("p"))
{ {
var alice = new User { Id = Guid.NewGuid().ToString(), Name = "Alice", Age = 30, Address = new Address { City = "Paris" } }; var alice = new User
var bob = new User { Id = Guid.NewGuid().ToString(), Name = "Bob", Age = 25, Address = new Address { City = "Rome" } }; { Id = Guid.NewGuid().ToString(), Name = "Alice", Age = 30, Address = new Address { City = "Paris" } };
var bob = new User
{ Id = Guid.NewGuid().ToString(), Name = "Bob", Age = 25, Address = new Address { City = "Rome" } };
await _db.Users.InsertAsync(alice); await _db.Users.InsertAsync(alice);
await _db.Users.InsertAsync(bob); await _db.Users.InsertAsync(bob);
await _db.SaveChangesAsync(); await _db.SaveChangesAsync();
@@ -162,17 +169,19 @@ public class ConsoleInteractiveService : BackgroundService
else if (input.StartsWith("g")) else if (input.StartsWith("g"))
{ {
System.Console.Write("Enter user Id: "); System.Console.Write("Enter user Id: ");
var id = System.Console.ReadLine(); string? id = System.Console.ReadLine();
if (!string.IsNullOrEmpty(id)) if (!string.IsNullOrEmpty(id))
{ {
var u = _db.Users.FindById(id); var u = _db.Users.FindById(id);
System.Console.WriteLine(u != null ? $"Got: {u.Name}, Age {u.Age}, City: {u.Address?.City}" : "Not found"); System.Console.WriteLine(u != null
? $"Got: {u.Name}, Age {u.Age}, City: {u.Address?.City}"
: "Not found");
} }
} }
else if (input.StartsWith("d")) else if (input.StartsWith("d"))
{ {
System.Console.Write("Enter user Id to delete: "); System.Console.Write("Enter user Id to delete: ");
var id = System.Console.ReadLine(); string? id = System.Console.ReadLine();
if (!string.IsNullOrEmpty(id)) if (!string.IsNullOrEmpty(id))
{ {
await _db.Users.DeleteAsync(id); await _db.Users.DeleteAsync(id);
@@ -183,8 +192,8 @@ public class ConsoleInteractiveService : BackgroundService
else if (input.StartsWith("l")) else if (input.StartsWith("l"))
{ {
var peers = _node.Discovery.GetActivePeers(); var peers = _node.Discovery.GetActivePeers();
var handshakeSvc = _serviceProvider.GetService<ZB.MOM.WW.CBDDC.Network.Security.IPeerHandshakeService>(); var handshakeSvc = _serviceProvider.GetService<IPeerHandshakeService>();
var secureIcon = handshakeSvc != null ? "🔒" : "🔓"; string secureIcon = handshakeSvc != null ? "🔒" : "🔓";
System.Console.WriteLine($"Active Peers ({secureIcon}):"); System.Console.WriteLine($"Active Peers ({secureIcon}):");
foreach (var p in peers) foreach (var p in peers)
@@ -203,7 +212,7 @@ public class ConsoleInteractiveService : BackgroundService
{ {
var health = await _healthCheck.CheckAsync(); var health = await _healthCheck.CheckAsync();
var syncStatus = _syncTracker.GetStatus(); var syncStatus = _syncTracker.GetStatus();
var handshakeSvc = _serviceProvider.GetService<ZB.MOM.WW.CBDDC.Network.Security.IPeerHandshakeService>(); var handshakeSvc = _serviceProvider.GetService<IPeerHandshakeService>();
System.Console.WriteLine("=== Health Check ==="); System.Console.WriteLine("=== Health Check ===");
System.Console.WriteLine($"Database: {(health.DatabaseHealthy ? "" : "")}"); System.Console.WriteLine($"Database: {(health.DatabaseHealthy ? "" : "")}");
@@ -216,17 +225,18 @@ public class ConsoleInteractiveService : BackgroundService
if (health.Errors.Any()) if (health.Errors.Any())
{ {
System.Console.WriteLine("Errors:"); System.Console.WriteLine("Errors:");
foreach (var err in health.Errors.Take(3)) System.Console.WriteLine($" - {err}"); foreach (string err in health.Errors.Take(3)) System.Console.WriteLine($" - {err}");
} }
} }
else if (input.StartsWith("ch") || input == "cache") else if (input.StartsWith("ch") || input == "cache")
{ {
var stats = _cache.GetStatistics(); var stats = _cache.GetStatistics();
System.Console.WriteLine($"=== Cache Stats ===\nSize: {stats.Size}\nHits: {stats.Hits}\nMisses: {stats.Misses}\nRate: {stats.HitRate:P1}"); System.Console.WriteLine(
$"=== Cache Stats ===\nSize: {stats.Size}\nHits: {stats.Hits}\nMisses: {stats.Misses}\nRate: {stats.HitRate:P1}");
} }
else if (input.StartsWith("r") && input.Contains("resolver")) else if (input.StartsWith("r") && input.Contains("resolver"))
{ {
var parts = input.Split(' '); string[] parts = input.Split(' ');
if (parts.Length > 1) if (parts.Length > 1)
{ {
var newResolver = parts[1].ToLower() switch var newResolver = parts[1].ToLower() switch
@@ -240,7 +250,7 @@ public class ConsoleInteractiveService : BackgroundService
{ {
// Note: Requires restart to fully apply. For demo, we inform user. // Note: Requires restart to fully apply. For demo, we inform user.
System.Console.WriteLine($"⚠️ Resolver changed to {parts[1].ToUpper()}. Restart node to apply."); System.Console.WriteLine($"⚠️ Resolver changed to {parts[1].ToUpper()}. Restart node to apply.");
System.Console.WriteLine($" (Current session continues with previous resolver)"); System.Console.WriteLine(" (Current session continues with previous resolver)");
} }
else else
{ {
@@ -262,7 +272,7 @@ public class ConsoleInteractiveService : BackgroundService
System.Console.WriteLine($"📋 {list.Name} ({list.Items.Count} items)"); System.Console.WriteLine($"📋 {list.Name} ({list.Items.Count} items)");
foreach (var item in list.Items) foreach (var item in list.Items)
{ {
var status = item.Completed ? "✓" : " "; string status = item.Completed ? "✓" : " ";
System.Console.WriteLine($" [{status}] {item.Task}"); System.Console.WriteLine($" [{status}] {item.Task}");
} }
} }
@@ -281,8 +291,8 @@ public class ConsoleInteractiveService : BackgroundService
Name = "Shopping List", Name = "Shopping List",
Items = new List<TodoItem> Items = new List<TodoItem>
{ {
new TodoItem { Task = "Buy milk", Completed = false }, new() { Task = "Buy milk", Completed = false },
new TodoItem { Task = "Buy bread", Completed = false } new() { Task = "Buy bread", Completed = false }
} }
}; };
@@ -325,23 +335,19 @@ public class ConsoleInteractiveService : BackgroundService
System.Console.WriteLine($" List: {merged.Name}"); System.Console.WriteLine($" List: {merged.Name}");
foreach (var item in merged.Items) foreach (var item in merged.Items)
{ {
var status = item.Completed ? "✓" : " "; string status = item.Completed ? "✓" : " ";
System.Console.WriteLine($" [{status}] {item.Task}"); System.Console.WriteLine($" [{status}] {item.Task}");
} }
var resolver = _serviceProvider.GetRequiredService<IConflictResolver>(); var resolver = _serviceProvider.GetRequiredService<IConflictResolver>();
var resolverType = resolver.GetType().Name; string resolverType = resolver.GetType().Name;
System.Console.WriteLine($"\n Resolution Strategy: {resolverType}"); System.Console.WriteLine($"\n Resolution Strategy: {resolverType}");
if (resolverType.Contains("Recursive")) if (resolverType.Contains("Recursive"))
{
System.Console.WriteLine(" → Items merged by 'id', both edits preserved"); System.Console.WriteLine(" → Items merged by 'id', both edits preserved");
}
else else
{
System.Console.WriteLine(" → Last write wins, Node B changes override Node A"); System.Console.WriteLine(" → Last write wins, Node B changes override Node A");
} }
}
System.Console.WriteLine("\n✓ Demo complete. Run 'todos' to see all lists.\n"); System.Console.WriteLine("\n✓ Demo complete. Run 'todos' to see all lists.\n");
} }

View File

@@ -1,33 +1,26 @@
using Microsoft.Extensions.Configuration; using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Cache;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Core.Diagnostics;
using ZB.MOM.WW.CBDDC.Core.Resilience;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Sample.Console;
using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Serilog; using Serilog;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
namespace ZB.MOM.WW.CBDDC.Sample.Console; namespace ZB.MOM.WW.CBDDC.Sample.Console;
// Local User/Address classes removed in favor of Shared project // Local User/Address classes removed in favor of Shared project
class Program internal class Program
{ {
static async Task Main(string[] args) private static async Task Main(string[] args)
{ {
var builder = Host.CreateApplicationBuilder(args); var builder = Host.CreateApplicationBuilder(args);
// Configuration // Configuration
builder.Configuration.SetBasePath(Directory.GetCurrentDirectory()) builder.Configuration.SetBasePath(Directory.GetCurrentDirectory())
.AddJsonFile("appsettings.json", optional: true, reloadOnChange: true); .AddJsonFile("appsettings.json", true, true);
// Logging // Logging
builder.Logging.ClearProviders(); builder.Logging.ClearProviders();
@@ -38,34 +31,31 @@ class Program
.Enrich.WithProperty("Application", "CBDDC.Sample.Console") .Enrich.WithProperty("Application", "CBDDC.Sample.Console")
.WriteTo.Console()); .WriteTo.Console());
var randomPort = new Random().Next(1000, 9999); int randomPort = new Random().Next(1000, 9999);
// Node ID // Node ID
string nodeId = args.Length > 0 ? args[0] : ("node-" + randomPort); string nodeId = args.Length > 0 ? args[0] : "node-" + randomPort;
int tcpPort = args.Length > 1 ? int.Parse(args[1]) : randomPort; int tcpPort = args.Length > 1 ? int.Parse(args[1]) : randomPort;
// Conflict Resolution Strategy (can be switched at runtime via service replacement) // Conflict Resolution Strategy (can be switched at runtime via service replacement)
var useRecursiveMerge = args.Contains("--merge"); bool useRecursiveMerge = args.Contains("--merge");
if (useRecursiveMerge) if (useRecursiveMerge) builder.Services.AddSingleton<IConflictResolver, RecursiveNodeMergeConflictResolver>();
{
builder.Services.AddSingleton<IConflictResolver, RecursiveNodeMergeConflictResolver>();
}
IPeerNodeConfigurationProvider peerNodeConfigurationProvider = new StaticPeerNodeConfigurationProvider( IPeerNodeConfigurationProvider peerNodeConfigurationProvider = new StaticPeerNodeConfigurationProvider(
new PeerNodeConfiguration new PeerNodeConfiguration
{ {
NodeId = nodeId, NodeId = nodeId,
TcpPort = tcpPort, TcpPort = tcpPort,
AuthToken = "Test-Cluster-Key", AuthToken = "Test-Cluster-Key"
//KnownPeers = builder.Configuration.GetSection("CBDDC:KnownPeers").Get<List<KnownPeerConfiguration>>() ?? new() //KnownPeers = builder.Configuration.GetSection("CBDDC:KnownPeers").Get<List<KnownPeerConfiguration>>() ?? new()
}); });
builder.Services.AddSingleton<IPeerNodeConfigurationProvider>(peerNodeConfigurationProvider); builder.Services.AddSingleton(peerNodeConfigurationProvider);
// Database path // Database path
var dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data"); string dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data");
Directory.CreateDirectory(dataPath); Directory.CreateDirectory(dataPath);
var databasePath = Path.Combine(dataPath, $"{nodeId}.blite"); string databasePath = Path.Combine(dataPath, $"{nodeId}.blite");
// Register CBDDC Services using Fluent Extensions with BLite, SampleDbContext, and SampleDocumentStore // Register CBDDC Services using Fluent Extensions with BLite, SampleDbContext, and SampleDocumentStore
builder.Services.AddCBDDCCore() builder.Services.AddCBDDCCore()
@@ -86,12 +76,7 @@ class Program
private class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider private class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider
{ {
/// <summary> /// <summary>
/// Gets or sets the current peer node configuration. /// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider" /> class.
/// </summary>
public PeerNodeConfiguration Configuration { get; set; }
/// <summary>
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider"/> class.
/// </summary> /// </summary>
/// <param name="configuration">The initial peer node configuration.</param> /// <param name="configuration">The initial peer node configuration.</param>
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration) public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
@@ -99,6 +84,11 @@ class Program
Configuration = configuration; Configuration = configuration;
} }
/// <summary>
/// Gets or sets the current peer node configuration.
/// </summary>
public PeerNodeConfiguration Configuration { get; }
/// <summary> /// <summary>
/// Occurs when the peer node configuration changes. /// Occurs when the peer node configuration changes.
/// </summary> /// </summary>
@@ -122,5 +112,4 @@ class Program
ConfigurationChanged?.Invoke(this, newConfig); ConfigurationChanged?.Invoke(this, newConfig);
} }
} }
} }

View File

@@ -5,21 +5,25 @@ This sample demonstrates the core features of CBDDC, a distributed peer-to-peer
## Features Demonstrated ## Features Demonstrated
### 🔑 Primary Keys & Auto-Generation ### 🔑 Primary Keys & Auto-Generation
- Automatic GUID generation for entities - Automatic GUID generation for entities
- Convention-based key detection (`Id` property) - Convention-based key detection (`Id` property)
- `[PrimaryKey]` attribute support - `[PrimaryKey]` attribute support
### 🎯 Generic Type-Safe API ### 🎯 Generic Type-Safe API
- `Collection<T>()` for compile-time type safety - `Collection<T>()` for compile-time type safety
- Keyless `Put(entity)` with auto-key extraction - Keyless `Put(entity)` with auto-key extraction
- IntelliSense-friendly operations - IntelliSense-friendly operations
### 🔍 LINQ Query Support ### 🔍 LINQ Query Support
- Expression-based queries - Expression-based queries
- Paging and sorting - Paging and sorting
- Complex predicates (>, >=, ==, !=, nested properties) - Complex predicates (>, >=, ==, !=, nested properties)
### 🌐 Network Synchronization ### 🌐 Network Synchronization
- UDP peer discovery - UDP peer discovery
- TCP synchronization - TCP synchronization
- Automatic conflict resolution (Last-Write-Wins) - Automatic conflict resolution (Last-Write-Wins)
@@ -35,16 +39,19 @@ dotnet run
### Multi-Node (Peer-to-Peer) ### Multi-Node (Peer-to-Peer)
Terminal 1: Terminal 1:
```bash ```bash
dotnet run -- --node-id node1 --tcp-port 5001 --udp-port 6001 dotnet run -- --node-id node1 --tcp-port 5001 --udp-port 6001
``` ```
Terminal 2: Terminal 2:
```bash ```bash
dotnet run -- --node-id node2 --tcp-port 5002 --udp-port 6002 dotnet run -- --node-id node2 --tcp-port 5002 --udp-port 6002
``` ```
Terminal 3: Terminal 3:
```bash ```bash
dotnet run -- --node-id node3 --tcp-port 5003 --udp-port 6003 dotnet run -- --node-id node3 --tcp-port 5003 --udp-port 6003
``` ```
@@ -54,7 +61,7 @@ Changes made on any node will automatically sync to all peers!
## Available Commands ## Available Commands
| Command | Description | | Command | Description |
|---------|-------------| |---------|----------------------------------------|
| `p` | Put Alice and Bob (auto-generated IDs) | | `p` | Put Alice and Bob (auto-generated IDs) |
| `g` | Get user by ID (prompts for ID) | | `g` | Get user by ID (prompts for ID) |
| `d` | Delete user by ID (prompts for ID) | | `d` | Delete user by ID (prompts for ID) |

View File

@@ -2,26 +2,11 @@
using BLite.Core.Metadata; using BLite.Core.Metadata;
using BLite.Core.Storage; using BLite.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite; using ZB.MOM.WW.CBDDC.Persistence.BLite;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Sample.Console; namespace ZB.MOM.WW.CBDDC.Sample.Console;
public partial class SampleDbContext : CBDDCDocumentDbContext public class SampleDbContext : CBDDCDocumentDbContext
{ {
/// <summary>
/// Gets the users collection.
/// </summary>
public DocumentCollection<string, User> Users { get; private set; } = null!;
/// <summary>
/// Gets the todo lists collection.
/// </summary>
public DocumentCollection<string, TodoList> TodoLists { get; private set; } = null!;
/// <summary> /// <summary>
/// Initializes a new instance of the SampleDbContext class using the specified database file path. /// Initializes a new instance of the SampleDbContext class using the specified database file path.
/// </summary> /// </summary>
@@ -40,6 +25,16 @@ public partial class SampleDbContext : CBDDCDocumentDbContext
{ {
} }
/// <summary>
/// Gets the users collection.
/// </summary>
public DocumentCollection<string, User> Users { get; private set; } = null!;
/// <summary>
/// Gets the todo lists collection.
/// </summary>
public DocumentCollection<string, TodoList> TodoLists { get; private set; } = null!;
/// <inheritdoc /> /// <inheritdoc />
protected override void OnModelCreating(ModelBuilder modelBuilder) protected override void OnModelCreating(ModelBuilder modelBuilder)
{ {

View File

@@ -1,10 +1,9 @@
using ZB.MOM.WW.CBDDC.Core; using System.Text.Json;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync; using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite; using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging;
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Sample.Console; namespace ZB.MOM.WW.CBDDC.Sample.Console;
@@ -18,7 +17,7 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
private const string TodoListsCollection = "TodoLists"; private const string TodoListsCollection = "TodoLists";
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="SampleDocumentStore"/> class. /// Initializes a new instance of the <see cref="SampleDocumentStore" /> class.
/// </summary> /// </summary>
/// <param name="context">The sample database context.</param> /// <param name="context">The sample database context.</param>
/// <param name="configProvider">The peer node configuration provider.</param> /// <param name="configProvider">The peer node configuration provider.</param>
@@ -37,6 +36,16 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id); WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id);
} }
#region Helper Methods
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
if (entity == null) return null;
return JsonSerializer.SerializeToElement(entity);
}
#endregion
#region Abstract Method Implementations #region Abstract Method Implementations
/// <inheritdoc /> /// <inheritdoc />
@@ -49,12 +58,10 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
/// <inheritdoc /> /// <inheritdoc />
protected override async Task ApplyContentToEntitiesBatchAsync( protected override async Task ApplyContentToEntitiesBatchAsync(
IEnumerable<(string Collection, string Key, JsonElement Content)> documents, CancellationToken cancellationToken) IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
CancellationToken cancellationToken)
{ {
foreach (var (collection, key, content) in documents) foreach ((string collection, string key, var content) in documents) UpsertEntity(collection, key, content);
{
UpsertEntity(collection, key, content);
}
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
@@ -91,7 +98,7 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
protected override Task<JsonElement?> GetEntityAsJsonAsync( protected override Task<JsonElement?> GetEntityAsJsonAsync(
string collection, string key, CancellationToken cancellationToken) string collection, string key, CancellationToken cancellationToken)
{ {
return Task.FromResult<JsonElement?>(collection switch return Task.FromResult(collection switch
{ {
UsersCollection => SerializeEntity(_context.Users.Find(u => u.Id == key).FirstOrDefault()), UsersCollection => SerializeEntity(_context.Users.Find(u => u.Id == key).FirstOrDefault()),
TodoListsCollection => SerializeEntity(_context.TodoLists.Find(t => t.Id == key).FirstOrDefault()), TodoListsCollection => SerializeEntity(_context.TodoLists.Find(t => t.Id == key).FirstOrDefault()),
@@ -111,10 +118,7 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
protected override async Task RemoveEntitiesBatchAsync( protected override async Task RemoveEntitiesBatchAsync(
IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken) IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken)
{ {
foreach (var (collection, key) in documents) foreach ((string collection, string key) in documents) DeleteEntity(collection, key);
{
DeleteEntity(collection, key);
}
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
@@ -151,14 +155,4 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
} }
#endregion #endregion
#region Helper Methods
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
if (entity == null) return null;
return JsonSerializer.SerializeToElement(entity);
}
#endregion
} }

View File

@@ -1,4 +1,3 @@
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations; using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Sample.Console; namespace ZB.MOM.WW.CBDDC.Sample.Console;

View File

@@ -1,24 +1,24 @@
<Project Sdk="Microsoft.NET.Sdk"> <Project Sdk="Microsoft.NET.Sdk">
<ItemGroup> <ItemGroup>
<PackageReference Include="Lifter.Core" Version="1.1.0" /> <PackageReference Include="Lifter.Core" Version="1.1.0"/>
<PackageReference Include="BLite.SourceGenerators" Version="1.3.1"> <PackageReference Include="BLite.SourceGenerators" Version="1.3.1">
<PrivateAssets>all</PrivateAssets> <PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
</PackageReference> </PackageReference>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" /> <ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" /> <ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj" /> <ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="9.0.0" /> <PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="9.0.0" /> <PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="9.0.0" /> <PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.0" /> <PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.0"/>
<PackageReference Include="Serilog" Version="4.2.0" /> <PackageReference Include="Serilog" Version="4.2.0"/>
<PackageReference Include="Serilog.Extensions.Hosting" Version="9.0.0" /> <PackageReference Include="Serilog.Extensions.Hosting" Version="9.0.0"/>
<PackageReference Include="Serilog.Sinks.Console" Version="6.0.0" /> <PackageReference Include="Serilog.Sinks.Console" Version="6.0.0"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>

View File

@@ -1,5 +1,5 @@
{ {
"Logging": { "Logging": {
"LogLevel": { "LogLevel": {
"Default": "Information", "Default": "Information",
"Microsoft": "Warning", "Microsoft": "Warning",
@@ -9,7 +9,7 @@
"ZB.MOM.WW.CBDDC.Core.Storage.OplogCoordinator": "Warning", "ZB.MOM.WW.CBDDC.Core.Storage.OplogCoordinator": "Warning",
"ZB.MOM.WW.CBDDC.Persistence": "Warning" "ZB.MOM.WW.CBDDC.Persistence": "Warning"
} }
}, },
"CBDDC": { "CBDDC": {
"Network": { "Network": {
"TcpPort": 5001, "TcpPort": 5001,

View File

@@ -1,10 +1,8 @@
using System;
using System.Collections.Generic; using System.Collections.Generic;
using ZB.MOM.WW.CBDDC.Core; using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using System.Threading.Tasks; using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Core.Cache; namespace ZB.MOM.WW.CBDDC.Core.Cache;
@@ -14,17 +12,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Cache;
internal class CacheEntry internal class CacheEntry
{ {
/// <summary> /// <summary>
/// Gets the cached document. /// Initializes a new instance of the <see cref="CacheEntry" /> class.
/// </summary>
public Document Document { get; }
/// <summary>
/// Gets the linked-list node used for LRU tracking.
/// </summary>
public LinkedListNode<string> Node { get; }
/// <summary>
/// Initializes a new instance of the <see cref="CacheEntry"/> class.
/// </summary> /// </summary>
/// <param name="document">The cached document.</param> /// <param name="document">The cached document.</param>
/// <param name="node">The linked-list node used for LRU tracking.</param> /// <param name="node">The linked-list node used for LRU tracking.</param>
@@ -33,6 +21,16 @@ internal class CacheEntry
Document = document; Document = document;
Node = node; Node = node;
} }
/// <summary>
/// Gets the cached document.
/// </summary>
public Document Document { get; }
/// <summary>
/// Gets the linked-list node used for LRU tracking.
/// </summary>
public LinkedListNode<string> Node { get; }
} }
/// <summary> /// <summary>
@@ -40,22 +38,23 @@ internal class CacheEntry
/// </summary> /// </summary>
public class DocumentCache : IDocumentCache public class DocumentCache : IDocumentCache
{ {
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly Dictionary<string, CacheEntry> _cache = new(); private readonly Dictionary<string, CacheEntry> _cache = new();
private readonly LinkedList<string> _lru = new();
private readonly ILogger<DocumentCache> _logger;
private readonly object _lock = new(); private readonly object _lock = new();
private readonly ILogger<DocumentCache> _logger;
private readonly LinkedList<string> _lru = new();
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
// Statistics // Statistics
private long _hits = 0; private long _hits;
private long _misses = 0; private long _misses;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="DocumentCache"/> class. /// Initializes a new instance of the <see cref="DocumentCache" /> class.
/// </summary> /// </summary>
/// <param name="peerNodeConfigurationProvider">The configuration provider used for cache size limits.</param> /// <param name="peerNodeConfigurationProvider">The configuration provider used for cache size limits.</param>
/// <param name="logger">The logger instance.</param> /// <param name="logger">The logger instance.</param>
public DocumentCache(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger<DocumentCache>? logger = null) public DocumentCache(IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
ILogger<DocumentCache>? logger = null)
{ {
_peerNodeConfigurationProvider = peerNodeConfigurationProvider; _peerNodeConfigurationProvider = peerNodeConfigurationProvider;
_logger = logger ?? NullLogger<DocumentCache>.Instance; _logger = logger ?? NullLogger<DocumentCache>.Instance;
@@ -66,7 +65,7 @@ public class DocumentCache : IDocumentCache
/// </summary> /// </summary>
/// <param name="collection">The document collection name.</param> /// <param name="collection">The document collection name.</param>
/// <param name="key">The document key.</param> /// <param name="key">The document key.</param>
/// <returns>A task whose result is the cached document, or <see langword="null"/> if not found.</returns> /// <returns>A task whose result is the cached document, or <see langword="null" /> if not found.</returns>
public async Task<Document?> Get(string collection, string key) public async Task<Document?> Get(string collection, string key)
{ {
lock (_lock) lock (_lock)
@@ -118,7 +117,7 @@ public class DocumentCache : IDocumentCache
// Evict if full // Evict if full
if (_cache.Count >= peerConfig.MaxDocumentCacheSize) if (_cache.Count >= peerConfig.MaxDocumentCacheSize)
{ {
var oldest = _lru.Last!.Value; string oldest = _lru.Last!.Value;
_lru.RemoveLast(); _lru.RemoveLast();
_cache.Remove(oldest); _cache.Remove(oldest);
_logger.LogTrace("Evicted oldest cache entry {Key}", oldest); _logger.LogTrace("Evicted oldest cache entry {Key}", oldest);
@@ -157,7 +156,7 @@ public class DocumentCache : IDocumentCache
{ {
lock (_lock) lock (_lock)
{ {
var count = _cache.Count; int count = _cache.Count;
_cache.Clear(); _cache.Clear();
_lru.Clear(); _lru.Clear();
_logger.LogInformation("Cleared cache ({Count} entries)", count); _logger.LogInformation("Cleared cache ({Count} entries)", count);
@@ -171,8 +170,8 @@ public class DocumentCache : IDocumentCache
{ {
lock (_lock) lock (_lock)
{ {
var total = _hits + _misses; long total = _hits + _misses;
var hitRate = total > 0 ? (double)_hits / total : 0; double hitRate = total > 0 ? (double)_hits / total : 0;
return (_hits, _misses, _cache.Count, hitRate); return (_hits, _misses, _cache.Count, hitRate);
} }
} }

View File

@@ -1,12 +1,12 @@
using System.Threading.Tasks; using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Cache namespace ZB.MOM.WW.CBDDC.Core.Cache;
/// <summary>
/// Defines operations for caching documents by collection and key.
/// </summary>
public interface IDocumentCache
{ {
/// <summary>
/// Defines operations for caching documents by collection and key.
/// </summary>
public interface IDocumentCache
{
/// <summary> /// <summary>
/// Clears all cached documents. /// Clears all cached documents.
/// </summary> /// </summary>
@@ -17,7 +17,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Cache
/// </summary> /// </summary>
/// <param name="collection">The collection name.</param> /// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param> /// <param name="key">The document key.</param>
/// <returns>The cached document, or <see langword="null"/> if not found.</returns> /// <returns>The cached document, or <see langword="null" /> if not found.</returns>
Task<Document?> Get(string collection, string key); Task<Document?> Get(string collection, string key);
/// <summary> /// <summary>
@@ -41,5 +41,4 @@ namespace ZB.MOM.WW.CBDDC.Core.Cache
/// <param name="document">The document to cache.</param> /// <param name="document">The document to cache.</param>
/// <returns>A task that represents the asynchronous operation.</returns> /// <returns>A task that represents the asynchronous operation.</returns>
Task Set(string collection, string key, Document document); Task Set(string collection, string key, Document document);
}
} }

View File

@@ -9,16 +9,16 @@ namespace ZB.MOM.WW.CBDDC.Core;
public class ChangesAppliedEventArgs : EventArgs public class ChangesAppliedEventArgs : EventArgs
{ {
/// <summary> /// <summary>
/// Gets the changes that were applied. /// Initializes a new instance of the <see cref="ChangesAppliedEventArgs" /> class.
/// </summary>
public IEnumerable<OplogEntry> Changes { get; }
/// <summary>
/// Initializes a new instance of the <see cref="ChangesAppliedEventArgs"/> class.
/// </summary> /// </summary>
/// <param name="changes">The changes that were applied.</param> /// <param name="changes">The changes that were applied.</param>
public ChangesAppliedEventArgs(IEnumerable<OplogEntry> changes) public ChangesAppliedEventArgs(IEnumerable<OplogEntry> changes)
{ {
Changes = changes; Changes = changes;
} }
/// <summary>
/// Gets the changes that were applied.
/// </summary>
public IEnumerable<OplogEntry> Changes { get; }
} }

View File

@@ -1,5 +1,4 @@
using System; using System;
using System.Collections.Generic;
using System.Linq; using System.Linq;
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
@@ -14,12 +13,12 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
/// </summary> /// </summary>
public class CBDDCHealthCheck : ICBDDCHealthCheck public class CBDDCHealthCheck : ICBDDCHealthCheck
{ {
private readonly ILogger<CBDDCHealthCheck> _logger;
private readonly IOplogStore _store; private readonly IOplogStore _store;
private readonly ISyncStatusTracker _syncTracker; private readonly ISyncStatusTracker _syncTracker;
private readonly ILogger<CBDDCHealthCheck> _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CBDDCHealthCheck"/> class. /// Initializes a new instance of the <see cref="CBDDCHealthCheck" /> class.
/// </summary> /// </summary>
/// <param name="store">The oplog store used for database health checks.</param> /// <param name="store">The oplog store used for database health checks.</param>
/// <param name="syncTracker">The tracker that provides synchronization status.</param> /// <param name="syncTracker">The tracker that provides synchronization status.</param>
@@ -65,9 +64,7 @@ public class CBDDCHealthCheck : ICBDDCHealthCheck
// Add error messages from sync tracker // Add error messages from sync tracker
foreach (var error in syncStatus.SyncErrors.Take(5)) // Last 5 errors foreach (var error in syncStatus.SyncErrors.Take(5)) // Last 5 errors
{
status.Errors.Add($"{error.Timestamp:yyyy-MM-dd HH:mm:ss} - {error.Message}"); status.Errors.Add($"{error.Timestamp:yyyy-MM-dd HH:mm:ss} - {error.Message}");
}
// Add metadata // Add metadata
status.Metadata["TotalDocumentsSynced"] = syncStatus.TotalDocumentsSynced; status.Metadata["TotalDocumentsSynced"] = syncStatus.TotalDocumentsSynced;

View File

@@ -1,15 +1,14 @@
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Diagnostics namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
public interface ICBDDCHealthCheck
{ {
public interface ICBDDCHealthCheck
{
/// <summary> /// <summary>
/// Performs a health check for the implementing component. /// Performs a health check for the implementing component.
/// </summary> /// </summary>
/// <param name="cancellationToken">Cancellation token.</param> /// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The resulting health status.</returns> /// <returns>The resulting health status.</returns>
Task<HealthStatus> CheckAsync(CancellationToken cancellationToken = default); Task<HealthStatus> CheckAsync(CancellationToken cancellationToken = default);
}
} }

View File

@@ -1,12 +1,12 @@
using System; using System;
namespace ZB.MOM.WW.CBDDC.Core.Diagnostics namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
/// <summary>
/// Tracks synchronization status and peer health metrics.
/// </summary>
public interface ISyncStatusTracker
{ {
/// <summary>
/// Tracks synchronization status and peer health metrics.
/// </summary>
public interface ISyncStatusTracker
{
/// <summary> /// <summary>
/// Removes peer entries that have been inactive longer than the specified threshold. /// Removes peer entries that have been inactive longer than the specified threshold.
/// </summary> /// </summary>
@@ -16,7 +16,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics
/// <summary> /// <summary>
/// Gets the current synchronization status snapshot. /// Gets the current synchronization status snapshot.
/// </summary> /// </summary>
/// <returns>The current <see cref="SyncStatus"/>.</returns> /// <returns>The current <see cref="SyncStatus" />.</returns>
SyncStatus GetStatus(); SyncStatus GetStatus();
/// <summary> /// <summary>
@@ -59,5 +59,4 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics
/// <param name="address">The peer network address.</param> /// <param name="address">The peer network address.</param>
/// <param name="isConnected">A value indicating whether the peer is connected.</param> /// <param name="isConnected">A value indicating whether the peer is connected.</param>
void UpdatePeer(string nodeId, string address, bool isConnected); void UpdatePeer(string nodeId, string address, bool isConnected);
}
} }

View File

@@ -11,19 +11,19 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
/// </summary> /// </summary>
public class SyncStatusTracker : ISyncStatusTracker public class SyncStatusTracker : ISyncStatusTracker
{ {
private readonly ILogger<SyncStatusTracker> _logger;
private readonly object _lock = new();
private bool _isOnline = false;
private DateTime? _lastSyncTime;
private readonly List<PeerInfo> _activePeers = new();
private readonly Queue<SyncError> _recentErrors = new();
private long _totalDocumentsSynced = 0;
private long _totalBytesTransferred = 0;
private const int MaxErrorHistory = 50; private const int MaxErrorHistory = 50;
private readonly List<PeerInfo> _activePeers = new();
private readonly object _lock = new();
private readonly ILogger<SyncStatusTracker> _logger;
private readonly Queue<SyncError> _recentErrors = new();
private bool _isOnline;
private DateTime? _lastSyncTime;
private long _totalBytesTransferred;
private long _totalDocumentsSynced;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="SyncStatusTracker"/> class. /// Initializes a new instance of the <see cref="SyncStatusTracker" /> class.
/// </summary> /// </summary>
/// <param name="logger">Optional logger instance.</param> /// <param name="logger">Optional logger instance.</param>
public SyncStatusTracker(ILogger<SyncStatusTracker>? logger = null) public SyncStatusTracker(ILogger<SyncStatusTracker>? logger = null)
@@ -84,10 +84,7 @@ public class SyncStatusTracker : ISyncStatusTracker
_recentErrors.Enqueue(error); _recentErrors.Enqueue(error);
while (_recentErrors.Count > MaxErrorHistory) while (_recentErrors.Count > MaxErrorHistory) _recentErrors.Dequeue();
{
_recentErrors.Dequeue();
}
_logger.LogWarning("Sync error recorded: {Message} (Peer: {Peer})", message, peerNodeId ?? "N/A"); _logger.LogWarning("Sync error recorded: {Message} (Peer: {Peer})", message, peerNodeId ?? "N/A");
} }
@@ -135,10 +132,7 @@ public class SyncStatusTracker : ISyncStatusTracker
lock (_lock) lock (_lock)
{ {
var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId); var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId);
if (peer != null) if (peer != null) peer.SuccessfulSyncs++;
{
peer.SuccessfulSyncs++;
}
} }
} }
@@ -151,10 +145,7 @@ public class SyncStatusTracker : ISyncStatusTracker
lock (_lock) lock (_lock)
{ {
var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId); var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId);
if (peer != null) if (peer != null) peer.FailedSyncs++;
{
peer.FailedSyncs++;
}
} }
} }
@@ -187,12 +178,9 @@ public class SyncStatusTracker : ISyncStatusTracker
lock (_lock) lock (_lock)
{ {
var cutoff = DateTime.UtcNow - inactiveThreshold; var cutoff = DateTime.UtcNow - inactiveThreshold;
var removed = _activePeers.RemoveAll(p => p.LastSeen < cutoff); int removed = _activePeers.RemoveAll(p => p.LastSeen < cutoff);
if (removed > 0) if (removed > 0) _logger.LogInformation("Removed {Count} inactive peers", removed);
{
_logger.LogInformation("Removed {Count} inactive peers", removed);
}
} }
} }
} }

View File

@@ -1,6 +1,5 @@
using ZB.MOM.WW.CBDDC.Core.Sync;
using System;
using System.Text.Json; using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Core; namespace ZB.MOM.WW.CBDDC.Core;
@@ -9,15 +8,32 @@ namespace ZB.MOM.WW.CBDDC.Core;
/// </summary> /// </summary>
public class Document public class Document
{ {
/// <summary>
/// Initializes a new instance of the <see cref="Document" /> class.
/// </summary>
/// <param name="collection">The collection that contains the document.</param>
/// <param name="key">The document key.</param>
/// <param name="content">The document content.</param>
/// <param name="updatedAt">The timestamp of the latest applied update.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public Document(string collection, string key, JsonElement content, HlcTimestamp updatedAt, bool isDeleted)
{
Collection = collection;
Key = key;
Content = content;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
/// <summary> /// <summary>
/// Gets the collection that contains the document. /// Gets the collection that contains the document.
/// </summary> /// </summary>
public string Collection { get; private set; } public string Collection { get; }
/// <summary> /// <summary>
/// Gets the document key. /// Gets the document key.
/// </summary> /// </summary>
public string Key { get; private set; } public string Key { get; }
/// <summary> /// <summary>
/// Gets the document content. /// Gets the document content.
@@ -34,23 +50,6 @@ public class Document
/// </summary> /// </summary>
public bool IsDeleted { get; private set; } public bool IsDeleted { get; private set; }
/// <summary>
/// Initializes a new instance of the <see cref="Document"/> class.
/// </summary>
/// <param name="collection">The collection that contains the document.</param>
/// <param name="key">The document key.</param>
/// <param name="content">The document content.</param>
/// <param name="updatedAt">The timestamp of the latest applied update.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public Document(string collection, string key, JsonElement content, HlcTimestamp updatedAt, bool isDeleted)
{
Collection = collection;
Key = key;
Content = content;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
/// <summary> /// <summary>
/// Merges a remote operation into the current document using last-write-wins or a conflict resolver. /// Merges a remote operation into the current document using last-write-wins or a conflict resolver.
/// </summary> /// </summary>
@@ -70,8 +69,10 @@ public class Document
UpdatedAt = oplogEntry.Timestamp; UpdatedAt = oplogEntry.Timestamp;
IsDeleted = oplogEntry.Operation == OperationType.Delete; IsDeleted = oplogEntry.Operation == OperationType.Delete;
} }
return; return;
} }
var resolutionResult = resolver.Resolve(this, oplogEntry); var resolutionResult = resolver.Resolve(this, oplogEntry);
if (resolutionResult.ShouldApply && resolutionResult.MergedDocument != null) if (resolutionResult.ShouldApply && resolutionResult.MergedDocument != null)
{ {

View File

@@ -8,12 +8,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Exceptions;
public class CBDDCException : Exception public class CBDDCException : Exception
{ {
/// <summary> /// <summary>
/// Error code for programmatic error handling. /// Initializes a new instance of the <see cref="CBDDCException" /> class.
/// </summary>
public string ErrorCode { get; }
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCException"/> class.
/// </summary> /// </summary>
/// <param name="errorCode">The application-specific error code.</param> /// <param name="errorCode">The application-specific error code.</param>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
@@ -24,7 +19,7 @@ public class CBDDCException : Exception
} }
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CBDDCException"/> class. /// Initializes a new instance of the <see cref="CBDDCException" /> class.
/// </summary> /// </summary>
/// <param name="errorCode">The application-specific error code.</param> /// <param name="errorCode">The application-specific error code.</param>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
@@ -34,6 +29,11 @@ public class CBDDCException : Exception
{ {
ErrorCode = errorCode; ErrorCode = errorCode;
} }
/// <summary>
/// Error code for programmatic error handling.
/// </summary>
public string ErrorCode { get; }
} }
/// <summary> /// <summary>
@@ -42,19 +42,23 @@ public class CBDDCException : Exception
public class NetworkException : CBDDCException public class NetworkException : CBDDCException
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="NetworkException"/> class. /// Initializes a new instance of the <see cref="NetworkException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
public NetworkException(string message) public NetworkException(string message)
: base("NETWORK_ERROR", message) { } : base("NETWORK_ERROR", message)
{
}
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="NetworkException"/> class. /// Initializes a new instance of the <see cref="NetworkException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param> /// <param name="innerException">The exception that caused the current exception.</param>
public NetworkException(string message, Exception innerException) public NetworkException(string message, Exception innerException)
: base("NETWORK_ERROR", message, innerException) { } : base("NETWORK_ERROR", message, innerException)
{
}
} }
/// <summary> /// <summary>
@@ -63,19 +67,23 @@ public class NetworkException : CBDDCException
public class PersistenceException : CBDDCException public class PersistenceException : CBDDCException
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="PersistenceException"/> class. /// Initializes a new instance of the <see cref="PersistenceException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
public PersistenceException(string message) public PersistenceException(string message)
: base("PERSISTENCE_ERROR", message) { } : base("PERSISTENCE_ERROR", message)
{
}
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="PersistenceException"/> class. /// Initializes a new instance of the <see cref="PersistenceException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param> /// <param name="innerException">The exception that caused the current exception.</param>
public PersistenceException(string message, Exception innerException) public PersistenceException(string message, Exception innerException)
: base("PERSISTENCE_ERROR", message, innerException) { } : base("PERSISTENCE_ERROR", message, innerException)
{
}
} }
/// <summary> /// <summary>
@@ -84,19 +92,23 @@ public class PersistenceException : CBDDCException
public class SyncException : CBDDCException public class SyncException : CBDDCException
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="SyncException"/> class. /// Initializes a new instance of the <see cref="SyncException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
public SyncException(string message) public SyncException(string message)
: base("SYNC_ERROR", message) { } : base("SYNC_ERROR", message)
{
}
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="SyncException"/> class. /// Initializes a new instance of the <see cref="SyncException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param> /// <param name="innerException">The exception that caused the current exception.</param>
public SyncException(string message, Exception innerException) public SyncException(string message, Exception innerException)
: base("SYNC_ERROR", message, innerException) { } : base("SYNC_ERROR", message, innerException)
{
}
} }
/// <summary> /// <summary>
@@ -105,11 +117,13 @@ public class SyncException : CBDDCException
public class ConfigurationException : CBDDCException public class ConfigurationException : CBDDCException
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="ConfigurationException"/> class. /// Initializes a new instance of the <see cref="ConfigurationException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
public ConfigurationException(string message) public ConfigurationException(string message)
: base("CONFIG_ERROR", message) { } : base("CONFIG_ERROR", message)
{
}
} }
/// <summary> /// <summary>
@@ -118,19 +132,23 @@ public class ConfigurationException : CBDDCException
public class DatabaseCorruptionException : PersistenceException public class DatabaseCorruptionException : PersistenceException
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="DatabaseCorruptionException"/> class. /// Initializes a new instance of the <see cref="DatabaseCorruptionException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
public DatabaseCorruptionException(string message) public DatabaseCorruptionException(string message)
: base(message) { } : base(message)
{
}
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="DatabaseCorruptionException"/> class. /// Initializes a new instance of the <see cref="DatabaseCorruptionException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param> /// <param name="innerException">The exception that caused the current exception.</param>
public DatabaseCorruptionException(string message, Exception innerException) public DatabaseCorruptionException(string message, Exception innerException)
: base(message, innerException) { } : base(message, innerException)
{
}
} }
/// <summary> /// <summary>
@@ -139,32 +157,23 @@ public class DatabaseCorruptionException : PersistenceException
public class TimeoutException : CBDDCException public class TimeoutException : CBDDCException
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="TimeoutException"/> class. /// Initializes a new instance of the <see cref="TimeoutException" /> class.
/// </summary> /// </summary>
/// <param name="operation">The operation that timed out.</param> /// <param name="operation">The operation that timed out.</param>
/// <param name="timeoutMs">The timeout in milliseconds.</param> /// <param name="timeoutMs">The timeout in milliseconds.</param>
public TimeoutException(string operation, int timeoutMs) public TimeoutException(string operation, int timeoutMs)
: base("TIMEOUT_ERROR", $"Operation '{operation}' timed out after {timeoutMs}ms") { } : base("TIMEOUT_ERROR", $"Operation '{operation}' timed out after {timeoutMs}ms")
{
}
} }
/// <summary> /// <summary>
/// Exception thrown when a document is not found in a collection. /// Exception thrown when a document is not found in a collection.
/// </summary> /// </summary>
public class DocumentNotFoundException : PersistenceException public class DocumentNotFoundException : PersistenceException
{ {
/// <summary> /// <summary>
/// Gets the document key that was not found. /// Initializes a new instance of the <see cref="DocumentNotFoundException" /> class.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the collection where the document was searched.
/// </summary>
public string Collection { get; }
/// <summary>
/// Initializes a new instance of the <see cref="DocumentNotFoundException"/> class.
/// </summary> /// </summary>
/// <param name="collection">The collection where the document was searched.</param> /// <param name="collection">The collection where the document was searched.</param>
/// <param name="key">The document key that was not found.</param> /// <param name="key">The document key that was not found.</param>
@@ -174,6 +183,16 @@ public class DocumentNotFoundException : PersistenceException
Collection = collection; Collection = collection;
Key = key; Key = key;
} }
/// <summary>
/// Gets the document key that was not found.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the collection where the document was searched.
/// </summary>
public string Collection { get; }
} }
/// <summary> /// <summary>
@@ -182,8 +201,10 @@ public class DocumentNotFoundException : PersistenceException
public class CBDDCConcurrencyException : PersistenceException public class CBDDCConcurrencyException : PersistenceException
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CBDDCConcurrencyException"/> class. /// Initializes a new instance of the <see cref="CBDDCConcurrencyException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
public CBDDCConcurrencyException(string message) : base(message) { } public CBDDCConcurrencyException(string message) : base(message)
{
}
} }

View File

@@ -25,7 +25,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
public string NodeId { get; } public string NodeId { get; }
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="HlcTimestamp"/> struct. /// Initializes a new instance of the <see cref="HlcTimestamp" /> struct.
/// </summary> /// </summary>
/// <param name="physicalTime">The physical time component.</param> /// <param name="physicalTime">The physical time component.</param>
/// <param name="logicalCounter">The logical counter component.</param> /// <param name="logicalCounter">The logical counter component.</param>
@@ -43,8 +43,8 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
/// </summary> /// </summary>
/// <param name="other">The other timestamp to compare with this instance.</param> /// <param name="other">The other timestamp to compare with this instance.</param>
/// <returns> /// <returns>
/// A value less than zero if this instance is earlier than <paramref name="other"/>, zero if they are equal, /// A value less than zero if this instance is earlier than <paramref name="other" />, zero if they are equal,
/// or greater than zero if this instance is later than <paramref name="other"/>. /// or greater than zero if this instance is later than <paramref name="other" />.
/// </returns> /// </returns>
public int CompareTo(HlcTimestamp other) public int CompareTo(HlcTimestamp other)
{ {
@@ -63,7 +63,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
/// </summary> /// </summary>
/// <param name="obj">The object to compare with this instance.</param> /// <param name="obj">The object to compare with this instance.</param>
/// <returns> /// <returns>
/// A value less than zero if this instance is earlier than <paramref name="obj"/>, zero if equal, or greater /// A value less than zero if this instance is earlier than <paramref name="obj" />, zero if equal, or greater
/// than zero if later. /// than zero if later.
/// </returns> /// </returns>
public int CompareTo(object? obj) public int CompareTo(object? obj)
@@ -77,7 +77,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
/// Determines whether this instance and another timestamp are equal. /// Determines whether this instance and another timestamp are equal.
/// </summary> /// </summary>
/// <param name="other">The other timestamp to compare.</param> /// <param name="other">The other timestamp to compare.</param>
/// <returns><see langword="true"/> if the timestamps are equal; otherwise, <see langword="false"/>.</returns> /// <returns><see langword="true" /> if the timestamps are equal; otherwise, <see langword="false" />.</returns>
public bool Equals(HlcTimestamp other) public bool Equals(HlcTimestamp other)
{ {
return PhysicalTime == other.PhysicalTime && return PhysicalTime == other.PhysicalTime &&
@@ -96,7 +96,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
{ {
unchecked unchecked
{ {
var hashCode = PhysicalTime.GetHashCode(); int hashCode = PhysicalTime.GetHashCode();
hashCode = (hashCode * 397) ^ LogicalCounter; hashCode = (hashCode * 397) ^ LogicalCounter;
// Ensure HashCode uses the same comparison logic as Equals/CompareTo // Ensure HashCode uses the same comparison logic as Equals/CompareTo
// Handle null NodeId gracefully (possible via default(HlcTimestamp)) // Handle null NodeId gracefully (possible via default(HlcTimestamp))
@@ -105,33 +105,59 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
} }
} }
public static bool operator ==(HlcTimestamp left, HlcTimestamp right) => left.Equals(right); public static bool operator ==(HlcTimestamp left, HlcTimestamp right)
public static bool operator !=(HlcTimestamp left, HlcTimestamp right) => !left.Equals(right); {
return left.Equals(right);
}
public static bool operator !=(HlcTimestamp left, HlcTimestamp right)
{
return !left.Equals(right);
}
// Standard comparison operators making usage in SyncOrchestrator cleaner (e.g., remote > local) // Standard comparison operators making usage in SyncOrchestrator cleaner (e.g., remote > local)
public static bool operator <(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) < 0; public static bool operator <(HlcTimestamp left, HlcTimestamp right)
public static bool operator <=(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) <= 0; {
public static bool operator >(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) > 0; return left.CompareTo(right) < 0;
public static bool operator >=(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) >= 0; }
public static bool operator <=(HlcTimestamp left, HlcTimestamp right)
{
return left.CompareTo(right) <= 0;
}
public static bool operator >(HlcTimestamp left, HlcTimestamp right)
{
return left.CompareTo(right) > 0;
}
public static bool operator >=(HlcTimestamp left, HlcTimestamp right)
{
return left.CompareTo(right) >= 0;
}
/// <inheritdoc /> /// <inheritdoc />
public override string ToString() => FormattableString.Invariant($"{PhysicalTime}:{LogicalCounter}:{NodeId}"); public override string ToString()
{
return FormattableString.Invariant($"{PhysicalTime}:{LogicalCounter}:{NodeId}");
}
/// <summary> /// <summary>
/// Parses a timestamp string. /// Parses a timestamp string.
/// </summary> /// </summary>
/// <param name="s">The string to parse, in the format "PhysicalTime:LogicalCounter:NodeId".</param> /// <param name="s">The string to parse, in the format "PhysicalTime:LogicalCounter:NodeId".</param>
/// <returns>The parsed <see cref="HlcTimestamp"/>.</returns> /// <returns>The parsed <see cref="HlcTimestamp" />.</returns>
public static HlcTimestamp Parse(string s) public static HlcTimestamp Parse(string s)
{ {
if (string.IsNullOrEmpty(s)) throw new ArgumentNullException(nameof(s)); if (string.IsNullOrEmpty(s)) throw new ArgumentNullException(nameof(s));
var parts = s.Split(':'); string[] parts = s.Split(':');
if (parts.Length != 3) throw new FormatException("Invalid HlcTimestamp format. Expected 'PhysicalTime:LogicalCounter:NodeId'."); if (parts.Length != 3)
if (!long.TryParse(parts[0], out var physicalTime)) throw new FormatException("Invalid HlcTimestamp format. Expected 'PhysicalTime:LogicalCounter:NodeId'.");
if (!long.TryParse(parts[0], out long physicalTime))
throw new FormatException("Invalid PhysicalTime component in HlcTimestamp."); throw new FormatException("Invalid PhysicalTime component in HlcTimestamp.");
if (!int.TryParse(parts[1], out var logicalCounter)) if (!int.TryParse(parts[1], out int logicalCounter))
throw new FormatException("Invalid LogicalCounter component in HlcTimestamp."); throw new FormatException("Invalid LogicalCounter component in HlcTimestamp.");
var nodeId = parts[2]; string nodeId = parts[2];
return new HlcTimestamp(physicalTime, logicalCounter, nodeId); return new HlcTimestamp(physicalTime, logicalCounter, nodeId);
} }
} }

View File

@@ -2,26 +2,25 @@ using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Core.Management; namespace ZB.MOM.WW.CBDDC.Core.Management;
/// <summary> /// <summary>
/// Implementation of peer management service. /// Implementation of peer management service.
/// Provides CRUD operations for managing remote peer configurations. /// Provides CRUD operations for managing remote peer configurations.
///
/// Remote peer configurations are stored in a synchronized collection and automatically /// Remote peer configurations are stored in a synchronized collection and automatically
/// replicated across all nodes in the cluster. Any change made on one node will be /// replicated across all nodes in the cluster. Any change made on one node will be
/// synchronized to all other nodes through the normal CBDDC sync process. /// synchronized to all other nodes through the normal CBDDC sync process.
/// </summary> /// </summary>
public class PeerManagementService : IPeerManagementService public class PeerManagementService : IPeerManagementService
{ {
private readonly IPeerConfigurationStore _store;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
private readonly ILogger<PeerManagementService> _logger; private readonly ILogger<PeerManagementService> _logger;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
private readonly IPeerConfigurationStore _store;
/// <summary> /// <summary>
/// Initializes a new instance of the PeerManagementService class. /// Initializes a new instance of the PeerManagementService class.
@@ -35,7 +34,8 @@ public class PeerManagementService : IPeerManagementService
ILogger<PeerManagementService>? logger = null) ILogger<PeerManagementService>? logger = null)
{ {
_store = store ?? throw new ArgumentNullException(nameof(store)); _store = store ?? throw new ArgumentNullException(nameof(store));
_peerOplogConfirmationStore = peerOplogConfirmationStore ?? throw new ArgumentNullException(nameof(peerOplogConfirmationStore)); _peerOplogConfirmationStore = peerOplogConfirmationStore ??
throw new ArgumentNullException(nameof(peerOplogConfirmationStore));
_logger = logger ?? NullLogger<PeerManagementService>.Instance; _logger = logger ?? NullLogger<PeerManagementService>.Instance;
} }
@@ -60,7 +60,8 @@ public class PeerManagementService : IPeerManagementService
}; };
await _store.SaveRemotePeerAsync(config, cancellationToken); await _store.SaveRemotePeerAsync(config, cancellationToken);
_logger.LogInformation("Added static remote peer: {NodeId} at {Address} (will sync to all cluster nodes)", nodeId, address); _logger.LogInformation("Added static remote peer: {NodeId} at {Address} (will sync to all cluster nodes)",
nodeId, address);
} }
/// <summary> /// <summary>
@@ -71,7 +72,7 @@ public class PeerManagementService : IPeerManagementService
/// <returns>A task that represents the asynchronous operation.</returns> /// <returns>A task that represents the asynchronous operation.</returns>
public async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default) public async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
{ {
await RemovePeerTrackingAsync(nodeId, removeRemoteConfig: true, cancellationToken); await RemovePeerTrackingAsync(nodeId, true, cancellationToken);
} }
/// <summary> /// <summary>
@@ -93,7 +94,8 @@ public class PeerManagementService : IPeerManagementService
if (removeRemoteConfig) if (removeRemoteConfig)
{ {
await _store.RemoveRemotePeerAsync(nodeId, cancellationToken); await _store.RemoveRemotePeerAsync(nodeId, cancellationToken);
_logger.LogInformation("Removed remote peer and tracking: {NodeId} (will sync to all cluster nodes)", nodeId); _logger.LogInformation("Removed remote peer and tracking: {NodeId} (will sync to all cluster nodes)",
nodeId);
return; return;
} }
@@ -105,7 +107,8 @@ public class PeerManagementService : IPeerManagementService
/// </summary> /// </summary>
/// <param name="cancellationToken">A token used to cancel the operation.</param> /// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains remote peer configurations.</returns> /// <returns>A task that represents the asynchronous operation. The task result contains remote peer configurations.</returns>
public async Task<IEnumerable<RemotePeerConfiguration>> GetAllRemotePeersAsync(CancellationToken cancellationToken = default) public async Task<IEnumerable<RemotePeerConfiguration>> GetAllRemotePeersAsync(
CancellationToken cancellationToken = default)
{ {
return await _store.GetRemotePeersAsync(cancellationToken); return await _store.GetRemotePeersAsync(cancellationToken);
} }
@@ -122,10 +125,7 @@ public class PeerManagementService : IPeerManagementService
var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken); var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken);
if (peer == null) if (peer == null) return; // Peer not found, nothing to enable
{
return; // Peer not found, nothing to enable
}
if (!peer.IsEnabled) if (!peer.IsEnabled)
{ {
@@ -147,10 +147,7 @@ public class PeerManagementService : IPeerManagementService
var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken); var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken);
if (peer == null) if (peer == null) return; // Peer not found, nothing to disable
{
return; // Peer not found, nothing to disable
}
if (peer.IsEnabled) if (peer.IsEnabled)
{ {
@@ -163,23 +160,16 @@ public class PeerManagementService : IPeerManagementService
private static void ValidateNodeId(string nodeId) private static void ValidateNodeId(string nodeId)
{ {
if (string.IsNullOrWhiteSpace(nodeId)) if (string.IsNullOrWhiteSpace(nodeId))
{
throw new ArgumentException("NodeId cannot be null or empty", nameof(nodeId)); throw new ArgumentException("NodeId cannot be null or empty", nameof(nodeId));
} }
}
private static void ValidateAddress(string address) private static void ValidateAddress(string address)
{ {
if (string.IsNullOrWhiteSpace(address)) if (string.IsNullOrWhiteSpace(address))
{
throw new ArgumentException("Address cannot be null or empty", nameof(address)); throw new ArgumentException("Address cannot be null or empty", nameof(address));
}
// Basic format validation (should contain host:port) // Basic format validation (should contain host:port)
if (!address.Contains(':')) if (!address.Contains(':'))
{
throw new ArgumentException("Address must be in format 'host:port'", nameof(address)); throw new ArgumentException("Address must be in format 'host:port'", nameof(address));
} }
}
} }

View File

@@ -1,4 +1,3 @@
using System;
using System.Threading.Tasks; using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Network;
@@ -13,9 +12,11 @@ public delegate void PeerNodeConfigurationChangedEventHandler(object? sender, Pe
/// <summary> /// <summary>
/// Defines a contract for retrieving and monitoring configuration settings for a peer node. /// Defines a contract for retrieving and monitoring configuration settings for a peer node.
/// </summary> /// </summary>
/// <remarks>Implementations of this interface provide access to the current configuration and notify subscribers /// <remarks>
/// Implementations of this interface provide access to the current configuration and notify subscribers
/// when configuration changes occur. This interface is typically used by components that require up-to-date /// when configuration changes occur. This interface is typically used by components that require up-to-date
/// configuration information for peer-to-peer networking scenarios.</remarks> /// configuration information for peer-to-peer networking scenarios.
/// </remarks>
public interface IPeerNodeConfigurationProvider public interface IPeerNodeConfigurationProvider
{ {
/// <summary> /// <summary>
@@ -23,16 +24,17 @@ public interface IPeerNodeConfigurationProvider
/// </summary> /// </summary>
/// <returns> /// <returns>
/// A task that represents the asynchronous operation. The task result contains the current /// A task that represents the asynchronous operation. The task result contains the current
/// <see cref="PeerNodeConfiguration"/>. /// <see cref="PeerNodeConfiguration" />.
/// </returns> /// </returns>
public Task<PeerNodeConfiguration> GetConfiguration(); public Task<PeerNodeConfiguration> GetConfiguration();
/// <summary> /// <summary>
/// Occurs when the configuration of the peer node changes. /// Occurs when the configuration of the peer node changes.
/// </summary> /// </summary>
/// <remarks>Subscribe to this event to be notified when any configuration settings for the peer node are /// <remarks>
/// Subscribe to this event to be notified when any configuration settings for the peer node are
/// modified. Event handlers can use this notification to update dependent components or respond to configuration /// modified. Event handlers can use this notification to update dependent components or respond to configuration
/// changes as needed.</remarks> /// changes as needed.
/// </remarks>
public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged; public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged;
} }

View File

@@ -1,6 +1,5 @@
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq;
namespace ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Network;
@@ -10,6 +9,35 @@ namespace ZB.MOM.WW.CBDDC.Core.Network;
/// </summary> /// </summary>
public class PeerNode public class PeerNode
{ {
/// <summary>
/// Initializes a new instance of the PeerNode class with the specified node identifier, network address, and last
/// seen timestamp.
/// </summary>
/// <param name="nodeId">The unique identifier for the peer node. Cannot be null or empty.</param>
/// <param name="address">The network address of the peer node. Cannot be null or empty.</param>
/// <param name="lastSeen">The date and time when the peer node was last seen, expressed as a DateTimeOffset.</param>
/// <param name="type">The type of the peer node. Defaults to LanDiscovered.</param>
/// <param name="role">The role of the peer node. Defaults to Member.</param>
/// <param name="configuration">The peer node configuration</param>
/// <param name="interestingCollections">The list of collections this peer is interested in.</param>
public PeerNode(
string nodeId,
string address,
DateTimeOffset lastSeen,
PeerType type = PeerType.LanDiscovered,
NodeRole role = NodeRole.Member,
PeerNodeConfiguration? configuration = null,
IEnumerable<string>? interestingCollections = null)
{
NodeId = nodeId;
Address = address;
LastSeen = lastSeen;
Type = type;
Role = role;
Configuration = configuration;
InterestingCollections = new List<string>(interestingCollections ?? []).AsReadOnly();
}
/// <summary> /// <summary>
/// Gets the unique identifier for the node. /// Gets the unique identifier for the node.
/// </summary> /// </summary>
@@ -43,34 +71,5 @@ public class PeerNode
/// <summary> /// <summary>
/// Gets the list of collections this peer is interested in. /// Gets the list of collections this peer is interested in.
/// </summary> /// </summary>
public System.Collections.Generic.IReadOnlyList<string> InterestingCollections { get; } public IReadOnlyList<string> InterestingCollections { get; }
/// <summary>
/// Initializes a new instance of the PeerNode class with the specified node identifier, network address, and last
/// seen timestamp.
/// </summary>
/// <param name="nodeId">The unique identifier for the peer node. Cannot be null or empty.</param>
/// <param name="address">The network address of the peer node. Cannot be null or empty.</param>
/// <param name="lastSeen">The date and time when the peer node was last seen, expressed as a DateTimeOffset.</param>
/// <param name="type">The type of the peer node. Defaults to LanDiscovered.</param>
/// <param name="role">The role of the peer node. Defaults to Member.</param>
/// <param name="configuration">The peer node configuration</param>
/// <param name="interestingCollections">The list of collections this peer is interested in.</param>
public PeerNode(
string nodeId,
string address,
DateTimeOffset lastSeen,
PeerType type = PeerType.LanDiscovered,
NodeRole role = NodeRole.Member,
PeerNodeConfiguration? configuration = null,
IEnumerable<string>? interestingCollections = null)
{
NodeId = nodeId;
Address = address;
LastSeen = lastSeen;
Type = type;
Role = role;
Configuration = configuration;
InterestingCollections = new List<string>(interestingCollections ?? []).AsReadOnly();
}
} }

View File

@@ -1,13 +1,16 @@
using System; using System;
using System.Collections.Generic;
namespace ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Network;
/// <summary> /// <summary>
/// Represents the configuration settings for a peer node in a distributed network. /// Represents the configuration settings for a peer node in a distributed network.
/// </summary> /// </summary>
/// <remarks>Use this class to specify identification, network port, and authentication details required for a /// <remarks>
/// peer node to participate in a cluster or peer-to-peer environment. The <see cref="Default"/> property provides a /// Use this class to specify identification, network port, and authentication details required for a
/// basic configuration suitable for development or testing scenarios.</remarks> /// peer node to participate in a cluster or peer-to-peer environment. The <see cref="Default" /> property provides a
/// basic configuration suitable for development or testing scenarios.
/// </remarks>
public class PeerNodeConfiguration public class PeerNodeConfiguration
{ {
/// <summary> /// <summary>
@@ -58,15 +61,17 @@ public class PeerNodeConfiguration
/// <summary> /// <summary>
/// Gets or sets a list of known peers to connect to directly, bypassing discovery. /// Gets or sets a list of known peers to connect to directly, bypassing discovery.
/// </summary> /// </summary>
public System.Collections.Generic.List<KnownPeerConfiguration> KnownPeers { get; set; } = new(); public List<KnownPeerConfiguration> KnownPeers { get; set; } = new();
/// <summary> /// <summary>
/// Gets the default configuration settings for a peer node. /// Gets the default configuration settings for a peer node.
/// </summary> /// </summary>
/// <remarks>Each access returns a new instance of the configuration with a unique node identifier. The /// <remarks>
/// Each access returns a new instance of the configuration with a unique node identifier. The
/// default settings use TCP port 9000 and a generated authentication token. Modify the returned instance as needed /// default settings use TCP port 9000 and a generated authentication token. Modify the returned instance as needed
/// before use.</remarks> /// before use.
public static PeerNodeConfiguration Default => new PeerNodeConfiguration /// </remarks>
public static PeerNodeConfiguration Default => new()
{ {
NodeId = Guid.NewGuid().ToString(), NodeId = Guid.NewGuid().ToString(),
TcpPort = 9000, TcpPort = 9000,

View File

@@ -1,3 +1,4 @@
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations; using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Core.Network; namespace ZB.MOM.WW.CBDDC.Core.Network;
@@ -34,5 +35,5 @@ public class RemotePeerConfiguration
/// Gets or sets the list of collections this peer is interested in. /// Gets or sets the list of collections this peer is interested in.
/// If empty, the peer is interested in all collections. /// If empty, the peer is interested in all collections.
/// </summary> /// </summary>
public System.Collections.Generic.List<string> InterestingCollections { get; set; } = new(); public List<string> InterestingCollections { get; set; } = new();
} }

View File

@@ -9,6 +9,15 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide
{ {
private PeerNodeConfiguration _configuration = new(); private PeerNodeConfiguration _configuration = new();
/// <summary>
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider" /> class.
/// </summary>
/// <param name="configuration">The initial peer node configuration.</param>
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
{
Configuration = configuration;
}
/// <summary> /// <summary>
/// Gets or sets the current peer node configuration. /// Gets or sets the current peer node configuration.
/// </summary> /// </summary>
@@ -25,15 +34,6 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide
} }
} }
/// <summary>
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider"/> class.
/// </summary>
/// <param name="configuration">The initial peer node configuration.</param>
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
{
Configuration = configuration;
}
/// <summary> /// <summary>
/// Occurs when the peer node configuration changes. /// Occurs when the peer node configuration changes.
/// </summary> /// </summary>
@@ -49,7 +49,7 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide
} }
/// <summary> /// <summary>
/// Raises the <see cref="ConfigurationChanged"/> event. /// Raises the <see cref="ConfigurationChanged" /> event.
/// </summary> /// </summary>
/// <param name="newConfig">The new peer node configuration.</param> /// <param name="newConfig">The new peer node configuration.</param>
protected virtual void OnConfigurationChanged(PeerNodeConfiguration newConfig) protected virtual void OnConfigurationChanged(PeerNodeConfiguration newConfig)

View File

@@ -1,5 +1,7 @@
using System; using System;
using System.ComponentModel.DataAnnotations; using System.Globalization;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json; using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Core; namespace ZB.MOM.WW.CBDDC.Core;
@@ -19,15 +21,15 @@ public static class OplogEntryExtensions
/// <returns>The lowercase hexadecimal SHA-256 hash of the entry.</returns> /// <returns>The lowercase hexadecimal SHA-256 hash of the entry.</returns>
public static string ComputeHash(this OplogEntry entry) public static string ComputeHash(this OplogEntry entry)
{ {
using var sha256 = System.Security.Cryptography.SHA256.Create(); using var sha256 = SHA256.Create();
var sb = new System.Text.StringBuilder(); var sb = new StringBuilder();
sb.Append(entry.Collection); sb.Append(entry.Collection);
sb.Append('|'); sb.Append('|');
sb.Append(entry.Key); sb.Append(entry.Key);
sb.Append('|'); sb.Append('|');
// Ensure stable string representation for Enum (integer value) // Ensure stable string representation for Enum (integer value)
sb.Append(((int)entry.Operation).ToString(System.Globalization.CultureInfo.InvariantCulture)); sb.Append(((int)entry.Operation).ToString(CultureInfo.InvariantCulture));
sb.Append('|'); sb.Append('|');
// Payload excluded from hash to avoid serialization non-determinism // Payload excluded from hash to avoid serialization non-determinism
// sb.Append(entry.Payload...); // sb.Append(entry.Payload...);
@@ -37,8 +39,8 @@ public static class OplogEntryExtensions
sb.Append('|'); sb.Append('|');
sb.Append(entry.PreviousHash); sb.Append(entry.PreviousHash);
var bytes = System.Text.Encoding.UTF8.GetBytes(sb.ToString()); byte[] bytes = Encoding.UTF8.GetBytes(sb.ToString());
var hashBytes = sha256.ComputeHash(bytes); byte[] hashBytes = sha256.ComputeHash(bytes);
// Convert to hex string // Convert to hex string
return BitConverter.ToString(hashBytes).Replace("-", "").ToLowerInvariant(); return BitConverter.ToString(hashBytes).Replace("-", "").ToLowerInvariant();
@@ -48,36 +50,7 @@ public static class OplogEntryExtensions
public class OplogEntry public class OplogEntry
{ {
/// <summary> /// <summary>
/// Gets the collection name associated with this entry. /// Initializes a new instance of the <see cref="OplogEntry" /> class.
/// </summary>
public string Collection { get; }
/// <summary>
/// Gets the document key associated with this entry.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the operation represented by this entry.
/// </summary>
public OperationType Operation { get; }
/// <summary>
/// Gets the serialized payload for the operation.
/// </summary>
public JsonElement? Payload { get; }
/// <summary>
/// Gets the logical timestamp for this entry.
/// </summary>
public HlcTimestamp Timestamp { get; }
/// <summary>
/// Gets the hash of this entry.
/// </summary>
public string Hash { get; }
/// <summary>
/// Gets the hash of the previous entry in the chain.
/// </summary>
public string PreviousHash { get; }
/// <summary>
/// Initializes a new instance of the <see cref="OplogEntry"/> class.
/// </summary> /// </summary>
/// <param name="collection">The collection name.</param> /// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param> /// <param name="key">The document key.</param>
@@ -86,7 +59,8 @@ public class OplogEntry
/// <param name="timestamp">The logical timestamp.</param> /// <param name="timestamp">The logical timestamp.</param>
/// <param name="previousHash">The previous entry hash.</param> /// <param name="previousHash">The previous entry hash.</param>
/// <param name="hash">The current entry hash. If null, it is computed.</param> /// <param name="hash">The current entry hash. If null, it is computed.</param>
public OplogEntry(string collection, string key, OperationType operation, JsonElement? payload, HlcTimestamp timestamp, string previousHash, string? hash = null) public OplogEntry(string collection, string key, OperationType operation, JsonElement? payload,
HlcTimestamp timestamp, string previousHash, string? hash = null)
{ {
Collection = collection; Collection = collection;
Key = key; Key = key;
@@ -97,6 +71,41 @@ public class OplogEntry
Hash = hash ?? this.ComputeHash(); Hash = hash ?? this.ComputeHash();
} }
/// <summary>
/// Gets the collection name associated with this entry.
/// </summary>
public string Collection { get; }
/// <summary>
/// Gets the document key associated with this entry.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the operation represented by this entry.
/// </summary>
public OperationType Operation { get; }
/// <summary>
/// Gets the serialized payload for the operation.
/// </summary>
public JsonElement? Payload { get; }
/// <summary>
/// Gets the logical timestamp for this entry.
/// </summary>
public HlcTimestamp Timestamp { get; }
/// <summary>
/// Gets the hash of this entry.
/// </summary>
public string Hash { get; }
/// <summary>
/// Gets the hash of the previous entry in the chain.
/// </summary>
public string PreviousHash { get; }
/// <summary> /// <summary>
/// Verifies if the stored Hash matches the content. /// Verifies if the stored Hash matches the content.
/// </summary> /// </summary>

View File

@@ -1,111 +1,22 @@
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Core; namespace ZB.MOM.WW.CBDDC.Core;
public abstract class QueryNode { } public abstract class QueryNode
{
}
public class Eq : QueryNode public class Eq : QueryNode
{ {
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the value to compare against.
/// </summary>
public object Value { get; }
/// <summary> /// <summary>
/// Initializes a new equality query node. /// Initializes a new equality query node.
/// </summary> /// </summary>
/// <param name="field">The field name to compare.</param> /// <param name="field">The field name to compare.</param>
/// <param name="value">The value to compare against.</param> /// <param name="value">The value to compare against.</param>
public Eq(string field, object value) { Field = field; Value = value; } public Eq(string field, object value)
} {
Field = field;
Value = value;
}
public class Gt : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new greater-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gt(string field, object value) { Field = field; Value = value; }
}
public class Lt : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new less-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lt(string field, object value) { Field = field; Value = value; }
}
public class Gte : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new greater-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gte(string field, object value) { Field = field; Value = value; }
}
public class Lte : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new less-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lte(string field, object value) { Field = field; Value = value; }
}
public class Neq : QueryNode
{
/// <summary> /// <summary>
/// Gets the field name to compare. /// Gets the field name to compare.
/// </summary> /// </summary>
@@ -115,17 +26,141 @@ public class Neq : QueryNode
/// Gets the value to compare against. /// Gets the value to compare against.
/// </summary> /// </summary>
public object Value { get; } public object Value { get; }
}
public class Gt : QueryNode
{
/// <summary>
/// Initializes a new greater-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gt(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Lt : QueryNode
{
/// <summary>
/// Initializes a new less-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lt(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Gte : QueryNode
{
/// <summary>
/// Initializes a new greater-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gte(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Lte : QueryNode
{
/// <summary>
/// Initializes a new less-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lte(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Neq : QueryNode
{
/// <summary> /// <summary>
/// Initializes a new not-equal query node. /// Initializes a new not-equal query node.
/// </summary> /// </summary>
/// <param name="field">The field name to compare.</param> /// <param name="field">The field name to compare.</param>
/// <param name="value">The value to compare against.</param> /// <param name="value">The value to compare against.</param>
public Neq(string field, object value) { Field = field; Value = value; } public Neq(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the value to compare against.
/// </summary>
public object Value { get; }
} }
public class In : QueryNode public class In : QueryNode
{ {
/// <summary>
/// Initializes a new in-list query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="values">The set of values to compare against.</param>
public In(string field, object[] values)
{
Field = field;
Values = values;
}
/// <summary> /// <summary>
/// Gets the field name to compare. /// Gets the field name to compare.
/// </summary> /// </summary>
@@ -135,17 +170,21 @@ public class In : QueryNode
/// Gets the set of values to compare against. /// Gets the set of values to compare against.
/// </summary> /// </summary>
public object[] Values { get; } public object[] Values { get; }
/// <summary>
/// Initializes a new in-list query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="values">The set of values to compare against.</param>
public In(string field, object[] values) { Field = field; Values = values; }
} }
public class Contains : QueryNode public class Contains : QueryNode
{ {
/// <summary>
/// Initializes a new contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to search for.</param>
public Contains(string field, string value)
{
Field = field;
Value = value;
}
/// <summary> /// <summary>
/// Gets the field name to compare. /// Gets the field name to compare.
/// </summary> /// </summary>
@@ -155,17 +194,21 @@ public class Contains : QueryNode
/// Gets the substring value to search for. /// Gets the substring value to search for.
/// </summary> /// </summary>
public string Value { get; } public string Value { get; }
/// <summary>
/// Initializes a new contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to search for.</param>
public Contains(string field, string value) { Field = field; Value = value; }
} }
public class NotContains : QueryNode public class NotContains : QueryNode
{ {
/// <summary>
/// Initializes a new not-contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to exclude.</param>
public NotContains(string field, string value)
{
Field = field;
Value = value;
}
/// <summary> /// <summary>
/// Gets the field name to compare. /// Gets the field name to compare.
/// </summary> /// </summary>
@@ -175,17 +218,21 @@ public class NotContains : QueryNode
/// Gets the substring value to exclude. /// Gets the substring value to exclude.
/// </summary> /// </summary>
public string Value { get; } public string Value { get; }
/// <summary>
/// Initializes a new not-contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to exclude.</param>
public NotContains(string field, string value) { Field = field; Value = value; }
} }
public class And : QueryNode public class And : QueryNode
{ {
/// <summary>
/// Initializes a new logical AND query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public And(QueryNode left, QueryNode right)
{
Left = left;
Right = right;
}
/// <summary> /// <summary>
/// Gets the left side of the logical operation. /// Gets the left side of the logical operation.
/// </summary> /// </summary>
@@ -195,17 +242,21 @@ public class And : QueryNode
/// Gets the right side of the logical operation. /// Gets the right side of the logical operation.
/// </summary> /// </summary>
public QueryNode Right { get; } public QueryNode Right { get; }
/// <summary>
/// Initializes a new logical AND query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public And(QueryNode left, QueryNode right) { Left = left; Right = right; }
} }
public class Or : QueryNode public class Or : QueryNode
{ {
/// <summary>
/// Initializes a new logical OR query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public Or(QueryNode left, QueryNode right)
{
Left = left;
Right = right;
}
/// <summary> /// <summary>
/// Gets the left side of the logical operation. /// Gets the left side of the logical operation.
/// </summary> /// </summary>
@@ -215,11 +266,4 @@ public class Or : QueryNode
/// Gets the right side of the logical operation. /// Gets the right side of the logical operation.
/// </summary> /// </summary>
public QueryNode Right { get; } public QueryNode Right { get; }
/// <summary>
/// Initializes a new logical OR query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public Or(QueryNode left, QueryNode right) { Left = left; Right = right; }
} }

View File

@@ -4,7 +4,9 @@ Core abstractions and logic for **CBDDC**, a peer-to-peer data synchronization m
## What Is CBDDC? ## What Is CBDDC?
CBDDC is **not** a database it's a sync layer that plugs into your existing data store (BLite) and enables automatic P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC handles synchronization in the background. CBDDC is **not** a database <EFBFBD> it's a sync layer that plugs into your existing data store (BLite) and enables automatic
P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC
handles synchronization in the background.
## What's In This Package ## What's In This Package
@@ -66,8 +68,8 @@ builder.Services.AddCBDDCCore()
## Key Concepts ## Key Concepts
| Concept | Description | | Concept | Description |
|---------|-------------| |-------------------|------------------------------------------------------------------------------|
| **CDC** | Change Data Capture watches collections registered via `WatchCollection()` | | **CDC** | Change Data Capture <EFBFBD> watches collections registered via `WatchCollection()` |
| **Oplog** | Append-only hash-chained journal of changes per node | | **Oplog** | Append-only hash-chained journal of changes per node |
| **VectorClock** | Tracks causal ordering across the mesh | | **VectorClock** | Tracks causal ordering across the mesh |
| **DocumentStore** | Your bridge between entities and the sync engine | | **DocumentStore** | Your bridge between entities and the sync engine |
@@ -92,14 +94,15 @@ Your App ? DbContext.SaveChangesAsync()
## Related Packages ## Related Packages
- **ZB.MOM.WW.CBDDC.Persistence** <20> BLite embedded provider (.NET 10+) - **ZB.MOM.WW.CBDDC.Persistence** <20> BLite embedded provider (.NET 10+)
- **ZB.MOM.WW.CBDDC.Network** P2P networking (UDP discovery, TCP sync, Gossip) - **ZB.MOM.WW.CBDDC.Network** <EFBFBD> P2P networking (UDP discovery, TCP sync, Gossip)
## Documentation ## Documentation
- **[Complete Documentation](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net)** - **[Complete Documentation](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net)**
- **[Sample Application](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/tree/main/samples/ZB.MOM.WW.CBDDC.Sample.Console)** - **[Sample Application](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/tree/main/samples/ZB.MOM.WW.CBDDC.Sample.Console)
**
- **[Integration Guide](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net#integrating-with-your-database)** - **[Integration Guide](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net#integrating-with-your-database)**
## License ## License
MIT see [LICENSE](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/blob/main/LICENSE) MIT <EFBFBD> see [LICENSE](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/blob/main/LICENSE)

View File

@@ -2,10 +2,10 @@
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Resilience namespace ZB.MOM.WW.CBDDC.Core.Resilience;
public interface IRetryPolicy
{ {
public interface IRetryPolicy
{
/// <summary> /// <summary>
/// Executes an asynchronous operation with retry handling. /// Executes an asynchronous operation with retry handling.
/// </summary> /// </summary>
@@ -14,6 +14,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Resilience
/// <param name="cancellationToken">A token used to cancel the operation.</param> /// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous execution.</returns> /// <returns>A task that represents the asynchronous execution.</returns>
Task ExecuteAsync(Func<Task> operation, string operationName, CancellationToken cancellationToken = default); Task ExecuteAsync(Func<Task> operation, string operationName, CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Executes an asynchronous operation with retry handling and returns a result. /// Executes an asynchronous operation with retry handling and returns a result.
/// </summary> /// </summary>
@@ -22,6 +23,6 @@ namespace ZB.MOM.WW.CBDDC.Core.Resilience
/// <param name="operationName">The operation name used for diagnostics.</param> /// <param name="operationName">The operation name used for diagnostics.</param>
/// <param name="cancellationToken">A token used to cancel the operation.</param> /// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous execution and yields the operation result.</returns> /// <returns>A task that represents the asynchronous execution and yields the operation result.</returns>
Task<T> ExecuteAsync<T>(Func<Task<T>> operation, string operationName, CancellationToken cancellationToken = default); Task<T> ExecuteAsync<T>(Func<Task<T>> operation, string operationName,
} CancellationToken cancellationToken = default);
} }

View File

@@ -1,10 +1,13 @@
using System; using System;
using System.IO;
using System.Net.Sockets;
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Exceptions;
using ZB.MOM.WW.CBDDC.Core.Network;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Exceptions;
using ZB.MOM.WW.CBDDC.Core.Network;
using TimeoutException = ZB.MOM.WW.CBDDC.Core.Exceptions.TimeoutException;
namespace ZB.MOM.WW.CBDDC.Core.Resilience; namespace ZB.MOM.WW.CBDDC.Core.Resilience;
@@ -13,15 +16,16 @@ namespace ZB.MOM.WW.CBDDC.Core.Resilience;
/// </summary> /// </summary>
public class RetryPolicy : IRetryPolicy public class RetryPolicy : IRetryPolicy
{ {
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly ILogger<RetryPolicy> _logger; private readonly ILogger<RetryPolicy> _logger;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="RetryPolicy"/> class. /// Initializes a new instance of the <see cref="RetryPolicy" /> class.
/// </summary> /// </summary>
/// <param name="peerNodeConfigurationProvider">The provider for retry configuration values.</param> /// <param name="peerNodeConfigurationProvider">The provider for retry configuration values.</param>
/// <param name="logger">The logger instance.</param> /// <param name="logger">The logger instance.</param>
public RetryPolicy(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger<RetryPolicy>? logger = null) public RetryPolicy(IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
ILogger<RetryPolicy>? logger = null)
{ {
_logger = logger ?? NullLogger<RetryPolicy>.Instance; _logger = logger ?? NullLogger<RetryPolicy>.Instance;
_peerNodeConfigurationProvider = peerNodeConfigurationProvider _peerNodeConfigurationProvider = peerNodeConfigurationProvider
@@ -43,8 +47,7 @@ public class RetryPolicy : IRetryPolicy
var config = await _peerNodeConfigurationProvider.GetConfiguration(); var config = await _peerNodeConfigurationProvider.GetConfiguration();
Exception? lastException = null; Exception? lastException = null;
for (int attempt = 1; attempt <= config.RetryAttempts; attempt++) for (var attempt = 1; attempt <= config.RetryAttempts; attempt++)
{
try try
{ {
_logger.LogDebug("Executing {Operation} (attempt {Attempt}/{Max})", _logger.LogDebug("Executing {Operation} (attempt {Attempt}/{Max})",
@@ -55,7 +58,7 @@ public class RetryPolicy : IRetryPolicy
catch (Exception ex) when (attempt < config.RetryAttempts && IsTransient(ex)) catch (Exception ex) when (attempt < config.RetryAttempts && IsTransient(ex))
{ {
lastException = ex; lastException = ex;
var delay = config.RetryDelayMs * attempt; // Exponential backoff int delay = config.RetryDelayMs * attempt; // Exponential backoff
_logger.LogWarning(ex, _logger.LogWarning(ex,
"Operation {Operation} failed (attempt {Attempt}/{Max}). Retrying in {Delay}ms...", "Operation {Operation} failed (attempt {Attempt}/{Max}). Retrying in {Delay}ms...",
@@ -63,20 +66,15 @@ public class RetryPolicy : IRetryPolicy
await Task.Delay(delay, cancellationToken); await Task.Delay(delay, cancellationToken);
} }
}
if (lastException != null) if (lastException != null)
{
_logger.LogError(lastException, _logger.LogError(lastException,
"Operation {Operation} failed after {Attempts} attempts", "Operation {Operation} failed after {Attempts} attempts",
operationName, config.RetryAttempts); operationName, config.RetryAttempts);
}
else else
{
_logger.LogError( _logger.LogError(
"Operation {Operation} failed after {Attempts} attempts", "Operation {Operation} failed after {Attempts} attempts",
operationName, config.RetryAttempts); operationName, config.RetryAttempts);
}
throw new CBDDCException("RETRY_EXHAUSTED", throw new CBDDCException("RETRY_EXHAUSTED",
$"Operation '{operationName}' failed after {config.RetryAttempts} attempts", $"Operation '{operationName}' failed after {config.RetryAttempts} attempts",
@@ -104,11 +102,11 @@ public class RetryPolicy : IRetryPolicy
private static bool IsTransient(Exception ex) private static bool IsTransient(Exception ex)
{ {
// Network errors are typically transient // Network errors are typically transient
if (ex is NetworkException or System.Net.Sockets.SocketException or System.IO.IOException) if (ex is NetworkException or SocketException or IOException)
return true; return true;
// Timeout errors are transient // Timeout errors are transient
if (ex is Exceptions.TimeoutException or OperationCanceledException) if (ex is TimeoutException or OperationCanceledException)
return true; return true;
return false; return false;

View File

@@ -6,14 +6,17 @@ public class SnapshotMetadata
/// Gets or sets the node identifier associated with the snapshot. /// Gets or sets the node identifier associated with the snapshot.
/// </summary> /// </summary>
public string NodeId { get; set; } = ""; public string NodeId { get; set; } = "";
/// <summary> /// <summary>
/// Gets or sets the physical time component of the snapshot timestamp. /// Gets or sets the physical time component of the snapshot timestamp.
/// </summary> /// </summary>
public long TimestampPhysicalTime { get; set; } public long TimestampPhysicalTime { get; set; }
/// <summary> /// <summary>
/// Gets or sets the logical counter component of the snapshot timestamp. /// Gets or sets the logical counter component of the snapshot timestamp.
/// </summary> /// </summary>
public int TimestampLogicalCounter { get; set; } public int TimestampLogicalCounter { get; set; }
/// <summary> /// <summary>
/// Gets or sets the snapshot hash. /// Gets or sets the snapshot hash.
/// </summary> /// </summary>

View File

@@ -8,9 +8,11 @@ namespace ZB.MOM.WW.CBDDC.Core.Storage;
public class CorruptDatabaseException : Exception public class CorruptDatabaseException : Exception
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CorruptDatabaseException"/> class. /// Initializes a new instance of the <see cref="CorruptDatabaseException" /> class.
/// </summary> /// </summary>
/// <param name="message">The exception message.</param> /// <param name="message">The exception message.</param>
/// <param name="innerException">The underlying exception that caused this error.</param> /// <param name="innerException">The underlying exception that caused this error.</param>
public CorruptDatabaseException(string message, Exception innerException) : base(message, innerException) { } public CorruptDatabaseException(string message, Exception innerException) : base(message, innerException)
{
}
} }

View File

@@ -17,7 +17,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="key">The document key.</param> /// <param name="key">The document key.</param>
/// <param name="cancellationToken">A cancellation token.</param> /// <param name="cancellationToken">A cancellation token.</param>
/// <returns>The document metadata if found; otherwise null.</returns> /// <returns>The document metadata if found; otherwise null.</returns>
Task<DocumentMetadata?> GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default); Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Gets metadata for all documents in a collection. /// Gets metadata for all documents in a collection.
@@ -25,7 +26,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="collection">The collection name.</param> /// <param name="collection">The collection name.</param>
/// <param name="cancellationToken">A cancellation token.</param> /// <param name="cancellationToken">A cancellation token.</param>
/// <returns>Enumerable of document metadata for the collection.</returns> /// <returns>Enumerable of document metadata for the collection.</returns>
Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default); Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Upserts (inserts or updates) metadata for a document. /// Upserts (inserts or updates) metadata for a document.
@@ -39,7 +41,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// </summary> /// </summary>
/// <param name="metadatas">The metadata items to upsert.</param> /// <param name="metadatas">The metadata items to upsert.</param>
/// <param name="cancellationToken">A cancellation token.</param> /// <param name="cancellationToken">A cancellation token.</param>
Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas, CancellationToken cancellationToken = default); Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Marks a document as deleted by setting IsDeleted=true and updating the timestamp. /// Marks a document as deleted by setting IsDeleted=true and updating the timestamp.
@@ -48,7 +51,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="key">The document key.</param> /// <param name="key">The document key.</param>
/// <param name="timestamp">The HLC timestamp of the deletion.</param> /// <param name="timestamp">The HLC timestamp of the deletion.</param>
/// <param name="cancellationToken">A cancellation token.</param> /// <param name="cancellationToken">A cancellation token.</param>
Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default); Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Gets all document metadata with timestamps after the specified timestamp. /// Gets all document metadata with timestamps after the specified timestamp.
@@ -58,7 +62,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="collections">Optional collection filter.</param> /// <param name="collections">Optional collection filter.</param>
/// <param name="cancellationToken">A cancellation token.</param> /// <param name="cancellationToken">A cancellation token.</param>
/// <returns>Documents modified after the specified timestamp.</returns> /// <returns>Documents modified after the specified timestamp.</returns>
Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default); Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
} }
/// <summary> /// <summary>
@@ -66,6 +71,28 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// </summary> /// </summary>
public class DocumentMetadata public class DocumentMetadata
{ {
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata" /> class.
/// </summary>
public DocumentMetadata()
{
}
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata" /> class.
/// </summary>
/// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="updatedAt">The last update timestamp.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public DocumentMetadata(string collection, string key, HlcTimestamp updatedAt, bool isDeleted = false)
{
Collection = collection;
Key = key;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
/// <summary> /// <summary>
/// Gets or sets the collection name. /// Gets or sets the collection name.
/// </summary> /// </summary>
@@ -85,24 +112,4 @@ public class DocumentMetadata
/// Gets or sets whether this document is marked as deleted (tombstone). /// Gets or sets whether this document is marked as deleted (tombstone).
/// </summary> /// </summary>
public bool IsDeleted { get; set; } public bool IsDeleted { get; set; }
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata"/> class.
/// </summary>
public DocumentMetadata() { }
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata"/> class.
/// </summary>
/// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="updatedAt">The last update timestamp.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public DocumentMetadata(string collection, string key, HlcTimestamp updatedAt, bool isDeleted = false)
{
Collection = collection;
Key = key;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
} }

View File

@@ -1,4 +1,3 @@
using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
@@ -21,7 +20,10 @@ public interface IDocumentStore : ISnapshotable<Document>
/// <param name="collection">The name of the collection containing the incoming to retrieve. Cannot be null or empty.</param> /// <param name="collection">The name of the collection containing the incoming to retrieve. Cannot be null or empty.</param>
/// <param name="key">The unique key identifying the incoming within the collection. Cannot be null or empty.</param> /// <param name="key">The unique key identifying the incoming within the collection. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains the incoming if found; otherwise, null.</returns> /// <returns>
/// A task that represents the asynchronous operation. The task result contains the incoming if found; otherwise,
/// null.
/// </returns>
Task<Document?> GetDocumentAsync(string collection, string key, CancellationToken cancellationToken = default); Task<Document?> GetDocumentAsync(string collection, string key, CancellationToken cancellationToken = default);
/// <summary> /// <summary>
@@ -29,26 +31,34 @@ public interface IDocumentStore : ISnapshotable<Document>
/// </summary> /// </summary>
/// <param name="collection">The name of the collection from which to retrieve documents. Cannot be null or empty.</param> /// <param name="collection">The name of the collection from which to retrieve documents. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains an enumerable collection of /// <returns>
/// documents in the specified collection. The collection is empty if no documents are found.</returns> /// A task that represents the asynchronous operation. The task result contains an enumerable collection of
Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection, CancellationToken cancellationToken = default); /// documents in the specified collection. The collection is empty if no documents are found.
/// </returns>
Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously inserts a batch of documents into the data store. /// Asynchronously inserts a batch of documents into the data store.
/// </summary> /// </summary>
/// <param name="documents">The collection of documents to insert. Cannot be null or contain null elements.</param> /// <param name="documents">The collection of documents to insert. Cannot be null or contain null elements.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result is <see langword="true"/> if all documents /// <returns>
/// were inserted successfully; otherwise, <see langword="false"/>.</returns> /// A task that represents the asynchronous operation. The task result is <see langword="true" /> if all documents
Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default); /// were inserted successfully; otherwise, <see langword="false" />.
/// </returns>
Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously updates the specified incoming in the data store. /// Asynchronously updates the specified incoming in the data store.
/// </summary> /// </summary>
/// <param name="document">The incoming to update. Cannot be null.</param> /// <param name="document">The incoming to update. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the update operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the update operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result is <see langword="true"/> if the incoming was /// <returns>
/// successfully updated; otherwise, <see langword="false"/>.</returns> /// A task that represents the asynchronous operation. The task result is <see langword="true" /> if the incoming was
/// successfully updated; otherwise, <see langword="false" />.
/// </returns>
Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default); Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default);
/// <summary> /// <summary>
@@ -56,9 +66,12 @@ public interface IDocumentStore : ISnapshotable<Document>
/// </summary> /// </summary>
/// <param name="documents">The collection of documents to update. Cannot be null or contain null elements.</param> /// <param name="documents">The collection of documents to update. Cannot be null or contain null elements.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result is <see langword="true"/> if all documents /// <returns>
/// were updated successfully; otherwise, <see langword="false"/>.</returns> /// A task that represents the asynchronous operation. The task result is <see langword="true" /> if all documents
Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default); /// were updated successfully; otherwise, <see langword="false" />.
/// </returns>
Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously deletes a incoming identified by the specified key from the given collection. /// Asynchronously deletes a incoming identified by the specified key from the given collection.
@@ -66,23 +79,31 @@ public interface IDocumentStore : ISnapshotable<Document>
/// <param name="collection">The name of the collection containing the incoming to delete. Cannot be null or empty.</param> /// <param name="collection">The name of the collection containing the incoming to delete. Cannot be null or empty.</param>
/// <param name="key">The unique key identifying the incoming to delete. Cannot be null or empty.</param> /// <param name="key">The unique key identifying the incoming to delete. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the delete operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the delete operation.</param>
/// <returns>A task that represents the asynchronous delete operation. The task result is <see langword="true"/> if the /// <returns>
/// incoming was successfully deleted; otherwise, <see langword="false"/>.</returns> /// A task that represents the asynchronous delete operation. The task result is <see langword="true" /> if the
/// incoming was successfully deleted; otherwise, <see langword="false" />.
/// </returns>
Task<bool> DeleteDocumentAsync(string collection, string key, CancellationToken cancellationToken = default); Task<bool> DeleteDocumentAsync(string collection, string key, CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously deletes a batch of documents identified by their keys. /// Asynchronously deletes a batch of documents identified by their keys.
/// </summary> /// </summary>
/// <remarks> /// <remarks>
/// If any of the specified documents cannot be deleted, the method returns <see langword="false"/> but does not /// If any of the specified documents cannot be deleted, the method returns <see langword="false" /> but does not
/// throw an exception. The operation is performed asynchronously and may complete partially if cancellation is requested. /// throw an exception. The operation is performed asynchronously and may complete partially if cancellation is
/// requested.
/// </remarks> /// </remarks>
/// <param name="documentKeys">A collection of incoming keys that specify the documents to delete. Cannot be null or contain null or empty /// <param name="documentKeys">
/// values.</param> /// A collection of incoming keys that specify the documents to delete. Cannot be null or contain null or empty
/// values.
/// </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the delete operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the delete operation.</param>
/// <returns>A task that represents the asynchronous delete operation. The task result is <see langword="true"/> if all /// <returns>
/// specified documents were successfully deleted; otherwise, <see langword="false"/>.</returns> /// A task that represents the asynchronous delete operation. The task result is <see langword="true" /> if all
Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys, CancellationToken cancellationToken = default); /// specified documents were successfully deleted; otherwise, <see langword="false" />.
/// </returns>
Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously merges the specified incoming with existing data and returns the updated incoming. /// Asynchronously merges the specified incoming with existing data and returns the updated incoming.
@@ -95,9 +116,12 @@ public interface IDocumentStore : ISnapshotable<Document>
/// <summary> /// <summary>
/// Asynchronously retrieves documents identified by the specified collection and key pairs. /// Asynchronously retrieves documents identified by the specified collection and key pairs.
/// </summary> /// </summary>
/// <param name="documentKeys">A list of tuples, each containing the collection name and the document key that uniquely identify the documents /// <param name="documentKeys">
/// to retrieve. Cannot be null or empty.</param> /// A list of tuples, each containing the collection name and the document key that uniquely identify the documents
/// to retrieve. Cannot be null or empty.
/// </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous retrieval operation.</returns> /// <returns>A task that represents the asynchronous retrieval operation.</returns>
Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, CancellationToken cancellationToken); Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys,
CancellationToken cancellationToken);
} }

View File

@@ -1,5 +1,4 @@
using System; using System;
using System.Buffers;
using System.Collections.Generic; using System.Collections.Generic;
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
@@ -31,7 +30,8 @@ public interface IOplogStore : ISnapshotable<OplogEntry>
/// <param name="collections">An optional collection of collection names to filter the results.</param> /// <param name="collections">An optional collection of collection names to filter the results.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation containing matching oplog entries.</returns> /// <returns>A task that represents the asynchronous operation containing matching oplog entries.</returns>
Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default); Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously retrieves the latest observed hybrid logical clock (HLC) timestamp. /// Asynchronously retrieves the latest observed hybrid logical clock (HLC) timestamp.
@@ -55,24 +55,30 @@ public interface IOplogStore : ISnapshotable<OplogEntry>
/// <param name="collections">An optional collection of collection names to filter the oplog entries.</param> /// <param name="collections">An optional collection of collection names to filter the oplog entries.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation containing oplog entries for the specified node.</returns> /// <returns>A task that represents the asynchronous operation containing oplog entries for the specified node.</returns>
Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default); Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously retrieves the hash of the most recent entry for the specified node. /// Asynchronously retrieves the hash of the most recent entry for the specified node.
/// </summary> /// </summary>
/// <param name="nodeId">The unique identifier of the node for which to retrieve the last entry hash. Cannot be null or empty.</param> /// <param name="nodeId">
/// The unique identifier of the node for which to retrieve the last entry hash. Cannot be null or
/// empty.
/// </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation containing the hash string of the last entry or null.</returns> /// <returns>A task that represents the asynchronous operation containing the hash string of the last entry or null.</returns>
Task<string?> GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default); Task<string?> GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously retrieves a sequence of oplog entries representing the chain between the specified start and end hashes. /// Asynchronously retrieves a sequence of oplog entries representing the chain between the specified start and end
/// hashes.
/// </summary> /// </summary>
/// <param name="startHash">The hash of the first entry in the chain range. Cannot be null or empty.</param> /// <param name="startHash">The hash of the first entry in the chain range. Cannot be null or empty.</param>
/// <param name="endHash">The hash of the last entry in the chain range. Cannot be null or empty.</param> /// <param name="endHash">The hash of the last entry in the chain range. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation containing OplogEntry objects in chain order.</returns> /// <returns>A task that represents the asynchronous operation containing OplogEntry objects in chain order.</returns>
Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default); Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
CancellationToken cancellationToken = default);
/// <summary> /// <summary>
/// Asynchronously retrieves the oplog entry associated with the specified hash value. /// Asynchronously retrieves the oplog entry associated with the specified hash value.
@@ -97,5 +103,4 @@ public interface IOplogStore : ISnapshotable<OplogEntry>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the prune operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the prune operation.</param>
/// <returns>A task that represents the asynchronous prune operation.</returns> /// <returns>A task that represents the asynchronous prune operation.</returns>
Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default); Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default);
} }

View File

@@ -9,10 +9,15 @@ public interface ISnapshotMetadataStore : ISnapshotable<SnapshotMetadata>
/// <summary> /// <summary>
/// Asynchronously retrieves the snapshot metadata associated with the specified node identifier. /// Asynchronously retrieves the snapshot metadata associated with the specified node identifier.
/// </summary> /// </summary>
/// <param name="nodeId">The unique identifier of the node for which to retrieve snapshot metadata. Cannot be null or empty.</param> /// <param name="nodeId">
/// The unique identifier of the node for which to retrieve snapshot metadata. Cannot be null or
/// empty.
/// </param>
/// <param name="cancellationToken">A token to monitor for cancellation requests.</param> /// <param name="cancellationToken">A token to monitor for cancellation requests.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains the <see cref="SnapshotMetadata"/> /// <returns>
/// for the specified node if found; otherwise, <see langword="null"/>.</returns> /// A task that represents the asynchronous operation. The task result contains the <see cref="SnapshotMetadata" />
/// for the specified node if found; otherwise, <see langword="null" />.
/// </returns>
Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default); Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default);
/// <summary> /// <summary>

View File

@@ -10,8 +10,10 @@ public interface ISnapshotable<T>
/// Asynchronously deletes the underlying data store and all of its contents. /// Asynchronously deletes the underlying data store and all of its contents.
/// </summary> /// </summary>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the drop operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the drop operation.</param>
/// <remarks>After calling this method, the data store and all stored data will be permanently removed. /// <remarks>
/// This operation cannot be undone. Any further operations on the data store may result in errors.</remarks> /// After calling this method, the data store and all stored data will be permanently removed.
/// This operation cannot be undone. Any further operations on the data store may result in errors.
/// </remarks>
/// <returns>A task that represents the asynchronous drop operation.</returns> /// <returns>A task that represents the asynchronous drop operation.</returns>
Task DropAsync(CancellationToken cancellationToken = default); Task DropAsync(CancellationToken cancellationToken = default);
@@ -19,8 +21,10 @@ public interface ISnapshotable<T>
/// Asynchronously exports a collection of items of type T. /// Asynchronously exports a collection of items of type T.
/// </summary> /// </summary>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the export operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the export operation.</param>
/// <returns>A task that represents the asynchronous export operation. The task result contains an enumerable collection of /// <returns>
/// exported items of type T.</returns> /// A task that represents the asynchronous export operation. The task result contains an enumerable collection of
/// exported items of type T.
/// </returns>
Task<IEnumerable<T>> ExportAsync(CancellationToken cancellationToken = default); Task<IEnumerable<T>> ExportAsync(CancellationToken cancellationToken = default);
/// <summary> /// <summary>
@@ -34,9 +38,11 @@ public interface ISnapshotable<T>
/// <summary> /// <summary>
/// Merges the specified collection of items into the target data store asynchronously. /// Merges the specified collection of items into the target data store asynchronously.
/// </summary> /// </summary>
/// <remarks>If the operation is canceled via the provided cancellation token, the returned task will be /// <remarks>
/// If the operation is canceled via the provided cancellation token, the returned task will be
/// in a canceled state. The merge operation may update existing items or add new items, depending on the /// in a canceled state. The merge operation may update existing items or add new items, depending on the
/// implementation.</remarks> /// implementation.
/// </remarks>
/// <param name="items">The collection of items to merge into the data store. Cannot be null.</param> /// <param name="items">The collection of items to merge into the data store. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the merge operation.</param> /// <param name="cancellationToken">A cancellation token that can be used to cancel the merge operation.</param>
/// <returns>A task that represents the asynchronous merge operation.</returns> /// <returns>A task that represents the asynchronous merge operation.</returns>

View File

@@ -11,7 +11,7 @@ public interface IVectorClockService
{ {
/// <summary> /// <summary>
/// Indicates whether the cache has been populated with initial data. /// Indicates whether the cache has been populated with initial data.
/// Reset to false by <see cref="Invalidate"/>. /// Reset to false by <see cref="Invalidate" />.
/// </summary> /// </summary>
bool IsInitialized { get; set; } bool IsInitialized { get; set; }
@@ -51,7 +51,7 @@ public interface IVectorClockService
void UpdateNode(string nodeId, HlcTimestamp timestamp, string hash); void UpdateNode(string nodeId, HlcTimestamp timestamp, string hash);
/// <summary> /// <summary>
/// Clears the cache and resets <see cref="IsInitialized"/> to false, /// Clears the cache and resets <see cref="IsInitialized" /> to false,
/// forcing re-initialization on next access. /// forcing re-initialization on next access.
/// </summary> /// </summary>
void Invalidate(); void Invalidate();

View File

@@ -1,21 +1,9 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Sync; namespace ZB.MOM.WW.CBDDC.Core.Sync;
public class ConflictResolutionResult public class ConflictResolutionResult
{ {
/// <summary> /// <summary>
/// Gets a value indicating whether the remote change should be applied. /// Initializes a new instance of the <see cref="ConflictResolutionResult" /> class.
/// </summary>
public bool ShouldApply { get; }
/// <summary>
/// Gets the merged document to apply when conflict resolution produced one.
/// </summary>
public Document? MergedDocument { get; }
/// <summary>
/// Initializes a new instance of the <see cref="ConflictResolutionResult"/> class.
/// </summary> /// </summary>
/// <param name="shouldApply">Indicates whether the change should be applied.</param> /// <param name="shouldApply">Indicates whether the change should be applied.</param>
/// <param name="mergedDocument">The merged document produced by resolution, if any.</param> /// <param name="mergedDocument">The merged document produced by resolution, if any.</param>
@@ -25,17 +13,34 @@ public class ConflictResolutionResult
MergedDocument = mergedDocument; MergedDocument = mergedDocument;
} }
/// <summary>
/// Gets a value indicating whether the remote change should be applied.
/// </summary>
public bool ShouldApply { get; }
/// <summary>
/// Gets the merged document to apply when conflict resolution produced one.
/// </summary>
public Document? MergedDocument { get; }
/// <summary> /// <summary>
/// Creates a result indicating that the resolved document should be applied. /// Creates a result indicating that the resolved document should be applied.
/// </summary> /// </summary>
/// <param name="document">The merged document to apply.</param> /// <param name="document">The merged document to apply.</param>
/// <returns>A resolution result that applies the provided document.</returns> /// <returns>A resolution result that applies the provided document.</returns>
public static ConflictResolutionResult Apply(Document document) => new(true, document); public static ConflictResolutionResult Apply(Document document)
{
return new ConflictResolutionResult(true, document);
}
/// <summary> /// <summary>
/// Creates a result indicating that the remote change should be ignored. /// Creates a result indicating that the remote change should be ignored.
/// </summary> /// </summary>
/// <returns>A resolution result that skips applying the remote change.</returns> /// <returns>A resolution result that skips applying the remote change.</returns>
public static ConflictResolutionResult Ignore() => new(false, null); public static ConflictResolutionResult Ignore()
{
return new ConflictResolutionResult(false, null);
}
} }
public interface IConflictResolver public interface IConflictResolver

View File

@@ -2,13 +2,13 @@
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Sync namespace ZB.MOM.WW.CBDDC.Core.Sync;
/// <summary>
/// Represents a queue for operations that should be executed when connectivity is restored.
/// </summary>
public interface IOfflineQueue
{ {
/// <summary>
/// Represents a queue for operations that should be executed when connectivity is restored.
/// </summary>
public interface IOfflineQueue
{
/// <summary> /// <summary>
/// Gets the number of pending operations in the queue. /// Gets the number of pending operations in the queue.
/// </summary> /// </summary>
@@ -35,6 +35,6 @@ namespace ZB.MOM.WW.CBDDC.Core.Sync
/// <returns> /// <returns>
/// A task that returns a tuple containing the number of successful and failed operations. /// A task that returns a tuple containing the number of successful and failed operations.
/// </returns> /// </returns>
Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor, CancellationToken cancellationToken = default); Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor,
} CancellationToken cancellationToken = default);
} }

View File

@@ -1,6 +1,3 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Sync; namespace ZB.MOM.WW.CBDDC.Core.Sync;
public class LastWriteWinsConflictResolver : IConflictResolver public class LastWriteWinsConflictResolver : IConflictResolver
@@ -18,7 +15,8 @@ public class LastWriteWinsConflictResolver : IConflictResolver
{ {
// Construct new document from oplog entry // Construct new document from oplog entry
var content = remote.Payload ?? default; var content = remote.Payload ?? default;
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete); var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp,
remote.Operation == OperationType.Delete);
return ConflictResolutionResult.Apply(newDoc); return ConflictResolutionResult.Apply(newDoc);
} }
@@ -27,7 +25,8 @@ public class LastWriteWinsConflictResolver : IConflictResolver
{ {
// Remote is newer, apply it // Remote is newer, apply it
var content = remote.Payload ?? default; var content = remote.Payload ?? default;
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete); var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp,
remote.Operation == OperationType.Delete);
return ConflictResolutionResult.Apply(newDoc); return ConflictResolutionResult.Apply(newDoc);
} }

View File

@@ -1,11 +1,11 @@
using ZB.MOM.WW.CBDDC.Core.Network;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using System; using System;
using System.Collections.Generic; using System.Collections.Generic;
using System.Linq; using System.Linq;
using System.Threading; using System.Threading;
using System.Threading.Tasks; using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Core.Sync; namespace ZB.MOM.WW.CBDDC.Core.Sync;
@@ -14,17 +14,18 @@ namespace ZB.MOM.WW.CBDDC.Core.Sync;
/// </summary> /// </summary>
public class OfflineQueue : IOfflineQueue public class OfflineQueue : IOfflineQueue
{ {
private readonly object _lock = new();
private readonly ILogger<OfflineQueue> _logger;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly Queue<PendingOperation> _queue = new(); private readonly Queue<PendingOperation> _queue = new();
private readonly ILogger<OfflineQueue> _logger;
private readonly object _lock = new();
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="OfflineQueue"/> class. /// Initializes a new instance of the <see cref="OfflineQueue" /> class.
/// </summary> /// </summary>
/// <param name="peerNodeConfigurationProvider">The configuration provider used for queue limits.</param> /// <param name="peerNodeConfigurationProvider">The configuration provider used for queue limits.</param>
/// <param name="logger">The logger instance.</param> /// <param name="logger">The logger instance.</param>
public OfflineQueue(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger<OfflineQueue>? logger = null) public OfflineQueue(IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
ILogger<OfflineQueue>? logger = null)
{ {
_peerNodeConfigurationProvider = peerNodeConfigurationProvider; _peerNodeConfigurationProvider = peerNodeConfigurationProvider;
_logger = logger ?? NullLogger<OfflineQueue>.Instance; _logger = logger ?? NullLogger<OfflineQueue>.Instance;
@@ -73,7 +74,8 @@ public class OfflineQueue : IOfflineQueue
/// <param name="executor">The delegate that executes each pending operation.</param> /// <param name="executor">The delegate that executes each pending operation.</param>
/// <param name="cancellationToken">A token used to cancel the operation.</param> /// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task whose result contains the number of successful and failed operations.</returns> /// <returns>A task whose result contains the number of successful and failed operations.</returns>
public async Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor, CancellationToken cancellationToken = default) public async Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor,
CancellationToken cancellationToken = default)
{ {
List<PendingOperation> operations; List<PendingOperation> operations;
@@ -91,11 +93,10 @@ public class OfflineQueue : IOfflineQueue
_logger.LogInformation("Flushing {Count} pending operations", operations.Count); _logger.LogInformation("Flushing {Count} pending operations", operations.Count);
int successful = 0; var successful = 0;
int failed = 0; var failed = 0;
foreach (var op in operations) foreach (var op in operations)
{
try try
{ {
await executor(op); await executor(op);
@@ -107,7 +108,6 @@ public class OfflineQueue : IOfflineQueue
_logger.LogError(ex, "Failed to execute pending {Type} operation for {Collection}:{Key}", _logger.LogError(ex, "Failed to execute pending {Type} operation for {Collection}:{Key}",
op.Type, op.Collection, op.Key); op.Type, op.Collection, op.Key);
} }
}
_logger.LogInformation("Flush completed: {Successful} successful, {Failed} failed", _logger.LogInformation("Flush completed: {Successful} successful, {Failed} failed",
successful, failed); successful, failed);
@@ -122,7 +122,7 @@ public class OfflineQueue : IOfflineQueue
{ {
lock (_lock) lock (_lock)
{ {
var count = _queue.Count; int count = _queue.Count;
_queue.Clear(); _queue.Clear();
_logger.LogInformation("Cleared {Count} pending operations", count); _logger.LogInformation("Cleared {Count} pending operations", count);
} }

View File

@@ -1,6 +1,4 @@
using System; using System;
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Sync; namespace ZB.MOM.WW.CBDDC.Core.Sync;
@@ -13,18 +11,22 @@ public class PendingOperation
/// Gets or sets the operation type. /// Gets or sets the operation type.
/// </summary> /// </summary>
public string Type { get; set; } = ""; public string Type { get; set; } = "";
/// <summary> /// <summary>
/// Gets or sets the collection targeted by the operation. /// Gets or sets the collection targeted by the operation.
/// </summary> /// </summary>
public string Collection { get; set; } = ""; public string Collection { get; set; } = "";
/// <summary> /// <summary>
/// Gets or sets the document key targeted by the operation. /// Gets or sets the document key targeted by the operation.
/// </summary> /// </summary>
public string Key { get; set; } = ""; public string Key { get; set; } = "";
/// <summary> /// <summary>
/// Gets or sets the payload associated with the operation. /// Gets or sets the payload associated with the operation.
/// </summary> /// </summary>
public object? Data { get; set; } public object? Data { get; set; }
/// <summary> /// <summary>
/// Gets or sets the UTC time when the operation was queued. /// Gets or sets the UTC time when the operation was queued.
/// </summary> /// </summary>

View File

@@ -1,7 +1,5 @@
using System;
using System.Buffers; using System.Buffers;
using System.Collections.Generic; using System.Collections.Generic;
using System.IO;
using System.Text.Json; using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Core.Sync; namespace ZB.MOM.WW.CBDDC.Core.Sync;
@@ -14,7 +12,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
/// <summary> /// <summary>
/// Resolves a conflict between a local document and a remote operation. /// Resolves a conflict between a local document and a remote operation.
/// </summary> /// </summary>
/// <param name="local">The local document, or <see langword="null"/> if none exists.</param> /// <param name="local">The local document, or <see langword="null" /> if none exists.</param>
/// <param name="remote">The remote operation to apply.</param> /// <param name="remote">The remote operation to apply.</param>
/// <returns>The conflict resolution result indicating whether and what to apply.</returns> /// <returns>The conflict resolution result indicating whether and what to apply.</returns>
public ConflictResolutionResult Resolve(Document? local, OplogEntry remote) public ConflictResolutionResult Resolve(Document? local, OplogEntry remote)
@@ -22,7 +20,8 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
if (local == null) if (local == null)
{ {
var content = remote.Payload ?? default; var content = remote.Payload ?? default;
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete); var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp,
remote.Operation == OperationType.Delete);
return ConflictResolutionResult.Apply(newDoc); return ConflictResolutionResult.Apply(newDoc);
} }
@@ -33,6 +32,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
var newDoc = new Document(remote.Collection, remote.Key, default, remote.Timestamp, true); var newDoc = new Document(remote.Collection, remote.Key, default, remote.Timestamp, true);
return ConflictResolutionResult.Apply(newDoc); return ConflictResolutionResult.Apply(newDoc);
} }
return ConflictResolutionResult.Ignore(); return ConflictResolutionResult.Ignore();
} }
@@ -41,7 +41,9 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
var localTs = local.UpdatedAt; var localTs = local.UpdatedAt;
var remoteTs = remote.Timestamp; var remoteTs = remote.Timestamp;
if (localJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Apply(new Document(remote.Collection, remote.Key, remoteJson, remoteTs, false)); if (localJson.ValueKind == JsonValueKind.Undefined)
return ConflictResolutionResult.Apply(new Document(remote.Collection, remote.Key, remoteJson, remoteTs,
false));
if (remoteJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Ignore(); if (remoteJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Ignore();
// Optimization: Use ArrayBufferWriter (Net6.0) or MemoryStream (NS2.0) // Optimization: Use ArrayBufferWriter (Net6.0) or MemoryStream (NS2.0)
@@ -55,6 +57,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
{ {
MergeJson(writer, localJson, localTs, remoteJson, remoteTs); MergeJson(writer, localJson, localTs, remoteJson, remoteTs);
} }
mergedDocJson = JsonDocument.Parse(bufferWriter.WrittenMemory).RootElement; mergedDocJson = JsonDocument.Parse(bufferWriter.WrittenMemory).RootElement;
#else #else
using (var ms = new MemoryStream()) using (var ms = new MemoryStream())
@@ -73,7 +76,8 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
return ConflictResolutionResult.Apply(mergedDoc); return ConflictResolutionResult.Apply(mergedDoc);
} }
private void MergeJson(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs) private void MergeJson(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote,
HlcTimestamp remoteTs)
{ {
if (local.ValueKind != remote.ValueKind) if (local.ValueKind != remote.ValueKind)
{ {
@@ -102,11 +106,13 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
if (remoteTs.CompareTo(localTs) > 0) remote.WriteTo(writer); if (remoteTs.CompareTo(localTs) > 0) remote.WriteTo(writer);
else local.WriteTo(writer); else local.WriteTo(writer);
} }
break; break;
} }
} }
private void MergeObjects(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs) private void MergeObjects(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote,
HlcTimestamp remoteTs)
{ {
writer.WriteStartObject(); writer.WriteStartObject();
@@ -119,37 +125,32 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
foreach (var prop in local.EnumerateObject()) foreach (var prop in local.EnumerateObject())
{ {
var key = prop.Name; string key = prop.Name;
processedKeys.Add(key); // Mark as processed processedKeys.Add(key); // Mark as processed
writer.WritePropertyName(key); writer.WritePropertyName(key);
if (remote.TryGetProperty(key, out var remoteVal)) if (remote.TryGetProperty(key, out var remoteVal))
{
// Collision -> Merge // Collision -> Merge
MergeJson(writer, prop.Value, localTs, remoteVal, remoteTs); MergeJson(writer, prop.Value, localTs, remoteVal, remoteTs);
}
else else
{
// Only local // Only local
prop.Value.WriteTo(writer); prop.Value.WriteTo(writer);
} }
}
foreach (var prop in remote.EnumerateObject()) foreach (var prop in remote.EnumerateObject())
{
if (!processedKeys.Contains(prop.Name)) if (!processedKeys.Contains(prop.Name))
{ {
// New from remote // New from remote
writer.WritePropertyName(prop.Name); writer.WritePropertyName(prop.Name);
prop.Value.WriteTo(writer); prop.Value.WriteTo(writer);
} }
}
writer.WriteEndObject(); writer.WriteEndObject();
} }
private void MergeArrays(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs) private void MergeArrays(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote,
HlcTimestamp remoteTs)
{ {
// Heuristic check // Heuristic check
bool localIsObj = HasObjects(local); bool localIsObj = HasObjects(local);
@@ -198,30 +199,22 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
// 1. Process Local Items (Merge or Write) // 1. Process Local Items (Merge or Write)
foreach (var kvp in localMap) foreach (var kvp in localMap)
{ {
var id = kvp.Key; string id = kvp.Key;
var localItem = kvp.Value; var localItem = kvp.Value;
processedIds.Add(id); processedIds.Add(id);
if (remoteMap.TryGetValue(id, out var remoteItem)) if (remoteMap.TryGetValue(id, out var remoteItem))
{
// Merge recursively // Merge recursively
MergeJson(writer, localItem, localTs, remoteItem, remoteTs); MergeJson(writer, localItem, localTs, remoteItem, remoteTs);
}
else else
{
// Keep local item // Keep local item
localItem.WriteTo(writer); localItem.WriteTo(writer);
} }
}
// 2. Process New Remote Items // 2. Process New Remote Items
foreach (var kvp in remoteMap) foreach (var kvp in remoteMap)
{
if (!processedIds.Contains(kvp.Key)) if (!processedIds.Contains(kvp.Key))
{
kvp.Value.WriteTo(writer); kvp.Value.WriteTo(writer);
}
}
writer.WriteEndArray(); writer.WriteEndArray();
} }
@@ -249,6 +242,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
map[id] = item; map[id] = item;
} }
return map; return map;
} }
} }

View File

@@ -59,15 +59,13 @@ public class VectorClock
/// <param name="other">The vector clock to merge from.</param> /// <param name="other">The vector clock to merge from.</param>
public void Merge(VectorClock other) public void Merge(VectorClock other)
{ {
foreach (var nodeId in other.NodeIds) foreach (string nodeId in other.NodeIds)
{ {
var otherTs = other.GetTimestamp(nodeId); var otherTs = other.GetTimestamp(nodeId);
if (!_clock.TryGetValue(nodeId, out var currentTs) || otherTs.CompareTo(currentTs) > 0) if (!_clock.TryGetValue(nodeId, out var currentTs) || otherTs.CompareTo(currentTs) > 0)
{
_clock[nodeId] = otherTs; _clock[nodeId] = otherTs;
} }
} }
}
/// <summary> /// <summary>
/// Compares this vector clock with another to determine causality. /// Compares this vector clock with another to determine causality.
@@ -79,12 +77,12 @@ public class VectorClock
/// <param name="other">The vector clock to compare with.</param> /// <param name="other">The vector clock to compare with.</param>
public CausalityRelation CompareTo(VectorClock other) public CausalityRelation CompareTo(VectorClock other)
{ {
bool thisAhead = false; var thisAhead = false;
bool otherAhead = false; var otherAhead = false;
var allNodes = new HashSet<string>(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal); var allNodes = new HashSet<string>(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal);
foreach (var nodeId in allNodes) foreach (string nodeId in allNodes)
{ {
var thisTs = GetTimestamp(nodeId); var thisTs = GetTimestamp(nodeId);
var otherTs = other.GetTimestamp(nodeId); var otherTs = other.GetTimestamp(nodeId);
@@ -92,19 +90,11 @@ public class VectorClock
int cmp = thisTs.CompareTo(otherTs); int cmp = thisTs.CompareTo(otherTs);
if (cmp > 0) if (cmp > 0)
{
thisAhead = true; thisAhead = true;
} else if (cmp < 0) otherAhead = true;
else if (cmp < 0)
{
otherAhead = true;
}
// Early exit if concurrent // Early exit if concurrent
if (thisAhead && otherAhead) if (thisAhead && otherAhead) return CausalityRelation.Concurrent;
{
return CausalityRelation.Concurrent;
}
} }
if (thisAhead && !otherAhead) if (thisAhead && !otherAhead)
@@ -123,20 +113,14 @@ public class VectorClock
public IEnumerable<string> GetNodesWithUpdates(VectorClock other) public IEnumerable<string> GetNodesWithUpdates(VectorClock other)
{ {
var allNodes = new HashSet<string>(_clock.Keys, StringComparer.Ordinal); var allNodes = new HashSet<string>(_clock.Keys, StringComparer.Ordinal);
foreach (var nodeId in other._clock.Keys) foreach (string nodeId in other._clock.Keys) allNodes.Add(nodeId);
{
allNodes.Add(nodeId);
}
foreach (var nodeId in allNodes) foreach (string nodeId in allNodes)
{ {
var thisTs = GetTimestamp(nodeId); var thisTs = GetTimestamp(nodeId);
var otherTs = other.GetTimestamp(nodeId); var otherTs = other.GetTimestamp(nodeId);
if (otherTs.CompareTo(thisTs) > 0) if (otherTs.CompareTo(thisTs) > 0) yield return nodeId;
{
yield return nodeId;
}
} }
} }
@@ -149,15 +133,12 @@ public class VectorClock
{ {
var allNodes = new HashSet<string>(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal); var allNodes = new HashSet<string>(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal);
foreach (var nodeId in allNodes) foreach (string nodeId in allNodes)
{ {
var thisTs = GetTimestamp(nodeId); var thisTs = GetTimestamp(nodeId);
var otherTs = other.GetTimestamp(nodeId); var otherTs = other.GetTimestamp(nodeId);
if (thisTs.CompareTo(otherTs) > 0) if (thisTs.CompareTo(otherTs) > 0) yield return nodeId;
{
yield return nodeId;
}
} }
} }
@@ -187,10 +168,13 @@ public enum CausalityRelation
{ {
/// <summary>Both vector clocks are equal.</summary> /// <summary>Both vector clocks are equal.</summary>
Equal, Equal,
/// <summary>This vector clock is strictly ahead (dominates).</summary> /// <summary>This vector clock is strictly ahead (dominates).</summary>
StrictlyAhead, StrictlyAhead,
/// <summary>This vector clock is strictly behind (dominated).</summary> /// <summary>This vector clock is strictly behind (dominated).</summary>
StrictlyBehind, StrictlyBehind,
/// <summary>Vector clocks are concurrent (neither dominates).</summary> /// <summary>Vector clocks are concurrent (neither dominates).</summary>
Concurrent Concurrent
} }

View File

@@ -19,15 +19,15 @@
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<None Include="README.md" Pack="true" PackagePath="\" /> <None Include="README.md" Pack="true" PackagePath="\"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0" /> <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<Folder Include="Storage\Events\" /> <Folder Include="Storage\Events\"/>
</ItemGroup> </ItemGroup>
</Project> </Project>

View File

@@ -1,12 +1,12 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Diagnostics.HealthChecks;
using Microsoft.Extensions.Hosting;
using ZB.MOM.WW.CBDDC.Hosting.Configuration; using ZB.MOM.WW.CBDDC.Hosting.Configuration;
using ZB.MOM.WW.CBDDC.Hosting.HealthChecks; using ZB.MOM.WW.CBDDC.Hosting.HealthChecks;
using ZB.MOM.WW.CBDDC.Hosting.HostedServices; using ZB.MOM.WW.CBDDC.Hosting.HostedServices;
using ZB.MOM.WW.CBDDC.Hosting.Services; using ZB.MOM.WW.CBDDC.Hosting.Services;
using ZB.MOM.WW.CBDDC.Network; using ZB.MOM.WW.CBDDC.Network;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Diagnostics.HealthChecks;
using Microsoft.Extensions.Hosting;
namespace ZB.MOM.WW.CBDDC.Hosting; namespace ZB.MOM.WW.CBDDC.Hosting;
@@ -51,10 +51,7 @@ public static class CBDDCHostingExtensions
this IServiceCollection services, this IServiceCollection services,
Action<ClusterOptions>? configure = null) Action<ClusterOptions>? configure = null)
{ {
return services.AddCBDDCHosting(options => return services.AddCBDDCHosting(options => { configure?.Invoke(options.Cluster); });
{
configure?.Invoke(options.Cluster);
});
} }
private static void RegisterSingleClusterServices( private static void RegisterSingleClusterServices(
@@ -81,12 +78,10 @@ public static class CBDDCHostingExtensions
{ {
// Health checks // Health checks
if (options.EnableHealthChecks) if (options.EnableHealthChecks)
{
services.AddHealthChecks() services.AddHealthChecks()
.AddCheck<CBDDCHealthCheck>( .AddCheck<CBDDCHealthCheck>(
"cbddc", "cbddc",
failureStatus: HealthStatus.Unhealthy, HealthStatus.Unhealthy,
tags: new[] { "db", "ready" }); new[] { "db", "ready" });
}
} }
} }

View File

@@ -1,5 +1,3 @@
using System;
namespace ZB.MOM.WW.CBDDC.Hosting.Configuration; namespace ZB.MOM.WW.CBDDC.Hosting.Configuration;
/// <summary> /// <summary>
@@ -32,7 +30,8 @@ public class ClusterOptions
public long PeerConfirmationLagThresholdMs { get; set; } = 30_000; public long PeerConfirmationLagThresholdMs { get; set; } = 30_000;
/// <summary> /// <summary>
/// Gets or sets the critical lag threshold (in milliseconds) used to determine when a tracked peer causes unhealthy status. /// Gets or sets the critical lag threshold (in milliseconds) used to determine when a tracked peer causes unhealthy
/// status.
/// Peers above this threshold mark health as unhealthy. /// Peers above this threshold mark health as unhealthy.
/// Default: 120,000 ms. /// Default: 120,000 ms.
/// </summary> /// </summary>

View File

@@ -1,8 +1,3 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Diagnostics.HealthChecks;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Hosting.Configuration; using ZB.MOM.WW.CBDDC.Hosting.Configuration;
@@ -16,11 +11,11 @@ namespace ZB.MOM.WW.CBDDC.Hosting.HealthChecks;
public class CBDDCHealthCheck : IHealthCheck public class CBDDCHealthCheck : IHealthCheck
{ {
private readonly IOplogStore _oplogStore; private readonly IOplogStore _oplogStore;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
private readonly CBDDCHostingOptions _options; private readonly CBDDCHostingOptions _options;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CBDDCHealthCheck"/> class. /// Initializes a new instance of the <see cref="CBDDCHealthCheck" /> class.
/// </summary> /// </summary>
/// <param name="oplogStore">The oplog store used to verify persistence health.</param> /// <param name="oplogStore">The oplog store used to verify persistence health.</param>
/// <param name="peerOplogConfirmationStore">The peer confirmation store used for confirmation lag health checks.</param> /// <param name="peerOplogConfirmationStore">The peer confirmation store used for confirmation lag health checks.</param>
@@ -31,7 +26,8 @@ public class CBDDCHealthCheck : IHealthCheck
CBDDCHostingOptions options) CBDDCHostingOptions options)
{ {
_oplogStore = oplogStore ?? throw new ArgumentNullException(nameof(oplogStore)); _oplogStore = oplogStore ?? throw new ArgumentNullException(nameof(oplogStore));
_peerOplogConfirmationStore = peerOplogConfirmationStore ?? throw new ArgumentNullException(nameof(peerOplogConfirmationStore)); _peerOplogConfirmationStore = peerOplogConfirmationStore ??
throw new ArgumentNullException(nameof(peerOplogConfirmationStore));
_options = options ?? throw new ArgumentNullException(nameof(options)); _options = options ?? throw new ArgumentNullException(nameof(options));
} }
@@ -40,7 +36,7 @@ public class CBDDCHealthCheck : IHealthCheck
/// </summary> /// </summary>
/// <param name="context">The health check execution context.</param> /// <param name="context">The health check execution context.</param>
/// <param name="cancellationToken">A token used to cancel the health check.</param> /// <param name="cancellationToken">A token used to cancel the health check.</param>
/// <returns>A <see cref="HealthCheckResult"/> describing the health status.</returns> /// <returns>A <see cref="HealthCheckResult" /> describing the health status.</returns>
public async Task<HealthCheckResult> CheckHealthAsync( public async Task<HealthCheckResult> CheckHealthAsync(
HealthCheckContext context, HealthCheckContext context,
CancellationToken cancellationToken = default) CancellationToken cancellationToken = default)
@@ -58,15 +54,18 @@ public class CBDDCHealthCheck : IHealthCheck
var peersWithNoConfirmation = new List<string>(); var peersWithNoConfirmation = new List<string>();
var laggingPeers = new List<string>(); var laggingPeers = new List<string>();
var criticalLaggingPeers = new List<string>(); var criticalLaggingPeers = new List<string>();
var lastSuccessfulConfirmationUpdateByPeer = new Dictionary<string, DateTimeOffset?>(StringComparer.Ordinal); var lastSuccessfulConfirmationUpdateByPeer =
new Dictionary<string, DateTimeOffset?>(StringComparer.Ordinal);
var maxLagMs = 0L; var maxLagMs = 0L;
var lagThresholdMs = Math.Max(0, _options.Cluster.PeerConfirmationLagThresholdMs); long lagThresholdMs = Math.Max(0, _options.Cluster.PeerConfirmationLagThresholdMs);
var criticalLagThresholdMs = Math.Max(lagThresholdMs, _options.Cluster.PeerConfirmationCriticalLagThresholdMs); long criticalLagThresholdMs =
Math.Max(lagThresholdMs, _options.Cluster.PeerConfirmationCriticalLagThresholdMs);
foreach (var peerNodeId in trackedPeers) foreach (string peerNodeId in trackedPeers)
{ {
var confirmations = (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken)) var confirmations =
(await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken))
.Where(confirmation => confirmation.IsActive) .Where(confirmation => confirmation.IsActive)
.ToList(); .ToList();
@@ -83,19 +82,14 @@ public class CBDDCHealthCheck : IHealthCheck
.ThenBy(confirmation => confirmation.ConfirmedLogic) .ThenBy(confirmation => confirmation.ConfirmedLogic)
.First(); .First();
var lagMs = Math.Max(0, localHead.PhysicalTime - oldestConfirmation.ConfirmedWall); long lagMs = Math.Max(0, localHead.PhysicalTime - oldestConfirmation.ConfirmedWall);
maxLagMs = Math.Max(maxLagMs, lagMs); maxLagMs = Math.Max(maxLagMs, lagMs);
lastSuccessfulConfirmationUpdateByPeer[peerNodeId] = confirmations.Max(confirmation => confirmation.LastConfirmedUtc); lastSuccessfulConfirmationUpdateByPeer[peerNodeId] =
confirmations.Max(confirmation => confirmation.LastConfirmedUtc);
if (lagMs > lagThresholdMs) if (lagMs > lagThresholdMs) laggingPeers.Add(peerNodeId);
{
laggingPeers.Add(peerNodeId);
}
if (lagMs > criticalLagThresholdMs) if (lagMs > criticalLagThresholdMs) criticalLaggingPeers.Add(peerNodeId);
{
criticalLaggingPeers.Add(peerNodeId);
}
} }
var payload = new Dictionary<string, object> var payload = new Dictionary<string, object>
@@ -108,18 +102,14 @@ public class CBDDCHealthCheck : IHealthCheck
}; };
if (criticalLaggingPeers.Count > 0) if (criticalLaggingPeers.Count > 0)
{
return HealthCheckResult.Unhealthy( return HealthCheckResult.Unhealthy(
$"CBDDC is unhealthy. Critical lag detected for {criticalLaggingPeers.Count} tracked peer(s).", $"CBDDC is unhealthy. Critical lag detected for {criticalLaggingPeers.Count} tracked peer(s).",
data: payload); data: payload);
}
if (peersWithNoConfirmation.Count > 0 || laggingPeers.Count > 0) if (peersWithNoConfirmation.Count > 0 || laggingPeers.Count > 0)
{
return HealthCheckResult.Degraded( return HealthCheckResult.Degraded(
$"CBDDC is degraded. Lagging peers: {laggingPeers.Count}, unconfirmed peers: {peersWithNoConfirmation.Count}.", $"CBDDC is degraded. Lagging peers: {laggingPeers.Count}, unconfirmed peers: {peersWithNoConfirmation.Count}.",
data: payload); data: payload);
}
return HealthCheckResult.Healthy( return HealthCheckResult.Healthy(
$"CBDDC is healthy. Latest timestamp: {localHead.PhysicalTime}.", $"CBDDC is healthy. Latest timestamp: {localHead.PhysicalTime}.",
@@ -129,7 +119,7 @@ public class CBDDCHealthCheck : IHealthCheck
{ {
return HealthCheckResult.Unhealthy( return HealthCheckResult.Unhealthy(
"CBDDC persistence layer is unavailable", "CBDDC persistence layer is unavailable",
exception: ex); ex);
} }
} }
} }

View File

@@ -1,5 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Serilog.Context; using Serilog.Context;
@@ -16,7 +14,7 @@ public class DiscoveryServiceHostedService : IHostedService
private readonly ILogger<DiscoveryServiceHostedService> _logger; private readonly ILogger<DiscoveryServiceHostedService> _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="DiscoveryServiceHostedService"/> class. /// Initializes a new instance of the <see cref="DiscoveryServiceHostedService" /> class.
/// </summary> /// </summary>
/// <param name="discoveryService">The discovery service to manage.</param> /// <param name="discoveryService">The discovery service to manage.</param>
/// <param name="logger">The logger used for service lifecycle events.</param> /// <param name="logger">The logger used for service lifecycle events.</param>

View File

@@ -1,5 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Serilog.Context; using Serilog.Context;
@@ -12,11 +10,11 @@ namespace ZB.MOM.WW.CBDDC.Hosting.HostedServices;
/// </summary> /// </summary>
public class TcpSyncServerHostedService : IHostedService public class TcpSyncServerHostedService : IHostedService
{ {
private readonly ISyncServer _syncServer;
private readonly ILogger<TcpSyncServerHostedService> _logger; private readonly ILogger<TcpSyncServerHostedService> _logger;
private readonly ISyncServer _syncServer;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="TcpSyncServerHostedService"/> class. /// Initializes a new instance of the <see cref="TcpSyncServerHostedService" /> class.
/// </summary> /// </summary>
/// <param name="syncServer">The sync server to start and stop.</param> /// <param name="syncServer">The sync server to start and stop.</param>
/// <param name="logger">The logger instance.</param> /// <param name="logger">The logger instance.</param>

View File

@@ -41,6 +41,7 @@ app.Run();
## Health Checks ## Health Checks
CBDDC registers health checks that verify: CBDDC registers health checks that verify:
- Database connectivity - Database connectivity
- Latest timestamp retrieval - Latest timestamp retrieval
@@ -53,6 +54,7 @@ curl http://localhost:5000/health
### Cluster ### Cluster
Best for: Best for:
- Dedicated database servers - Dedicated database servers
- Simple deployments - Simple deployments
- Development/testing environments - Development/testing environments
@@ -60,6 +62,7 @@ Best for:
## Server Behavior ## Server Behavior
CBDDC servers operate in respond-only mode: CBDDC servers operate in respond-only mode:
- Accept incoming sync connections - Accept incoming sync connections
- Respond to sync requests - Respond to sync requests
- Do not initiate outbound sync - Do not initiate outbound sync
@@ -70,7 +73,7 @@ CBDDC servers operate in respond-only mode:
### ClusterOptions ### ClusterOptions
| Property | Type | Default | Description | | Property | Type | Default | Description |
|----------|------|---------|-------------| |--------------------|--------|-------------|------------------------|
| NodeId | string | MachineName | Unique node identifier | | NodeId | string | MachineName | Unique node identifier |
| TcpPort | int | 5001 | TCP port for sync | | TcpPort | int | 5001 | TCP port for sync |
| EnableUdpDiscovery | bool | false | Enable UDP discovery | | EnableUdpDiscovery | bool | false | Enable UDP discovery |

View File

@@ -1,5 +1,3 @@
using System;
using System.Collections.Generic;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using Serilog.Context; using Serilog.Context;
@@ -17,9 +15,9 @@ public class NoOpDiscoveryService : IDiscoveryService
private readonly ILogger<NoOpDiscoveryService> _logger; private readonly ILogger<NoOpDiscoveryService> _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="NoOpDiscoveryService"/> class. /// Initializes a new instance of the <see cref="NoOpDiscoveryService" /> class.
/// </summary> /// </summary>
/// <param name="logger">The logger instance to use, or <see langword="null"/> to use a no-op logger.</param> /// <param name="logger">The logger instance to use, or <see langword="null" /> to use a no-op logger.</param>
public NoOpDiscoveryService(ILogger<NoOpDiscoveryService>? logger = null) public NoOpDiscoveryService(ILogger<NoOpDiscoveryService>? logger = null)
{ {
_logger = logger ?? NullLogger<NoOpDiscoveryService>.Instance; _logger = logger ?? NullLogger<NoOpDiscoveryService>.Instance;

View File

@@ -1,4 +1,3 @@
using System.Threading.Tasks;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using Serilog.Context; using Serilog.Context;
@@ -15,9 +14,9 @@ public class NoOpSyncOrchestrator : ISyncOrchestrator
private readonly ILogger<NoOpSyncOrchestrator> _logger; private readonly ILogger<NoOpSyncOrchestrator> _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="NoOpSyncOrchestrator"/> class. /// Initializes a new instance of the <see cref="NoOpSyncOrchestrator" /> class.
/// </summary> /// </summary>
/// <param name="logger">The logger instance to use, or <see langword="null"/> for a no-op logger.</param> /// <param name="logger">The logger instance to use, or <see langword="null" /> for a no-op logger.</param>
public NoOpSyncOrchestrator(ILogger<NoOpSyncOrchestrator>? logger = null) public NoOpSyncOrchestrator(ILogger<NoOpSyncOrchestrator>? logger = null)
{ {
_logger = logger ?? NullLogger<NoOpSyncOrchestrator>.Instance; _logger = logger ?? NullLogger<NoOpSyncOrchestrator>.Instance;

View File

@@ -1,14 +1,14 @@
<Project Sdk="Microsoft.NET.Sdk"> <Project Sdk="Microsoft.NET.Sdk">
<ItemGroup> <ItemGroup>
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" /> <ProjectReference Include="..\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" Version="8.0.0" /> <PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0" /> <PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0" /> <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
<PackageReference Include="Serilog" Version="4.2.0" /> <PackageReference Include="Serilog" Version="4.2.0"/>
</ItemGroup> </ItemGroup>
<PropertyGroup> <PropertyGroup>
@@ -31,7 +31,7 @@
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<None Include="README.md" Pack="true" PackagePath="\" /> <None Include="README.md" Pack="true" PackagePath="\"/>
</ItemGroup> </ItemGroup>
</Project> </Project>

View File

@@ -1,8 +1,7 @@
using System; using System.Net;
using System.Net.NetworkInformation;
using System.Net.Sockets;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using System.Linq;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -14,24 +13,9 @@ public class CBDDCNode : ICBDDCNode
{ {
private readonly ILogger<CBDDCNode> _logger; private readonly ILogger<CBDDCNode> _logger;
/// <summary>
/// Gets the Sync Server instance.
/// </summary>
public ISyncServer Server { get; }
/// <summary> /// <summary>
/// Gets the Discovery Service instance. /// Initializes a new instance of the <see cref="CBDDCNode" /> class.
/// </summary>
public IDiscoveryService Discovery { get; }
/// <summary>
/// Gets the Synchronization Orchestrator instance.
/// </summary>
public ISyncOrchestrator Orchestrator { get; }
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCNode"/> class.
/// </summary> /// </summary>
/// <param name="server">The TCP server for handling incoming sync requests.</param> /// <param name="server">The TCP server for handling incoming sync requests.</param>
/// <param name="discovery">The UDP service for peer discovery.</param> /// <param name="discovery">The UDP service for peer discovery.</param>
@@ -49,6 +33,21 @@ public class CBDDCNode : ICBDDCNode
_logger = logger; _logger = logger;
} }
/// <summary>
/// Gets the Sync Server instance.
/// </summary>
public ISyncServer Server { get; }
/// <summary>
/// Gets the Discovery Service instance.
/// </summary>
public IDiscoveryService Discovery { get; }
/// <summary>
/// Gets the Synchronization Orchestrator instance.
/// </summary>
public ISyncOrchestrator Orchestrator { get; }
/// <summary> /// <summary>
/// Starts all node components (Server, Discovery, Orchestrator). /// Starts all node components (Server, Discovery, Orchestrator).
/// </summary> /// </summary>
@@ -93,12 +92,11 @@ public class CBDDCNode : ICBDDCNode
{ {
// If the server is listening on "Any" (0.0.0.0), we cannot advertise that as a connectable address. // If the server is listening on "Any" (0.0.0.0), we cannot advertise that as a connectable address.
// We must resolve the actual machine IP address that peers can reach. // We must resolve the actual machine IP address that peers can reach.
if (Equals(ep.Address, System.Net.IPAddress.Any) || Equals(ep.Address, System.Net.IPAddress.IPv6Any)) if (Equals(ep.Address, IPAddress.Any) || Equals(ep.Address, IPAddress.IPv6Any))
{
return new NodeAddress(GetLocalIpAddress(), ep.Port); return new NodeAddress(GetLocalIpAddress(), ep.Port);
}
return new NodeAddress(ep.Address.ToString(), ep.Port); return new NodeAddress(ep.Address.ToString(), ep.Port);
} }
return new NodeAddress("Unknown", 0); return new NodeAddress("Unknown", 0);
} }
} }
@@ -107,20 +105,17 @@ public class CBDDCNode : ICBDDCNode
{ {
try try
{ {
var interfaces = System.Net.NetworkInformation.NetworkInterface.GetAllNetworkInterfaces() var interfaces = NetworkInterface.GetAllNetworkInterfaces()
.Where(i => i.OperationalStatus == System.Net.NetworkInformation.OperationalStatus.Up .Where(i => i.OperationalStatus == OperationalStatus.Up
&& i.NetworkInterfaceType != System.Net.NetworkInformation.NetworkInterfaceType.Loopback); && i.NetworkInterfaceType != NetworkInterfaceType.Loopback);
foreach (var i in interfaces) foreach (var i in interfaces)
{ {
var props = i.GetIPProperties(); var props = i.GetIPProperties();
var ipInfo = props.UnicastAddresses var ipInfo = props.UnicastAddresses
.FirstOrDefault(u => u.Address.AddressFamily == System.Net.Sockets.AddressFamily.InterNetwork); // Prefer IPv4 .FirstOrDefault(u => u.Address.AddressFamily == AddressFamily.InterNetwork); // Prefer IPv4
if (ipInfo != null) if (ipInfo != null) return ipInfo.Address.ToString();
{
return ipInfo.Address.ToString();
}
} }
return "127.0.0.1"; return "127.0.0.1";
@@ -136,16 +131,7 @@ public class CBDDCNode : ICBDDCNode
public class NodeAddress public class NodeAddress
{ {
/// <summary> /// <summary>
/// Gets the host portion of the node address. /// Initializes a new instance of the <see cref="NodeAddress" /> class.
/// </summary>
public string Host { get; }
/// <summary>
/// Gets the port portion of the node address.
/// </summary>
public int Port { get; }
/// <summary>
/// Initializes a new instance of the <see cref="NodeAddress"/> class.
/// </summary> /// </summary>
/// <param name="host">The host name or IP address.</param> /// <param name="host">The host name or IP address.</param>
/// <param name="port">The port number.</param> /// <param name="port">The port number.</param>
@@ -155,6 +141,19 @@ public class NodeAddress
Port = port; Port = port;
} }
/// <summary>
/// Gets the host portion of the node address.
/// </summary>
public string Host { get; }
/// <summary>
/// Gets the port portion of the node address.
/// </summary>
public int Port { get; }
/// <inheritdoc /> /// <inheritdoc />
public override string ToString() => $"{Host}:{Port}"; public override string ToString()
{
return $"{Host}:{Port}";
}
} }

View File

@@ -1,9 +1,6 @@
using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Serilog.Context; using Serilog.Context;
using System;
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -12,11 +9,11 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary> /// </summary>
public class CBDDCNodeService : IHostedService public class CBDDCNodeService : IHostedService
{ {
private readonly ICBDDCNode _node;
private readonly ILogger<CBDDCNodeService> _logger; private readonly ILogger<CBDDCNodeService> _logger;
private readonly ICBDDCNode _node;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CBDDCNodeService"/> class. /// Initializes a new instance of the <see cref="CBDDCNodeService" /> class.
/// </summary> /// </summary>
/// <param name="node">The CBDDC node to manage.</param> /// <param name="node">The CBDDC node to manage.</param>
/// <param name="logger">The logger instance.</param> /// <param name="logger">The logger instance.</param>

View File

@@ -16,7 +16,7 @@ public static class CBDDCServiceCollectionExtensions
/// Registers core CBDDC service dependencies. /// Registers core CBDDC service dependencies.
/// </summary> /// </summary>
/// <param name="services">The service collection to update.</param> /// <param name="services">The service collection to update.</param>
/// <returns>The same <see cref="IServiceCollection"/> instance for chaining.</returns> /// <returns>The same <see cref="IServiceCollection" /> instance for chaining.</returns>
public static IServiceCollection AddCBDDCCore(this IServiceCollection services) public static IServiceCollection AddCBDDCCore(this IServiceCollection services)
{ {
ArgumentNullException.ThrowIfNull(services); ArgumentNullException.ThrowIfNull(services);

View File

@@ -1,35 +1,30 @@
using System;
using System.Collections.Concurrent; using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
/// <summary> /// <summary>
/// Composite discovery service that combines UDP LAN discovery with persistent remote peers from the peerConfigurationStore. /// Composite discovery service that combines UDP LAN discovery with persistent remote peers from the
/// peerConfigurationStore.
/// Periodically refreshes the remote peer list and merges with actively discovered LAN peers. /// Periodically refreshes the remote peer list and merges with actively discovered LAN peers.
///
/// Remote peer configurations are stored in a synchronized collection that is automatically /// Remote peer configurations are stored in a synchronized collection that is automatically
/// replicated across all nodes in the cluster. Any node that adds a remote peer will have /// replicated across all nodes in the cluster. Any node that adds a remote peer will have
/// it synchronized to all other nodes automatically. /// it synchronized to all other nodes automatically.
/// </summary> /// </summary>
public class CompositeDiscoveryService : IDiscoveryService public class CompositeDiscoveryService : IDiscoveryService
{ {
private readonly IDiscoveryService _udpDiscovery;
private readonly IPeerConfigurationStore _peerConfigurationStore;
private readonly ILogger<CompositeDiscoveryService> _logger;
private readonly TimeSpan _refreshInterval;
private const string RemotePeersCollectionName = "_system_remote_peers"; private const string RemotePeersCollectionName = "_system_remote_peers";
private readonly ILogger<CompositeDiscoveryService> _logger;
private readonly IPeerConfigurationStore _peerConfigurationStore;
private readonly TimeSpan _refreshInterval;
private readonly ConcurrentDictionary<string, PeerNode> _remotePeers = new();
private readonly object _startStopLock = new();
private readonly IDiscoveryService _udpDiscovery;
private CancellationTokenSource? _cts; private CancellationTokenSource? _cts;
private readonly ConcurrentDictionary<string, PeerNode> _remotePeers = new();
private readonly object _startStopLock = new object();
/// <summary> /// <summary>
/// Initializes a new instance of the CompositeDiscoveryService class. /// Initializes a new instance of the CompositeDiscoveryService class.
@@ -45,7 +40,8 @@ public class CompositeDiscoveryService : IDiscoveryService
TimeSpan? refreshInterval = null) TimeSpan? refreshInterval = null)
{ {
_udpDiscovery = udpDiscovery ?? throw new ArgumentNullException(nameof(udpDiscovery)); _udpDiscovery = udpDiscovery ?? throw new ArgumentNullException(nameof(udpDiscovery));
_peerConfigurationStore = peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore)); _peerConfigurationStore =
peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore));
_logger = logger ?? NullLogger<CompositeDiscoveryService>.Instance; _logger = logger ?? NullLogger<CompositeDiscoveryService>.Instance;
_refreshInterval = refreshInterval ?? TimeSpan.FromMinutes(5); _refreshInterval = refreshInterval ?? TimeSpan.FromMinutes(5);
} }
@@ -76,6 +72,7 @@ public class CompositeDiscoveryService : IDiscoveryService
_logger.LogWarning("Composite discovery service already started"); _logger.LogWarning("Composite discovery service already started");
return; return;
} }
_cts = new CancellationTokenSource(); _cts = new CancellationTokenSource();
} }
@@ -143,7 +140,6 @@ public class CompositeDiscoveryService : IDiscoveryService
private async Task RefreshLoopAsync(CancellationToken cancellationToken) private async Task RefreshLoopAsync(CancellationToken cancellationToken)
{ {
while (!cancellationToken.IsCancellationRequested) while (!cancellationToken.IsCancellationRequested)
{
try try
{ {
await Task.Delay(_refreshInterval, cancellationToken); await Task.Delay(_refreshInterval, cancellationToken);
@@ -159,7 +155,6 @@ public class CompositeDiscoveryService : IDiscoveryService
_logger.LogError(ex, "Error during remote peer refresh"); _logger.LogError(ex, "Error during remote peer refresh");
} }
} }
}
private async Task RefreshRemotePeersAsync() private async Task RefreshRemotePeersAsync()
{ {
@@ -178,14 +173,14 @@ public class CompositeDiscoveryService : IDiscoveryService
config.NodeId, config.NodeId,
config.Address, config.Address,
now, // LastSeen is now for persistent peers (always considered active) now, // LastSeen is now for persistent peers (always considered active)
config.Type, config.Type // Remote peers are always members, never gateways
NodeRole.Member // Remote peers are always members, never gateways
); );
_remotePeers[config.NodeId] = peerNode; _remotePeers[config.NodeId] = peerNode;
} }
_logger.LogInformation("Refreshed remote peers: {Count} enabled peers loaded from synchronized collection", _remotePeers.Count); _logger.LogInformation("Refreshed remote peers: {Count} enabled peers loaded from synchronized collection",
_remotePeers.Count);
} }
catch (Exception ex) catch (Exception ex)
{ {

View File

@@ -1,5 +1,3 @@
using System;
using System.IO;
using System.IO.Compression; using System.IO.Compression;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -38,6 +36,7 @@ public static class CompressionHelper
{ {
brotli.Write(data, 0, data.Length); brotli.Write(data, 0, data.Length);
} }
return output.ToArray(); return output.ToArray();
#else #else
return data; return data;
@@ -58,6 +57,7 @@ public static class CompressionHelper
{ {
brotli.CopyTo(output); brotli.CopyTo(output);
} }
return output.ToArray(); return output.ToArray();
#else #else
throw new NotSupportedException("Brotli decompression not supported on this platform."); throw new NotSupportedException("Brotli decompression not supported on this platform.");

View File

@@ -1,21 +1,22 @@
using System.Threading.Tasks; namespace ZB.MOM.WW.CBDDC.Network;
namespace ZB.MOM.WW.CBDDC.Network public interface ICBDDCNode
{ {
public interface ICBDDCNode
{
/// <summary> /// <summary>
/// Gets the node address. /// Gets the node address.
/// </summary> /// </summary>
NodeAddress Address { get; } NodeAddress Address { get; }
/// <summary> /// <summary>
/// Gets the discovery service. /// Gets the discovery service.
/// </summary> /// </summary>
IDiscoveryService Discovery { get; } IDiscoveryService Discovery { get; }
/// <summary> /// <summary>
/// Gets the synchronization orchestrator. /// Gets the synchronization orchestrator.
/// </summary> /// </summary>
ISyncOrchestrator Orchestrator { get; } ISyncOrchestrator Orchestrator { get; }
/// <summary> /// <summary>
/// Gets the synchronization server. /// Gets the synchronization server.
/// </summary> /// </summary>
@@ -26,10 +27,10 @@ namespace ZB.MOM.WW.CBDDC.Network
/// </summary> /// </summary>
/// <returns>A task that represents the asynchronous start operation.</returns> /// <returns>A task that represents the asynchronous start operation.</returns>
Task Start(); Task Start();
/// <summary> /// <summary>
/// Stops the node services. /// Stops the node services.
/// </summary> /// </summary>
/// <returns>A task that represents the asynchronous stop operation.</returns> /// <returns>A task that represents the asynchronous stop operation.</returns>
Task Stop(); Task Stop();
}
} }

View File

@@ -1,14 +1,12 @@
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using System.Collections.Generic;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network namespace ZB.MOM.WW.CBDDC.Network;
/// <summary>
/// Defines peer discovery operations.
/// </summary>
public interface IDiscoveryService
{ {
/// <summary>
/// Defines peer discovery operations.
/// </summary>
public interface IDiscoveryService
{
/// <summary> /// <summary>
/// Gets the currently active peers. /// Gets the currently active peers.
/// </summary> /// </summary>
@@ -26,5 +24,4 @@ namespace ZB.MOM.WW.CBDDC.Network
/// </summary> /// </summary>
/// <returns>A task that represents the asynchronous operation.</returns> /// <returns>A task that represents the asynchronous operation.</returns>
Task Stop(); Task Stop();
}
} }

View File

@@ -1,5 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;

View File

@@ -1,12 +1,10 @@
using System.Threading.Tasks; namespace ZB.MOM.WW.CBDDC.Network;
namespace ZB.MOM.WW.CBDDC.Network /// <summary>
/// Defines lifecycle operations for synchronization orchestration.
/// </summary>
public interface ISyncOrchestrator
{ {
/// <summary>
/// Defines lifecycle operations for synchronization orchestration.
/// </summary>
public interface ISyncOrchestrator
{
/// <summary> /// <summary>
/// Starts synchronization orchestration. /// Starts synchronization orchestration.
/// </summary> /// </summary>
@@ -18,5 +16,4 @@ namespace ZB.MOM.WW.CBDDC.Network
/// </summary> /// </summary>
/// <returns>A task that represents the asynchronous stop operation.</returns> /// <returns>A task that represents the asynchronous stop operation.</returns>
Task Stop(); Task Stop();
}
} }

View File

@@ -1,17 +1,24 @@
using System.Net; using System.Net;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
/// <summary> /// <summary>
/// Defines the contract for a server that supports starting, stopping, and reporting its listening network endpoint for /// Defines the contract for a server that supports starting, stopping, and reporting its listening network endpoint
/// for
/// synchronization operations. /// synchronization operations.
/// </summary> /// </summary>
/// <remarks>Implementations of this interface are expected to provide asynchronous methods for starting and /// <remarks>
/// Implementations of this interface are expected to provide asynchronous methods for starting and
/// stopping the server. The listening endpoint may be null if the server is not currently active or has not been /// stopping the server. The listening endpoint may be null if the server is not currently active or has not been
/// started.</remarks> /// started.
/// </remarks>
public interface ISyncServer public interface ISyncServer
{ {
/// <summary>
/// Gets the network endpoint currently used by the server for listening.
/// </summary>
IPEndPoint? ListeningEndpoint { get; }
/// <summary> /// <summary>
/// Starts the synchronization server. /// Starts the synchronization server.
/// </summary> /// </summary>
@@ -23,9 +30,4 @@ public interface ISyncServer
/// </summary> /// </summary>
/// <returns>A task that represents the asynchronous operation.</returns> /// <returns>A task that represents the asynchronous operation.</returns>
Task Stop(); Task Stop();
/// <summary>
/// Gets the network endpoint currently used by the server for listening.
/// </summary>
IPEndPoint? ListeningEndpoint { get; }
} }

View File

@@ -1,11 +1,6 @@
using System;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Network.Leadership; namespace ZB.MOM.WW.CBDDC.Network.Leadership;
@@ -16,30 +11,13 @@ namespace ZB.MOM.WW.CBDDC.Network.Leadership;
/// </summary> /// </summary>
public class BullyLeaderElectionService : ILeaderElectionService public class BullyLeaderElectionService : ILeaderElectionService
{ {
private readonly IDiscoveryService _discoveryService;
private readonly IPeerNodeConfigurationProvider _configProvider; private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly ILogger<BullyLeaderElectionService> _logger; private readonly IDiscoveryService _discoveryService;
private readonly TimeSpan _electionInterval; private readonly TimeSpan _electionInterval;
private readonly ILogger<BullyLeaderElectionService> _logger;
private CancellationTokenSource? _cts; private CancellationTokenSource? _cts;
private string? _localNodeId; private string? _localNodeId;
private string? _currentGatewayNodeId;
private bool _isCloudGateway;
/// <summary>
/// Gets a value indicating whether this node is currently the cloud gateway leader.
/// </summary>
public bool IsCloudGateway => _isCloudGateway;
/// <summary>
/// Gets the current gateway node identifier.
/// </summary>
public string? CurrentGatewayNodeId => _currentGatewayNodeId;
/// <summary>
/// Occurs when leadership changes.
/// </summary>
public event EventHandler<LeadershipChangedEventArgs>? LeadershipChanged;
/// <summary> /// <summary>
/// Initializes a new instance of the BullyLeaderElectionService class. /// Initializes a new instance of the BullyLeaderElectionService class.
@@ -60,6 +38,21 @@ public class BullyLeaderElectionService : ILeaderElectionService
_electionInterval = electionInterval ?? TimeSpan.FromSeconds(5); _electionInterval = electionInterval ?? TimeSpan.FromSeconds(5);
} }
/// <summary>
/// Gets a value indicating whether this node is currently the cloud gateway leader.
/// </summary>
public bool IsCloudGateway { get; private set; }
/// <summary>
/// Gets the current gateway node identifier.
/// </summary>
public string? CurrentGatewayNodeId { get; private set; }
/// <summary>
/// Occurs when leadership changes.
/// </summary>
public event EventHandler<LeadershipChangedEventArgs>? LeadershipChanged;
/// <summary> /// <summary>
/// Starts the leader election loop. /// Starts the leader election loop.
/// </summary> /// </summary>
@@ -100,7 +93,6 @@ public class BullyLeaderElectionService : ILeaderElectionService
private async Task ElectionLoopAsync(CancellationToken cancellationToken) private async Task ElectionLoopAsync(CancellationToken cancellationToken)
{ {
while (!cancellationToken.IsCancellationRequested) while (!cancellationToken.IsCancellationRequested)
{
try try
{ {
await Task.Delay(_electionInterval, cancellationToken); await Task.Delay(_electionInterval, cancellationToken);
@@ -116,7 +108,6 @@ public class BullyLeaderElectionService : ILeaderElectionService
_logger.LogError(ex, "Error during leader election"); _logger.LogError(ex, "Error during leader election");
} }
} }
}
private void RunElection() private void RunElection()
{ {
@@ -132,34 +123,30 @@ public class BullyLeaderElectionService : ILeaderElectionService
lanPeers.Add(_localNodeId); lanPeers.Add(_localNodeId);
// Bully algorithm: smallest NodeId wins (lexicographic comparison) // Bully algorithm: smallest NodeId wins (lexicographic comparison)
var newLeader = lanPeers.OrderBy(id => id, StringComparer.Ordinal).FirstOrDefault(); string? newLeader = lanPeers.OrderBy(id => id, StringComparer.Ordinal).FirstOrDefault();
if (newLeader == null) if (newLeader == null)
{
// No peers available, local node is leader by default // No peers available, local node is leader by default
newLeader = _localNodeId; newLeader = _localNodeId;
}
// Check if leadership changed // Check if leadership changed
if (newLeader != _currentGatewayNodeId) if (newLeader != CurrentGatewayNodeId)
{ {
var wasLeader = _isCloudGateway; bool wasLeader = IsCloudGateway;
_currentGatewayNodeId = newLeader; CurrentGatewayNodeId = newLeader;
_isCloudGateway = newLeader == _localNodeId; IsCloudGateway = newLeader == _localNodeId;
if (wasLeader != _isCloudGateway) if (wasLeader != IsCloudGateway)
{ {
if (_isCloudGateway) if (IsCloudGateway)
{ _logger.LogInformation(
_logger.LogInformation("🔐 This node is now the CLOUD GATEWAY (Leader) - Will sync with remote cloud nodes"); "🔐 This node is now the CLOUD GATEWAY (Leader) - Will sync with remote cloud nodes");
}
else else
{ _logger.LogInformation("👤 This node is now a MEMBER - Cloud sync handled by gateway: {Gateway}",
_logger.LogInformation("👤 This node is now a MEMBER - Cloud sync handled by gateway: {Gateway}", _currentGatewayNodeId); CurrentGatewayNodeId);
}
// Raise event // Raise event
LeadershipChanged?.Invoke(this, new LeadershipChangedEventArgs(_currentGatewayNodeId, _isCloudGateway)); LeadershipChanged?.Invoke(this, new LeadershipChangedEventArgs(CurrentGatewayNodeId, IsCloudGateway));
} }
} }
} }

View File

@@ -1,6 +1,3 @@
using System;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Leadership; namespace ZB.MOM.WW.CBDDC.Network.Leadership;
/// <summary> /// <summary>
@@ -8,6 +5,20 @@ namespace ZB.MOM.WW.CBDDC.Network.Leadership;
/// </summary> /// </summary>
public class LeadershipChangedEventArgs : EventArgs public class LeadershipChangedEventArgs : EventArgs
{ {
/// <summary>
/// Initializes a new instance of the LeadershipChangedEventArgs class.
/// </summary>
/// <param name="currentGatewayNodeId">
/// The NodeId of the current gateway node, or <see langword="null" /> when none is
/// elected.
/// </param>
/// <param name="isLocalNodeGateway">A value indicating whether the local node is the gateway.</param>
public LeadershipChangedEventArgs(string? currentGatewayNodeId, bool isLocalNodeGateway)
{
CurrentGatewayNodeId = currentGatewayNodeId;
IsLocalNodeGateway = isLocalNodeGateway;
}
/// <summary> /// <summary>
/// Gets the NodeId of the current cloud gateway (leader). /// Gets the NodeId of the current cloud gateway (leader).
/// Null if no leader is elected. /// Null if no leader is elected.
@@ -18,17 +29,6 @@ public class LeadershipChangedEventArgs : EventArgs
/// Gets whether the local node is now the cloud gateway. /// Gets whether the local node is now the cloud gateway.
/// </summary> /// </summary>
public bool IsLocalNodeGateway { get; } public bool IsLocalNodeGateway { get; }
/// <summary>
/// Initializes a new instance of the LeadershipChangedEventArgs class.
/// </summary>
/// <param name="currentGatewayNodeId">The NodeId of the current gateway node, or <see langword="null"/> when none is elected.</param>
/// <param name="isLocalNodeGateway">A value indicating whether the local node is the gateway.</param>
public LeadershipChangedEventArgs(string? currentGatewayNodeId, bool isLocalNodeGateway)
{
CurrentGatewayNodeId = currentGatewayNodeId;
IsLocalNodeGateway = isLocalNodeGateway;
}
} }
/// <summary> /// <summary>

View File

@@ -1,8 +1,3 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
@@ -18,7 +13,7 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore; private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="OplogPruneCutoffCalculator"/> class. /// Initializes a new instance of the <see cref="OplogPruneCutoffCalculator" /> class.
/// </summary> /// </summary>
/// <param name="oplogStore">The oplog store.</param> /// <param name="oplogStore">The oplog store.</param>
/// <param name="peerOplogConfirmationStore">The optional peer confirmation store.</param> /// <param name="peerOplogConfirmationStore">The optional peer confirmation store.</param>
@@ -39,23 +34,19 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
var retentionCutoff = BuildRetentionCutoff(configuration); var retentionCutoff = BuildRetentionCutoff(configuration);
if (_peerOplogConfirmationStore == null) if (_peerOplogConfirmationStore == null)
{
return OplogPruneCutoffDecision.WithCutoff( return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff, retentionCutoff,
confirmationCutoff: null, null,
effectiveCutoff: retentionCutoff, retentionCutoff,
reason: "Confirmation tracking is not configured."); "Confirmation tracking is not configured.");
}
var relevantSources = await GetRelevantSourceNodesAsync(cancellationToken); var relevantSources = await GetRelevantSourceNodesAsync(cancellationToken);
if (relevantSources.Count == 0) if (relevantSources.Count == 0)
{
return OplogPruneCutoffDecision.WithCutoff( return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff, retentionCutoff,
confirmationCutoff: null, null,
effectiveCutoff: retentionCutoff, retentionCutoff,
reason: "No local non-default oplog/vector-clock sources were found."); "No local non-default oplog/vector-clock sources were found.");
}
var activeTrackedPeers = (await _peerOplogConfirmationStore.GetActiveTrackedPeersAsync(cancellationToken)) var activeTrackedPeers = (await _peerOplogConfirmationStore.GetActiveTrackedPeersAsync(cancellationToken))
.Where(peerNodeId => !string.IsNullOrWhiteSpace(peerNodeId)) .Where(peerNodeId => !string.IsNullOrWhiteSpace(peerNodeId))
@@ -63,19 +54,18 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
.ToList(); .ToList();
if (activeTrackedPeers.Count == 0) if (activeTrackedPeers.Count == 0)
{
return OplogPruneCutoffDecision.WithCutoff( return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff, retentionCutoff,
confirmationCutoff: null, null,
effectiveCutoff: retentionCutoff, retentionCutoff,
reason: "No active tracked peers found for confirmation gating."); "No active tracked peers found for confirmation gating.");
}
HlcTimestamp? confirmationCutoff = null; HlcTimestamp? confirmationCutoff = null;
foreach (var peerNodeId in activeTrackedPeers) foreach (string peerNodeId in activeTrackedPeers)
{ {
var confirmationsForPeer = (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken)) var confirmationsForPeer =
(await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken))
.Where(confirmation => confirmation.IsActive) .Where(confirmation => confirmation.IsActive)
.Where(confirmation => !string.IsNullOrWhiteSpace(confirmation.SourceNodeId)) .Where(confirmation => !string.IsNullOrWhiteSpace(confirmation.SourceNodeId))
.GroupBy(confirmation => confirmation.SourceNodeId, StringComparer.Ordinal) .GroupBy(confirmation => confirmation.SourceNodeId, StringComparer.Ordinal)
@@ -87,30 +77,25 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
.Last(), .Last(),
StringComparer.Ordinal); StringComparer.Ordinal);
foreach (var sourceNodeId in relevantSources) foreach (string sourceNodeId in relevantSources)
{
if (!confirmationsForPeer.TryGetValue(sourceNodeId, out var confirmedTimestamp) || confirmedTimestamp == default)
{ {
if (!confirmationsForPeer.TryGetValue(sourceNodeId, out var confirmedTimestamp) ||
confirmedTimestamp == default)
return OplogPruneCutoffDecision.NoCutoff( return OplogPruneCutoffDecision.NoCutoff(
retentionCutoff, retentionCutoff,
$"Active tracked peer '{peerNodeId}' is missing confirmation for source '{sourceNodeId}'."); $"Active tracked peer '{peerNodeId}' is missing confirmation for source '{sourceNodeId}'.");
}
if (!confirmationCutoff.HasValue || confirmedTimestamp < confirmationCutoff.Value) if (!confirmationCutoff.HasValue || confirmedTimestamp < confirmationCutoff.Value)
{
confirmationCutoff = confirmedTimestamp; confirmationCutoff = confirmedTimestamp;
} }
} }
}
if (!confirmationCutoff.HasValue) if (!confirmationCutoff.HasValue)
{
return OplogPruneCutoffDecision.WithCutoff( return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff, retentionCutoff,
confirmationCutoff: null, null,
effectiveCutoff: retentionCutoff, retentionCutoff,
reason: "No confirmation cutoff could be determined."); "No confirmation cutoff could be determined.");
}
var effectiveCutoff = retentionCutoff <= confirmationCutoff.Value var effectiveCutoff = retentionCutoff <= confirmationCutoff.Value
? retentionCutoff ? retentionCutoff
@@ -124,7 +109,7 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
private static HlcTimestamp BuildRetentionCutoff(PeerNodeConfiguration configuration) private static HlcTimestamp BuildRetentionCutoff(PeerNodeConfiguration configuration)
{ {
var retentionTimestamp = DateTimeOffset.UtcNow long retentionTimestamp = DateTimeOffset.UtcNow
.AddHours(-configuration.OplogRetentionHours) .AddHours(-configuration.OplogRetentionHours)
.ToUnixTimeMilliseconds(); .ToUnixTimeMilliseconds();
@@ -135,18 +120,12 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
{ {
var localVectorClock = await _oplogStore.GetVectorClockAsync(cancellationToken); var localVectorClock = await _oplogStore.GetVectorClockAsync(cancellationToken);
var sourceNodes = new HashSet<string>(StringComparer.Ordinal); var sourceNodes = new HashSet<string>(StringComparer.Ordinal);
foreach (var sourceNodeId in localVectorClock.NodeIds) foreach (string sourceNodeId in localVectorClock.NodeIds)
{ {
if (string.IsNullOrWhiteSpace(sourceNodeId)) if (string.IsNullOrWhiteSpace(sourceNodeId)) continue;
{
continue;
}
var timestamp = localVectorClock.GetTimestamp(sourceNodeId); var timestamp = localVectorClock.GetTimestamp(sourceNodeId);
if (timestamp == default) if (timestamp == default) continue;
{
continue;
}
sourceNodes.Add(sourceNodeId); sourceNodes.Add(sourceNodeId);
} }

View File

@@ -37,7 +37,7 @@ public sealed class OplogPruneCutoffDecision
public HlcTimestamp? ConfirmationCutoff { get; } public HlcTimestamp? ConfirmationCutoff { get; }
/// <summary> /// <summary>
/// Gets the effective cutoff to use for pruning when <see cref="HasCutoff"/> is true. /// Gets the effective cutoff to use for pruning when <see cref="HasCutoff" /> is true.
/// </summary> /// </summary>
public HlcTimestamp? EffectiveCutoff { get; } public HlcTimestamp? EffectiveCutoff { get; }
@@ -60,11 +60,11 @@ public sealed class OplogPruneCutoffDecision
string reason = "") string reason = "")
{ {
return new OplogPruneCutoffDecision( return new OplogPruneCutoffDecision(
hasCutoff: true, true,
retentionCutoff: retentionCutoff, retentionCutoff,
confirmationCutoff: confirmationCutoff, confirmationCutoff,
effectiveCutoff: effectiveCutoff, effectiveCutoff,
reason: reason); reason);
} }
/// <summary> /// <summary>
@@ -75,10 +75,10 @@ public sealed class OplogPruneCutoffDecision
public static OplogPruneCutoffDecision NoCutoff(HlcTimestamp retentionCutoff, string reason) public static OplogPruneCutoffDecision NoCutoff(HlcTimestamp retentionCutoff, string reason)
{ {
return new OplogPruneCutoffDecision( return new OplogPruneCutoffDecision(
hasCutoff: false, false,
retentionCutoff: retentionCutoff, retentionCutoff,
confirmationCutoff: null, null,
effectiveCutoff: null, null,
reason: reason); reason);
} }
} }

View File

@@ -1,12 +1,10 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network; // For IMeshNetwork if we implement it
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Hosting; using ZB.MOM.WW.CBDDC.Core.Network;
using System; using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
// For IMeshNetwork if we implement it
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -17,7 +15,10 @@ public static class CBDDCNetworkExtensions
/// </summary> /// </summary>
/// <typeparam name="TPeerNodeConfigurationProvider">The peer node configuration provider implementation type.</typeparam> /// <typeparam name="TPeerNodeConfigurationProvider">The peer node configuration provider implementation type.</typeparam>
/// <param name="services">The service collection to register services into.</param> /// <param name="services">The service collection to register services into.</param>
/// <param name="useHostedService">If true, registers CBDDCNodeService as IHostedService to automatically start/stop the node.</param> /// <param name="useHostedService">
/// If true, registers CBDDCNodeService as IHostedService to automatically start/stop the
/// node.
/// </param>
public static IServiceCollection AddCBDDCNetwork<TPeerNodeConfigurationProvider>( public static IServiceCollection AddCBDDCNetwork<TPeerNodeConfigurationProvider>(
this IServiceCollection services, this IServiceCollection services,
bool useHostedService = true) bool useHostedService = true)
@@ -31,11 +32,11 @@ public static class CBDDCNetworkExtensions
services.TryAddSingleton<IDiscoveryService, UdpDiscoveryService>(); services.TryAddSingleton<IDiscoveryService, UdpDiscoveryService>();
services.TryAddSingleton<ZB.MOM.WW.CBDDC.Network.Telemetry.INetworkTelemetryService>(sp => services.TryAddSingleton<INetworkTelemetryService>(sp =>
{ {
var logger = sp.GetRequiredService<ILogger<ZB.MOM.WW.CBDDC.Network.Telemetry.NetworkTelemetryService>>(); var logger = sp.GetRequiredService<ILogger<NetworkTelemetryService>>();
var path = System.IO.Path.Combine(System.AppContext.BaseDirectory, "cbddc_metrics.bin"); string path = Path.Combine(AppContext.BaseDirectory, "cbddc_metrics.bin");
return new ZB.MOM.WW.CBDDC.Network.Telemetry.NetworkTelemetryService(logger, path); return new NetworkTelemetryService(logger, path);
}); });
services.TryAddSingleton<ISyncServer, TcpSyncServer>(); services.TryAddSingleton<ISyncServer, TcpSyncServer>();
@@ -47,10 +48,7 @@ public static class CBDDCNetworkExtensions
services.TryAddSingleton<ICBDDCNode, CBDDCNode>(); services.TryAddSingleton<ICBDDCNode, CBDDCNode>();
// Optionally register hosted service for automatic node lifecycle management // Optionally register hosted service for automatic node lifecycle management
if (useHostedService) if (useHostedService) services.AddHostedService<CBDDCNodeService>();
{
services.AddHostedService<CBDDCNodeService>();
}
return services; return services;
} }

View File

@@ -1,28 +1,24 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using Google.Protobuf; using Google.Protobuf;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Network.Proto; using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry; using ZB.MOM.WW.CBDDC.Network.Telemetry;
namespace ZB.MOM.WW.CBDDC.Network.Protocol namespace ZB.MOM.WW.CBDDC.Network.Protocol;
/// <summary>
/// Handles the low-level framing, compression, encryption, and serialization of CBDDC messages.
/// Encapsulates the wire format: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)]
/// </summary>
internal class ProtocolHandler
{ {
/// <summary>
/// Handles the low-level framing, compression, encryption, and serialization of CBDDC messages.
/// Encapsulates the wire format: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)]
/// </summary>
internal class ProtocolHandler
{
private readonly ILogger<ProtocolHandler> _logger; private readonly ILogger<ProtocolHandler> _logger;
private readonly SemaphoreSlim _readLock = new(1, 1);
private readonly INetworkTelemetryService? _telemetry; private readonly INetworkTelemetryService? _telemetry;
private readonly SemaphoreSlim _writeLock = new SemaphoreSlim(1, 1); private readonly SemaphoreSlim _writeLock = new(1, 1);
private readonly SemaphoreSlim _readLock = new SemaphoreSlim(1, 1);
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="ProtocolHandler"/> class. /// Initializes a new instance of the <see cref="ProtocolHandler" /> class.
/// </summary> /// </summary>
/// <param name="logger">The logger used for protocol diagnostics.</param> /// <param name="logger">The logger used for protocol diagnostics.</param>
/// <param name="telemetry">An optional telemetry service used to record network metrics.</param> /// <param name="telemetry">An optional telemetry service used to record network metrics.</param>
@@ -33,7 +29,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
} }
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="ProtocolHandler"/> class using a non-generic logger. /// Initializes a new instance of the <see cref="ProtocolHandler" /> class using a non-generic logger.
/// </summary> /// </summary>
/// <param name="logger">The logger used for protocol diagnostics.</param> /// <param name="logger">The logger used for protocol diagnostics.</param>
/// <param name="telemetry">An optional telemetry service used to record network metrics.</param> /// <param name="telemetry">An optional telemetry service used to record network metrics.</param>
@@ -52,7 +48,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
/// <param name="cipherState">Optional cipher state used to encrypt outgoing payloads.</param> /// <param name="cipherState">Optional cipher state used to encrypt outgoing payloads.</param>
/// <param name="token">Cancellation token.</param> /// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous send operation.</returns> /// <returns>A task that represents the asynchronous send operation.</returns>
public async Task SendMessageAsync(Stream stream, MessageType type, IMessage message, bool useCompression, CipherState? cipherState, CancellationToken token = default) public async Task SendMessageAsync(Stream stream, MessageType type, IMessage message, bool useCompression,
CipherState? cipherState, CancellationToken token = default)
{ {
if (stream == null) throw new ArgumentNullException(nameof(stream)); if (stream == null) throw new ArgumentNullException(nameof(stream));
@@ -87,7 +84,6 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
// 3. Encrypt // 3. Encrypt
if (cipherState != null) if (cipherState != null)
{
using (_telemetry?.StartMetric(MetricType.EncryptionTime)) using (_telemetry?.StartMetric(MetricType.EncryptionTime))
{ {
// Inner data: [Type (1)] [Compression (1)] [Payload (N)] // Inner data: [Type (1)] [Compression (1)] [Payload (N)]
@@ -96,7 +92,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
dataToEncrypt[1] = compressionFlag; dataToEncrypt[1] = compressionFlag;
Buffer.BlockCopy(payloadBytes, 0, dataToEncrypt, 2, payloadBytes.Length); Buffer.BlockCopy(payloadBytes, 0, dataToEncrypt, 2, payloadBytes.Length);
var (ciphertext, iv, tag) = CryptoHelper.Encrypt(dataToEncrypt, cipherState.EncryptKey); (byte[] ciphertext, byte[] iv, byte[] tag) =
CryptoHelper.Encrypt(dataToEncrypt, cipherState.EncryptKey);
var env = new SecureEnvelope var env = new SecureEnvelope
{ {
@@ -109,16 +106,16 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
type = MessageType.SecureEnv; type = MessageType.SecureEnv;
compressionFlag = 0x00; // Outer envelope is not compressed compressionFlag = 0x00; // Outer envelope is not compressed
} }
}
// 4. Thread-Safe Write // 4. Thread-Safe Write
await _writeLock.WaitAsync(token); await _writeLock.WaitAsync(token);
try try
{ {
_logger.LogDebug("Sending Message {Type}, OrgSize: {Org}, WireSize: {Wire}", type, originalSize, payloadBytes.Length); _logger.LogDebug("Sending Message {Type}, OrgSize: {Org}, WireSize: {Wire}", type, originalSize,
payloadBytes.Length);
// Framing: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)] // Framing: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)]
var lengthBytes = BitConverter.GetBytes(payloadBytes.Length); byte[] lengthBytes = BitConverter.GetBytes(payloadBytes.Length);
await stream.WriteAsync(lengthBytes, 0, 4, token); await stream.WriteAsync(lengthBytes, 0, 4, token);
stream.WriteByte((byte)type); stream.WriteByte((byte)type);
stream.WriteByte(compressionFlag); stream.WriteByte(compressionFlag);
@@ -138,7 +135,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
/// <param name="cipherState">Optional cipher state used to decrypt incoming payloads.</param> /// <param name="cipherState">Optional cipher state used to decrypt incoming payloads.</param>
/// <param name="token">Cancellation token.</param> /// <param name="token">Cancellation token.</param>
/// <returns>A tuple containing the decoded message type and payload bytes.</returns> /// <returns>A tuple containing the decoded message type and payload bytes.</returns>
public async Task<(MessageType, byte[])> ReadMessageAsync(Stream stream, CipherState? cipherState, CancellationToken token = default) public async Task<(MessageType, byte[])> ReadMessageAsync(Stream stream, CipherState? cipherState,
CancellationToken token = default)
{ {
await _readLock.WaitAsync(token); await _readLock.WaitAsync(token);
try try
@@ -147,7 +145,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
int read = await ReadExactAsync(stream, lenBuf, 0, 4, token); int read = await ReadExactAsync(stream, lenBuf, 0, 4, token);
if (read == 0) return (MessageType.Unknown, null!); if (read == 0) return (MessageType.Unknown, null!);
int length = BitConverter.ToInt32(lenBuf, 0); var length = BitConverter.ToInt32(lenBuf, 0);
int typeByte = stream.ReadByte(); int typeByte = stream.ReadByte();
if (typeByte == -1) throw new EndOfStreamException("Connection closed abruptly (type byte)"); if (typeByte == -1) throw new EndOfStreamException("Connection closed abruptly (type byte)");
@@ -163,7 +161,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
// Handle Secure Envelope // Handle Secure Envelope
if (msgType == MessageType.SecureEnv) if (msgType == MessageType.SecureEnv)
{ {
if (cipherState == null) throw new InvalidOperationException("Received encrypted message but no cipher state established"); if (cipherState == null)
throw new InvalidOperationException("Received encrypted message but no cipher state established");
byte[] decrypted; byte[] decrypted;
using (_telemetry?.StartMetric(MetricType.DecryptionTime)) using (_telemetry?.StartMetric(MetricType.DecryptionTime))
@@ -184,19 +183,13 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
var innerPayload = new byte[decrypted.Length - 2]; var innerPayload = new byte[decrypted.Length - 2];
Buffer.BlockCopy(decrypted, 2, innerPayload, 0, innerPayload.Length); Buffer.BlockCopy(decrypted, 2, innerPayload, 0, innerPayload.Length);
if (innerComp == 0x01) if (innerComp == 0x01) innerPayload = CompressionHelper.Decompress(innerPayload);
{
innerPayload = CompressionHelper.Decompress(innerPayload);
}
return (msgType, innerPayload); return (msgType, innerPayload);
} }
// Handle Unencrypted Compression // Handle Unencrypted Compression
if (compByte == 0x01) if (compByte == 0x01) payload = CompressionHelper.Decompress(payload);
{
payload = CompressionHelper.Decompress(payload);
}
_logger.LogDebug("Read Message {Type}, Size: {Size}", msgType, payload.Length); _logger.LogDebug("Read Message {Type}, Size: {Size}", msgType, payload.Length);
return (msgType, payload); return (msgType, payload);
@@ -209,13 +202,14 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
private async Task<int> ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token) private async Task<int> ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token)
{ {
int total = 0; var total = 0;
while (total < count) while (total < count)
{ {
int read = await stream.ReadAsync(buffer, offset + total, count - total, token); int read = await stream.ReadAsync(buffer, offset + total, count - total, token);
if (read == 0) return 0; // EOF if (read == 0) return 0; // EOF
total += read; total += read;
} }
return total; return total;
} }
@@ -224,7 +218,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
private readonly ILogger _inner; private readonly ILogger _inner;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="ForwardingLogger"/> class. /// Initializes a new instance of the <see cref="ForwardingLogger" /> class.
/// </summary> /// </summary>
/// <param name="inner">The underlying logger instance.</param> /// <param name="inner">The underlying logger instance.</param>
public ForwardingLogger(ILogger inner) public ForwardingLogger(ILogger inner)
@@ -255,5 +249,4 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
_inner.Log(logLevel, eventId, state, exception, formatter); _inner.Log(logLevel, eventId, state, exception, formatter);
} }
} }
}
} }

View File

@@ -48,12 +48,15 @@ node.Start();
## Features ## Features
### Automatic Discovery ### Automatic Discovery
Nodes broadcast their presence via UDP and automatically connect to peers on the same network. Nodes broadcast their presence via UDP and automatically connect to peers on the same network.
### Secure Synchronization ### Secure Synchronization
All nodes must share the same authentication token to sync data. All nodes must share the same authentication token to sync data.
### Scalable Gossip ### Scalable Gossip
Updates propagate exponentially - each node tells multiple peers, ensuring fast network-wide propagation. Updates propagate exponentially - each node tells multiple peers, ensuring fast network-wide propagation.
## Documentation ## Documentation

View File

@@ -1,7 +1,6 @@
using ZB.MOM.WW.CBDDC.Core.Network;
using System.Security.Cryptography; using System.Security.Cryptography;
using System.Text; using System.Text;
using System.Threading.Tasks; using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Network.Security; namespace ZB.MOM.WW.CBDDC.Network.Security;
@@ -14,7 +13,7 @@ public class ClusterKeyAuthenticator : IAuthenticator
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider; private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="ClusterKeyAuthenticator"/> class. /// Initializes a new instance of the <see cref="ClusterKeyAuthenticator" /> class.
/// </summary> /// </summary>
/// <param name="peerNodeConfigurationProvider">The provider for peer node configuration.</param> /// <param name="peerNodeConfigurationProvider">The provider for peer node configuration.</param>
public ClusterKeyAuthenticator(IPeerNodeConfigurationProvider peerNodeConfigurationProvider) public ClusterKeyAuthenticator(IPeerNodeConfigurationProvider peerNodeConfigurationProvider)
@@ -26,8 +25,8 @@ public class ClusterKeyAuthenticator : IAuthenticator
public async Task<bool> ValidateAsync(string nodeId, string token) public async Task<bool> ValidateAsync(string nodeId, string token)
{ {
var config = await _peerNodeConfigurationProvider.GetConfiguration(); var config = await _peerNodeConfigurationProvider.GetConfiguration();
var configuredHash = SHA256.HashData(Encoding.UTF8.GetBytes(config.AuthToken ?? string.Empty)); byte[] configuredHash = SHA256.HashData(Encoding.UTF8.GetBytes(config.AuthToken ?? string.Empty));
var presentedHash = SHA256.HashData(Encoding.UTF8.GetBytes(token ?? string.Empty)); byte[] presentedHash = SHA256.HashData(Encoding.UTF8.GetBytes(token ?? string.Empty));
return CryptographicOperations.FixedTimeEquals(configuredHash, presentedHash); return CryptographicOperations.FixedTimeEquals(configuredHash, presentedHash);
} }
} }

View File

@@ -1,5 +1,3 @@
using System;
using System.IO;
using System.Security.Cryptography; using System.Security.Cryptography;
namespace ZB.MOM.WW.CBDDC.Network.Security; namespace ZB.MOM.WW.CBDDC.Network.Security;
@@ -21,10 +19,10 @@ public static class CryptoHelper
using var aes = Aes.Create(); using var aes = Aes.Create();
aes.Key = key; aes.Key = key;
aes.GenerateIV(); aes.GenerateIV();
var iv = aes.IV; byte[] iv = aes.IV;
using var encryptor = aes.CreateEncryptor(); using var encryptor = aes.CreateEncryptor();
var ciphertext = encryptor.TransformFinalBlock(plaintext, 0, plaintext.Length); byte[] ciphertext = encryptor.TransformFinalBlock(plaintext, 0, plaintext.Length);
// Compute HMAC // Compute HMAC
using var hmac = new HMACSHA256(key); using var hmac = new HMACSHA256(key);
@@ -32,7 +30,7 @@ public static class CryptoHelper
var toSign = new byte[iv.Length + ciphertext.Length]; var toSign = new byte[iv.Length + ciphertext.Length];
Buffer.BlockCopy(iv, 0, toSign, 0, iv.Length); Buffer.BlockCopy(iv, 0, toSign, 0, iv.Length);
Buffer.BlockCopy(ciphertext, 0, toSign, iv.Length, ciphertext.Length); Buffer.BlockCopy(ciphertext, 0, toSign, iv.Length, ciphertext.Length);
var tag = hmac.ComputeHash(toSign); byte[] tag = hmac.ComputeHash(toSign);
return (ciphertext, iv, tag); return (ciphertext, iv, tag);
} }
@@ -52,12 +50,10 @@ public static class CryptoHelper
var toVerify = new byte[iv.Length + ciphertext.Length]; var toVerify = new byte[iv.Length + ciphertext.Length];
Buffer.BlockCopy(iv, 0, toVerify, 0, iv.Length); Buffer.BlockCopy(iv, 0, toVerify, 0, iv.Length);
Buffer.BlockCopy(ciphertext, 0, toVerify, iv.Length, ciphertext.Length); Buffer.BlockCopy(ciphertext, 0, toVerify, iv.Length, ciphertext.Length);
var computedTag = hmac.ComputeHash(toVerify); byte[] computedTag = hmac.ComputeHash(toVerify);
if (!FixedTimeEquals(tag, computedTag)) if (!FixedTimeEquals(tag, computedTag))
{
throw new CryptographicException("Authentication failed (HMAC mismatch)"); throw new CryptographicException("Authentication failed (HMAC mismatch)");
}
using var aes = Aes.Create(); using var aes = Aes.Create();
aes.Key = key; aes.Key = key;

View File

@@ -1,5 +1,3 @@
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Security; namespace ZB.MOM.WW.CBDDC.Network.Security;
public interface IAuthenticator public interface IAuthenticator
@@ -9,6 +7,6 @@ public interface IAuthenticator
/// </summary> /// </summary>
/// <param name="nodeId">The node identifier to validate.</param> /// <param name="nodeId">The node identifier to validate.</param>
/// <param name="token">The authentication token to validate.</param> /// <param name="token">The authentication token to validate.</param>
/// <returns><see langword="true"/> if the token is valid for the node; otherwise <see langword="false"/>.</returns> /// <returns><see langword="true" /> if the token is valid for the node; otherwise <see langword="false" />.</returns>
Task<bool> ValidateAsync(string nodeId, string token); Task<bool> ValidateAsync(string nodeId, string token);
} }

View File

@@ -1,6 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Security; namespace ZB.MOM.WW.CBDDC.Network.Security;
public interface IPeerHandshakeService public interface IPeerHandshakeService
@@ -13,24 +10,16 @@ public interface IPeerHandshakeService
/// <param name="myNodeId">The local node identifier.</param> /// <param name="myNodeId">The local node identifier.</param>
/// <param name="token">Cancellation token.</param> /// <param name="token">Cancellation token.</param>
/// <returns>A CipherState if encryption is established, or null if plaintext.</returns> /// <returns>A CipherState if encryption is established, or null if plaintext.</returns>
Task<CipherState?> HandshakeAsync(System.IO.Stream stream, bool isInitiator, string myNodeId, CancellationToken token); Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token);
} }
public class CipherState public class CipherState
{ {
/// <summary>
/// Gets the key used to encrypt outgoing messages.
/// </summary>
public byte[] EncryptKey { get; }
/// <summary>
/// Gets the key used to decrypt incoming messages.
/// </summary>
public byte[] DecryptKey { get; }
// For simplicity using IV chaining or explicit IVs. // For simplicity using IV chaining or explicit IVs.
// We'll store just the keys here and let the encryption helper handle IVs. // We'll store just the keys here and let the encryption helper handle IVs.
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CipherState"/> class. /// Initializes a new instance of the <see cref="CipherState" /> class.
/// </summary> /// </summary>
/// <param name="encryptKey">The key used for encrypting outgoing payloads.</param> /// <param name="encryptKey">The key used for encrypting outgoing payloads.</param>
/// <param name="decryptKey">The key used for decrypting incoming payloads.</param> /// <param name="decryptKey">The key used for decrypting incoming payloads.</param>
@@ -39,4 +28,14 @@ public class CipherState
EncryptKey = encryptKey; EncryptKey = encryptKey;
DecryptKey = decryptKey; DecryptKey = decryptKey;
} }
/// <summary>
/// Gets the key used to encrypt outgoing messages.
/// </summary>
public byte[] EncryptKey { get; }
/// <summary>
/// Gets the key used to decrypt incoming messages.
/// </summary>
public byte[] DecryptKey { get; }
} }

View File

@@ -1,15 +1,13 @@
using System.IO;
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Security; namespace ZB.MOM.WW.CBDDC.Network.Security;
/// <summary> /// <summary>
/// Provides a no-operation implementation of the peer handshake service that performs no handshake and always returns /// Provides a no-operation implementation of the peer handshake service that performs no handshake and always returns
/// null. /// null.
/// </summary> /// </summary>
/// <remarks>This class can be used in scenarios where a handshake is not required or for testing purposes. All /// <remarks>
/// handshake attempts using this service will result in no cipher state being established.</remarks> /// This class can be used in scenarios where a handshake is not required or for testing purposes. All
/// handshake attempts using this service will result in no cipher state being established.
/// </remarks>
public class NoOpHandshakeService : IPeerHandshakeService public class NoOpHandshakeService : IPeerHandshakeService
{ {
/// <summary> /// <summary>
@@ -17,11 +15,16 @@ public class NoOpHandshakeService : IPeerHandshakeService
/// asynchronously. /// asynchronously.
/// </summary> /// </summary>
/// <param name="stream">The stream used for exchanging handshake messages between nodes. Must be readable and writable.</param> /// <param name="stream">The stream used for exchanging handshake messages between nodes. Must be readable and writable.</param>
/// <param name="isInitiator">true to initiate the handshake as the local node; otherwise, false to respond as the remote node.</param> /// <param name="isInitiator">
/// true to initiate the handshake as the local node; otherwise, false to respond as the remote
/// node.
/// </param>
/// <param name="myNodeId">The unique identifier of the local node participating in the handshake. Cannot be null.</param> /// <param name="myNodeId">The unique identifier of the local node participating in the handshake. Cannot be null.</param>
/// <param name="token">A cancellation token that can be used to cancel the handshake operation.</param> /// <param name="token">A cancellation token that can be used to cancel the handshake operation.</param>
/// <returns>A task that represents the asynchronous handshake operation. The task result contains a CipherState if the /// <returns>
/// handshake succeeds; otherwise, null.</returns> /// A task that represents the asynchronous handshake operation. The task result contains a CipherState if the
/// handshake succeeds; otherwise, null.
/// </returns>
public Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token) public Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token)
{ {
return Task.FromResult<CipherState?>(null); return Task.FromResult<CipherState?>(null);

View File

@@ -1,8 +1,4 @@
using System;
using System.IO;
using System.Security.Cryptography; using System.Security.Cryptography;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
namespace ZB.MOM.WW.CBDDC.Network.Security; namespace ZB.MOM.WW.CBDDC.Network.Security;
@@ -12,7 +8,7 @@ public class SecureHandshakeService : IPeerHandshakeService
private readonly ILogger<SecureHandshakeService>? _logger; private readonly ILogger<SecureHandshakeService>? _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="SecureHandshakeService"/> class. /// Initializes a new instance of the <see cref="SecureHandshakeService" /> class.
/// </summary> /// </summary>
/// <param name="logger">The optional logger instance.</param> /// <param name="logger">The optional logger instance.</param>
public SecureHandshakeService(ILogger<SecureHandshakeService>? logger = null) public SecureHandshakeService(ILogger<SecureHandshakeService>? logger = null)
@@ -33,17 +29,18 @@ public class SecureHandshakeService : IPeerHandshakeService
/// <param name="myNodeId">The local node identifier.</param> /// <param name="myNodeId">The local node identifier.</param>
/// <param name="token">A token used to cancel the handshake.</param> /// <param name="token">A token used to cancel the handshake.</param>
/// <returns> /// <returns>
/// A task that returns the negotiated <see cref="CipherState"/>, or <see langword="null"/> if unavailable. /// A task that returns the negotiated <see cref="CipherState" />, or <see langword="null" /> if unavailable.
/// </returns> /// </returns>
public async Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token) public async Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId,
CancellationToken token)
{ {
#if NET6_0_OR_GREATER #if NET6_0_OR_GREATER
using var ecdh = ECDiffieHellman.Create(); using var ecdh = ECDiffieHellman.Create();
ecdh.KeySize = 256; ecdh.KeySize = 256;
// 1. ExportAsync & Send Public Key // 1. ExportAsync & Send Public Key
var myPublicKey = ecdh.ExportSubjectPublicKeyInfo(); byte[] myPublicKey = ecdh.ExportSubjectPublicKeyInfo();
var lenBytes = BitConverter.GetBytes(myPublicKey.Length); byte[] lenBytes = BitConverter.GetBytes(myPublicKey.Length);
await stream.WriteAsync(lenBytes, 0, 4, token); await stream.WriteAsync(lenBytes, 0, 4, token);
await stream.WriteAsync(myPublicKey, 0, myPublicKey.Length, token); await stream.WriteAsync(myPublicKey, 0, myPublicKey.Length, token);
await stream.FlushAsync(token); // CRITICAL: Ensure data is sent immediately await stream.FlushAsync(token); // CRITICAL: Ensure data is sent immediately
@@ -51,13 +48,10 @@ public class SecureHandshakeService : IPeerHandshakeService
// 2. Receive Peer Public Key // 2. Receive Peer Public Key
var peerLenBuf = new byte[4]; var peerLenBuf = new byte[4];
await ReadExactAsync(stream, peerLenBuf, 0, 4, token); await ReadExactAsync(stream, peerLenBuf, 0, 4, token);
int peerLen = BitConverter.ToInt32(peerLenBuf, 0); var peerLen = BitConverter.ToInt32(peerLenBuf, 0);
// Validate peer key length to prevent DoS // Validate peer key length to prevent DoS
if (peerLen <= 0 || peerLen > 10000) if (peerLen <= 0 || peerLen > 10000) throw new InvalidOperationException($"Invalid peer key length: {peerLen}");
{
throw new InvalidOperationException($"Invalid peer key length: {peerLen}");
}
var peerKeyBytes = new byte[peerLen]; var peerKeyBytes = new byte[peerLen];
await ReadExactAsync(stream, peerKeyBytes, 0, peerLen, token); await ReadExactAsync(stream, peerKeyBytes, 0, peerLen, token);
@@ -77,18 +71,18 @@ public class SecureHandshakeService : IPeerHandshakeService
var k1Input = new byte[sharedSecret.Length + 1]; var k1Input = new byte[sharedSecret.Length + 1];
Buffer.BlockCopy(sharedSecret, 0, k1Input, 0, sharedSecret.Length); Buffer.BlockCopy(sharedSecret, 0, k1Input, 0, sharedSecret.Length);
k1Input[sharedSecret.Length] = 0; // "0" k1Input[sharedSecret.Length] = 0; // "0"
var key1 = sha.ComputeHash(k1Input); byte[] key1 = sha.ComputeHash(k1Input);
var k2Input = new byte[sharedSecret.Length + 1]; var k2Input = new byte[sharedSecret.Length + 1];
Buffer.BlockCopy(sharedSecret, 0, k2Input, 0, sharedSecret.Length); Buffer.BlockCopy(sharedSecret, 0, k2Input, 0, sharedSecret.Length);
k2Input[sharedSecret.Length] = 1; // "1" k2Input[sharedSecret.Length] = 1; // "1"
var key2 = sha.ComputeHash(k2Input); byte[] key2 = sha.ComputeHash(k2Input);
// If initiator: Encrypt with Key1, Decrypt with Key2 // If initiator: Encrypt with Key1, Decrypt with Key2
// If responder: Encrypt with Key2, Decrypt with Key1 // If responder: Encrypt with Key2, Decrypt with Key1
var encryptKey = isInitiator ? key1 : key2; byte[] encryptKey = isInitiator ? key1 : key2;
var decryptKey = isInitiator ? key2 : key1; byte[] decryptKey = isInitiator ? key2 : key1;
return new CipherState(encryptKey, decryptKey); return new CipherState(encryptKey, decryptKey);
#else #else
@@ -100,13 +94,14 @@ public class SecureHandshakeService : IPeerHandshakeService
private async Task<int> ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token) private async Task<int> ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token)
{ {
int total = 0; var total = 0;
while (total < count) while (total < count)
{ {
int read = await stream.ReadAsync(buffer, offset + total, count - total, token); int read = await stream.ReadAsync(buffer, offset + total, count - total, token);
if (read == 0) throw new EndOfStreamException(); if (read == 0) throw new EndOfStreamException();
total += read; total += read;
} }
return total; return total;
} }
} }

View File

@@ -1,18 +1,12 @@
using System.Collections.Concurrent;
using System.Net.Sockets;
using Microsoft.Extensions.Logging;
using Serilog.Context;
using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security; using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry; using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using Serilog.Context;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -22,43 +16,30 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary> /// </summary>
public class SyncOrchestrator : ISyncOrchestrator public class SyncOrchestrator : ISyncOrchestrator
{ {
private readonly IDiscoveryService _discovery;
private readonly IOplogStore _oplogStore;
private readonly IOplogPruneCutoffCalculator? _oplogPruneCutoffCalculator;
private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore;
private readonly IDocumentStore _documentStore;
private readonly ISnapshotMetadataStore _snapshotMetadataStore;
private readonly ISnapshotService _snapshotService;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly ILogger<SyncOrchestrator> _logger;
private readonly ILoggerFactory _loggerFactory;
private CancellationTokenSource? _cts;
private readonly Random _random = new Random();
private readonly object _startStopLock = new object();
// Persistent clients pool // Persistent clients pool
private readonly ConcurrentDictionary<string, TcpPeerClient> _clients = new(); private readonly ConcurrentDictionary<string, TcpPeerClient> _clients = new();
private readonly ConcurrentDictionary<string, PeerStatus> _peerStates = new(); private readonly IDiscoveryService _discovery;
private readonly IDocumentStore _documentStore;
private readonly IPeerHandshakeService? _handshakeService; private readonly IPeerHandshakeService? _handshakeService;
private readonly ILogger<SyncOrchestrator> _logger;
private readonly ILoggerFactory _loggerFactory;
private readonly IOplogPruneCutoffCalculator? _oplogPruneCutoffCalculator;
private readonly IOplogStore _oplogStore;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore;
private readonly ConcurrentDictionary<string, PeerStatus> _peerStates = new();
private readonly Random _random = new();
private readonly ISnapshotMetadataStore _snapshotMetadataStore;
private readonly ISnapshotService _snapshotService;
private readonly object _startStopLock = new();
private readonly INetworkTelemetryService? _telemetry; private readonly INetworkTelemetryService? _telemetry;
private class PeerStatus private CancellationTokenSource? _cts;
{
/// <summary>
/// Gets or sets the number of consecutive failures for the peer.
/// </summary>
public int FailureCount { get; set; }
/// <summary>
/// Gets or sets the next time a retry attempt is allowed.
/// </summary>
public DateTime NextRetryTime { get; set; }
}
private DateTime _lastMaintenanceTime = DateTime.MinValue; private DateTime _lastMaintenanceTime = DateTime.MinValue;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="SyncOrchestrator"/> class. /// Initializes a new instance of the <see cref="SyncOrchestrator" /> class.
/// </summary> /// </summary>
/// <param name="discovery">The discovery service.</param> /// <param name="discovery">The discovery service.</param>
/// <param name="oplogStore">The oplog store.</param> /// <param name="oplogStore">The oplog store.</param>
@@ -111,6 +92,7 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogWarning("Sync Orchestrator already started"); _logger.LogWarning("Sync Orchestrator already started");
return; return;
} }
_cts = new CancellationTokenSource(); _cts = new CancellationTokenSource();
} }
@@ -165,7 +147,6 @@ public class SyncOrchestrator : ISyncOrchestrator
// Cleanup clients // Cleanup clients
foreach (var client in _clients.Values) foreach (var client in _clients.Values)
{
try try
{ {
client.Dispose(); client.Dispose();
@@ -174,7 +155,7 @@ public class SyncOrchestrator : ISyncOrchestrator
{ {
_logger.LogWarning(ex, "Error disposing client during shutdown"); _logger.LogWarning(ex, "Error disposing client during shutdown");
} }
}
_clients.Clear(); _clients.Clear();
await Task.CompletedTask; await Task.CompletedTask;
@@ -207,10 +188,7 @@ public class SyncOrchestrator : ISyncOrchestrator
var now = DateTime.UtcNow; var now = DateTime.UtcNow;
var eligiblePeers = allPeers.Where(p => var eligiblePeers = allPeers.Where(p =>
{ {
if (_peerStates.TryGetValue(p.NodeId, out var status)) if (_peerStates.TryGetValue(p.NodeId, out var status)) return status.NextRetryTime <= now;
{
return status.NextRetryTime <= now;
}
return true; return true;
}).ToList(); }).ToList();
@@ -259,10 +237,7 @@ public class SyncOrchestrator : ISyncOrchestrator
internal async Task RunMaintenanceIfDueAsync(PeerNodeConfiguration config, DateTime now, CancellationToken token) internal async Task RunMaintenanceIfDueAsync(PeerNodeConfiguration config, DateTime now, CancellationToken token)
{ {
var maintenanceInterval = TimeSpan.FromMinutes(config.MaintenanceIntervalMinutes); var maintenanceInterval = TimeSpan.FromMinutes(config.MaintenanceIntervalMinutes);
if ((now - _lastMaintenanceTime) < maintenanceInterval) if (now - _lastMaintenanceTime < maintenanceInterval) return;
{
return;
}
_logger.LogInformation("Running periodic maintenance (Oplog pruning)..."); _logger.LogInformation("Running periodic maintenance (Oplog pruning)...");
try try
@@ -271,7 +246,7 @@ public class SyncOrchestrator : ISyncOrchestrator
if (!cutoffDecision.HasCutoff || !cutoffDecision.EffectiveCutoff.HasValue) if (!cutoffDecision.HasCutoff || !cutoffDecision.EffectiveCutoff.HasValue)
{ {
_lastMaintenanceTime = now; _lastMaintenanceTime = now;
var reason = string.IsNullOrWhiteSpace(cutoffDecision.Reason) string reason = string.IsNullOrWhiteSpace(cutoffDecision.Reason)
? "No effective cutoff was produced." ? "No effective cutoff was produced."
: cutoffDecision.Reason; : cutoffDecision.Reason;
_logger.LogInformation("Skipping oplog prune for this maintenance cycle: {Reason}", reason); _logger.LogInformation("Skipping oplog prune for this maintenance cycle: {Reason}", reason);
@@ -282,28 +257,25 @@ public class SyncOrchestrator : ISyncOrchestrator
_lastMaintenanceTime = now; _lastMaintenanceTime = now;
if (cutoffDecision.ConfirmationCutoff.HasValue) if (cutoffDecision.ConfirmationCutoff.HasValue)
{
_logger.LogInformation( _logger.LogInformation(
"Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}, ConfirmationCutoff: {ConfirmationCutoff}).", "Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}, ConfirmationCutoff: {ConfirmationCutoff}).",
config.OplogRetentionHours, config.OplogRetentionHours,
cutoffDecision.EffectiveCutoff.Value, cutoffDecision.EffectiveCutoff.Value,
cutoffDecision.ConfirmationCutoff.Value); cutoffDecision.ConfirmationCutoff.Value);
}
else else
{
_logger.LogInformation( _logger.LogInformation(
"Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}).", "Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}).",
config.OplogRetentionHours, config.OplogRetentionHours,
cutoffDecision.EffectiveCutoff.Value); cutoffDecision.EffectiveCutoff.Value);
} }
}
catch (Exception maintenanceEx) catch (Exception maintenanceEx)
{ {
_logger.LogError(maintenanceEx, "Maintenance failed."); _logger.LogError(maintenanceEx, "Maintenance failed.");
} }
} }
private async Task<OplogPruneCutoffDecision> CalculatePruneCutoffAsync(PeerNodeConfiguration config, CancellationToken token) private async Task<OplogPruneCutoffDecision> CalculatePruneCutoffAsync(PeerNodeConfiguration config,
CancellationToken token)
{ {
if (_oplogPruneCutoffCalculator == null) if (_oplogPruneCutoffCalculator == null)
{ {
@@ -314,9 +286,9 @@ public class SyncOrchestrator : ISyncOrchestrator
return OplogPruneCutoffDecision.WithCutoff( return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff, retentionCutoff,
confirmationCutoff: null, null,
effectiveCutoff: retentionCutoff, retentionCutoff,
reason: "Oplog prune cutoff calculator is not configured."); "Oplog prune cutoff calculator is not configured.");
} }
return await _oplogPruneCutoffCalculator.CalculateEffectiveCutoffAsync(config, token); return await _oplogPruneCutoffCalculator.CalculateEffectiveCutoffAsync(config, token);
@@ -334,8 +306,8 @@ public class SyncOrchestrator : ISyncOrchestrator
using var peerAddressContext = LogContext.PushProperty("PeerAddress", peer.Address); using var peerAddressContext = LogContext.PushProperty("PeerAddress", peer.Address);
TcpPeerClient? client = null; TcpPeerClient? client = null;
bool shouldRemoveClient = false; var shouldRemoveClient = false;
bool syncSuccessful = false; var syncSuccessful = false;
try try
{ {
@@ -349,13 +321,11 @@ public class SyncOrchestrator : ISyncOrchestrator
_telemetry)); _telemetry));
// Reconnect if disconnected // Reconnect if disconnected
if (!client.IsConnected) if (!client.IsConnected) await client.ConnectAsync(token);
{
await client.ConnectAsync(token);
}
// Handshake (idempotent) // Handshake (idempotent)
if (!await client.HandshakeAsync(config.NodeId, config.AuthToken, _documentStore.InterestedCollection, token)) if (!await client.HandshakeAsync(config.NodeId, config.AuthToken, _documentStore.InterestedCollection,
token))
{ {
_logger.LogWarning("Handshake rejected by {NodeId}", peer.NodeId); _logger.LogWarning("Handshake rejected by {NodeId}", peer.NodeId);
shouldRemoveClient = true; shouldRemoveClient = true;
@@ -381,7 +351,7 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogInformation("Pulling changes from {PeerNodeId} for {Count} nodes: {Nodes}", _logger.LogInformation("Pulling changes from {PeerNodeId} for {Count} nodes: {Nodes}",
peer.NodeId, nodesToPull.Count, string.Join(", ", nodesToPull)); peer.NodeId, nodesToPull.Count, string.Join(", ", nodesToPull));
foreach (var nodeId in nodesToPull) foreach (string nodeId in nodesToPull)
{ {
var localTs = localVectorClock.GetTimestamp(nodeId); var localTs = localVectorClock.GetTimestamp(nodeId);
var remoteTs = remoteVectorClock.GetTimestamp(nodeId); var remoteTs = remoteVectorClock.GetTimestamp(nodeId);
@@ -390,13 +360,16 @@ public class SyncOrchestrator : ISyncOrchestrator
nodeId, localTs, remoteTs); nodeId, localTs, remoteTs);
// PASS LOCAL INTERESTS TO PULL // PASS LOCAL INTERESTS TO PULL
var changes = await client.PullChangesFromNodeAsync(nodeId, localTs, _documentStore.InterestedCollection, token); var changes = await client.PullChangesFromNodeAsync(nodeId, localTs,
_documentStore.InterestedCollection, token);
if (changes != null && changes.Count > 0) if (changes != null && changes.Count > 0)
{ {
var result = await ProcessInboundBatchAsync(client, peer.NodeId, changes, token); var result = await ProcessInboundBatchAsync(client, peer.NodeId, changes, token);
if (result != SyncBatchResult.Success) if (result != SyncBatchResult.Success)
{ {
_logger.LogWarning("Inbound batch processing failed with status {Status}. Aborting sync for this session.", result); _logger.LogWarning(
"Inbound batch processing failed with status {Status}. Aborting sync for this session.",
result);
RecordFailure(peer.NodeId); RecordFailure(peer.NodeId);
return; return;
} }
@@ -410,13 +383,15 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogInformation("Pushing changes to {PeerNodeId} for {Count} nodes: {Nodes}", _logger.LogInformation("Pushing changes to {PeerNodeId} for {Count} nodes: {Nodes}",
peer.NodeId, nodesToPush.Count, string.Join(", ", nodesToPush)); peer.NodeId, nodesToPush.Count, string.Join(", ", nodesToPush));
foreach (var nodeId in nodesToPush) foreach (string nodeId in nodesToPush)
{ {
var remoteTs = remoteVectorClock.GetTimestamp(nodeId); var remoteTs = remoteVectorClock.GetTimestamp(nodeId);
// PUSH FILTERING: Pass remote receiver's interests to oplogStore for efficient retrieval // PUSH FILTERING: Pass remote receiver's interests to oplogStore for efficient retrieval
var remoteInterests = client.RemoteInterests; var remoteInterests = client.RemoteInterests;
var changes = (await _oplogStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, remoteInterests, token)).ToList(); var changes =
(await _oplogStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, remoteInterests, token))
.ToList();
if (changes.Any()) if (changes.Any())
{ {
@@ -429,13 +404,10 @@ public class SyncOrchestrator : ISyncOrchestrator
// 5. Handle Concurrent/Equal cases // 5. Handle Concurrent/Equal cases
if (causality == CausalityRelation.Equal) if (causality == CausalityRelation.Equal)
{
_logger.LogDebug("Vector clocks are equal with {PeerNodeId}. No sync needed.", peer.NodeId); _logger.LogDebug("Vector clocks are equal with {PeerNodeId}. No sync needed.", peer.NodeId);
}
else if (causality == CausalityRelation.Concurrent && !nodesToPull.Any() && !nodesToPush.Any()) else if (causality == CausalityRelation.Concurrent && !nodesToPull.Any() && !nodesToPush.Any())
{ _logger.LogDebug("Vector clocks are concurrent with {PeerNodeId}, but no divergence detected.",
_logger.LogDebug("Vector clocks are concurrent with {PeerNodeId}, but no divergence detected.", peer.NodeId); peer.NodeId);
}
syncSuccessful = true; syncSuccessful = true;
RecordSuccess(peer.NodeId); RecordSuccess(peer.NodeId);
@@ -466,7 +438,9 @@ public class SyncOrchestrator : ISyncOrchestrator
} }
catch (CorruptDatabaseException cex) catch (CorruptDatabaseException cex)
{ {
_logger.LogCritical(cex, "Local database corruption detected during sync with {NodeId}. Initiating EMERGENCY SNAPSHOT RECOVERY.", peer.NodeId); _logger.LogCritical(cex,
"Local database corruption detected during sync with {NodeId}. Initiating EMERGENCY SNAPSHOT RECOVERY.",
peer.NodeId);
if (client != null && client.IsConnected) if (client != null && client.IsConnected)
{ {
try try
@@ -498,7 +472,8 @@ public class SyncOrchestrator : ISyncOrchestrator
} }
catch (SocketException sex) catch (SocketException sex)
{ {
_logger.LogWarning("Network error syncing with {NodeId}: {Message}. Will retry later.", peer.NodeId, sex.Message); _logger.LogWarning("Network error syncing with {NodeId}: {Message}. Will retry later.", peer.NodeId,
sex.Message);
shouldRemoveClient = true; shouldRemoveClient = true;
RecordFailure(peer.NodeId); RecordFailure(peer.NodeId);
} }
@@ -511,18 +486,18 @@ public class SyncOrchestrator : ISyncOrchestrator
finally finally
{ {
if (shouldRemoveClient && client != null) if (shouldRemoveClient && client != null)
{
if (_clients.TryRemove(peer.NodeId, out var removedClient)) if (_clients.TryRemove(peer.NodeId, out var removedClient))
try
{ {
try { removedClient.Dispose(); } catch { /* Ignore disposal errors */ } removedClient.Dispose();
} }
catch
{
/* Ignore disposal errors */
} }
// Log successful sync outcome (failures are already logged in catch blocks) // Log successful sync outcome (failures are already logged in catch blocks)
if (syncSuccessful) if (syncSuccessful) _logger.LogInformation("Sync with {NodeId} completed successfully.", peer.NodeId);
{
_logger.LogInformation("Sync with {NodeId} completed successfully.", peer.NodeId);
}
} }
} }
@@ -530,7 +505,12 @@ public class SyncOrchestrator : ISyncOrchestrator
{ {
_peerStates.AddOrUpdate(nodeId, _peerStates.AddOrUpdate(nodeId,
new PeerStatus { FailureCount = 0, NextRetryTime = DateTime.MinValue }, new PeerStatus { FailureCount = 0, NextRetryTime = DateTime.MinValue },
(k, v) => { v.FailureCount = 0; v.NextRetryTime = DateTime.MinValue; return v; }); (k, v) =>
{
v.FailureCount = 0;
v.NextRetryTime = DateTime.MinValue;
return v;
});
} }
/// <summary> /// <summary>
@@ -560,23 +540,19 @@ public class SyncOrchestrator : ISyncOrchestrator
/// <param name="localNodeId">The local node identifier used to skip self-registration.</param> /// <param name="localNodeId">The local node identifier used to skip self-registration.</param>
/// <param name="token">The cancellation token.</param> /// <param name="token">The cancellation token.</param>
/// <returns>A task that represents the asynchronous registration operation.</returns> /// <returns>A task that represents the asynchronous registration operation.</returns>
internal async Task EnsurePeersRegisteredAsync(IEnumerable<PeerNode> peers, string localNodeId, CancellationToken token) internal async Task EnsurePeersRegisteredAsync(IEnumerable<PeerNode> peers, string localNodeId,
CancellationToken token)
{ {
if (_peerOplogConfirmationStore == null) if (_peerOplogConfirmationStore == null) return;
{
return;
}
foreach (var peer in peers) foreach (var peer in peers)
{ {
if (string.Equals(peer.NodeId, localNodeId, StringComparison.Ordinal)) if (string.Equals(peer.NodeId, localNodeId, StringComparison.Ordinal)) continue;
{
continue;
}
try try
{ {
await _peerOplogConfirmationStore.EnsurePeerRegisteredAsync(peer.NodeId, peer.Address, peer.Type, token); await _peerOplogConfirmationStore.EnsurePeerRegisteredAsync(peer.NodeId, peer.Address, peer.Type,
token);
} }
catch (OperationCanceledException) when (token.IsCancellationRequested) catch (OperationCanceledException) when (token.IsCancellationRequested)
{ {
@@ -603,30 +579,18 @@ public class SyncOrchestrator : ISyncOrchestrator
VectorClock remoteVectorClock, VectorClock remoteVectorClock,
CancellationToken token) CancellationToken token)
{ {
if (_peerOplogConfirmationStore == null) if (_peerOplogConfirmationStore == null) return;
{
return;
}
var nodeIds = new HashSet<string>(localVectorClock.NodeIds, StringComparer.Ordinal); var nodeIds = new HashSet<string>(localVectorClock.NodeIds, StringComparer.Ordinal);
foreach (var nodeId in remoteVectorClock.NodeIds) foreach (string nodeId in remoteVectorClock.NodeIds) nodeIds.Add(nodeId);
{
nodeIds.Add(nodeId);
}
foreach (var sourceNodeId in nodeIds) foreach (string sourceNodeId in nodeIds)
{ {
var localTimestamp = localVectorClock.GetTimestamp(sourceNodeId); var localTimestamp = localVectorClock.GetTimestamp(sourceNodeId);
if (localTimestamp == default) if (localTimestamp == default) continue;
{
continue;
}
var remoteTimestamp = remoteVectorClock.GetTimestamp(sourceNodeId); var remoteTimestamp = remoteVectorClock.GetTimestamp(sourceNodeId);
if (remoteTimestamp < localTimestamp) if (remoteTimestamp < localTimestamp) continue;
{
continue;
}
await UpdatePeerConfirmationAsync(peerNodeId, sourceNodeId, localTimestamp, token); await UpdatePeerConfirmationAsync(peerNodeId, sourceNodeId, localTimestamp, token);
} }
@@ -646,10 +610,7 @@ public class SyncOrchestrator : ISyncOrchestrator
IReadOnlyCollection<OplogEntry> pushedChanges, IReadOnlyCollection<OplogEntry> pushedChanges,
CancellationToken token) CancellationToken token)
{ {
if (_peerOplogConfirmationStore == null || pushedChanges.Count == 0) if (_peerOplogConfirmationStore == null || pushedChanges.Count == 0) return;
{
return;
}
var maxPushed = pushedChanges var maxPushed = pushedChanges
.OrderBy(entry => entry.Timestamp) .OrderBy(entry => entry.Timestamp)
@@ -682,15 +643,12 @@ public class SyncOrchestrator : ISyncOrchestrator
HlcTimestamp timestamp, HlcTimestamp timestamp,
CancellationToken token) CancellationToken token)
{ {
if (_peerOplogConfirmationStore == null) if (_peerOplogConfirmationStore == null) return;
{
return;
}
try try
{ {
// Best-effort hash lookup: IOplogStore exposes latest hash per source node. // Best-effort hash lookup: IOplogStore exposes latest hash per source node.
var hash = await _oplogStore.GetLastEntryHashAsync(sourceNodeId, token) ?? string.Empty; string hash = await _oplogStore.GetLastEntryHashAsync(sourceNodeId, token) ?? string.Empty;
await _peerOplogConfirmationStore.UpdateConfirmationAsync(peerNodeId, sourceNodeId, timestamp, hash, token); await _peerOplogConfirmationStore.UpdateConfirmationAsync(peerNodeId, sourceNodeId, timestamp, hash, token);
} }
catch (OperationCanceledException) when (token.IsCancellationRequested) catch (OperationCanceledException) when (token.IsCancellationRequested)
@@ -713,7 +671,7 @@ public class SyncOrchestrator : ISyncOrchestrator
{ {
v.FailureCount++; v.FailureCount++;
// Exponential backoff: 1s, 2s, 4s... max 60s // Exponential backoff: 1s, 2s, 4s... max 60s
var delaySeconds = Math.Min(Math.Pow(2, v.FailureCount), 60); double delaySeconds = Math.Min(Math.Pow(2, v.FailureCount), 60);
v.NextRetryTime = DateTime.UtcNow.AddSeconds(delaySeconds); v.NextRetryTime = DateTime.UtcNow.AddSeconds(delaySeconds);
return v; return v;
}); });
@@ -723,66 +681,51 @@ public class SyncOrchestrator : ISyncOrchestrator
/// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore. /// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore.
/// Extracted to enforce Single Responsibility Principle. /// Extracted to enforce Single Responsibility Principle.
/// </summary> /// </summary>
private enum SyncBatchResult private async Task<SyncBatchResult> ProcessInboundBatchAsync(TcpPeerClient client, string peerNodeId,
{ IList<OplogEntry> changes, CancellationToken token)
Success,
GapDetected,
IntegrityError,
ChainBroken
}
/// <summary>
/// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore.
/// Extracted to enforce Single Responsibility Principle.
/// </summary>
private async Task<SyncBatchResult> ProcessInboundBatchAsync(TcpPeerClient client, string peerNodeId, IList<OplogEntry> changes, CancellationToken token)
{ {
_logger.LogInformation("Received {Count} changes from {NodeId}", changes.Count, peerNodeId); _logger.LogInformation("Received {Count} changes from {NodeId}", changes.Count, peerNodeId);
// 1. Validate internal integrity of the batch (Hash check) // 1. Validate internal integrity of the batch (Hash check)
foreach (var entry in changes) foreach (var entry in changes)
{
if (!entry.IsValid()) if (!entry.IsValid())
{
// CHANGED: Log Critical Error but ACCEPT the entry to allow sync to progress (Soft Validation). // CHANGED: Log Critical Error but ACCEPT the entry to allow sync to progress (Soft Validation).
// Throwing here would cause an unrecoverable state where this batch blocks sync forever. // Throwing here would cause an unrecoverable state where this batch blocks sync forever.
_logger.LogError("Integrity Check Failed for Entry {Hash} (Node: {NodeId}). Expected: {computedHash}. ACCEPTING payload despite mismatch to maintain availability.", _logger.LogError(
"Integrity Check Failed for Entry {Hash} (Node: {NodeId}). Expected: {computedHash}. ACCEPTING payload despite mismatch to maintain availability.",
entry.Hash, entry.Timestamp.NodeId, entry.ComputeHash()); entry.Hash, entry.Timestamp.NodeId, entry.ComputeHash());
}
}
// 2. Group changes by Author Node to validate Source Chains independently // 2. Group changes by Author Node to validate Source Chains independently
var changesByNode = changes.GroupBy(c => c.Timestamp.NodeId); var changesByNode = changes.GroupBy(c => c.Timestamp.NodeId);
foreach (var group in changesByNode) foreach (var group in changesByNode)
{ {
var authorNodeId = group.Key; string authorNodeId = group.Key;
// FIX: Order by the full Timestamp (Physical + Logical), not just LogicalCounter. // FIX: Order by the full Timestamp (Physical + Logical), not just LogicalCounter.
// LogicalCounter resets when PhysicalTime advances, so sorting by Counter alone breaks chronological order. // LogicalCounter resets when PhysicalTime advances, so sorting by Counter alone breaks chronological order.
var authorChain = group.OrderBy(c => c.Timestamp).ToList(); var authorChain = group.OrderBy(c => c.Timestamp).ToList();
// Check linkage within the batch // Check linkage within the batch
for (int i = 1; i < authorChain.Count; i++) for (var i = 1; i < authorChain.Count; i++)
{
if (authorChain[i].PreviousHash != authorChain[i - 1].Hash) if (authorChain[i].PreviousHash != authorChain[i - 1].Hash)
{ {
_logger.LogError("Chain Broken in Batch for Node {AuthorId}", authorNodeId); _logger.LogError("Chain Broken in Batch for Node {AuthorId}", authorNodeId);
return SyncBatchResult.ChainBroken; return SyncBatchResult.ChainBroken;
} }
}
// Check linkage with Local State // Check linkage with Local State
var firstEntry = authorChain[0]; var firstEntry = authorChain[0];
var localHeadHash = await _oplogStore.GetLastEntryHashAsync(authorNodeId, token); string? localHeadHash = await _oplogStore.GetLastEntryHashAsync(authorNodeId, token);
_logger.LogDebug("Processing chain for Node {AuthorId}: FirstEntry.PrevHash={PrevHash}, FirstEntry.Hash={Hash}, LocalHeadHash={LocalHead}", _logger.LogDebug(
"Processing chain for Node {AuthorId}: FirstEntry.PrevHash={PrevHash}, FirstEntry.Hash={Hash}, LocalHeadHash={LocalHead}",
authorNodeId, firstEntry.PreviousHash, firstEntry.Hash, localHeadHash ?? "(null)"); authorNodeId, firstEntry.PreviousHash, firstEntry.Hash, localHeadHash ?? "(null)");
if (localHeadHash != null && firstEntry.PreviousHash != localHeadHash) if (localHeadHash != null && firstEntry.PreviousHash != localHeadHash)
{ {
// Check if entry starts from snapshot boundary (valid case after pruning) // Check if entry starts from snapshot boundary (valid case after pruning)
var snapshotHash = await _snapshotMetadataStore.GetSnapshotHashAsync(authorNodeId, token); string? snapshotHash = await _snapshotMetadataStore.GetSnapshotHashAsync(authorNodeId, token);
if (snapshotHash != null && firstEntry.PreviousHash == snapshotHash) if (snapshotHash != null && firstEntry.PreviousHash == snapshotHash)
{ {
@@ -797,7 +740,8 @@ public class SyncOrchestrator : ISyncOrchestrator
else else
{ {
// GAP DETECTED (not a snapshot boundary case) // GAP DETECTED (not a snapshot boundary case)
_logger.LogWarning("Gap Detected for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Initiating Recovery.", _logger.LogWarning(
"Gap Detected for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Initiating Recovery.",
authorNodeId, localHeadHash, firstEntry.PreviousHash); authorNodeId, localHeadHash, firstEntry.PreviousHash);
// Gap Recovery (Range Sync) // Gap Recovery (Range Sync)
@@ -821,10 +765,11 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogInformation("Gap Recovery: Retrieved {Count} missing entries.", missingChain.Count); _logger.LogInformation("Gap Recovery: Retrieved {Count} missing entries.", missingChain.Count);
// Validate Recovery Chain Linkage // Validate Recovery Chain Linkage
bool linkValid = true; var linkValid = true;
if (missingChain[0].PreviousHash != localHeadHash) linkValid = false; if (missingChain[0].PreviousHash != localHeadHash) linkValid = false;
for (int i = 1; i < missingChain.Count; i++) for (var i = 1; i < missingChain.Count; i++)
if (missingChain[i].PreviousHash != missingChain[i - 1].Hash) linkValid = false; if (missingChain[i].PreviousHash != missingChain[i - 1].Hash)
linkValid = false;
if (missingChain.Last().Hash != firstEntry.PreviousHash) linkValid = false; if (missingChain.Last().Hash != firstEntry.PreviousHash) linkValid = false;
if (!linkValid) if (!linkValid)
@@ -846,7 +791,8 @@ public class SyncOrchestrator : ISyncOrchestrator
// DECISION: Accept the entries anyway but log a warning // DECISION: Accept the entries anyway but log a warning
// This allows forward progress even with partial history // This allows forward progress even with partial history
_logger.LogWarning("Could not recover gap for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Accepting entries anyway (partial sync).", _logger.LogWarning(
"Could not recover gap for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Accepting entries anyway (partial sync).",
authorNodeId, localHeadHash, firstEntry.PreviousHash); authorNodeId, localHeadHash, firstEntry.PreviousHash);
// Optionally: Mark this as a partial sync in metadata // Optionally: Mark this as a partial sync in metadata
@@ -857,7 +803,8 @@ public class SyncOrchestrator : ISyncOrchestrator
else if (localHeadHash == null && !string.IsNullOrEmpty(firstEntry.PreviousHash)) else if (localHeadHash == null && !string.IsNullOrEmpty(firstEntry.PreviousHash))
{ {
// Implicit Accept / Partial Sync warning // Implicit Accept / Partial Sync warning
_logger.LogWarning("First contact with Node {AuthorId} at explicit state (Not Genesis). Accepting.", authorNodeId); _logger.LogWarning("First contact with Node {AuthorId} at explicit state (Not Genesis). Accepting.",
authorNodeId);
} }
// Apply original batch (grouped by node for clarity, but oplogStore usually handles bulk) // Apply original batch (grouped by node for clarity, but oplogStore usually handles bulk)
@@ -871,7 +818,7 @@ public class SyncOrchestrator : ISyncOrchestrator
{ {
_logger.LogInformation(mergeOnly ? "Starting Snapshot Merge..." : "Starting Full Database Replacement..."); _logger.LogInformation(mergeOnly ? "Starting Snapshot Merge..." : "Starting Full Database Replacement...");
var tempFile = Path.GetTempFileName(); string tempFile = Path.GetTempFileName();
try try
{ {
_logger.LogInformation("Downloading snapshot to {TempFile}...", tempFile); _logger.LogInformation("Downloading snapshot to {TempFile}...", tempFile);
@@ -885,14 +832,10 @@ public class SyncOrchestrator : ISyncOrchestrator
using (var fs = File.OpenRead(tempFile)) using (var fs = File.OpenRead(tempFile))
{ {
if (mergeOnly) if (mergeOnly)
{
await _snapshotService.MergeSnapshotAsync(fs, token); await _snapshotService.MergeSnapshotAsync(fs, token);
}
else else
{
await _snapshotService.ReplaceDatabaseAsync(fs, token); await _snapshotService.ReplaceDatabaseAsync(fs, token);
} }
}
_logger.LogInformation("Snapshot applied successfully."); _logger.LogInformation("Snapshot applied successfully.");
} }
@@ -904,7 +847,6 @@ public class SyncOrchestrator : ISyncOrchestrator
finally finally
{ {
if (File.Exists(tempFile)) if (File.Exists(tempFile))
{
try try
{ {
File.Delete(tempFile); File.Delete(tempFile);
@@ -915,5 +857,29 @@ public class SyncOrchestrator : ISyncOrchestrator
} }
} }
} }
private class PeerStatus
{
/// <summary>
/// Gets or sets the number of consecutive failures for the peer.
/// </summary>
public int FailureCount { get; set; }
/// <summary>
/// Gets or sets the next time a retry attempt is allowed.
/// </summary>
public DateTime NextRetryTime { get; set; }
}
/// <summary>
/// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore.
/// Extracted to enforce Single Responsibility Principle.
/// </summary>
private enum SyncBatchResult
{
Success,
GapDetected,
IntegrityError,
ChainBroken
} }
} }

View File

@@ -1,16 +1,10 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Sockets; using System.Net.Sockets;
using System.Threading; using System.Text.Json;
using System.Threading.Tasks;
using Google.Protobuf;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Network.Proto; using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Protocol; using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry; using ZB.MOM.WW.CBDDC.Network.Telemetry;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -20,20 +14,42 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary> /// </summary>
public class TcpPeerClient : IDisposable public class TcpPeerClient : IDisposable
{ {
private readonly TcpClient _client;
private readonly string _peerAddress;
private readonly ILogger<TcpPeerClient> _logger;
private readonly IPeerHandshakeService? _handshakeService;
private NetworkStream? _stream;
private CipherState? _cipherState;
private readonly object _connectionLock = new object();
private bool _disposed = false;
private const int ConnectionTimeoutMs = 5000; private const int ConnectionTimeoutMs = 5000;
private const int OperationTimeoutMs = 30000; private const int OperationTimeoutMs = 30000;
private readonly TcpClient _client;
private readonly object _connectionLock = new();
private readonly IPeerHandshakeService? _handshakeService;
private readonly ILogger<TcpPeerClient> _logger;
private readonly string _peerAddress;
private readonly ProtocolHandler _protocol; private readonly ProtocolHandler _protocol;
private readonly INetworkTelemetryService? _telemetry;
private CipherState? _cipherState;
private bool _disposed;
private List<string> _remoteInterests = new();
private NetworkStream? _stream;
private bool _useCompression; // Negotiated after handshake
/// <summary>
/// Initializes a new instance of the <see cref="TcpPeerClient" /> class.
/// </summary>
/// <param name="peerAddress">The remote peer address in <c>host:port</c> format.</param>
/// <param name="logger">The logger used for connection and protocol events.</param>
/// <param name="handshakeService">The optional handshake service used to establish secure sessions.</param>
/// <param name="telemetry">The optional telemetry service for network metrics.</param>
public TcpPeerClient(string peerAddress, ILogger<TcpPeerClient> logger,
IPeerHandshakeService? handshakeService = null, INetworkTelemetryService? telemetry = null)
{
_client = new TcpClient();
_peerAddress = peerAddress;
_logger = logger;
_handshakeService = handshakeService;
_telemetry = telemetry;
_protocol = new ProtocolHandler(logger, telemetry);
}
/// <summary> /// <summary>
/// Gets a value indicating whether the client currently has an active connection. /// Gets a value indicating whether the client currently has an active connection.
/// </summary> /// </summary>
@@ -53,412 +69,10 @@ public class TcpPeerClient : IDisposable
/// </summary> /// </summary>
public bool HasHandshaked { get; private set; } public bool HasHandshaked { get; private set; }
private readonly INetworkTelemetryService? _telemetry;
/// <summary>
/// Initializes a new instance of the <see cref="TcpPeerClient"/> class.
/// </summary>
/// <param name="peerAddress">The remote peer address in <c>host:port</c> format.</param>
/// <param name="logger">The logger used for connection and protocol events.</param>
/// <param name="handshakeService">The optional handshake service used to establish secure sessions.</param>
/// <param name="telemetry">The optional telemetry service for network metrics.</param>
public TcpPeerClient(string peerAddress, ILogger<TcpPeerClient> logger, IPeerHandshakeService? handshakeService = null, INetworkTelemetryService? telemetry = null)
{
_client = new TcpClient();
_peerAddress = peerAddress;
_logger = logger;
_handshakeService = handshakeService;
_telemetry = telemetry;
_protocol = new ProtocolHandler(logger, telemetry);
}
/// <summary>
/// Connects to the configured remote peer.
/// </summary>
/// <param name="token">A token used to cancel the connection attempt.</param>
/// <returns>A task that represents the asynchronous connect operation.</returns>
public async Task ConnectAsync(CancellationToken token)
{
lock (_connectionLock)
{
if (_disposed)
{
throw new ObjectDisposedException(nameof(TcpPeerClient));
}
if (IsConnected) return;
}
var parts = _peerAddress.Split(':');
if (parts.Length != 2)
{
throw new ArgumentException($"Invalid address format: {_peerAddress}. Expected format: host:port");
}
if (!int.TryParse(parts[1], out int port) || port <= 0 || port > 65535)
{
throw new ArgumentException($"Invalid port number: {parts[1]}");
}
// Connect with timeout
using var timeoutCts = new CancellationTokenSource(ConnectionTimeoutMs);
using var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutCts.Token);
try
{
await _client.ConnectAsync(parts[0], port);
lock (_connectionLock)
{
if (_disposed)
{
throw new ObjectDisposedException(nameof(TcpPeerClient));
}
_stream = _client.GetStream();
// CRITICAL for Android: Disable Nagle's algorithm to prevent buffering delays
// This ensures immediate packet transmission for handshake data
_client.NoDelay = true;
// Configure TCP keepalive
_client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true);
// Set read/write timeouts
_stream.ReadTimeout = OperationTimeoutMs;
_stream.WriteTimeout = OperationTimeoutMs;
}
_logger.LogDebug("Connected to peer: {Address} (NoDelay=true for immediate send)", _peerAddress);
}
catch (OperationCanceledException) when (timeoutCts.IsCancellationRequested)
{
throw new TimeoutException($"Connection to {_peerAddress} timed out after {ConnectionTimeoutMs}ms");
}
}
/// <summary> /// <summary>
/// Gets the list of collections the remote peer is interested in. /// Gets the list of collections the remote peer is interested in.
/// </summary> /// </summary>
public System.Collections.Generic.IReadOnlyList<string> RemoteInterests => _remoteInterests.AsReadOnly(); public IReadOnlyList<string> RemoteInterests => _remoteInterests.AsReadOnly();
private List<string> _remoteInterests = new();
/// <summary>
/// Performs authentication handshake with the remote peer.
/// </summary>
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="authToken">The authentication token.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>True if handshake was accepted, false otherwise.</returns>
public async Task<bool> HandshakeAsync(string myNodeId, string authToken, CancellationToken token)
{
return await HandshakeAsync(myNodeId, authToken, null, token);
}
/// <summary>
/// Performs authentication handshake with the remote peer, including collection interests.
/// </summary>
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="authToken">The authentication token.</param>
/// <param name="interestingCollections">Optional collection names this node is interested in receiving.</param>
/// <param name="token">Cancellation token.</param>
/// <returns><see langword="true"/> if handshake was accepted; otherwise <see langword="false"/>.</returns>
public async Task<bool> HandshakeAsync(string myNodeId, string authToken, IEnumerable<string>? interestingCollections, CancellationToken token)
{
if (HasHandshaked) return true;
if (_handshakeService != null)
{
// Perform secure handshake if service is available
// We assume we are initiator here
_cipherState = await _handshakeService.HandshakeAsync(_stream!, true, myNodeId, token);
}
var req = new HandshakeRequest { NodeId = myNodeId, AuthToken = authToken ?? "" };
if (interestingCollections != null)
{
foreach (var coll in interestingCollections)
{
req.InterestingCollections.Add(coll);
}
}
if (CompressionHelper.IsBrotliSupported)
{
req.SupportedCompression.Add("brotli");
}
_logger.LogDebug("Sending HandshakeReq to {Address}", _peerAddress);
await _protocol.SendMessageAsync(_stream!, MessageType.HandshakeReq, req, false, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
_logger.LogDebug("Received Handshake response type: {Type}", type);
if (type != MessageType.HandshakeRes) return false;
var res = HandshakeResponse.Parser.ParseFrom(payload);
// Store remote interests
_remoteInterests = res.InterestingCollections.ToList();
// Negotiation Result
if (res.SelectedCompression == "brotli")
{
_useCompression = true;
_logger.LogInformation("Brotli compression negotiated.");
}
HasHandshaked = res.Accepted;
return res.Accepted;
}
/// <summary>
/// Retrieves the remote peer's latest HLC timestamp.
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The latest remote hybrid logical clock timestamp.</returns>
public async Task<HlcTimestamp> GetClockAsync(CancellationToken token)
{
using (_telemetry?.StartMetric(MetricType.RoundTripTime))
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetClockReq, new GetClockRequest(), _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ClockRes) throw new Exception("Unexpected response");
var res = ClockResponse.Parser.ParseFrom(payload);
return new HlcTimestamp(res.HlcWall, res.HlcLogic, res.HlcNode);
}
}
/// <summary>
/// Retrieves the remote peer's vector clock (latest timestamp per node).
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The remote vector clock.</returns>
public async Task<VectorClock> GetVectorClockAsync(CancellationToken token)
{
using (_telemetry?.StartMetric(MetricType.RoundTripTime))
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetVectorClockReq, new GetVectorClockRequest(), _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.VectorClockRes) throw new Exception("Unexpected response");
var res = VectorClockResponse.Parser.ParseFrom(payload);
var vectorClock = new VectorClock();
foreach (var entry in res.Entries)
{
vectorClock.SetTimestamp(entry.NodeId, new HlcTimestamp(entry.HlcWall, entry.HlcLogic, entry.NodeId));
}
return vectorClock;
}
}
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, CancellationToken token)
{
return await PullChangesAsync(since, null, token);
}
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp, filtered by collections.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, IEnumerable<string>? collections, CancellationToken token)
{
var req = new PullChangesRequest
{
SinceWall = since.PhysicalTime,
SinceLogic = since.LogicalCounter,
// Empty SinceNode indicates a global pull (not source-node filtered).
SinceNode = string.Empty
};
if (collections != null)
{
foreach (var coll in collections)
{
req.Collections.Add(coll);
}
}
await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response");
var res = ChangeSetResponse.Parser.ParseFrom(payload);
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash // Pass the received hash to preserve integrity reference
)).ToList();
}
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified timestamp.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, CancellationToken token)
{
return await PullChangesFromNodeAsync(nodeId, since, null, token);
}
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified timestamp, filtered by collections.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections, CancellationToken token)
{
var req = new PullChangesRequest
{
SinceNode = nodeId,
SinceWall = since.PhysicalTime,
SinceLogic = since.LogicalCounter
};
if (collections != null)
{
foreach (var coll in collections)
{
req.Collections.Add(coll);
}
}
await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response");
var res = ChangeSetResponse.Parser.ParseFrom(payload);
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash
)).ToList();
}
/// <summary>
/// Retrieves a range of oplog entries connecting two hashes (Gap Recovery).
/// </summary>
/// <param name="startHash">The starting hash in the chain.</param>
/// <param name="endHash">The ending hash in the chain.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The chain entries connecting the requested hash range.</returns>
public virtual async Task<List<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken token)
{
var req = new GetChainRangeRequest { StartHash = startHash, EndHash = endHash };
await _protocol.SendMessageAsync(_stream!, MessageType.GetChainRangeReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChainRangeRes) throw new Exception($"Unexpected response for ChainRange: {type}");
var res = ChainRangeResponse.Parser.ParseFrom(payload);
if (res.SnapshotRequired) throw new SnapshotRequiredException();
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash
)).ToList();
}
/// <summary>
/// Pushes local oplog changes to the remote peer.
/// </summary>
/// <param name="entries">The oplog entries to push.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous push operation.</returns>
public async Task PushChangesAsync(IEnumerable<OplogEntry> entries, CancellationToken token)
{
var req = new PushChangesRequest();
var entryList = entries.ToList();
if (entryList.Count == 0) return;
foreach (var e in entryList)
{
req.Entries.Add(new ProtoOplogEntry
{
Collection = e.Collection,
Key = e.Key,
Operation = e.Operation.ToString(),
JsonData = e.Payload?.GetRawText() ?? "",
HlcWall = e.Timestamp.PhysicalTime,
HlcLogic = e.Timestamp.LogicalCounter,
HlcNode = e.Timestamp.NodeId,
Hash = e.Hash,
PreviousHash = e.PreviousHash
});
}
await _protocol.SendMessageAsync(_stream!, MessageType.PushChangesReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.AckRes) throw new Exception("Push failed");
var res = AckResponse.Parser.ParseFrom(payload);
if (res.SnapshotRequired) throw new SnapshotRequiredException();
if (!res.Success) throw new Exception("Push failed");
}
private bool _useCompression = false; // Negotiated after handshake
private OperationType ParseOp(string op) => Enum.TryParse<OperationType>(op, out var val) ? val : OperationType.Put;
/// <summary>
/// Downloads a full snapshot from the remote peer to the provided destination stream.
/// </summary>
/// <param name="destination">The stream that receives snapshot bytes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous snapshot transfer operation.</returns>
public async Task GetSnapshotAsync(Stream destination, CancellationToken token)
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetSnapshotReq, new GetSnapshotRequest(), _useCompression, _cipherState, token);
while (true)
{
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.SnapshotChunkMsg) throw new Exception($"Unexpected message type during snapshot: {type}");
var chunk = SnapshotChunk.Parser.ParseFrom(payload);
if (chunk.Data.Length > 0)
{
await destination.WriteAsync(chunk.Data.ToByteArray(), 0, chunk.Data.Length, token);
}
if (chunk.IsLast) break;
}
}
/// <summary> /// <summary>
/// Releases resources used by the peer client. /// Releases resources used by the peer client.
@@ -491,12 +105,380 @@ public class TcpPeerClient : IDisposable
_logger.LogDebug("Disposed connection to peer: {Address}", _peerAddress); _logger.LogDebug("Disposed connection to peer: {Address}", _peerAddress);
} }
/// <summary>
/// Connects to the configured remote peer.
/// </summary>
/// <param name="token">A token used to cancel the connection attempt.</param>
/// <returns>A task that represents the asynchronous connect operation.</returns>
public async Task ConnectAsync(CancellationToken token)
{
lock (_connectionLock)
{
if (_disposed) throw new ObjectDisposedException(nameof(TcpPeerClient));
if (IsConnected) return;
}
string[] parts = _peerAddress.Split(':');
if (parts.Length != 2)
throw new ArgumentException($"Invalid address format: {_peerAddress}. Expected format: host:port");
if (!int.TryParse(parts[1], out int port) || port <= 0 || port > 65535)
throw new ArgumentException($"Invalid port number: {parts[1]}");
// Connect with timeout
using var timeoutCts = new CancellationTokenSource(ConnectionTimeoutMs);
using var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutCts.Token);
try
{
await _client.ConnectAsync(parts[0], port);
lock (_connectionLock)
{
if (_disposed) throw new ObjectDisposedException(nameof(TcpPeerClient));
_stream = _client.GetStream();
// CRITICAL for Android: Disable Nagle's algorithm to prevent buffering delays
// This ensures immediate packet transmission for handshake data
_client.NoDelay = true;
// Configure TCP keepalive
_client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true);
// Set read/write timeouts
_stream.ReadTimeout = OperationTimeoutMs;
_stream.WriteTimeout = OperationTimeoutMs;
}
_logger.LogDebug("Connected to peer: {Address} (NoDelay=true for immediate send)", _peerAddress);
}
catch (OperationCanceledException) when (timeoutCts.IsCancellationRequested)
{
throw new TimeoutException($"Connection to {_peerAddress} timed out after {ConnectionTimeoutMs}ms");
}
}
/// <summary>
/// Performs authentication handshake with the remote peer.
/// </summary>
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="authToken">The authentication token.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>True if handshake was accepted, false otherwise.</returns>
public async Task<bool> HandshakeAsync(string myNodeId, string authToken, CancellationToken token)
{
return await HandshakeAsync(myNodeId, authToken, null, token);
}
    /// <summary>
    /// Performs authentication handshake with the remote peer, including collection interests.
    /// </summary>
    /// <param name="myNodeId">The local node identifier.</param>
    /// <param name="authToken">The authentication token.</param>
    /// <param name="interestingCollections">Optional collection names this node is interested in receiving.</param>
    /// <param name="token">Cancellation token.</param>
    /// <returns><see langword="true" /> if handshake was accepted; otherwise <see langword="false" />.</returns>
    public async Task<bool> HandshakeAsync(string myNodeId, string authToken,
        IEnumerable<string>? interestingCollections, CancellationToken token)
    {
        // Handshake is performed at most once per connection.
        if (HasHandshaked) return true;
        // NOTE(review): _stream! below assumes ConnectAsync has completed successfully;
        // confirm callers always connect before handshaking.
        if (_handshakeService != null)
            // Perform secure handshake if service is available
            // We assume we are initiator here
            _cipherState = await _handshakeService.HandshakeAsync(_stream!, true, myNodeId, token);
        var req = new HandshakeRequest { NodeId = myNodeId, AuthToken = authToken ?? "" };
        if (interestingCollections != null)
            foreach (string coll in interestingCollections)
                req.InterestingCollections.Add(coll);
        // Advertise brotli only when the local runtime supports it; the server picks the result.
        if (CompressionHelper.IsBrotliSupported) req.SupportedCompression.Add("brotli");
        _logger.LogDebug("Sending HandshakeReq to {Address}", _peerAddress);
        // The handshake request itself is always sent uncompressed (compression is negotiated by this exchange).
        await _protocol.SendMessageAsync(_stream!, MessageType.HandshakeReq, req, false, _cipherState, token);
        (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
        _logger.LogDebug("Received Handshake response type: {Type}", type);
        // An unexpected message type fails the handshake without updating HasHandshaked.
        if (type != MessageType.HandshakeRes) return false;
        var res = HandshakeResponse.Parser.ParseFrom(payload);
        // Store remote interests
        _remoteInterests = res.InterestingCollections.ToList();
        // Negotiation Result
        if (res.SelectedCompression == "brotli")
        {
            _useCompression = true;
            _logger.LogInformation("Brotli compression negotiated.");
        }
        HasHandshaked = res.Accepted;
        return res.Accepted;
    }
/// <summary>
/// Retrieves the remote peer's latest HLC timestamp.
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The latest remote hybrid logical clock timestamp.</returns>
public async Task<HlcTimestamp> GetClockAsync(CancellationToken token)
{
using (_telemetry?.StartMetric(MetricType.RoundTripTime))
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetClockReq, new GetClockRequest(), _useCompression,
_cipherState, token);
(var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ClockRes) throw new Exception("Unexpected response");
var res = ClockResponse.Parser.ParseFrom(payload);
return new HlcTimestamp(res.HlcWall, res.HlcLogic, res.HlcNode);
}
}
/// <summary>
/// Retrieves the remote peer's vector clock (latest timestamp per node).
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The remote vector clock.</returns>
public async Task<VectorClock> GetVectorClockAsync(CancellationToken token)
{
using (_telemetry?.StartMetric(MetricType.RoundTripTime))
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetVectorClockReq, new GetVectorClockRequest(),
_useCompression, _cipherState, token);
(var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.VectorClockRes) throw new Exception("Unexpected response");
var res = VectorClockResponse.Parser.ParseFrom(payload);
var vectorClock = new VectorClock();
foreach (var entry in res.Entries)
vectorClock.SetTimestamp(entry.NodeId, new HlcTimestamp(entry.HlcWall, entry.HlcLogic, entry.NodeId));
return vectorClock;
}
}
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, CancellationToken token)
{
return await PullChangesAsync(since, null, token);
}
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp, filtered by collections.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, IEnumerable<string>? collections,
CancellationToken token)
{
var req = new PullChangesRequest
{
SinceWall = since.PhysicalTime,
SinceLogic = since.LogicalCounter,
// Empty SinceNode indicates a global pull (not source-node filtered).
SinceNode = string.Empty
};
if (collections != null)
foreach (string coll in collections)
req.Collections.Add(coll);
await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState,
token);
(var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response");
var res = ChangeSetResponse.Parser.ParseFrom(payload);
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize<JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash // Pass the received hash to preserve integrity reference
)).ToList();
}
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified timestamp.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since,
CancellationToken token)
{
return await PullChangesFromNodeAsync(nodeId, since, null, token);
}
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified timestamp, filtered by
/// collections.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since,
IEnumerable<string>? collections, CancellationToken token)
{
var req = new PullChangesRequest
{
SinceNode = nodeId,
SinceWall = since.PhysicalTime,
SinceLogic = since.LogicalCounter
};
if (collections != null)
foreach (string coll in collections)
req.Collections.Add(coll);
await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState,
token);
(var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response");
var res = ChangeSetResponse.Parser.ParseFrom(payload);
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize<JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash
)).ToList();
}
/// <summary>
/// Retrieves a range of oplog entries connecting two hashes (Gap Recovery).
/// </summary>
/// <param name="startHash">The starting hash in the chain.</param>
/// <param name="endHash">The ending hash in the chain.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The chain entries connecting the requested hash range.</returns>
public virtual async Task<List<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
CancellationToken token)
{
var req = new GetChainRangeRequest { StartHash = startHash, EndHash = endHash };
await _protocol.SendMessageAsync(_stream!, MessageType.GetChainRangeReq, req, _useCompression, _cipherState,
token);
(var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChainRangeRes) throw new Exception($"Unexpected response for ChainRange: {type}");
var res = ChainRangeResponse.Parser.ParseFrom(payload);
if (res.SnapshotRequired) throw new SnapshotRequiredException();
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize<JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash
)).ToList();
}
/// <summary>
/// Pushes local oplog changes to the remote peer.
/// </summary>
/// <param name="entries">The oplog entries to push.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous push operation.</returns>
public async Task PushChangesAsync(IEnumerable<OplogEntry> entries, CancellationToken token)
{
var req = new PushChangesRequest();
var entryList = entries.ToList();
if (entryList.Count == 0) return;
foreach (var e in entryList)
req.Entries.Add(new ProtoOplogEntry
{
Collection = e.Collection,
Key = e.Key,
Operation = e.Operation.ToString(),
JsonData = e.Payload?.GetRawText() ?? "",
HlcWall = e.Timestamp.PhysicalTime,
HlcLogic = e.Timestamp.LogicalCounter,
HlcNode = e.Timestamp.NodeId,
Hash = e.Hash,
PreviousHash = e.PreviousHash
});
await _protocol.SendMessageAsync(_stream!, MessageType.PushChangesReq, req, _useCompression, _cipherState,
token);
(var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.AckRes) throw new Exception("Push failed");
var res = AckResponse.Parser.ParseFrom(payload);
if (res.SnapshotRequired) throw new SnapshotRequiredException();
if (!res.Success) throw new Exception("Push failed");
}
private OperationType ParseOp(string op)
{
return Enum.TryParse<OperationType>(op, out var val) ? val : OperationType.Put;
}
/// <summary>
/// Downloads a full snapshot from the remote peer to the provided destination stream.
/// </summary>
/// <param name="destination">The stream that receives snapshot bytes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous snapshot transfer operation.</returns>
public async Task GetSnapshotAsync(Stream destination, CancellationToken token)
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetSnapshotReq, new GetSnapshotRequest(),
_useCompression, _cipherState, token);
while (true)
{
(var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.SnapshotChunkMsg)
throw new Exception($"Unexpected message type during snapshot: {type}");
var chunk = SnapshotChunk.Parser.ParseFrom(payload);
if (chunk.Data.Length > 0)
await destination.WriteAsync(chunk.Data.ToByteArray(), 0, chunk.Data.Length, token);
if (chunk.IsLast) break;
}
}
} }
public class SnapshotRequiredException : Exception public class SnapshotRequiredException : Exception
{ {
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="SnapshotRequiredException"/> class. /// Initializes a new instance of the <see cref="SnapshotRequiredException" /> class.
/// </summary> /// </summary>
public SnapshotRequiredException() : base("Peer requires a full snapshot sync.") { } public SnapshotRequiredException() : base("Peer requires a full snapshot sync.")
{
}
} }

View File

@@ -1,22 +1,16 @@
using System.Net;
using System.Net.Sockets;
using System.Text.Json;
using Google.Protobuf;
using Microsoft.Extensions.Logging;
using Serilog.Context;
using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Proto; using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Protocol; using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry; using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Google.Protobuf;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using Serilog.Context;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -25,35 +19,39 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary> /// </summary>
internal class TcpSyncServer : ISyncServer internal class TcpSyncServer : ISyncServer
{ {
private readonly IOplogStore _oplogStore;
private readonly IDocumentStore _documentStore;
private readonly ISnapshotService _snapshotStore;
private readonly ILogger<TcpSyncServer> _logger;
private readonly IPeerNodeConfigurationProvider _configProvider;
private CancellationTokenSource? _cts;
private TcpListener? _listener;
private readonly object _startStopLock = new object();
private int _activeConnections = 0;
internal int MaxConnections = 100;
private const int ClientOperationTimeoutMs = 60000; private const int ClientOperationTimeoutMs = 60000;
private readonly IAuthenticator _authenticator; private readonly IAuthenticator _authenticator;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly IDocumentStore _documentStore;
private readonly IPeerHandshakeService _handshakeService; private readonly IPeerHandshakeService _handshakeService;
private readonly ILogger<TcpSyncServer> _logger;
private readonly IOplogStore _oplogStore;
private readonly ISnapshotService _snapshotStore;
private readonly object _startStopLock = new();
private readonly INetworkTelemetryService? _telemetry; private readonly INetworkTelemetryService? _telemetry;
private int _activeConnections;
private CancellationTokenSource? _cts;
private TcpListener? _listener;
internal int MaxConnections = 100;
/// <summary> /// <summary>
/// Initializes a new instance of the TcpSyncServer class with the specified peer oplogStore, configuration provider, /// Initializes a new instance of the TcpSyncServer class with the specified peer oplogStore, configuration provider,
/// logger, and authenticator. /// logger, and authenticator.
/// </summary> /// </summary>
/// <remarks>The server automatically restarts when the configuration provided by /// <remarks>
/// The server automatically restarts when the configuration provided by
/// peerNodeConfigurationProvider changes. This ensures that configuration updates are applied without requiring /// peerNodeConfigurationProvider changes. This ensures that configuration updates are applied without requiring
/// manual intervention.</remarks> /// manual intervention.
/// </remarks>
/// <param name="oplogStore">The peer oplogStore used to manage and persist peer information for the server.</param> /// <param name="oplogStore">The peer oplogStore used to manage and persist peer information for the server.</param>
/// <param name="documentStore">The document store used to read and apply synchronized documents.</param> /// <param name="documentStore">The document store used to read and apply synchronized documents.</param>
/// <param name="snapshotStore">The snapshot store used to create and manage database snapshots for synchronization.</param> /// <param name="snapshotStore">The snapshot store used to create and manage database snapshots for synchronization.</param>
/// <param name="peerNodeConfigurationProvider">The provider that supplies configuration settings for the peer node and notifies the server of configuration /// <param name="peerNodeConfigurationProvider">
/// changes.</param> /// The provider that supplies configuration settings for the peer node and notifies the server of configuration
/// changes.
/// </param>
/// <param name="logger">The logger used to record informational and error messages for the server instance.</param> /// <param name="logger">The logger used to record informational and error messages for the server instance.</param>
/// <param name="authenticator">The authenticator responsible for validating peer connections to the server.</param> /// <param name="authenticator">The authenticator responsible for validating peer connections to the server.</param>
/// <param name="handshakeService">The service used to perform secure handshake (optional).</param> /// <param name="handshakeService">The service used to perform secure handshake (optional).</param>
@@ -84,11 +82,18 @@ internal class TcpSyncServer : ISyncServer
}; };
} }
/// <summary>
/// Gets the port on which the server is listening.
/// </summary>
public int? ListeningPort => ListeningEndpoint?.Port;
/// <summary> /// <summary>
/// Starts the TCP synchronization server and begins listening for incoming connections asynchronously. /// Starts the TCP synchronization server and begins listening for incoming connections asynchronously.
/// </summary> /// </summary>
/// <remarks>If the server is already running, this method returns immediately without starting a new /// <remarks>
/// listener. The server will listen on the TCP port specified in the current configuration.</remarks> /// If the server is already running, this method returns immediately without starting a new
/// listener. The server will listen on the TCP port specified in the current configuration.
/// </remarks>
/// <returns>A task that represents the asynchronous start operation.</returns> /// <returns>A task that represents the asynchronous start operation.</returns>
public async Task Start() public async Task Start()
{ {
@@ -101,6 +106,7 @@ internal class TcpSyncServer : ISyncServer
_logger.LogWarning("TCP Sync Server already started"); _logger.LogWarning("TCP Sync Server already started");
return; return;
} }
_cts = new CancellationTokenSource(); _cts = new CancellationTokenSource();
} }
@@ -128,9 +134,11 @@ internal class TcpSyncServer : ISyncServer
/// <summary> /// <summary>
/// Stops the listener and cancels any pending operations. /// Stops the listener and cancels any pending operations.
/// </summary> /// </summary>
/// <remarks>After calling this method, the listener will no longer accept new connections or process /// <remarks>
/// After calling this method, the listener will no longer accept new connections or process
/// requests. This method is safe to call multiple times; subsequent calls have no effect if the listener is already /// requests. This method is safe to call multiple times; subsequent calls have no effect if the listener is already
/// stopped.</remarks> /// stopped.
/// </remarks>
/// <returns>A task that represents the asynchronous stop operation.</returns> /// <returns>A task that represents the asynchronous stop operation.</returns>
public async Task Stop() public async Task Stop()
{ {
@@ -174,15 +182,9 @@ internal class TcpSyncServer : ISyncServer
/// </summary> /// </summary>
public IPEndPoint? ListeningEndpoint => _listener?.LocalEndpoint as IPEndPoint; public IPEndPoint? ListeningEndpoint => _listener?.LocalEndpoint as IPEndPoint;
/// <summary>
/// Gets the port on which the server is listening.
/// </summary>
public int? ListeningPort => ListeningEndpoint?.Port;
private async Task ListenAsync(CancellationToken token) private async Task ListenAsync(CancellationToken token)
{ {
while (!token.IsCancellationRequested) while (!token.IsCancellationRequested)
{
try try
{ {
if (_listener == null) break; if (_listener == null) break;
@@ -209,13 +211,15 @@ internal class TcpSyncServer : ISyncServer
} }
}, token); }, token);
} }
catch (ObjectDisposedException) { break; } catch (ObjectDisposedException)
{
break;
}
catch (Exception ex) catch (Exception ex)
{ {
_logger.LogError(ex, "TCP Accept Error"); _logger.LogError(ex, "TCP Accept Error");
} }
} }
}
private async Task HandleClientAsync(TcpClient client, CancellationToken token) private async Task HandleClientAsync(TcpClient client, CancellationToken token)
{ {
@@ -241,14 +245,13 @@ internal class TcpSyncServer : ISyncServer
var protocol = new ProtocolHandler(_logger, _telemetry); var protocol = new ProtocolHandler(_logger, _telemetry);
bool useCompression = false; var useCompression = false;
CipherState? cipherState = null; CipherState? cipherState = null;
List<string> remoteInterests = new(); List<string> remoteInterests = new();
// Perform Secure Handshake (if service is available) // Perform Secure Handshake (if service is available)
var config = await _configProvider.GetConfiguration(); var config = await _configProvider.GetConfiguration();
if (_handshakeService != null) if (_handshakeService != null)
{
try try
{ {
// We are NOT initiator // We are NOT initiator
@@ -261,14 +264,13 @@ internal class TcpSyncServer : ISyncServer
_logger.LogError(ex, "Secure Handshake failed check logic"); _logger.LogError(ex, "Secure Handshake failed check logic");
return; return;
} }
}
while (client.Connected && !token.IsCancellationRequested) while (client.Connected && !token.IsCancellationRequested)
{ {
// Re-fetch config if needed, though usually stable // Re-fetch config if needed, though usually stable
config = await _configProvider.GetConfiguration(); config = await _configProvider.GetConfiguration();
var (type, payload) = await protocol.ReadMessageAsync(stream, cipherState, token); (var type, byte[] payload) = await protocol.ReadMessageAsync(stream, cipherState, token);
if (type == MessageType.Unknown) break; // EOF or Error if (type == MessageType.Unknown) break; // EOF or Error
// Handshake Loop // Handshake Loop
@@ -284,17 +286,17 @@ internal class TcpSyncServer : ISyncServer
if (!valid) if (!valid)
{ {
_logger.LogWarning("Authentication failed for Node {NodeId}", hReq.NodeId); _logger.LogWarning("Authentication failed for Node {NodeId}", hReq.NodeId);
await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, new HandshakeResponse { NodeId = config.NodeId, Accepted = false }, false, cipherState, token); await protocol.SendMessageAsync(stream, MessageType.HandshakeRes,
new HandshakeResponse { NodeId = config.NodeId, Accepted = false }, false, cipherState,
token);
return; return;
} }
var hRes = new HandshakeResponse { NodeId = config.NodeId, Accepted = true }; var hRes = new HandshakeResponse { NodeId = config.NodeId, Accepted = true };
// Include local interests from IDocumentStore in response for push filtering // Include local interests from IDocumentStore in response for push filtering
foreach (var coll in _documentStore.InterestedCollection) foreach (string coll in _documentStore.InterestedCollection)
{
hRes.InterestingCollections.Add(coll); hRes.InterestingCollections.Add(coll);
}
if (CompressionHelper.IsBrotliSupported && hReq.SupportedCompression.Contains("brotli")) if (CompressionHelper.IsBrotliSupported && hReq.SupportedCompression.Contains("brotli"))
{ {
@@ -302,12 +304,13 @@ internal class TcpSyncServer : ISyncServer
useCompression = true; useCompression = true;
} }
await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, hRes, false, cipherState, token); await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, hRes, false, cipherState,
token);
continue; continue;
} }
IMessage? response = null; IMessage? response = null;
MessageType resType = MessageType.Unknown; var resType = MessageType.Unknown;
switch (type) switch (type)
{ {
@@ -325,7 +328,7 @@ internal class TcpSyncServer : ISyncServer
case MessageType.GetVectorClockReq: case MessageType.GetVectorClockReq:
var vectorClock = await _oplogStore.GetVectorClockAsync(token); var vectorClock = await _oplogStore.GetVectorClockAsync(token);
var vcRes = new VectorClockResponse(); var vcRes = new VectorClockResponse();
foreach (var nodeId in vectorClock.NodeIds) foreach (string nodeId in vectorClock.NodeIds)
{ {
var ts = vectorClock.GetTimestamp(nodeId); var ts = vectorClock.GetTimestamp(nodeId);
vcRes.Entries.Add(new VectorClockEntry vcRes.Entries.Add(new VectorClockEntry
@@ -335,6 +338,7 @@ internal class TcpSyncServer : ISyncServer
HlcLogic = ts.LogicalCounter HlcLogic = ts.LogicalCounter
}); });
} }
response = vcRes; response = vcRes;
resType = MessageType.VectorClockRes; resType = MessageType.VectorClockRes;
break; break;
@@ -351,7 +355,6 @@ internal class TcpSyncServer : ISyncServer
var csRes = new ChangeSetResponse(); var csRes = new ChangeSetResponse();
foreach (var e in oplog) foreach (var e in oplog)
{
csRes.Entries.Add(new ProtoOplogEntry csRes.Entries.Add(new ProtoOplogEntry
{ {
Collection = e.Collection, Collection = e.Collection,
@@ -364,7 +367,6 @@ internal class TcpSyncServer : ISyncServer
Hash = e.Hash, Hash = e.Hash,
PreviousHash = e.PreviousHash PreviousHash = e.PreviousHash
}); });
}
response = csRes; response = csRes;
resType = MessageType.ChangeSetRes; resType = MessageType.ChangeSetRes;
break; break;
@@ -375,7 +377,9 @@ internal class TcpSyncServer : ISyncServer
e.Collection, e.Collection,
e.Key, e.Key,
(OperationType)Enum.Parse(typeof(OperationType), e.Operation), (OperationType)Enum.Parse(typeof(OperationType), e.Operation),
string.IsNullOrEmpty(e.JsonData) ? (System.Text.Json.JsonElement?)null : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData), string.IsNullOrEmpty(e.JsonData)
? null
: JsonSerializer.Deserialize<JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode), new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash, // Restore PreviousHash e.PreviousHash, // Restore PreviousHash
e.Hash // Restore Hash e.Hash // Restore Hash
@@ -389,18 +393,15 @@ internal class TcpSyncServer : ISyncServer
case MessageType.GetChainRangeReq: case MessageType.GetChainRangeReq:
var rangeReq = GetChainRangeRequest.Parser.ParseFrom(payload); var rangeReq = GetChainRangeRequest.Parser.ParseFrom(payload);
var rangeEntries = await _oplogStore.GetChainRangeAsync(rangeReq.StartHash, rangeReq.EndHash, token); var rangeEntries =
await _oplogStore.GetChainRangeAsync(rangeReq.StartHash, rangeReq.EndHash, token);
var rangeRes = new ChainRangeResponse(); var rangeRes = new ChainRangeResponse();
if (!rangeEntries.Any() && rangeReq.StartHash != rangeReq.EndHash) if (!rangeEntries.Any() && rangeReq.StartHash != rangeReq.EndHash)
{
// Gap cannot be filled (likely pruned or unknown branch) // Gap cannot be filled (likely pruned or unknown branch)
rangeRes.SnapshotRequired = true; rangeRes.SnapshotRequired = true;
}
else else
{
foreach (var e in rangeEntries) foreach (var e in rangeEntries)
{
rangeRes.Entries.Add(new ProtoOplogEntry rangeRes.Entries.Add(new ProtoOplogEntry
{ {
Collection = e.Collection, Collection = e.Collection,
@@ -413,15 +414,14 @@ internal class TcpSyncServer : ISyncServer
Hash = e.Hash, Hash = e.Hash,
PreviousHash = e.PreviousHash PreviousHash = e.PreviousHash
}); });
}
}
response = rangeRes; response = rangeRes;
resType = MessageType.ChainRangeRes; resType = MessageType.ChainRangeRes;
break; break;
case MessageType.GetSnapshotReq: case MessageType.GetSnapshotReq:
_logger.LogInformation("Processing GetSnapshotReq from {Endpoint}", remoteEp); _logger.LogInformation("Processing GetSnapshotReq from {Endpoint}", remoteEp);
var tempFile = Path.GetTempFileName(); string tempFile = Path.GetTempFileName();
try try
{ {
// Create backup // Create backup
@@ -432,7 +432,7 @@ internal class TcpSyncServer : ISyncServer
using (var fs = File.OpenRead(tempFile)) using (var fs = File.OpenRead(tempFile))
{ {
byte[] buffer = new byte[80 * 1024]; // 80KB chunks var buffer = new byte[80 * 1024]; // 80KB chunks
int bytesRead; int bytesRead;
while ((bytesRead = await fs.ReadAsync(buffer, 0, buffer.Length, token)) > 0) while ((bytesRead = await fs.ReadAsync(buffer, 0, buffer.Length, token)) > 0)
{ {
@@ -441,27 +441,28 @@ internal class TcpSyncServer : ISyncServer
Data = ByteString.CopyFrom(buffer, 0, bytesRead), Data = ByteString.CopyFrom(buffer, 0, bytesRead),
IsLast = false IsLast = false
}; };
await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, chunk, false, cipherState, token); await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, chunk,
false, cipherState, token);
} }
// Send End of Snapshot // Send End of Snapshot
await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, new SnapshotChunk { IsLast = true }, false, cipherState, token); await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg,
new SnapshotChunk { IsLast = true }, false, cipherState, token);
} }
} }
finally finally
{ {
if (File.Exists(tempFile)) File.Delete(tempFile); if (File.Exists(tempFile)) File.Delete(tempFile);
} }
break; break;
} }
if (response != null) if (response != null)
{
await protocol.SendMessageAsync(stream, resType, response, useCompression, cipherState, token); await protocol.SendMessageAsync(stream, resType, response, useCompression, cipherState, token);
} }
} }
} }
}
catch (Exception ex) catch (Exception ex)
{ {
_logger.LogWarning("Client Handler Error from {Endpoint}: {Message}", remoteEp, ex.Message); _logger.LogWarning("Client Handler Error from {Endpoint}: {Message}", remoteEp, ex.Message);

View File

@@ -1,4 +1,3 @@
using System;
using System.Diagnostics; using System.Diagnostics;
namespace ZB.MOM.WW.CBDDC.Network.Telemetry; namespace ZB.MOM.WW.CBDDC.Network.Telemetry;
@@ -23,7 +22,7 @@ public interface INetworkTelemetryService
/// Gets a snapshot of all recorded metric values. /// Gets a snapshot of all recorded metric values.
/// </summary> /// </summary>
/// <returns>A dictionary of metric values grouped by metric type and bucket.</returns> /// <returns>A dictionary of metric values grouped by metric type and bucket.</returns>
System.Collections.Generic.Dictionary<MetricType, System.Collections.Generic.Dictionary<int, double>> GetSnapshot(); Dictionary<MetricType, Dictionary<int, double>> GetSnapshot();
} }
public readonly struct MetricTimer : IDisposable public readonly struct MetricTimer : IDisposable
@@ -49,7 +48,7 @@ public readonly struct MetricTimer : IDisposable
/// </summary> /// </summary>
public void Dispose() public void Dispose()
{ {
var elapsed = Stopwatch.GetTimestamp() - _startTimestamp; long elapsed = Stopwatch.GetTimestamp() - _startTimestamp;
// Convert ticks to milliseconds? Or keep as ticks? // Convert ticks to milliseconds? Or keep as ticks?
// Plan said "latency", usually ms. // Plan said "latency", usually ms.
// Stopwatch.Frequency depends on hardware. // Stopwatch.Frequency depends on hardware.

View File

@@ -1,37 +1,28 @@
using System;
using System.Buffers;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Channels; using System.Threading.Channels;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
namespace ZB.MOM.WW.CBDDC.Network.Telemetry; namespace ZB.MOM.WW.CBDDC.Network.Telemetry;
public class NetworkTelemetryService : INetworkTelemetryService, IDisposable public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{ {
private readonly Channel<(MetricType Type, double Value)> _metricChannel;
private readonly CancellationTokenSource _cts;
private readonly ILogger<NetworkTelemetryService> _logger;
private readonly string _persistencePath;
// Aggregation State // Aggregation State
// We keep 30m of history with 1s resolution = 1800 buckets. // We keep 30m of history with 1s resolution = 1800 buckets.
private const int MaxHistorySeconds = 1800; private const int MaxHistorySeconds = 1800;
private readonly object _lock = new object();
private readonly MetricBucket[] _history;
private int _headIndex = 0; // Points to current second
private long _currentSecondTimestamp; // Unix timestamp of current bucket
// Rolling Averages (Last calculated) // Rolling Averages (Last calculated)
private readonly Dictionary<string, double> _averages = new Dictionary<string, double>(); private readonly Dictionary<string, double> _averages = new();
private readonly CancellationTokenSource _cts;
private readonly MetricBucket[] _history;
private readonly object _lock = new();
private readonly ILogger<NetworkTelemetryService> _logger;
private readonly Channel<(MetricType Type, double Value)> _metricChannel;
private readonly string _persistencePath;
private long _currentSecondTimestamp; // Unix timestamp of current bucket
private int _headIndex; // Points to current second
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="NetworkTelemetryService"/> class. /// Initializes a new instance of the <see cref="NetworkTelemetryService" /> class.
/// </summary> /// </summary>
/// <param name="logger">The logger used to report telemetry processing and persistence errors.</param> /// <param name="logger">The logger used to report telemetry processing and persistence errors.</param>
/// <param name="persistencePath">The file path where persisted telemetry snapshots are written.</param> /// <param name="persistencePath">The file path where persisted telemetry snapshots are written.</param>
@@ -47,7 +38,7 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
_cts = new CancellationTokenSource(); _cts = new CancellationTokenSource();
_history = new MetricBucket[MaxHistorySeconds]; _history = new MetricBucket[MaxHistorySeconds];
for (int i = 0; i < MaxHistorySeconds; i++) _history[i] = new MetricBucket(); for (var i = 0; i < MaxHistorySeconds; i++) _history[i] = new MetricBucket();
_currentSecondTimestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(); _currentSecondTimestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
@@ -55,6 +46,15 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
_ = Task.Run(PersistenceLoop); _ = Task.Run(PersistenceLoop);
} }
/// <summary>
/// Releases resources used by the telemetry service.
/// </summary>
public void Dispose()
{
_cts.Cancel();
_cts.Dispose();
}
/// <summary> /// <summary>
/// Records a metric value for the specified metric type. /// Records a metric value for the specified metric type.
/// </summary> /// </summary>
@@ -89,13 +89,11 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
foreach (var type in Enum.GetValues(typeof(MetricType)).Cast<MetricType>()) foreach (var type in Enum.GetValues(typeof(MetricType)).Cast<MetricType>())
{ {
var typeDict = new Dictionary<int, double>(); var typeDict = new Dictionary<int, double>();
foreach (var w in windows) foreach (int w in windows) typeDict[w] = CalculateAverage(type, w);
{
typeDict[w] = CalculateAverage(type, w);
}
snapshot[type] = typeDict; snapshot[type] = typeDict;
} }
} }
return snapshot; return snapshot;
} }
@@ -103,24 +101,21 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{ {
var reader = _metricChannel.Reader; var reader = _metricChannel.Reader;
while (!_cts.IsCancellationRequested) while (!_cts.IsCancellationRequested)
{
try try
{ {
if (await reader.WaitToReadAsync(_cts.Token)) if (await reader.WaitToReadAsync(_cts.Token))
{
while (reader.TryRead(out var item)) while (reader.TryRead(out var item))
{
AddMetricToCurrentBucket(item.Type, item.Value); AddMetricToCurrentBucket(item.Type, item.Value);
} }
catch (OperationCanceledException)
{
break;
} }
}
catch (OperationCanceledException) { break; }
catch (Exception ex) catch (Exception ex)
{ {
_logger.LogError(ex, "Error processing metrics"); _logger.LogError(ex, "Error processing metrics");
} }
} }
}
private void AddMetricToCurrentBucket(MetricType type, double value) private void AddMetricToCurrentBucket(MetricType type, double value)
{ {
@@ -133,11 +128,12 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{ {
long diff = now - _currentSecondTimestamp; long diff = now - _currentSecondTimestamp;
// Move head forward, clearing buckets in between if gap > 1s // Move head forward, clearing buckets in between if gap > 1s
for (int i = 0; i < diff && i < MaxHistorySeconds; i++) for (var i = 0; i < diff && i < MaxHistorySeconds; i++)
{ {
_headIndex = (_headIndex + 1) % MaxHistorySeconds; _headIndex = (_headIndex + 1) % MaxHistorySeconds;
_history[_headIndex].Reset(); _history[_headIndex].Reset();
} }
_currentSecondTimestamp = now; _currentSecondTimestamp = now;
} }
@@ -148,19 +144,20 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
private async Task PersistenceLoop() private async Task PersistenceLoop()
{ {
while (!_cts.IsCancellationRequested) while (!_cts.IsCancellationRequested)
{
try try
{ {
await Task.Delay(TimeSpan.FromMinutes(1), _cts.Token); await Task.Delay(TimeSpan.FromMinutes(1), _cts.Token);
CalculateAndPersist(); CalculateAndPersist();
} }
catch (OperationCanceledException) { break; } catch (OperationCanceledException)
{
break;
}
catch (Exception ex) catch (Exception ex)
{ {
_logger.LogError(ex, "Error persisting metrics"); _logger.LogError(ex, "Error persisting metrics");
} }
} }
}
private void CalculateAndPersist() private void CalculateAndPersist()
{ {
@@ -179,7 +176,7 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
foreach (var type in Enum.GetValues(typeof(MetricType)).Cast<MetricType>()) foreach (var type in Enum.GetValues(typeof(MetricType)).Cast<MetricType>())
{ {
bw.Write((int)type); bw.Write((int)type);
foreach (var w in windows) foreach (int w in windows)
{ {
double avg = CalculateAverage(type, w); double avg = CalculateAverage(type, w);
bw.Write(w); // Window Seconds bw.Write(w); // Window Seconds
@@ -201,8 +198,8 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{ {
// Go backwards from head // Go backwards from head
double sum = 0; double sum = 0;
int count = 0; var count = 0;
int scanned = 0; var scanned = 0;
int idx = _headIndex; int idx = _headIndex;
@@ -219,30 +216,22 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
return count == 0 ? 0 : sum / count; return count == 0 ? 0 : sum / count;
} }
/// <summary>
/// Releases resources used by the telemetry service.
/// </summary>
public void Dispose()
{
_cts.Cancel();
_cts.Dispose();
}
} }
internal class MetricBucket internal class MetricBucket
{ {
private readonly int[] _counts;
// Simple lock-free or locked accumulation? Global lock handles it for now. // Simple lock-free or locked accumulation? Global lock handles it for now.
// Storing Sum and Count for each type // Storing Sum and Count for each type
private readonly double[] _sums; private readonly double[] _sums;
private readonly int[] _counts;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="MetricBucket"/> class. /// Initializes a new instance of the <see cref="MetricBucket" /> class.
/// </summary> /// </summary>
public MetricBucket() public MetricBucket()
{ {
var typeCount = Enum.GetValues(typeof(MetricType)).Length; int typeCount = Enum.GetValues(typeof(MetricType)).Length;
_sums = new double[typeCount]; _sums = new double[typeCount];
_counts = new int[typeCount]; _counts = new int[typeCount];
} }
@@ -263,7 +252,7 @@ internal class MetricBucket
/// <param name="value">The value to accumulate.</param> /// <param name="value">The value to accumulate.</param>
public void Add(MetricType type, double value) public void Add(MetricType type, double value)
{ {
int idx = (int)type; var idx = (int)type;
_sums[idx] += value; _sums[idx] += value;
_counts[idx]++; _counts[idx]++;
} }
@@ -273,11 +262,18 @@ internal class MetricBucket
/// </summary> /// </summary>
/// <param name="type">The metric category to read.</param> /// <param name="type">The metric category to read.</param>
/// <returns>The accumulated sum for the specified metric type.</returns> /// <returns>The accumulated sum for the specified metric type.</returns>
public double GetSum(MetricType type) => _sums[(int)type]; public double GetSum(MetricType type)
{
return _sums[(int)type];
}
/// <summary> /// <summary>
/// Gets the accumulated count for a metric type. /// Gets the accumulated count for a metric type.
/// </summary> /// </summary>
/// <param name="type">The metric category to read.</param> /// <param name="type">The metric category to read.</param>
/// <returns>The accumulated sample count for the specified metric type.</returns> /// <returns>The accumulated sample count for the specified metric type.</returns>
public int GetCount(MetricType type) => _counts[(int)type]; public int GetCount(MetricType type)
{
return _counts[(int)type];
}
} }

View File

@@ -1,17 +1,13 @@
using System;
using System.Collections.Concurrent; using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Net; using System.Net;
using System.Net.Sockets; using System.Net.Sockets;
using System.Security.Cryptography;
using System.Text; using System.Text;
using System.Text.Json; using System.Text.Json;
using System.Threading; using System.Text.Json.Serialization;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network; namespace ZB.MOM.WW.CBDDC.Network;
@@ -22,15 +18,15 @@ namespace ZB.MOM.WW.CBDDC.Network;
internal class UdpDiscoveryService : IDiscoveryService internal class UdpDiscoveryService : IDiscoveryService
{ {
private const int DiscoveryPort = 25000; private const int DiscoveryPort = 25000;
private readonly ILogger<UdpDiscoveryService> _logger; private readonly ConcurrentDictionary<string, PeerNode> _activePeers = new();
private readonly IPeerNodeConfigurationProvider _configProvider; private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly IDocumentStore _documentStore; private readonly IDocumentStore _documentStore;
private readonly ILogger<UdpDiscoveryService> _logger;
private readonly object _startStopLock = new();
private CancellationTokenSource? _cts; private CancellationTokenSource? _cts;
private readonly ConcurrentDictionary<string, PeerNode> _activePeers = new();
private readonly object _startStopLock = new object();
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="UdpDiscoveryService"/> class. /// Initializes a new instance of the <see cref="UdpDiscoveryService" /> class.
/// </summary> /// </summary>
/// <param name="peerNodeConfigurationProvider">Provider for peer node configuration.</param> /// <param name="peerNodeConfigurationProvider">Provider for peer node configuration.</param>
/// <param name="documentStore">Document store used to obtain collection interests.</param> /// <param name="documentStore">Document store used to obtain collection interests.</param>
@@ -40,7 +36,8 @@ internal class UdpDiscoveryService : IDiscoveryService
IDocumentStore documentStore, IDocumentStore documentStore,
ILogger<UdpDiscoveryService> logger) ILogger<UdpDiscoveryService> logger)
{ {
_configProvider = peerNodeConfigurationProvider ?? throw new ArgumentNullException(nameof(peerNodeConfigurationProvider)); _configProvider = peerNodeConfigurationProvider ??
throw new ArgumentNullException(nameof(peerNodeConfigurationProvider));
_documentStore = documentStore ?? throw new ArgumentNullException(nameof(documentStore)); _documentStore = documentStore ?? throw new ArgumentNullException(nameof(documentStore));
_logger = logger; _logger = logger;
} }
@@ -57,6 +54,7 @@ internal class UdpDiscoveryService : IDiscoveryService
_logger.LogWarning("UDP Discovery Service already started"); _logger.LogWarning("UDP Discovery Service already started");
return; return;
} }
_cts = new CancellationTokenSource(); _cts = new CancellationTokenSource();
} }
@@ -101,55 +99,6 @@ internal class UdpDiscoveryService : IDiscoveryService
await Task.CompletedTask; await Task.CompletedTask;
} }
// ... Stop ...
private async Task CleanupAsync(CancellationToken token)
{
while (!token.IsCancellationRequested)
{
try
{
await Task.Delay(10000, token); // Check every 10s
var now = DateTimeOffset.UtcNow;
var expired = new List<string>();
foreach (var pair in _activePeers)
{
// Expiry: 15 seconds (broadcast is every 5s, so 3 missed beats = dead)
if ((now - pair.Value.LastSeen).TotalSeconds > 15)
{
expired.Add(pair.Key);
}
}
foreach (var id in expired)
{
if (_activePeers.TryRemove(id, out var removed))
{
_logger.LogInformation("Peer Expired: {NodeId} at {Endpoint}", removed.NodeId, removed.Address);
}
}
}
catch (OperationCanceledException) { break; }
catch (Exception ex)
{
_logger.LogError(ex, "Cleanup Loop Error");
}
}
}
// ... Listen ...
private void HandleBeacon(DiscoveryBeacon beacon, IPAddress address)
{
var peerId = beacon.NodeId;
var endpoint = $"{address}:{beacon.TcpPort}";
var peer = new PeerNode(peerId, endpoint, DateTimeOffset.UtcNow, interestingCollections: beacon.InterestingCollections);
_activePeers.AddOrUpdate(peerId, peer, (key, old) => peer);
}
/// <summary> /// <summary>
/// Stops the discovery service. /// Stops the discovery service.
/// </summary> /// </summary>
@@ -190,7 +139,53 @@ internal class UdpDiscoveryService : IDiscoveryService
/// Gets the currently active peers discovered on the network. /// Gets the currently active peers discovered on the network.
/// </summary> /// </summary>
/// <returns>The collection of active peers.</returns> /// <returns>The collection of active peers.</returns>
public IEnumerable<PeerNode> GetActivePeers() => _activePeers.Values; public IEnumerable<PeerNode> GetActivePeers()
{
return _activePeers.Values;
}
// ... Stop ...
private async Task CleanupAsync(CancellationToken token)
{
while (!token.IsCancellationRequested)
try
{
await Task.Delay(10000, token); // Check every 10s
var now = DateTimeOffset.UtcNow;
var expired = new List<string>();
foreach (var pair in _activePeers)
// Expiry: 15 seconds (broadcast is every 5s, so 3 missed beats = dead)
if ((now - pair.Value.LastSeen).TotalSeconds > 15)
expired.Add(pair.Key);
foreach (string id in expired)
if (_activePeers.TryRemove(id, out var removed))
_logger.LogInformation("Peer Expired: {NodeId} at {Endpoint}", removed.NodeId, removed.Address);
}
catch (OperationCanceledException)
{
break;
}
catch (Exception ex)
{
_logger.LogError(ex, "Cleanup Loop Error");
}
}
// ... Listen ...
private void HandleBeacon(DiscoveryBeacon beacon, IPAddress address)
{
string peerId = beacon.NodeId;
var endpoint = $"{address}:{beacon.TcpPort}";
var peer = new PeerNode(peerId, endpoint, DateTimeOffset.UtcNow,
interestingCollections: beacon.InterestingCollections);
_activePeers.AddOrUpdate(peerId, peer, (key, old) => peer);
}
private async Task ListenAsync(CancellationToken token) private async Task ListenAsync(CancellationToken token)
{ {
@@ -201,17 +196,16 @@ internal class UdpDiscoveryService : IDiscoveryService
_logger.LogInformation("UDP Discovery Listening on port {Port}", DiscoveryPort); _logger.LogInformation("UDP Discovery Listening on port {Port}", DiscoveryPort);
while (!token.IsCancellationRequested) while (!token.IsCancellationRequested)
{
try try
{ {
var result = await udp.ReceiveAsync(); var result = await udp.ReceiveAsync();
var json = Encoding.UTF8.GetString(result.Buffer); string json = Encoding.UTF8.GetString(result.Buffer);
try try
{ {
var config = await _configProvider.GetConfiguration(); var config = await _configProvider.GetConfiguration();
var _nodeId = config.NodeId; string _nodeId = config.NodeId;
var localClusterHash = ComputeClusterHash(config.AuthToken); string localClusterHash = ComputeClusterHash(config.AuthToken);
var beacon = JsonSerializer.Deserialize<DiscoveryBeacon>(json); var beacon = JsonSerializer.Deserialize<DiscoveryBeacon>(json);
@@ -219,10 +213,8 @@ internal class UdpDiscoveryService : IDiscoveryService
{ {
// Filter by ClusterHash to reduce congestion from different clusters // Filter by ClusterHash to reduce congestion from different clusters
if (!string.Equals(beacon.ClusterHash, localClusterHash, StringComparison.Ordinal)) if (!string.Equals(beacon.ClusterHash, localClusterHash, StringComparison.Ordinal))
{
// Optional: Log trace if needed, but keeping it silent avoids flooding logs during congestion // Optional: Log trace if needed, but keeping it silent avoids flooding logs during congestion
continue; continue;
}
HandleBeacon(beacon, result.RemoteEndPoint.Address); HandleBeacon(beacon, result.RemoteEndPoint.Address);
} }
@@ -232,13 +224,15 @@ internal class UdpDiscoveryService : IDiscoveryService
_logger.LogWarning(ex, "Failed to parse beacon from {Address}", result.RemoteEndPoint.Address); _logger.LogWarning(ex, "Failed to parse beacon from {Address}", result.RemoteEndPoint.Address);
} }
} }
catch (ObjectDisposedException) { break; } catch (ObjectDisposedException)
{
break;
}
catch (Exception ex) catch (Exception ex)
{ {
_logger.LogError(ex, "UDP Listener Error"); _logger.LogError(ex, "UDP Listener Error");
} }
} }
}
private async Task BroadcastAsync(CancellationToken token) private async Task BroadcastAsync(CancellationToken token)
{ {
@@ -262,8 +256,8 @@ internal class UdpDiscoveryService : IDiscoveryService
InterestingCollections = _documentStore.InterestedCollection.ToList() InterestingCollections = _documentStore.InterestedCollection.ToList()
}; };
var json = JsonSerializer.Serialize(beacon); string json = JsonSerializer.Serialize(beacon);
var bytes = Encoding.UTF8.GetBytes(json); byte[] bytes = Encoding.UTF8.GetBytes(json);
await udp.SendAsync(bytes, bytes.Length, endpoint); await udp.SendAsync(bytes, bytes.Length, endpoint);
} }
@@ -279,39 +273,38 @@ internal class UdpDiscoveryService : IDiscoveryService
private string ComputeClusterHash(string authToken) private string ComputeClusterHash(string authToken)
{ {
if (string.IsNullOrEmpty(authToken)) return ""; if (string.IsNullOrEmpty(authToken)) return "";
using var sha256 = System.Security.Cryptography.SHA256.Create(); using var sha256 = SHA256.Create();
var bytes = Encoding.UTF8.GetBytes(authToken); byte[] bytes = Encoding.UTF8.GetBytes(authToken);
var hash = sha256.ComputeHash(bytes); byte[] hash = sha256.ComputeHash(bytes);
// Return first 8 chars (4 bytes hex) is enough for filtering // Return first 8 chars (4 bytes hex) is enough for filtering
return BitConverter.ToString(hash).Replace("-", "").Substring(0, 8); return BitConverter.ToString(hash).Replace("-", "").Substring(0, 8);
} }
private class DiscoveryBeacon private class DiscoveryBeacon
{ {
/// <summary> /// <summary>
/// Gets or sets the broadcasting node identifier. /// Gets or sets the broadcasting node identifier.
/// </summary> /// </summary>
[System.Text.Json.Serialization.JsonPropertyName("node_id")] [JsonPropertyName("node_id")]
public string NodeId { get; set; } = ""; public string NodeId { get; set; } = "";
/// <summary> /// <summary>
/// Gets or sets the TCP port used by the broadcasting node. /// Gets or sets the TCP port used by the broadcasting node.
/// </summary> /// </summary>
[System.Text.Json.Serialization.JsonPropertyName("tcp_port")] [JsonPropertyName("tcp_port")]
public int TcpPort { get; set; } public int TcpPort { get; set; }
/// <summary> /// <summary>
/// Gets or sets the cluster hash used for discovery filtering. /// Gets or sets the cluster hash used for discovery filtering.
/// </summary> /// </summary>
[System.Text.Json.Serialization.JsonPropertyName("cluster_hash")] [JsonPropertyName("cluster_hash")]
public string ClusterHash { get; set; } = ""; public string ClusterHash { get; set; } = "";
/// <summary> /// <summary>
/// Gets or sets the collections the node is interested in. /// Gets or sets the collections the node is interested in.
/// </summary> /// </summary>
[System.Text.Json.Serialization.JsonPropertyName("interests")] [JsonPropertyName("interests")]
public List<string> InterestingCollections { get; set; } = new(); public List<string> InterestingCollections { get; set; } = new();
} }
} }

View File

@@ -1,23 +1,23 @@
<Project Sdk="Microsoft.NET.Sdk"> <Project Sdk="Microsoft.NET.Sdk">
<ItemGroup> <ItemGroup>
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" /> <ProjectReference Include="..\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<PackageReference Include="Google.Protobuf" Version="3.25.1" /> <PackageReference Include="Google.Protobuf" Version="3.25.1"/>
<PackageReference Include="Grpc.Tools" Version="2.76.0"> <PackageReference Include="Grpc.Tools" Version="2.76.0">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets> <IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets> <PrivateAssets>all</PrivateAssets>
</PackageReference> </PackageReference>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="8.0.0" /> <PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0" /> <PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0" /> <PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
<PackageReference Include="Serilog" Version="4.2.0" /> <PackageReference Include="Serilog" Version="4.2.0"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<Protobuf Include="sync.proto" GrpcServices="None" /> <Protobuf Include="sync.proto" GrpcServices="None"/>
</ItemGroup> </ItemGroup>
<PropertyGroup> <PropertyGroup>
@@ -40,7 +40,7 @@
</PropertyGroup> </PropertyGroup>
<ItemGroup> <ItemGroup>
<None Include="README.md" Pack="true" PackagePath="\" /> <None Include="README.md" Pack="true" PackagePath="\"/>
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>

View File

@@ -1,8 +1,8 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -16,18 +16,20 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
private readonly ILogger<BLiteDocumentMetadataStore<TDbContext>> _logger; private readonly ILogger<BLiteDocumentMetadataStore<TDbContext>> _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="BLiteDocumentMetadataStore{TDbContext}"/> class. /// Initializes a new instance of the <see cref="BLiteDocumentMetadataStore{TDbContext}" /> class.
/// </summary> /// </summary>
/// <param name="context">The BLite document database context.</param> /// <param name="context">The BLite document database context.</param>
/// <param name="logger">The optional logger instance.</param> /// <param name="logger">The optional logger instance.</param>
public BLiteDocumentMetadataStore(TDbContext context, ILogger<BLiteDocumentMetadataStore<TDbContext>>? logger = null) public BLiteDocumentMetadataStore(TDbContext context,
ILogger<BLiteDocumentMetadataStore<TDbContext>>? logger = null)
{ {
_context = context ?? throw new ArgumentNullException(nameof(context)); _context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLiteDocumentMetadataStore<TDbContext>>.Instance; _logger = logger ?? NullLogger<BLiteDocumentMetadataStore<TDbContext>>.Instance;
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default) public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
CancellationToken cancellationToken = default)
{ {
var entity = _context.DocumentMetadatas var entity = _context.DocumentMetadatas
.Find(m => m.Collection == collection && m.Key == key) .Find(m => m.Collection == collection && m.Key == key)
@@ -37,7 +39,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default) public override async Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
CancellationToken cancellationToken = default)
{ {
return _context.DocumentMetadatas return _context.DocumentMetadatas
.Find(m => m.Collection == collection) .Find(m => m.Collection == collection)
@@ -46,7 +49,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default) public override async Task UpsertMetadataAsync(DocumentMetadata metadata,
CancellationToken cancellationToken = default)
{ {
var existing = _context.DocumentMetadatas var existing = _context.DocumentMetadatas
.Find(m => m.Collection == metadata.Collection && m.Key == metadata.Key) .Find(m => m.Collection == metadata.Collection && m.Key == metadata.Key)
@@ -69,7 +73,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas, CancellationToken cancellationToken = default) public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
CancellationToken cancellationToken = default)
{ {
foreach (var metadata in metadatas) foreach (var metadata in metadatas)
{ {
@@ -95,7 +100,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default) public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
CancellationToken cancellationToken = default)
{ {
var existing = _context.DocumentMetadatas var existing = _context.DocumentMetadatas
.Find(m => m.Collection == collection && m.Key == key) .Find(m => m.Collection == collection && m.Key == key)
@@ -127,10 +133,11 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default) public override async Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
{ {
var query = _context.DocumentMetadatas.AsQueryable() var query = _context.DocumentMetadatas.AsQueryable()
.Where(m => (m.HlcPhysicalTime > since.PhysicalTime) || .Where(m => m.HlcPhysicalTime > since.PhysicalTime ||
(m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter)); (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter));
if (collections != null) if (collections != null)
@@ -161,17 +168,16 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task ImportAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default) public override async Task ImportAsync(IEnumerable<DocumentMetadata> items,
CancellationToken cancellationToken = default)
{ {
foreach (var item in items) foreach (var item in items) await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
{
await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
}
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task MergeAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default) public override async Task MergeAsync(IEnumerable<DocumentMetadata> items,
CancellationToken cancellationToken = default)
{ {
foreach (var item in items) foreach (var item in items)
{ {
@@ -186,7 +192,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
else else
{ {
// Update only if incoming is newer // Update only if incoming is newer
var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter, existing.HlcNodeId); var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter,
existing.HlcNodeId);
if (item.UpdatedAt.CompareTo(existingTs) > 0) if (item.UpdatedAt.CompareTo(existingTs) > 0)
{ {
existing.HlcPhysicalTime = item.UpdatedAt.PhysicalTime; existing.HlcPhysicalTime = item.UpdatedAt.PhysicalTime;
@@ -197,6 +204,7 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
} }
} }
} }
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }

View File

@@ -2,7 +2,8 @@
## Overview ## Overview
`BLiteDocumentStore<TDbContext>` is an abstract base class that simplifies creating document stores for CBDDC with BLite persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods. `BLiteDocumentStore<TDbContext>` is an abstract base class that simplifies creating document stores for CBDDC with BLite
persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods.
## Key Features ## Key Features
@@ -27,7 +28,8 @@ Remote Sync ? OplogStore.ApplyBatchAsync()
??? _context.OplogEntries (skip - already exists) ??? _context.OplogEntries (skip - already exists)
``` ```
**Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries` collection. **Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries`
collection.
## Implementation Example ## Implementation Example
@@ -180,6 +182,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation
## Migration from Old CDC-based Approach ## Migration from Old CDC-based Approach
### Before (with CDC Events) ### Before (with CDC Events)
```csharp ```csharp
// SampleDocumentStore subscribes to BLite CDC // SampleDocumentStore subscribes to BLite CDC
// CDC emits events ? OplogCoordinator creates Oplog // CDC emits events ? OplogCoordinator creates Oplog
@@ -187,6 +190,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation
``` ```
### After (with BLiteDocumentStore) ### After (with BLiteDocumentStore)
```csharp ```csharp
// Direct Oplog management in DocumentStore // Direct Oplog management in DocumentStore
// AsyncLocal flag prevents duplicates during sync // AsyncLocal flag prevents duplicates during sync
@@ -203,6 +207,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation
## Next Steps ## Next Steps
After implementing your DocumentStore: After implementing your DocumentStore:
1. Remove CDC subscriptions from your code 1. Remove CDC subscriptions from your code
2. Remove `OplogCoordinator` from DI (no longer needed) 2. Remove `OplogCoordinator` from DI (no longer needed)
3. Test local operations create Oplog entries 3. Test local operations create Oplog entries

View File

@@ -1,20 +1,14 @@
using System;
using System.Collections.Concurrent; using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text.Json; using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
using BLite.Core.CDC; using BLite.Core.CDC;
using BLite.Core.Collections; using BLite.Core.Collections;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync; using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using BLiteOperationType = BLite.Core.Transactions.OperationType; using BLiteOperationType = BLite.Core.Transactions.OperationType;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -27,30 +21,30 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposable public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposable
where TDbContext : CBDDCDocumentDbContext where TDbContext : CBDDCDocumentDbContext
{ {
protected readonly TDbContext _context; private readonly List<IDisposable> _cdcWatchers = new();
private readonly object _clockLock = new();
protected readonly IPeerNodeConfigurationProvider _configProvider; protected readonly IPeerNodeConfigurationProvider _configProvider;
protected readonly IConflictResolver _conflictResolver; protected readonly IConflictResolver _conflictResolver;
protected readonly IVectorClockService _vectorClock; protected readonly TDbContext _context;
protected readonly ILogger<BLiteDocumentStore<TDbContext>> _logger; protected readonly ILogger<BLiteDocumentStore<TDbContext>> _logger;
private readonly HashSet<string> _registeredCollections = new();
/// <summary> /// <summary>
/// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync. /// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync.
/// CurrentCount == 0 ? sync in progress, CDC must skip. /// CurrentCount == 0 ? sync in progress, CDC must skip.
/// CurrentCount == 1 ? no sync, CDC creates OplogEntry. /// CurrentCount == 1 ? no sync, CDC creates OplogEntry.
/// </summary> /// </summary>
private readonly SemaphoreSlim _remoteSyncGuard = new SemaphoreSlim(1, 1); private readonly SemaphoreSlim _remoteSyncGuard = new(1, 1);
private readonly ConcurrentDictionary<string, int> _suppressedCdcEvents = new(StringComparer.Ordinal);
private readonly List<IDisposable> _cdcWatchers = new(); private readonly ConcurrentDictionary<string, int> _suppressedCdcEvents = new(StringComparer.Ordinal);
private readonly HashSet<string> _registeredCollections = new(); protected readonly IVectorClockService _vectorClock;
// HLC state for generating timestamps for local changes // HLC state for generating timestamps for local changes
private long _lastPhysicalTime; private long _lastPhysicalTime;
private int _logicalCounter; private int _logicalCounter;
private readonly object _clockLock = new object();
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="BLiteDocumentStore{TDbContext}"/> class. /// Initializes a new instance of the <see cref="BLiteDocumentStore{TDbContext}" /> class.
/// </summary> /// </summary>
/// <param name="context">The BLite database context.</param> /// <param name="context">The BLite database context.</param>
/// <param name="configProvider">The peer node configuration provider.</param> /// <param name="configProvider">The peer node configuration provider.</param>
@@ -74,18 +68,30 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
_logicalCounter = 0; _logicalCounter = 0;
} }
private static ILogger<BLiteDocumentStore<TDbContext>> CreateTypedLogger(ILogger? logger) /// <summary>
/// Releases managed resources used by this document store.
/// </summary>
public virtual void Dispose()
{ {
if (logger is null) foreach (var watcher in _cdcWatchers)
try
{
watcher.Dispose();
}
catch
{ {
return NullLogger<BLiteDocumentStore<TDbContext>>.Instance;
} }
if (logger is ILogger<BLiteDocumentStore<TDbContext>> typedLogger) _cdcWatchers.Clear();
{ _remoteSyncGuard.Dispose();
return typedLogger;
} }
private static ILogger<BLiteDocumentStore<TDbContext>> CreateTypedLogger(ILogger? logger)
{
if (logger is null) return NullLogger<BLiteDocumentStore<TDbContext>>.Instance;
if (logger is ILogger<BLiteDocumentStore<TDbContext>> typedLogger) return typedLogger;
return new ForwardingLogger(logger); return new ForwardingLogger(logger);
} }
@@ -94,7 +100,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private readonly ILogger _inner; private readonly ILogger _inner;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="ForwardingLogger"/> class. /// Initializes a new instance of the <see cref="ForwardingLogger" /> class.
/// </summary> /// </summary>
/// <param name="inner">The underlying logger instance.</param> /// <param name="inner">The underlying logger instance.</param>
public ForwardingLogger(ILogger inner) public ForwardingLogger(ILogger inner)
@@ -135,29 +141,20 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType) private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType)
{ {
var suppressionKey = BuildSuppressionKey(collection, key, operationType); string suppressionKey = BuildSuppressionKey(collection, key, operationType);
_suppressedCdcEvents.AddOrUpdate(suppressionKey, 1, (_, current) => current + 1); _suppressedCdcEvents.AddOrUpdate(suppressionKey, 1, (_, current) => current + 1);
} }
private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType) private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType)
{ {
var suppressionKey = BuildSuppressionKey(collection, key, operationType); string suppressionKey = BuildSuppressionKey(collection, key, operationType);
while (true) while (true)
{ {
if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out var current)) if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out int current)) return false;
{
return false;
}
if (current <= 1) if (current <= 1) return _suppressedCdcEvents.TryRemove(suppressionKey, out _);
{
return _suppressedCdcEvents.TryRemove(suppressionKey, out _);
}
if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) return true;
{
return true;
}
} }
} }
@@ -177,7 +174,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{ {
_registeredCollections.Add(collectionName); _registeredCollections.Add(collectionName);
var watcher = collection.Watch(capturePayload: true) var watcher = collection.Watch(true)
.Subscribe(new CdcObserver<TEntity>(collectionName, keySelector, this)); .Subscribe(new CdcObserver<TEntity>(collectionName, keySelector, this));
_cdcWatchers.Add(watcher); _cdcWatchers.Add(watcher);
} }
@@ -194,7 +191,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private readonly BLiteDocumentStore<TDbContext> _store; private readonly BLiteDocumentStore<TDbContext> _store;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="CdcObserver{TEntity}"/> class. /// Initializes a new instance of the <see cref="CdcObserver{TEntity}" /> class.
/// </summary> /// </summary>
/// <param name="collectionName">The logical collection name.</param> /// <param name="collectionName">The logical collection name.</param>
/// <param name="keySelector">The key selector for observed entities.</param> /// <param name="keySelector">The key selector for observed entities.</param>
@@ -215,18 +212,15 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="changeEvent">The change event payload.</param> /// <param name="changeEvent">The change event payload.</param>
public void OnNext(ChangeStreamEvent<string, TEntity> changeEvent) public void OnNext(ChangeStreamEvent<string, TEntity> changeEvent)
{ {
var operationType = changeEvent.Type == BLiteOperationType.Delete ? OperationType.Delete : OperationType.Put; var operationType = changeEvent.Type == BLiteOperationType.Delete
? OperationType.Delete
: OperationType.Put;
var entityId = changeEvent.DocumentId?.ToString() ?? ""; string entityId = changeEvent.DocumentId ?? "";
if (operationType == OperationType.Put && changeEvent.Entity != null) if (operationType == OperationType.Put && changeEvent.Entity != null)
{
entityId = _keySelector(changeEvent.Entity); entityId = _keySelector(changeEvent.Entity);
}
if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) return;
{
return;
}
if (_store._remoteSyncGuard.CurrentCount == 0) return; if (_store._remoteSyncGuard.CurrentCount == 0) return;
@@ -238,7 +232,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
else if (changeEvent.Entity != null) else if (changeEvent.Entity != null)
{ {
var content = JsonSerializer.SerializeToElement(changeEvent.Entity); var content = JsonSerializer.SerializeToElement(changeEvent.Entity);
var key = _keySelector(changeEvent.Entity); string key = _keySelector(changeEvent.Entity);
_store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content) _store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content)
.GetAwaiter().GetResult(); .GetAwaiter().GetResult();
} }
@@ -248,12 +242,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// Handles CDC observer errors. /// Handles CDC observer errors.
/// </summary> /// </summary>
/// <param name="error">The observed exception.</param> /// <param name="error">The observed exception.</param>
public void OnError(Exception error) { } public void OnError(Exception error)
{
}
/// <summary> /// <summary>
/// Handles completion of the CDC stream. /// Handles completion of the CDC stream.
/// </summary> /// </summary>
public void OnCompleted() { } public void OnCompleted()
{
}
} }
#endregion #endregion
@@ -278,7 +276,8 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="documents">The documents to apply in one batch.</param> /// <param name="documents">The documents to apply in one batch.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
protected abstract Task ApplyContentToEntitiesBatchAsync( protected abstract Task ApplyContentToEntitiesBatchAsync(
IEnumerable<(string Collection, string Key, JsonElement Content)> documents, CancellationToken cancellationToken); IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
CancellationToken cancellationToken);
/// <summary> /// <summary>
/// Reads an entity from the DbContext and returns it as JsonElement. /// Reads an entity from the DbContext and returns it as JsonElement.
@@ -329,8 +328,9 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="collection">The logical collection name.</param> /// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param> /// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The matching document, or <see langword="null"/> when not found.</returns> /// <returns>The matching document, or <see langword="null" /> when not found.</returns>
public async Task<Document?> GetDocumentAsync(string collection, string key, CancellationToken cancellationToken = default) public async Task<Document?> GetDocumentAsync(string collection, string key,
CancellationToken cancellationToken = default)
{ {
var content = await GetEntityAsJsonAsync(collection, key, cancellationToken); var content = await GetEntityAsJsonAsync(collection, key, cancellationToken);
if (content == null) return null; if (content == null) return null;
@@ -345,7 +345,8 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="collection">The logical collection name.</param> /// <param name="collection">The logical collection name.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents in the specified collection.</returns> /// <returns>The documents in the specified collection.</returns>
public async Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection, CancellationToken cancellationToken = default) public async Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection,
CancellationToken cancellationToken = default)
{ {
var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
var timestamp = new HlcTimestamp(0, 0, ""); var timestamp = new HlcTimestamp(0, 0, "");
@@ -358,17 +359,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="documentKeys">The collection and key pairs to resolve.</param> /// <param name="documentKeys">The collection and key pairs to resolve.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents that were found.</returns> /// <returns>The documents that were found.</returns>
public async Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, CancellationToken cancellationToken) public async Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys,
CancellationToken cancellationToken)
{ {
var documents = new List<Document>(); var documents = new List<Document>();
foreach (var (collection, key) in documentKeys) foreach ((string collection, string key) in documentKeys)
{ {
var doc = await GetDocumentAsync(collection, key, cancellationToken); var doc = await GetDocumentAsync(collection, key, cancellationToken);
if (doc != null) if (doc != null) documents.Add(doc);
{
documents.Add(doc);
}
} }
return documents; return documents;
} }
@@ -377,7 +377,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary> /// </summary>
/// <param name="document">The document to persist.</param> /// <param name="document">The document to persist.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns> /// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default) public async Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default)
{ {
await _remoteSyncGuard.WaitAsync(cancellationToken); await _remoteSyncGuard.WaitAsync(cancellationToken);
@@ -389,6 +389,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{ {
_remoteSyncGuard.Release(); _remoteSyncGuard.Release();
} }
return true; return true;
} }
@@ -403,17 +404,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary> /// </summary>
/// <param name="documents">The documents to update.</param> /// <param name="documents">The documents to update.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns> /// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default) public async Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default)
{ {
var documentList = documents.ToList(); var documentList = documents.ToList();
await _remoteSyncGuard.WaitAsync(cancellationToken); await _remoteSyncGuard.WaitAsync(cancellationToken);
try try
{ {
foreach (var document in documentList) foreach (var document in documentList)
{
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
}
await ApplyContentToEntitiesBatchAsync( await ApplyContentToEntitiesBatchAsync(
documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
@@ -422,6 +422,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{ {
_remoteSyncGuard.Release(); _remoteSyncGuard.Release();
} }
return true; return true;
} }
@@ -430,17 +431,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary> /// </summary>
/// <param name="documents">The documents to insert.</param> /// <param name="documents">The documents to insert.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns> /// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default) public async Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default)
{ {
var documentList = documents.ToList(); var documentList = documents.ToList();
await _remoteSyncGuard.WaitAsync(cancellationToken); await _remoteSyncGuard.WaitAsync(cancellationToken);
try try
{ {
foreach (var document in documentList) foreach (var document in documentList)
{
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
}
await ApplyContentToEntitiesBatchAsync( await ApplyContentToEntitiesBatchAsync(
documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
@@ -449,6 +449,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{ {
_remoteSyncGuard.Release(); _remoteSyncGuard.Release();
} }
return true; return true;
} }
@@ -458,8 +459,9 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="collection">The logical collection name.</param> /// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param> /// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns> /// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteDocumentAsync(string collection, string key, CancellationToken cancellationToken = default) public async Task<bool> DeleteDocumentAsync(string collection, string key,
CancellationToken cancellationToken = default)
{ {
await _remoteSyncGuard.WaitAsync(cancellationToken); await _remoteSyncGuard.WaitAsync(cancellationToken);
try try
@@ -470,6 +472,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{ {
_remoteSyncGuard.Release(); _remoteSyncGuard.Release();
} }
return true; return true;
} }
@@ -484,32 +487,27 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary> /// </summary>
/// <param name="documentKeys">The document keys in collection/key format.</param> /// <param name="documentKeys">The document keys in collection/key format.</param>
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns> /// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys, CancellationToken cancellationToken = default) public async Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys,
CancellationToken cancellationToken = default)
{ {
var parsedKeys = new List<(string Collection, string Key)>(); var parsedKeys = new List<(string Collection, string Key)>();
foreach (var key in documentKeys) foreach (string key in documentKeys)
{ {
var parts = key.Split('/'); string[] parts = key.Split('/');
if (parts.Length == 2) if (parts.Length == 2)
{
parsedKeys.Add((parts[0], parts[1])); parsedKeys.Add((parts[0], parts[1]));
}
else else
{
_logger.LogWarning("Invalid document key format: {Key}", key); _logger.LogWarning("Invalid document key format: {Key}", key);
} }
}
if (parsedKeys.Count == 0) return true; if (parsedKeys.Count == 0) return true;
await _remoteSyncGuard.WaitAsync(cancellationToken); await _remoteSyncGuard.WaitAsync(cancellationToken);
try try
{ {
foreach (var (collection, key) in parsedKeys) foreach ((string collection, string key) in parsedKeys)
{
RegisterSuppressedCdcEvent(collection, key, OperationType.Delete); RegisterSuppressedCdcEvent(collection, key, OperationType.Delete);
}
await RemoveEntitiesBatchAsync(parsedKeys, cancellationToken); await RemoveEntitiesBatchAsync(parsedKeys, cancellationToken);
} }
@@ -517,6 +515,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{ {
_remoteSyncGuard.Release(); _remoteSyncGuard.Release();
} }
return true; return true;
} }
@@ -565,13 +564,10 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="cancellationToken">The cancellation token.</param> /// <param name="cancellationToken">The cancellation token.</param>
public async Task DropAsync(CancellationToken cancellationToken = default) public async Task DropAsync(CancellationToken cancellationToken = default)
{ {
foreach (var collection in InterestedCollection) foreach (string collection in InterestedCollection)
{ {
var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
foreach (var (key, _) in entities) foreach ((string key, var _) in entities) await RemoveEntityAsync(collection, key, cancellationToken);
{
await RemoveEntityAsync(collection, key, cancellationToken);
}
} }
} }
@@ -583,11 +579,12 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
public async Task<IEnumerable<Document>> ExportAsync(CancellationToken cancellationToken = default) public async Task<IEnumerable<Document>> ExportAsync(CancellationToken cancellationToken = default)
{ {
var documents = new List<Document>(); var documents = new List<Document>();
foreach (var collection in InterestedCollection) foreach (string collection in InterestedCollection)
{ {
var collectionDocs = await GetDocumentsByCollectionAsync(collection, cancellationToken); var collectionDocs = await GetDocumentsByCollectionAsync(collection, cancellationToken);
documents.AddRange(collectionDocs); documents.AddRange(collectionDocs);
} }
return documents; return documents;
} }
@@ -603,9 +600,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
try try
{ {
foreach (var document in documents) foreach (var document in documents)
{
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
}
await ApplyContentToEntitiesBatchAsync( await ApplyContentToEntitiesBatchAsync(
documents.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); documents.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
@@ -627,10 +622,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
await _remoteSyncGuard.WaitAsync(cancellationToken); await _remoteSyncGuard.WaitAsync(cancellationToken);
try try
{ {
foreach (var document in items) foreach (var document in items) await MergeAsync(document, cancellationToken);
{
await MergeAsync(document, cancellationToken);
}
} }
finally finally
{ {
@@ -673,7 +665,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{ {
lock (_clockLock) lock (_clockLock)
{ {
var now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (now > _lastPhysicalTime) if (now > _lastPhysicalTime)
{ {
@@ -697,7 +689,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
CancellationToken cancellationToken) CancellationToken cancellationToken)
{ {
var config = await _configProvider.GetConfiguration(); var config = await _configProvider.GetConfiguration();
var nodeId = config.NodeId; string nodeId = config.NodeId;
// Get last hash from OplogEntries collection directly // Get last hash from OplogEntries collection directly
var lastEntry = _context.OplogEntries var lastEntry = _context.OplogEntries
@@ -706,7 +698,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
.ThenByDescending(e => e.TimestampLogicalCounter) .ThenByDescending(e => e.TimestampLogicalCounter)
.FirstOrDefault(); .FirstOrDefault();
var previousHash = lastEntry?.Hash ?? string.Empty; string previousHash = lastEntry?.Hash ?? string.Empty;
var timestamp = GenerateTimestamp(nodeId); var timestamp = GenerateTimestamp(nodeId);
var oplogEntry = new OplogEntry( var oplogEntry = new OplogEntry(
@@ -725,7 +717,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
collection, collection,
key, key,
timestamp, timestamp,
isDeleted: operationType == OperationType.Delete); operationType == OperationType.Delete);
var existingMetadata = _context.DocumentMetadatas var existingMetadata = _context.DocumentMetadatas
.Find(m => m.Collection == collection && m.Key == key) .Find(m => m.Collection == collection && m.Key == key)
@@ -770,7 +762,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private readonly SemaphoreSlim _guard; private readonly SemaphoreSlim _guard;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="RemoteSyncScope"/> class. /// Initializes a new instance of the <see cref="RemoteSyncScope" /> class.
/// </summary> /// </summary>
/// <param name="guard">The semaphore guarding remote sync operations.</param> /// <param name="guard">The semaphore guarding remote sync operations.</param>
public RemoteSyncScope(SemaphoreSlim guard) public RemoteSyncScope(SemaphoreSlim guard)
@@ -788,17 +780,4 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
} }
#endregion #endregion
/// <summary>
/// Releases managed resources used by this document store.
/// </summary>
public virtual void Dispose()
{
foreach (var watcher in _cdcWatchers)
{
try { watcher.Dispose(); } catch { }
}
_cdcWatchers.Clear();
_remoteSyncGuard.Dispose();
}
} }

View File

@@ -1,9 +1,9 @@
using ZB.MOM.WW.CBDDC.Core; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync; using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -13,7 +13,7 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
protected readonly ILogger<BLiteOplogStore<TDbContext>> _logger; protected readonly ILogger<BLiteOplogStore<TDbContext>> _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="BLiteOplogStore{TDbContext}"/> class. /// Initializes a new instance of the <see cref="BLiteOplogStore{TDbContext}" /> class.
/// </summary> /// </summary>
/// <param name="dbContext">The BLite database context.</param> /// <param name="dbContext">The BLite database context.</param>
/// <param name="documentStore">The document store used by the oplog store.</param> /// <param name="documentStore">The document store used by the oplog store.</param>
@@ -27,14 +27,16 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
IConflictResolver conflictResolver, IConflictResolver conflictResolver,
IVectorClockService vectorClockService, IVectorClockService vectorClockService,
ISnapshotMetadataStore? snapshotMetadataStore = null, ISnapshotMetadataStore? snapshotMetadataStore = null,
ILogger<BLiteOplogStore<TDbContext>>? logger = null) : base(documentStore, conflictResolver, vectorClockService, snapshotMetadataStore) ILogger<BLiteOplogStore<TDbContext>>? logger = null) : base(documentStore, conflictResolver, vectorClockService,
snapshotMetadataStore)
{ {
_context = dbContext ?? throw new ArgumentNullException(nameof(dbContext)); _context = dbContext ?? throw new ArgumentNullException(nameof(dbContext));
_logger = logger ?? NullLogger<BLiteOplogStore<TDbContext>>.Instance; _logger = logger ?? NullLogger<BLiteOplogStore<TDbContext>>.Instance;
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries, CancellationToken cancellationToken = default) public override async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries,
CancellationToken cancellationToken = default)
{ {
// BLite transactions are committed by each SaveChangesAsync internally. // BLite transactions are committed by each SaveChangesAsync internally.
// Wrapping in an explicit transaction causes "Cannot rollback committed transaction" // Wrapping in an explicit transaction causes "Cannot rollback committed transaction"
@@ -58,22 +60,25 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default) public override async Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
CancellationToken cancellationToken = default)
{ {
var startRow = _context.OplogEntries.Find(o => o.Hash == startHash).FirstOrDefault(); var startRow = _context.OplogEntries.Find(o => o.Hash == startHash).FirstOrDefault();
var endRow = _context.OplogEntries.Find(o => o.Hash == endHash).FirstOrDefault(); var endRow = _context.OplogEntries.Find(o => o.Hash == endHash).FirstOrDefault();
if (startRow == null || endRow == null) return []; if (startRow == null || endRow == null) return [];
var nodeId = startRow.TimestampNodeId; string nodeId = startRow.TimestampNodeId;
// 2. Fetch range (Start < Entry <= End) // 2. Fetch range (Start < Entry <= End)
var entities = _context.OplogEntries var entities = _context.OplogEntries
.Find(o => o.TimestampNodeId == nodeId && .Find(o => o.TimestampNodeId == nodeId &&
((o.TimestampPhysicalTime > startRow.TimestampPhysicalTime) || (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime ||
(o.TimestampPhysicalTime == startRow.TimestampPhysicalTime && o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) && (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime &&
((o.TimestampPhysicalTime < endRow.TimestampPhysicalTime) || o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
(o.TimestampPhysicalTime == endRow.TimestampPhysicalTime && o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter))) (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime ||
(o.TimestampPhysicalTime == endRow.TimestampPhysicalTime &&
o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
.OrderBy(o => o.TimestampPhysicalTime) .OrderBy(o => o.TimestampPhysicalTime)
.ThenBy(o => o.TimestampLogicalCounter) .ThenBy(o => o.TimestampLogicalCounter)
.ToList(); .ToList();
@@ -82,23 +87,27 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<OplogEntry?> GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default) public override async Task<OplogEntry?> GetEntryByHashAsync(string hash,
CancellationToken cancellationToken = default)
{ {
// Hash is now a regular indexed property, not the Key // Hash is now a regular indexed property, not the Key
return _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault()?.ToDomain(); return _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault()?.ToDomain();
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default) public override async Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
{ {
var query = _context.OplogEntries var query = _context.OplogEntries
.Find(o => (o.TimestampPhysicalTime > timestamp.PhysicalTime) || .Find(o => o.TimestampPhysicalTime > timestamp.PhysicalTime ||
(o.TimestampPhysicalTime == timestamp.PhysicalTime && o.TimestampLogicalCounter > timestamp.LogicalCounter)); (o.TimestampPhysicalTime == timestamp.PhysicalTime &&
o.TimestampLogicalCounter > timestamp.LogicalCounter));
if (collections != null) if (collections != null)
{ {
var collectionSet = new HashSet<string>(collections); var collectionSet = new HashSet<string>(collections);
query = query.Where(o => collectionSet.Contains(o.Collection)); query = query.Where(o => collectionSet.Contains(o.Collection));
} }
return query return query
.OrderBy(o => o.TimestampPhysicalTime) .OrderBy(o => o.TimestampPhysicalTime)
.ThenBy(o => o.TimestampLogicalCounter) .ThenBy(o => o.TimestampLogicalCounter)
@@ -107,17 +116,20 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default) public override async Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
{ {
var query = _context.OplogEntries.AsQueryable() var query = _context.OplogEntries.AsQueryable()
.Where(o => o.TimestampNodeId == nodeId && .Where(o => o.TimestampNodeId == nodeId &&
((o.TimestampPhysicalTime > since.PhysicalTime) || (o.TimestampPhysicalTime > since.PhysicalTime ||
(o.TimestampPhysicalTime == since.PhysicalTime && o.TimestampLogicalCounter > since.LogicalCounter))); (o.TimestampPhysicalTime == since.PhysicalTime &&
o.TimestampLogicalCounter > since.LogicalCounter)));
if (collections != null) if (collections != null)
{ {
var collectionSet = new HashSet<string>(collections); var collectionSet = new HashSet<string>(collections);
query = query.Where(o => collectionSet.Contains(o.Collection)); query = query.Where(o => collectionSet.Contains(o.Collection));
} }
return query return query
.OrderBy(o => o.TimestampPhysicalTime) .OrderBy(o => o.TimestampPhysicalTime)
.ThenBy(o => o.TimestampLogicalCounter) .ThenBy(o => o.TimestampLogicalCounter)
@@ -128,10 +140,7 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
/// <inheritdoc /> /// <inheritdoc />
public override async Task ImportAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default) public override async Task ImportAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
{ {
foreach (var item in items) foreach (var item in items) await _context.OplogEntries.InsertAsync(item.ToEntity());
{
await _context.OplogEntries.InsertAsync(item.ToEntity());
}
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
@@ -142,11 +151,9 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
{ {
// Hash is now a regular indexed property, not the Key // Hash is now a regular indexed property, not the Key
var existing = _context.OplogEntries.Find(o => o.Hash == item.Hash).FirstOrDefault(); var existing = _context.OplogEntries.Find(o => o.Hash == item.Hash).FirstOrDefault();
if (existing == null) if (existing == null) await _context.OplogEntries.InsertAsync(item.ToEntity());
{
await _context.OplogEntries.InsertAsync(item.ToEntity());
}
} }
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
@@ -154,8 +161,9 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default) public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default)
{ {
var toDelete = _context.OplogEntries.AsQueryable() var toDelete = _context.OplogEntries.AsQueryable()
.Where(o => (o.TimestampPhysicalTime < cutoff.PhysicalTime) || .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime ||
(o.TimestampPhysicalTime == cutoff.PhysicalTime && o.TimestampLogicalCounter <= cutoff.LogicalCounter)) (o.TimestampPhysicalTime == cutoff.PhysicalTime &&
o.TimestampLogicalCounter <= cutoff.LogicalCounter))
.Select(o => o.Hash) .Select(o => o.Hash)
.ToList(); .ToList();
await _context.OplogEntries.DeleteBulkAsync(toDelete); await _context.OplogEntries.DeleteBulkAsync(toDelete);
@@ -175,23 +183,20 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
// Step 1: Load from SnapshotMetadata FIRST (base state after prune) // Step 1: Load from SnapshotMetadata FIRST (base state after prune)
if (_snapshotMetadataStore != null) if (_snapshotMetadataStore != null)
{
try try
{ {
var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult(); var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult();
foreach (var snapshot in snapshots) foreach (var snapshot in snapshots)
{
_vectorClock.UpdateNode( _vectorClock.UpdateNode(
snapshot.NodeId, snapshot.NodeId,
new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter, snapshot.NodeId), new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter,
snapshot.NodeId),
snapshot.Hash ?? ""); snapshot.Hash ?? "");
} }
}
catch catch
{ {
// Ignore errors during initialization - oplog data will be used as fallback // Ignore errors during initialization - oplog data will be used as fallback
} }
}
// Step 2: Load from Oplog (Latest State - Overrides Snapshot if newer) // Step 2: Load from Oplog (Latest State - Overrides Snapshot if newer)
var latestPerNode = _context.OplogEntries.AsQueryable() var latestPerNode = _context.OplogEntries.AsQueryable()
@@ -208,15 +213,12 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
.ToList(); .ToList();
foreach (var node in latestPerNode) foreach (var node in latestPerNode)
{
if (node.MaxEntry != null) if (node.MaxEntry != null)
{
_vectorClock.UpdateNode( _vectorClock.UpdateNode(
node.NodeId, node.NodeId,
new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter, node.MaxEntry.TimestampNodeId), new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter,
node.MaxEntry.TimestampNodeId),
node.MaxEntry.Hash ?? ""); node.MaxEntry.Hash ?? "");
}
}
_vectorClock.IsInitialized = true; _vectorClock.IsInitialized = true;
} }
@@ -228,7 +230,8 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
} }
/// <inheritdoc /> /// <inheritdoc />
protected override async Task<string?> QueryLastHashForNodeAsync(string nodeId, CancellationToken cancellationToken = default) protected override async Task<string?> QueryLastHashForNodeAsync(string nodeId,
CancellationToken cancellationToken = default)
{ {
var lastEntry = _context.OplogEntries.AsQueryable() var lastEntry = _context.OplogEntries.AsQueryable()
.Where(o => o.TimestampNodeId == nodeId) .Where(o => o.TimestampNodeId == nodeId)
@@ -239,7 +242,8 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
} }
/// <inheritdoc /> /// <inheritdoc />
protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, CancellationToken cancellationToken = default) protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
CancellationToken cancellationToken = default)
{ {
// Hash is now a regular indexed property, not the Key // Hash is now a regular indexed property, not the Key
var entry = _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault(); var entry = _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault();

View File

@@ -1,7 +1,8 @@
using ZB.MOM.WW.CBDDC.Core.Network; using System.Text.Json;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -9,11 +10,15 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence /// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations. /// operations.
/// </summary> /// </summary>
/// <remarks>This class enables storage, retrieval, and management of remote peer configurations using the provided /// <remarks>
/// This class enables storage, retrieval, and management of remote peer configurations using the provided
/// database context. It is typically used in scenarios where peer configurations need to be persisted in a document /// database context. It is typically used in scenarios where peer configurations need to be persisted in a document
/// database.</remarks> /// database.
/// <typeparam name="TDbContext">The type of the document database context used for accessing and managing peer configurations. Must inherit from /// </remarks>
/// CBDDCDocumentDbContext.</typeparam> /// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing peer configurations. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore where TDbContext : CBDDCDocumentDbContext public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore where TDbContext : CBDDCDocumentDbContext
{ {
/// <summary> /// <summary>
@@ -33,7 +38,8 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
/// <param name="context">The database context used to access and manage peer configuration data. Cannot be null.</param> /// <param name="context">The database context used to access and manage peer configuration data. Cannot be null.</param>
/// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param> /// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param>
/// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception> /// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception>
public BLitePeerConfigurationStore(TDbContext context, ILogger<BLitePeerConfigurationStore<TDbContext>>? logger = null) public BLitePeerConfigurationStore(TDbContext context,
ILogger<BLitePeerConfigurationStore<TDbContext>>? logger = null)
{ {
_context = context ?? throw new ArgumentNullException(nameof(context)); _context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLitePeerConfigurationStore<TDbContext>>.Instance; _logger = logger ?? NullLogger<BLitePeerConfigurationStore<TDbContext>>.Instance;
@@ -42,29 +48,36 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
/// <inheritdoc /> /// <inheritdoc />
public override async Task DropAsync(CancellationToken cancellationToken = default) public override async Task DropAsync(CancellationToken cancellationToken = default)
{ {
_logger.LogWarning("Dropping peer configuration store - all remote peer configurations will be permanently deleted!"); _logger.LogWarning(
"Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
// Use Id (technical key) for deletion, not NodeId (business key) // Use Id (technical key) for deletion, not NodeId (business key)
var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(), cancellationToken); var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(),
cancellationToken);
await _context.RemotePeerConfigurations.DeleteBulkAsync(allIds); await _context.RemotePeerConfigurations.DeleteBulkAsync(allIds);
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
_logger.LogInformation("Peer configuration store dropped successfully."); _logger.LogInformation("Peer configuration store dropped successfully.");
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(CancellationToken cancellationToken = default) public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
CancellationToken cancellationToken = default)
{ {
return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken); return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken);
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId, CancellationToken cancellationToken) public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
CancellationToken cancellationToken)
{ {
// NodeId is now a regular indexed property, not the Key // NodeId is now a regular indexed property, not the Key
return await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(), cancellationToken); return await Task.Run(
() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(),
cancellationToken);
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(CancellationToken cancellationToken = default) public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
CancellationToken cancellationToken = default)
{ {
return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken); return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken);
} }
@@ -73,7 +86,8 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default) public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
{ {
// NodeId is now a regular indexed property, not the Key // NodeId is now a regular indexed property, not the Key
var peer = await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken); var peer = await Task.Run(
() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken);
if (peer != null) if (peer != null)
{ {
await _context.RemotePeerConfigurations.DeleteAsync(peer.Id); await _context.RemotePeerConfigurations.DeleteAsync(peer.Id);
@@ -87,10 +101,13 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer, CancellationToken cancellationToken = default) public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
CancellationToken cancellationToken = default)
{ {
// NodeId is now a regular indexed property, not the Key // NodeId is now a regular indexed property, not the Key
var existing = await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(), cancellationToken); var existing =
await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(),
cancellationToken);
if (existing == null) if (existing == null)
{ {
@@ -103,7 +120,7 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
existing.Type = (int)peer.Type; existing.Type = (int)peer.Type;
existing.IsEnabled = peer.IsEnabled; existing.IsEnabled = peer.IsEnabled;
existing.InterestsJson = peer.InterestingCollections.Count > 0 existing.InterestsJson = peer.InterestingCollections.Count > 0
? System.Text.Json.JsonSerializer.Serialize(peer.InterestingCollections) ? JsonSerializer.Serialize(peer.InterestingCollections)
: ""; : "";
await _context.RemotePeerConfigurations.UpdateAsync(existing); await _context.RemotePeerConfigurations.UpdateAsync(existing);
} }

View File

@@ -10,7 +10,8 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// BLite-backed peer oplog confirmation store. /// BLite-backed peer oplog confirmation store.
/// </summary> /// </summary>
/// <typeparam name="TDbContext">The BLite context type.</typeparam> /// <typeparam name="TDbContext">The BLite context type.</typeparam>
public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmationStore where TDbContext : CBDDCDocumentDbContext public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmationStore
where TDbContext : CBDDCDocumentDbContext
{ {
internal const string RegistrationSourceNodeId = "__peer_registration__"; internal const string RegistrationSourceNodeId = "__peer_registration__";
@@ -18,7 +19,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
private readonly ILogger<BLitePeerOplogConfirmationStore<TDbContext>> _logger; private readonly ILogger<BLitePeerOplogConfirmationStore<TDbContext>> _logger;
/// <summary> /// <summary>
/// Initializes a new instance of the <see cref="BLitePeerOplogConfirmationStore{TDbContext}"/> class. /// Initializes a new instance of the <see cref="BLitePeerOplogConfirmationStore{TDbContext}" /> class.
/// </summary> /// </summary>
/// <param name="context">The BLite context.</param> /// <param name="context">The BLite context.</param>
/// <param name="logger">An optional logger.</param> /// <param name="logger">An optional logger.</param>
@@ -38,9 +39,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
CancellationToken cancellationToken = default) CancellationToken cancellationToken = default)
{ {
if (string.IsNullOrWhiteSpace(peerNodeId)) if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
var existing = _context.PeerOplogConfirmations var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == RegistrationSourceNodeId) .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == RegistrationSourceNodeId)
@@ -61,7 +60,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
}); });
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
_logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId, address, type); _logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId,
address, type);
return; return;
} }
@@ -83,20 +83,16 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
CancellationToken cancellationToken = default) CancellationToken cancellationToken = default)
{ {
if (string.IsNullOrWhiteSpace(peerNodeId)) if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
if (string.IsNullOrWhiteSpace(sourceNodeId)) if (string.IsNullOrWhiteSpace(sourceNodeId))
{
throw new ArgumentException("Source node id is required.", nameof(sourceNodeId)); throw new ArgumentException("Source node id is required.", nameof(sourceNodeId));
}
var existing = _context.PeerOplogConfirmations var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == sourceNodeId) .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == sourceNodeId)
.FirstOrDefault(); .FirstOrDefault();
var nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (existing == null) if (existing == null)
{ {
@@ -115,15 +111,12 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
return; return;
} }
var isNewer = IsIncomingTimestampNewer(timestamp, existing); bool isNewer = IsIncomingTimestampNewer(timestamp, existing);
var samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall && bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
timestamp.LogicalCounter == existing.ConfirmedLogic && timestamp.LogicalCounter == existing.ConfirmedLogic &&
!string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal); !string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal);
if (!isNewer && !samePointHashChanged && existing.IsActive) if (!isNewer && !samePointHashChanged && existing.IsActive) return;
{
return;
}
existing.ConfirmedWall = timestamp.PhysicalTime; existing.ConfirmedWall = timestamp.PhysicalTime;
existing.ConfirmedLogic = timestamp.LogicalCounter; existing.ConfirmedLogic = timestamp.LogicalCounter;
@@ -136,7 +129,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
} }
/// <inheritdoc /> /// <inheritdoc />
public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(CancellationToken cancellationToken = default) public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(
CancellationToken cancellationToken = default)
{ {
var confirmations = _context.PeerOplogConfirmations var confirmations = _context.PeerOplogConfirmations
.Find(c => c.SourceNodeId != RegistrationSourceNodeId) .Find(c => c.SourceNodeId != RegistrationSourceNodeId)
@@ -152,9 +146,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
CancellationToken cancellationToken = default) CancellationToken cancellationToken = default)
{ {
if (string.IsNullOrWhiteSpace(peerNodeId)) if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
var confirmations = _context.PeerOplogConfirmations var confirmations = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId != RegistrationSourceNodeId) .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId != RegistrationSourceNodeId)
@@ -168,26 +160,18 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default) public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default)
{ {
if (string.IsNullOrWhiteSpace(peerNodeId)) if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
var matches = _context.PeerOplogConfirmations var matches = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId) .Find(c => c.PeerNodeId == peerNodeId)
.ToList(); .ToList();
if (matches.Count == 0) if (matches.Count == 0) return;
{
return;
}
var nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
foreach (var match in matches) foreach (var match in matches)
{ {
if (!match.IsActive) if (!match.IsActive) continue;
{
continue;
}
match.IsActive = false; match.IsActive = false;
match.LastConfirmedUtcMs = nowMs; match.LastConfirmedUtcMs = nowMs;
@@ -229,7 +213,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default) public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default)
{ {
foreach (var item in items) foreach (var item in items)
{ {
@@ -255,7 +240,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default) public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default)
{ {
foreach (var item in items) foreach (var item in items)
{ {
@@ -271,7 +257,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
var changed = false; var changed = false;
var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId); var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId);
var existingTimestamp = new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId); var existingTimestamp =
new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId);
if (incomingTimestamp > existingTimestamp) if (incomingTimestamp > existingTimestamp)
{ {
@@ -281,7 +268,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
changed = true; changed = true;
} }
var incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds(); long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs) if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs)
{ {
existing.LastConfirmedUtcMs = incomingLastConfirmedMs; existing.LastConfirmedUtcMs = incomingLastConfirmedMs;
@@ -294,10 +281,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
changed = true; changed = true;
} }
if (changed) if (changed) await _context.PeerOplogConfirmations.UpdateAsync(existing);
{
await _context.PeerOplogConfirmations.UpdateAsync(existing);
}
} }
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
@@ -305,16 +289,11 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, PeerOplogConfirmationEntity existing) private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, PeerOplogConfirmationEntity existing)
{ {
if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true;
{
return true;
}
if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall && if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall &&
incomingTimestamp.LogicalCounter > existing.ConfirmedLogic) incomingTimestamp.LogicalCounter > existing.ConfirmedLogic)
{
return true; return true;
}
return false; return false;
} }

View File

@@ -1,7 +1,7 @@
using ZB.MOM.WW.CBDDC.Core; using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions; using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -9,26 +9,34 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence /// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations. /// operations.
/// </summary> /// </summary>
/// <remarks>This class enables storage, retrieval, and management of snapshot metadata using the provided /// <remarks>
/// This class enables storage, retrieval, and management of snapshot metadata using the provided
/// database context. It is typically used in scenarios where snapshot metadata needs to be persisted in a document /// database context. It is typically used in scenarios where snapshot metadata needs to be persisted in a document
/// database. The class supports bulk operations and incremental updates, and can be extended for custom database /// database. The class supports bulk operations and incremental updates, and can be extended for custom database
/// contexts. Thread safety depends on the underlying context implementation.</remarks> /// contexts. Thread safety depends on the underlying context implementation.
/// <typeparam name="TDbContext">The type of the document database context used for accessing and managing snapshot metadata. Must inherit from /// </remarks>
/// CBDDCDocumentDbContext.</typeparam> /// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing snapshot metadata. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore where TDbContext : CBDDCDocumentDbContext public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore where TDbContext : CBDDCDocumentDbContext
{ {
/// <summary> /// <summary>
/// Represents the database context used for data access operations within the derived class. /// Represents the database context used for data access operations within the derived class.
/// </summary> /// </summary>
/// <remarks>Intended for use by derived classes to interact with the underlying database. The context /// <remarks>
/// should be properly disposed of according to the application's lifetime management strategy.</remarks> /// Intended for use by derived classes to interact with the underlying database. The context
/// should be properly disposed of according to the application's lifetime management strategy.
/// </remarks>
protected readonly TDbContext _context; protected readonly TDbContext _context;
/// <summary> /// <summary>
/// Provides logging capabilities for the BLiteSnapshotMetadataStore operations. /// Provides logging capabilities for the BLiteSnapshotMetadataStore operations.
/// </summary> /// </summary>
/// <remarks>Intended for use by derived classes to record diagnostic and operational information. The /// <remarks>
/// logger instance is specific to the BLiteSnapshotMetadataStore<TDbContext> type.</remarks> /// Intended for use by derived classes to record diagnostic and operational information. The
/// logger instance is specific to the BLiteSnapshotMetadataStore<TDbContext> type.
/// </remarks>
protected readonly ILogger<BLiteSnapshotMetadataStore<TDbContext>> _logger; protected readonly ILogger<BLiteSnapshotMetadataStore<TDbContext>> _logger;
/// <summary> /// <summary>
@@ -38,7 +46,8 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
/// <param name="context">The database context to be used for accessing snapshot metadata. Cannot be null.</param> /// <param name="context">The database context to be used for accessing snapshot metadata. Cannot be null.</param>
/// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param> /// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param>
/// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception> /// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception>
public BLiteSnapshotMetadataStore(TDbContext context, ILogger<BLiteSnapshotMetadataStore<TDbContext>>? logger = null) public BLiteSnapshotMetadataStore(TDbContext context,
ILogger<BLiteSnapshotMetadataStore<TDbContext>>? logger = null)
{ {
_context = context ?? throw new ArgumentNullException(nameof(context)); _context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLiteSnapshotMetadataStore<TDbContext>>.Instance; _logger = logger ?? NullLogger<BLiteSnapshotMetadataStore<TDbContext>>.Instance;
@@ -48,7 +57,8 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
public override async Task DropAsync(CancellationToken cancellationToken = default) public override async Task DropAsync(CancellationToken cancellationToken = default)
{ {
// Use Id (technical key) for deletion, not NodeId (business key) // Use Id (technical key) for deletion, not NodeId (business key)
var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(), cancellationToken); var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(),
cancellationToken);
await _context.SnapshotMetadatas.DeleteBulkAsync(allIds); await _context.SnapshotMetadatas.DeleteBulkAsync(allIds);
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
@@ -60,37 +70,41 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<string?> GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default) public override async Task<string?> GetSnapshotHashAsync(string nodeId,
CancellationToken cancellationToken = default)
{ {
// NodeId is now a regular indexed property, not the Key // NodeId is now a regular indexed property, not the Key
var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(), cancellationToken); var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(),
cancellationToken);
return snapshot?.Hash; return snapshot?.Hash;
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items, CancellationToken cancellationToken = default) public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items,
CancellationToken cancellationToken = default)
{ {
foreach (var metadata in items) foreach (var metadata in items) await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
{
await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
}
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, CancellationToken cancellationToken = default) public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata,
CancellationToken cancellationToken = default)
{ {
await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity()); await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items, CancellationToken cancellationToken = default) public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items,
CancellationToken cancellationToken = default)
{ {
foreach (var metadata in items) foreach (var metadata in items)
{ {
// NodeId is now a regular indexed property, not the Key // NodeId is now a regular indexed property, not the Key
var existing = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(), cancellationToken); var existing =
await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(),
cancellationToken);
if (existing == null) if (existing == null)
{ {
@@ -111,14 +125,18 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
} }
} }
} }
await _context.SaveChangesAsync(cancellationToken); await _context.SaveChangesAsync(cancellationToken);
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, CancellationToken cancellationToken) public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta,
CancellationToken cancellationToken)
{ {
// NodeId is now a regular indexed property, not the Key - find existing by NodeId // NodeId is now a regular indexed property, not the Key - find existing by NodeId
var existing = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(), cancellationToken); var existing =
await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(),
cancellationToken);
if (existing != null) if (existing != null)
{ {
existing.NodeId = existingMeta.NodeId; existing.NodeId = existingMeta.NodeId;
@@ -131,14 +149,18 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default) public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId,
CancellationToken cancellationToken = default)
{ {
// NodeId is now a regular indexed property, not the Key // NodeId is now a regular indexed property, not the Key
return await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(), cancellationToken); return await Task.Run(
() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(),
cancellationToken);
} }
/// <inheritdoc /> /// <inheritdoc />
public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(CancellationToken cancellationToken = default) public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(
CancellationToken cancellationToken = default)
{ {
return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken); return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken);
} }

View File

@@ -1,7 +1,7 @@
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.DependencyInjection.Extensions;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite; namespace ZB.MOM.WW.CBDDC.Persistence.BLite;

View File

@@ -8,12 +8,38 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
public partial class CBDDCDocumentDbContext : DocumentDbContext public partial class CBDDCDocumentDbContext : DocumentDbContext
{ {
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path.
/// </summary>
/// <param name="databasePath">
/// The file system path to the database file to be used by the context. Cannot be null or
/// empty.
/// </param>
public CBDDCDocumentDbContext(string databasePath) : base(databasePath)
{
}
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file
/// configuration.
/// </summary>
/// <param name="databasePath">The file system path to the database file. This value cannot be null or empty.</param>
/// <param name="config">
/// The configuration settings for the page file. Specifies options that control how the database
/// pages are managed.
/// </param>
public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config)
{
}
/// <summary> /// <summary>
/// Gets the collection of operation log entries associated with this instance. /// Gets the collection of operation log entries associated with this instance.
/// </summary> /// </summary>
/// <remarks>The collection provides access to all recorded operation log (oplog) entries, which can be /// <remarks>
/// The collection provides access to all recorded operation log (oplog) entries, which can be
/// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed /// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed
/// directly through this property.</remarks> /// directly through this property.
/// </remarks>
public DocumentCollection<string, OplogEntity> OplogEntries { get; private set; } = null!; public DocumentCollection<string, OplogEntity> OplogEntries { get; private set; } = null!;
/// <summary> /// <summary>
@@ -24,16 +50,20 @@ public partial class CBDDCDocumentDbContext : DocumentDbContext
/// <summary> /// <summary>
/// Gets the collection of remote peer configurations associated with this instance. /// Gets the collection of remote peer configurations associated with this instance.
/// </summary> /// </summary>
/// <remarks>Use this collection to access or enumerate the configuration settings for each remote peer. /// <remarks>
/// Use this collection to access or enumerate the configuration settings for each remote peer.
/// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the /// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the
/// containing class.</remarks> /// containing class.
/// </remarks>
public DocumentCollection<string, RemotePeerEntity> RemotePeerConfigurations { get; private set; } = null!; public DocumentCollection<string, RemotePeerEntity> RemotePeerConfigurations { get; private set; } = null!;
/// <summary> /// <summary>
/// Gets the collection of document metadata for sync tracking. /// Gets the collection of document metadata for sync tracking.
/// </summary> /// </summary>
/// <remarks>Stores HLC timestamps and deleted state for each document without modifying application entities. /// <remarks>
/// Used to track document versions for incremental sync instead of full snapshots.</remarks> /// Stores HLC timestamps and deleted state for each document without modifying application entities.
/// Used to track document versions for incremental sync instead of full snapshots.
/// </remarks>
public DocumentCollection<string, DocumentMetadataEntity> DocumentMetadatas { get; private set; } = null!; public DocumentCollection<string, DocumentMetadataEntity> DocumentMetadatas { get; private set; } = null!;
/// <summary> /// <summary>
@@ -41,24 +71,6 @@ public partial class CBDDCDocumentDbContext : DocumentDbContext
/// </summary> /// </summary>
public DocumentCollection<string, PeerOplogConfirmationEntity> PeerOplogConfirmations { get; private set; } = null!; public DocumentCollection<string, PeerOplogConfirmationEntity> PeerOplogConfirmations { get; private set; } = null!;
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path.
/// </summary>
/// <param name="databasePath">The file system path to the database file to be used by the context. Cannot be null or empty.</param>
public CBDDCDocumentDbContext(string databasePath) : base(databasePath)
{
}
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file
/// configuration.
/// </summary>
/// <param name="databasePath">The file system path to the database file. This value cannot be null or empty.</param>
/// <param name="config">The configuration settings for the page file. Specifies options that control how the database pages are managed.</param>
public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config)
{
}
/// <inheritdoc /> /// <inheritdoc />
protected override void OnModelCreating(ModelBuilder modelBuilder) protected override void OnModelCreating(ModelBuilder modelBuilder)
{ {

View File

@@ -9,6 +9,33 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// </summary> /// </summary>
public static class EntityMappers public static class EntityMappers
{ {
#region DocumentMetadataEntity Helpers
/// <summary>
/// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state.
/// Used for tracking document sync state.
/// </summary>
/// <param name="collection">The collection name that owns the document.</param>
/// <param name="key">The document key within the collection.</param>
/// <param name="timestamp">The hybrid logical clock timestamp for the document state.</param>
/// <param name="isDeleted">Indicates whether the document is marked as deleted.</param>
public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp,
bool isDeleted = false)
{
return new DocumentMetadataEntity
{
Id = Guid.NewGuid().ToString(),
Collection = collection,
Key = key,
HlcPhysicalTime = timestamp.PhysicalTime,
HlcLogicalCounter = timestamp.LogicalCounter,
HlcNodeId = timestamp.NodeId,
IsDeleted = isDeleted
};
}
#endregion
#region OplogEntity Mappers #region OplogEntity Mappers
/// <summary> /// <summary>
@@ -42,9 +69,7 @@ public static class EntityMappers
JsonElement? payload = null; JsonElement? payload = null;
// Treat empty string as null payload (Delete operations) // Treat empty string as null payload (Delete operations)
if (!string.IsNullOrEmpty(entity.PayloadJson)) if (!string.IsNullOrEmpty(entity.PayloadJson))
{
payload = JsonSerializer.Deserialize<JsonElement>(entity.PayloadJson); payload = JsonSerializer.Deserialize<JsonElement>(entity.PayloadJson);
}
return new OplogEntry( return new OplogEntry(
entity.Collection, entity.Collection,
@@ -147,9 +172,7 @@ public static class EntityMappers
}; };
if (!string.IsNullOrEmpty(entity.InterestsJson)) if (!string.IsNullOrEmpty(entity.InterestsJson))
{
config.InterestingCollections = JsonSerializer.Deserialize<List<string>>(entity.InterestsJson) ?? []; config.InterestingCollections = JsonSerializer.Deserialize<List<string>>(entity.InterestsJson) ?? [];
}
return config; return config;
} }
@@ -214,30 +237,4 @@ public static class EntityMappers
} }
#endregion #endregion
#region DocumentMetadataEntity Helpers
/// <summary>
/// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state.
/// Used for tracking document sync state.
/// </summary>
/// <param name="collection">The collection name that owns the document.</param>
/// <param name="key">The document key within the collection.</param>
/// <param name="timestamp">The hybrid logical clock timestamp for the document state.</param>
/// <param name="isDeleted">Indicates whether the document is marked as deleted.</param>
public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp, bool isDeleted = false)
{
return new DocumentMetadataEntity
{
Id = Guid.NewGuid().ToString(),
Collection = collection,
Key = key,
HlcPhysicalTime = timestamp.PhysicalTime,
HlcLogicalCounter = timestamp.LogicalCounter,
HlcNodeId = timestamp.NodeId,
IsDeleted = isDeleted
};
}
#endregion
} }

View File

@@ -10,22 +10,27 @@ namespace ZB.MOM.WW.CBDDC.Persistence;
public abstract class DocumentMetadataStore : IDocumentMetadataStore public abstract class DocumentMetadataStore : IDocumentMetadataStore
{ {
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<DocumentMetadata?> GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default); public abstract Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default); public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default); public abstract Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas, CancellationToken cancellationToken = default); public abstract Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default); public abstract Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default); public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task DropAsync(CancellationToken cancellationToken = default); public abstract Task DropAsync(CancellationToken cancellationToken = default);
@@ -34,7 +39,8 @@ public abstract class DocumentMetadataStore : IDocumentMetadataStore
public abstract Task<IEnumerable<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default); public abstract Task<IEnumerable<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task ImportAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default); public abstract Task ImportAsync(IEnumerable<DocumentMetadata> items,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task MergeAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default); public abstract Task MergeAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default);

View File

@@ -8,9 +8,9 @@ public class NodeCacheEntry
/// Gets or sets the latest known timestamp for the node. /// Gets or sets the latest known timestamp for the node.
/// </summary> /// </summary>
public HlcTimestamp Timestamp { get; set; } public HlcTimestamp Timestamp { get; set; }
/// <summary> /// <summary>
/// Gets or sets the latest known hash for the node. /// Gets or sets the latest known hash for the node.
/// </summary> /// </summary>
public string Hash { get; set; } = ""; public string Hash { get; set; } = "";
} }

View File

@@ -6,27 +6,13 @@ namespace ZB.MOM.WW.CBDDC.Persistence;
public abstract class OplogStore : IOplogStore public abstract class OplogStore : IOplogStore
{ {
protected readonly IDocumentStore _documentStore;
protected readonly IConflictResolver _conflictResolver; protected readonly IConflictResolver _conflictResolver;
protected readonly IDocumentStore _documentStore;
protected readonly ISnapshotMetadataStore? _snapshotMetadataStore; protected readonly ISnapshotMetadataStore? _snapshotMetadataStore;
protected readonly IVectorClockService _vectorClock; protected readonly IVectorClockService _vectorClock;
/// <summary> /// <summary>
/// Occurs after a set of oplog entries has been applied. /// Initializes a new instance of the <see cref="OplogStore" /> class.
/// </summary>
public event EventHandler<ChangesAppliedEventArgs>? ChangesApplied;
/// <summary>
/// Raises the <see cref="ChangesApplied"/> event.
/// </summary>
/// <param name="appliedEntries">The entries that were applied.</param>
public virtual void OnChangesApplied(IEnumerable<OplogEntry> appliedEntries)
{
ChangesApplied?.Invoke(this, new ChangesAppliedEventArgs(appliedEntries));
}
/// <summary>
/// Initializes a new instance of the <see cref="OplogStore"/> class.
/// </summary> /// </summary>
/// <param name="documentStore">The backing document store.</param> /// <param name="documentStore">The backing document store.</param>
/// <param name="conflictResolver">The conflict resolver used during merges.</param> /// <param name="conflictResolver">The conflict resolver used during merges.</param>
@@ -46,20 +32,9 @@ public abstract class OplogStore : IOplogStore
} }
/// <summary> /// <summary>
/// Initializes the VectorClockService with existing oplog/snapshot data. /// Occurs after a set of oplog entries has been applied.
/// Called once at construction time.
/// </summary> /// </summary>
protected abstract void InitializeVectorClock(); public event EventHandler<ChangesAppliedEventArgs>? ChangesApplied;
/// <summary>
/// Asynchronously inserts an operation log entry into the underlying data store.
/// </summary>
/// <remarks>Implementations should ensure that the entry is persisted reliably. If the operation is
/// cancelled, the entry may not be inserted.</remarks>
/// <param name="entry">The operation log entry to insert. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the insert operation.</param>
/// <returns>A task that represents the asynchronous insert operation.</returns>
protected abstract Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public async Task AppendOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default) public async Task AppendOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default)
@@ -69,13 +44,11 @@ public abstract class OplogStore : IOplogStore
} }
/// <inheritdoc /> /// <inheritdoc />
public async virtual Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries, CancellationToken cancellationToken = default) public virtual async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries,
CancellationToken cancellationToken = default)
{ {
var entries = oplogEntries.ToList(); var entries = oplogEntries.ToList();
if (entries.Count == 0) if (entries.Count == 0) return;
{
return;
}
var documentKeys = entries.Select(e => (e.Collection, e.Key)).Distinct().ToList(); var documentKeys = entries.Select(e => (e.Collection, e.Key)).Distinct().ToList();
var documentsToFetch = await _documentStore.GetDocumentsAsync(documentKeys, cancellationToken); var documentsToFetch = await _documentStore.GetDocumentsAsync(documentKeys, cancellationToken);
@@ -88,7 +61,8 @@ public abstract class OplogStore : IOplogStore
foreach (var entry in orderedEntriesPerCollectionKey) foreach (var entry in orderedEntriesPerCollectionKey)
{ {
var existingDocument = documentsToFetch.FirstOrDefault(d => d.Collection == entry.Key.Collection && d.Key == entry.Key.Key); var existingDocument =
documentsToFetch.FirstOrDefault(d => d.Collection == entry.Key.Collection && d.Key == entry.Key.Key);
var document = existingDocument; var document = existingDocument;
var sawDelete = false; var sawDelete = false;
var sawPut = false; var sawPut = false;
@@ -106,34 +80,25 @@ public abstract class OplogStore : IOplogStore
{ {
sawPut = true; sawPut = true;
if (document == null) if (document == null)
{
document = new Document( document = new Document(
oplogEntry.Collection, oplogEntry.Collection,
oplogEntry.Key, oplogEntry.Key,
oplogEntry.Payload.Value, oplogEntry.Payload.Value,
oplogEntry.Timestamp, oplogEntry.Timestamp,
isDeleted: false); false);
}
else else
{
document.Merge(oplogEntry, _conflictResolver); document.Merge(oplogEntry, _conflictResolver);
} }
} }
}
if (document == null) if (document == null)
{ {
if (sawDelete && existingDocument != null) if (sawDelete && existingDocument != null)
{
await _documentStore.DeleteDocumentAsync(entry.Key.Collection, entry.Key.Key, cancellationToken); await _documentStore.DeleteDocumentAsync(entry.Key.Collection, entry.Key.Key, cancellationToken);
}
continue; continue;
} }
if (sawPut || existingDocument == null) if (sawPut || existingDocument == null) await _documentStore.PutDocumentAsync(document, cancellationToken);
{
await _documentStore.PutDocumentAsync(document, cancellationToken);
}
} }
//insert all oplog entries after processing documents to ensure oplog reflects the actual state of documents //insert all oplog entries after processing documents to ensure oplog reflects the actual state of documents
@@ -145,41 +110,21 @@ public abstract class OplogStore : IOplogStore
} }
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default); public abstract Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<OplogEntry?> GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default); public abstract Task<OplogEntry?> GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves the most recent hash value associated with the specified node.
/// </summary>
/// <param name="nodeId">The unique identifier of the node for which to query the last hash. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains the last hash value for the node, or
/// null if no hash is available.</returns>
protected abstract Task<string?> QueryLastHashForNodeAsync(string nodeId, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously queries the oplog for the most recent timestamp associated with the specified hash.
/// </summary>
/// <remarks>This method is intended to be implemented by derived classes to provide access to the oplog.
/// The returned timestamps can be used to track the last occurrence of a hash in the oplog for synchronization or
/// auditing purposes.</remarks>
/// <param name="hash">The hash value to search for in the oplog. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains a tuple with the wall clock
/// timestamp and logical timestamp if the hash is found; otherwise, null.</returns>
protected abstract Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public async Task<string?> GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default) public async Task<string?> GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default)
{ {
// Try cache first // Try cache first
var cachedHash = _vectorClock.GetLastHash(nodeId); string? cachedHash = _vectorClock.GetLastHash(nodeId);
if (cachedHash != null) return cachedHash; if (cachedHash != null) return cachedHash;
// Cache miss - query database (Oplog first) // Cache miss - query database (Oplog first)
var hash = await QueryLastHashForNodeAsync(nodeId, cancellationToken); string? hash = await QueryLastHashForNodeAsync(nodeId, cancellationToken);
// FALLBACK: If not in oplog, check SnapshotMetadata (important after prune!) // FALLBACK: If not in oplog, check SnapshotMetadata (important after prune!)
if (hash == null && _snapshotMetadataStore != null) if (hash == null && _snapshotMetadataStore != null)
@@ -190,11 +135,10 @@ public abstract class OplogStore : IOplogStore
{ {
var snapshotMeta = await _snapshotMetadataStore.GetSnapshotMetadataAsync(nodeId, cancellationToken); var snapshotMeta = await _snapshotMetadataStore.GetSnapshotMetadataAsync(nodeId, cancellationToken);
if (snapshotMeta != null) if (snapshotMeta != null)
{
_vectorClock.UpdateNode(nodeId, _vectorClock.UpdateNode(nodeId,
new HlcTimestamp(snapshotMeta.TimestampPhysicalTime, snapshotMeta.TimestampLogicalCounter, nodeId), new HlcTimestamp(snapshotMeta.TimestampPhysicalTime, snapshotMeta.TimestampLogicalCounter,
nodeId),
hash); hash);
}
return hash; return hash;
} }
} }
@@ -204,12 +148,10 @@ public abstract class OplogStore : IOplogStore
{ {
var row = await QueryLastHashTimestampFromOplogAsync(hash, cancellationToken); var row = await QueryLastHashTimestampFromOplogAsync(hash, cancellationToken);
if (row.HasValue) if (row.HasValue)
{
_vectorClock.UpdateNode(nodeId, _vectorClock.UpdateNode(nodeId,
new HlcTimestamp(row.Value.Wall, row.Value.Logic, nodeId), new HlcTimestamp(row.Value.Wall, row.Value.Logic, nodeId),
hash); hash);
} }
}
return hash; return hash;
} }
@@ -221,10 +163,12 @@ public abstract class OplogStore : IOplogStore
} }
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default); public abstract Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default); public abstract Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public Task<VectorClock> GetVectorClockAsync(CancellationToken cancellationToken = default) public Task<VectorClock> GetVectorClockAsync(CancellationToken cancellationToken = default)
@@ -264,4 +208,60 @@ public abstract class OplogStore : IOplogStore
/// <param name="cancellationToken">A token used to cancel the operation.</param> /// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation.</returns> /// <returns>A task that represents the asynchronous operation.</returns>
public abstract Task MergeAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default); public abstract Task MergeAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default);
/// <summary>
/// Raises the <see cref="ChangesApplied" /> event.
/// </summary>
/// <param name="appliedEntries">The entries that were applied.</param>
public virtual void OnChangesApplied(IEnumerable<OplogEntry> appliedEntries)
{
ChangesApplied?.Invoke(this, new ChangesAppliedEventArgs(appliedEntries));
}
/// <summary>
/// Initializes the VectorClockService with existing oplog/snapshot data.
/// Called once at construction time.
/// </summary>
protected abstract void InitializeVectorClock();
/// <summary>
/// Asynchronously inserts an operation log entry into the underlying data store.
/// </summary>
/// <remarks>
/// Implementations should ensure that the entry is persisted reliably. If the operation is
/// cancelled, the entry may not be inserted.
/// </remarks>
/// <param name="entry">The operation log entry to insert. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the insert operation.</param>
/// <returns>A task that represents the asynchronous insert operation.</returns>
protected abstract Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves the most recent hash value associated with the specified node.
/// </summary>
/// <param name="nodeId">The unique identifier of the node for which to query the last hash. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>
/// A task that represents the asynchronous operation. The task result contains the last hash value for the node, or
/// null if no hash is available.
/// </returns>
protected abstract Task<string?> QueryLastHashForNodeAsync(string nodeId,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously queries the oplog for the most recent timestamp associated with the specified hash.
/// </summary>
/// <remarks>
/// This method is intended to be implemented by derived classes to provide access to the oplog.
/// The returned timestamps can be used to track the last occurrence of a hash in the oplog for synchronization or
/// auditing purposes.
/// </remarks>
/// <param name="hash">The hash value to search for in the oplog. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>
/// A task that represents the asynchronous operation. The task result contains a tuple with the wall clock
/// timestamp and logical timestamp if the hash is found; otherwise, null.
/// </returns>
protected abstract Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
CancellationToken cancellationToken = default);
} }

View File

@@ -6,44 +6,43 @@ namespace ZB.MOM.WW.CBDDC.Persistence;
public abstract class PeerConfigurationStore : IPeerConfigurationStore public abstract class PeerConfigurationStore : IPeerConfigurationStore
{ {
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(CancellationToken cancellationToken = default); public abstract Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId, CancellationToken cancellationToken); public abstract Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
CancellationToken cancellationToken);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default); public abstract Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task SaveRemotePeerAsync(RemotePeerConfiguration peer, CancellationToken cancellationToken = default); public abstract Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task DropAsync(CancellationToken cancellationToken = default); public abstract Task DropAsync(CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(CancellationToken cancellationToken = default); public abstract Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public virtual async Task ImportAsync(IEnumerable<RemotePeerConfiguration> items, CancellationToken cancellationToken = default) public virtual async Task ImportAsync(IEnumerable<RemotePeerConfiguration> items,
CancellationToken cancellationToken = default)
{ {
foreach (var item in items) foreach (var item in items) await SaveRemotePeerAsync(item, cancellationToken);
{
await SaveRemotePeerAsync(item, cancellationToken);
}
} }
/// <inheritdoc /> /// <inheritdoc />
public virtual async Task MergeAsync(IEnumerable<RemotePeerConfiguration> items, CancellationToken cancellationToken = default) public virtual async Task MergeAsync(IEnumerable<RemotePeerConfiguration> items,
CancellationToken cancellationToken = default)
{ {
foreach (var item in items) foreach (var item in items)
{ {
var existing = await GetRemotePeerAsync(item.NodeId, cancellationToken); var existing = await GetRemotePeerAsync(item.NodeId, cancellationToken);
if (existing == null) if (existing == null) await SaveRemotePeerAsync(item, cancellationToken);
{
await SaveRemotePeerAsync(item, cancellationToken);
}
// If exists, keep existing (simple merge strategy) // If exists, keep existing (simple merge strategy)
} }
} }
} }

View File

@@ -49,8 +49,10 @@ public abstract class PeerOplogConfirmationStore : IPeerOplogConfirmationStore
public abstract Task<IEnumerable<PeerOplogConfirmation>> ExportAsync(CancellationToken cancellationToken = default); public abstract Task<IEnumerable<PeerOplogConfirmation>> ExportAsync(CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task ImportAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default); public abstract Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default);
/// <inheritdoc /> /// <inheritdoc />
public abstract Task MergeAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default); public abstract Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default);
} }

Some files were not shown because too many files have changed in this diff Show More