Reformat/cleanup
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m10s

This commit is contained in:
Joseph Doherty
2026-02-21 07:53:53 -05:00
parent c6f6d9329a
commit 7ebc2cb567
160 changed files with 7258 additions and 7262 deletions

View File

@@ -1,23 +1,23 @@
<Solution>
<Configurations>
<Platform Name="Any CPU" />
<Platform Name="x64" />
<Platform Name="x86" />
<Platform Name="Any CPU"/>
<Platform Name="x64"/>
<Platform Name="x86"/>
</Configurations>
<Folder Name="/samples/">
<Project Path="samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj" />
<Project Path="samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj"/>
</Folder>
<Folder Name="/src/">
<Project Path="src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj" />
<Project Path="src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj" />
<Project Path="src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj" />
<Project Path="src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj" />
<Project Path="src/ZB.MOM.WW.CBDDC.Hosting/ZB.MOM.WW.CBDDC.Hosting.csproj"/>
<Project Path="src/ZB.MOM.WW.CBDDC.Core/ZB.MOM.WW.CBDDC.Core.csproj"/>
<Project Path="src/ZB.MOM.WW.CBDDC.Network/ZB.MOM.WW.CBDDC.Network.csproj"/>
<Project Path="src/ZB.MOM.WW.CBDDC.Persistence/ZB.MOM.WW.CBDDC.Persistence.csproj"/>
</Folder>
<Folder Name="/tests/">
<Project Path="tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj" />
<Project Path="tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj" />
<Project Path="tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj" />
<Project Path="tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj" />
<Project Path="tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj" />
<Project Path="tests/ZB.MOM.WW.CBDDC.Core.Tests/ZB.MOM.WW.CBDDC.Core.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.E2E.Tests/ZB.MOM.WW.CBDDC.E2E.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.Hosting.Tests/ZB.MOM.WW.CBDDC.Hosting.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.Network.Tests/ZB.MOM.WW.CBDDC.Network.Tests.csproj"/>
<Project Path="tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests.csproj"/>
</Folder>
</Solution>

View File

@@ -1,37 +1,33 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using Serilog.Context;
using ZB.MOM.WW.CBDDC.Core.Cache;
using ZB.MOM.WW.CBDDC.Core.Diagnostics;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.DependencyInjection; // For IServiceProvider if needed
using Serilog.Context;
using ZB.MOM.WW.CBDDC.Sample.Console;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
// For IServiceProvider if needed
namespace ZB.MOM.WW.CBDDC.Sample.Console;
public class ConsoleInteractiveService : BackgroundService
{
private readonly ILogger<ConsoleInteractiveService> _logger;
private readonly SampleDbContext _db;
private readonly ICBDDCNode _node;
private readonly IHostApplicationLifetime _lifetime;
// Auxiliary services for status/commands
private readonly IDocumentCache _cache;
private readonly IOfflineQueue _queue;
private readonly ICBDDCHealthCheck _healthCheck;
private readonly ISyncStatusTracker _syncTracker;
private readonly IServiceProvider _serviceProvider;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _db;
private readonly ICBDDCHealthCheck _healthCheck;
private readonly IHostApplicationLifetime _lifetime;
private readonly ILogger<ConsoleInteractiveService> _logger;
private readonly ICBDDCNode _node;
private readonly IOfflineQueue _queue;
private readonly IServiceProvider _serviceProvider;
private readonly ISyncStatusTracker _syncTracker;
/// <summary>
/// Initializes a new instance of the <see cref="ConsoleInteractiveService"/> class.
/// Initializes a new instance of the <see cref="ConsoleInteractiveService" /> class.
/// </summary>
/// <param name="logger">The logger used by the interactive service.</param>
/// <param name="db">The sample database context.</param>
@@ -72,7 +68,7 @@ public class ConsoleInteractiveService : BackgroundService
{
var config = await _configProvider.GetConfiguration();
System.Console.WriteLine($"--- Interactive Console ---");
System.Console.WriteLine("--- Interactive Console ---");
System.Console.WriteLine($"Node ID: {config.NodeId}");
PrintHelp();
@@ -85,7 +81,7 @@ public class ConsoleInteractiveService : BackgroundService
continue;
}
var input = System.Console.ReadLine();
string? input = System.Console.ReadLine();
if (string.IsNullOrEmpty(input)) continue;
try
@@ -126,34 +122,45 @@ public class ConsoleInteractiveService : BackgroundService
if (input.StartsWith("n"))
{
var ts = DateTime.Now.ToString("HH:mm:ss.fff");
var user = new User { Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), Address = new Address { City = "AutoCity" } };
var user = new User
{
Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90),
Address = new Address { City = "AutoCity" }
};
await _db.Users.InsertAsync(user);
await _db.SaveChangesAsync();
System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}...");
}
else if (input.StartsWith("s"))
{
for (int i = 0; i < 5; i++)
for (var i = 0; i < 5; i++)
{
var ts = DateTime.Now.ToString("HH:mm:ss.fff");
var user = new User { Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90), Address = new Address { City = "SpamCity" } };
var user = new User
{
Id = Guid.NewGuid().ToString(), Name = $"User-{ts}", Age = new Random().Next(18, 90),
Address = new Address { City = "SpamCity" }
};
await _db.Users.InsertAsync(user);
System.Console.WriteLine($"[+] Created {user.Name} with Id: {user.Id}...");
await Task.Delay(100);
}
await _db.SaveChangesAsync();
}
else if (input.StartsWith("c"))
{
var userCount = _db.Users.FindAll().Count();
var todoCount = _db.TodoLists.FindAll().Count();
int userCount = _db.Users.FindAll().Count();
int todoCount = _db.TodoLists.FindAll().Count();
System.Console.WriteLine($"Collection 'Users': {userCount} documents");
System.Console.WriteLine($"Collection 'TodoLists': {todoCount} documents");
}
else if (input.StartsWith("p"))
{
var alice = new User { Id = Guid.NewGuid().ToString(), Name = "Alice", Age = 30, Address = new Address { City = "Paris" } };
var bob = new User { Id = Guid.NewGuid().ToString(), Name = "Bob", Age = 25, Address = new Address { City = "Rome" } };
var alice = new User
{ Id = Guid.NewGuid().ToString(), Name = "Alice", Age = 30, Address = new Address { City = "Paris" } };
var bob = new User
{ Id = Guid.NewGuid().ToString(), Name = "Bob", Age = 25, Address = new Address { City = "Rome" } };
await _db.Users.InsertAsync(alice);
await _db.Users.InsertAsync(bob);
await _db.SaveChangesAsync();
@@ -162,17 +169,19 @@ public class ConsoleInteractiveService : BackgroundService
else if (input.StartsWith("g"))
{
System.Console.Write("Enter user Id: ");
var id = System.Console.ReadLine();
string? id = System.Console.ReadLine();
if (!string.IsNullOrEmpty(id))
{
var u = _db.Users.FindById(id);
System.Console.WriteLine(u != null ? $"Got: {u.Name}, Age {u.Age}, City: {u.Address?.City}" : "Not found");
System.Console.WriteLine(u != null
? $"Got: {u.Name}, Age {u.Age}, City: {u.Address?.City}"
: "Not found");
}
}
else if (input.StartsWith("d"))
{
System.Console.Write("Enter user Id to delete: ");
var id = System.Console.ReadLine();
string? id = System.Console.ReadLine();
if (!string.IsNullOrEmpty(id))
{
await _db.Users.DeleteAsync(id);
@@ -183,8 +192,8 @@ public class ConsoleInteractiveService : BackgroundService
else if (input.StartsWith("l"))
{
var peers = _node.Discovery.GetActivePeers();
var handshakeSvc = _serviceProvider.GetService<ZB.MOM.WW.CBDDC.Network.Security.IPeerHandshakeService>();
var secureIcon = handshakeSvc != null ? "🔒" : "🔓";
var handshakeSvc = _serviceProvider.GetService<IPeerHandshakeService>();
string secureIcon = handshakeSvc != null ? "🔒" : "🔓";
System.Console.WriteLine($"Active Peers ({secureIcon}):");
foreach (var p in peers)
@@ -203,7 +212,7 @@ public class ConsoleInteractiveService : BackgroundService
{
var health = await _healthCheck.CheckAsync();
var syncStatus = _syncTracker.GetStatus();
var handshakeSvc = _serviceProvider.GetService<ZB.MOM.WW.CBDDC.Network.Security.IPeerHandshakeService>();
var handshakeSvc = _serviceProvider.GetService<IPeerHandshakeService>();
System.Console.WriteLine("=== Health Check ===");
System.Console.WriteLine($"Database: {(health.DatabaseHealthy ? "" : "")}");
@@ -216,17 +225,18 @@ public class ConsoleInteractiveService : BackgroundService
if (health.Errors.Any())
{
System.Console.WriteLine("Errors:");
foreach (var err in health.Errors.Take(3)) System.Console.WriteLine($" - {err}");
foreach (string err in health.Errors.Take(3)) System.Console.WriteLine($" - {err}");
}
}
else if (input.StartsWith("ch") || input == "cache")
{
var stats = _cache.GetStatistics();
System.Console.WriteLine($"=== Cache Stats ===\nSize: {stats.Size}\nHits: {stats.Hits}\nMisses: {stats.Misses}\nRate: {stats.HitRate:P1}");
System.Console.WriteLine(
$"=== Cache Stats ===\nSize: {stats.Size}\nHits: {stats.Hits}\nMisses: {stats.Misses}\nRate: {stats.HitRate:P1}");
}
else if (input.StartsWith("r") && input.Contains("resolver"))
{
var parts = input.Split(' ');
string[] parts = input.Split(' ');
if (parts.Length > 1)
{
var newResolver = parts[1].ToLower() switch
@@ -240,7 +250,7 @@ public class ConsoleInteractiveService : BackgroundService
{
// Note: Requires restart to fully apply. For demo, we inform user.
System.Console.WriteLine($"⚠️ Resolver changed to {parts[1].ToUpper()}. Restart node to apply.");
System.Console.WriteLine($" (Current session continues with previous resolver)");
System.Console.WriteLine(" (Current session continues with previous resolver)");
}
else
{
@@ -262,7 +272,7 @@ public class ConsoleInteractiveService : BackgroundService
System.Console.WriteLine($"📋 {list.Name} ({list.Items.Count} items)");
foreach (var item in list.Items)
{
var status = item.Completed ? "✓" : " ";
string status = item.Completed ? "✓" : " ";
System.Console.WriteLine($" [{status}] {item.Task}");
}
}
@@ -281,8 +291,8 @@ public class ConsoleInteractiveService : BackgroundService
Name = "Shopping List",
Items = new List<TodoItem>
{
new TodoItem { Task = "Buy milk", Completed = false },
new TodoItem { Task = "Buy bread", Completed = false }
new() { Task = "Buy milk", Completed = false },
new() { Task = "Buy bread", Completed = false }
}
};
@@ -325,23 +335,19 @@ public class ConsoleInteractiveService : BackgroundService
System.Console.WriteLine($" List: {merged.Name}");
foreach (var item in merged.Items)
{
var status = item.Completed ? "✓" : " ";
string status = item.Completed ? "✓" : " ";
System.Console.WriteLine($" [{status}] {item.Task}");
}
var resolver = _serviceProvider.GetRequiredService<IConflictResolver>();
var resolverType = resolver.GetType().Name;
string resolverType = resolver.GetType().Name;
System.Console.WriteLine($"\n Resolution Strategy: {resolverType}");
if (resolverType.Contains("Recursive"))
{
System.Console.WriteLine(" → Items merged by 'id', both edits preserved");
}
else
{
System.Console.WriteLine(" → Last write wins, Node B changes override Node A");
}
}
System.Console.WriteLine("\n✓ Demo complete. Run 'todos' to see all lists.\n");
}

View File

@@ -1,33 +1,26 @@
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Cache;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Core.Diagnostics;
using ZB.MOM.WW.CBDDC.Core.Resilience;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Sample.Console;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Serilog;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
namespace ZB.MOM.WW.CBDDC.Sample.Console;
// Local User/Address classes removed in favor of Shared project
class Program
internal class Program
{
static async Task Main(string[] args)
private static async Task Main(string[] args)
{
var builder = Host.CreateApplicationBuilder(args);
// Configuration
builder.Configuration.SetBasePath(Directory.GetCurrentDirectory())
.AddJsonFile("appsettings.json", optional: true, reloadOnChange: true);
.AddJsonFile("appsettings.json", true, true);
// Logging
builder.Logging.ClearProviders();
@@ -38,34 +31,31 @@ class Program
.Enrich.WithProperty("Application", "CBDDC.Sample.Console")
.WriteTo.Console());
var randomPort = new Random().Next(1000, 9999);
int randomPort = new Random().Next(1000, 9999);
// Node ID
string nodeId = args.Length > 0 ? args[0] : ("node-" + randomPort);
string nodeId = args.Length > 0 ? args[0] : "node-" + randomPort;
int tcpPort = args.Length > 1 ? int.Parse(args[1]) : randomPort;
// Conflict Resolution Strategy (can be switched at runtime via service replacement)
var useRecursiveMerge = args.Contains("--merge");
if (useRecursiveMerge)
{
builder.Services.AddSingleton<IConflictResolver, RecursiveNodeMergeConflictResolver>();
}
bool useRecursiveMerge = args.Contains("--merge");
if (useRecursiveMerge) builder.Services.AddSingleton<IConflictResolver, RecursiveNodeMergeConflictResolver>();
IPeerNodeConfigurationProvider peerNodeConfigurationProvider = new StaticPeerNodeConfigurationProvider(
new PeerNodeConfiguration
{
NodeId = nodeId,
TcpPort = tcpPort,
AuthToken = "Test-Cluster-Key",
AuthToken = "Test-Cluster-Key"
//KnownPeers = builder.Configuration.GetSection("CBDDC:KnownPeers").Get<List<KnownPeerConfiguration>>() ?? new()
});
builder.Services.AddSingleton<IPeerNodeConfigurationProvider>(peerNodeConfigurationProvider);
builder.Services.AddSingleton(peerNodeConfigurationProvider);
// Database path
var dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data");
string dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data");
Directory.CreateDirectory(dataPath);
var databasePath = Path.Combine(dataPath, $"{nodeId}.blite");
string databasePath = Path.Combine(dataPath, $"{nodeId}.blite");
// Register CBDDC Services using Fluent Extensions with BLite, SampleDbContext, and SampleDocumentStore
builder.Services.AddCBDDCCore()
@@ -86,12 +76,7 @@ class Program
private class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider
{
/// <summary>
/// Gets or sets the current peer node configuration.
/// </summary>
public PeerNodeConfiguration Configuration { get; set; }
/// <summary>
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider"/> class.
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider" /> class.
/// </summary>
/// <param name="configuration">The initial peer node configuration.</param>
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
@@ -99,6 +84,11 @@ class Program
Configuration = configuration;
}
/// <summary>
/// Gets or sets the current peer node configuration.
/// </summary>
public PeerNodeConfiguration Configuration { get; }
/// <summary>
/// Occurs when the peer node configuration changes.
/// </summary>
@@ -122,5 +112,4 @@ class Program
ConfigurationChanged?.Invoke(this, newConfig);
}
}
}

View File

@@ -5,21 +5,25 @@ This sample demonstrates the core features of CBDDC, a distributed peer-to-peer
## Features Demonstrated
### 🔑 Primary Keys & Auto-Generation
- Automatic GUID generation for entities
- Convention-based key detection (`Id` property)
- `[PrimaryKey]` attribute support
### 🎯 Generic Type-Safe API
- `Collection<T>()` for compile-time type safety
- Keyless `Put(entity)` with auto-key extraction
- IntelliSense-friendly operations
### 🔍 LINQ Query Support
- Expression-based queries
- Paging and sorting
- Complex predicates (>, >=, ==, !=, nested properties)
### 🌐 Network Synchronization
- UDP peer discovery
- TCP synchronization
- Automatic conflict resolution (Last-Write-Wins)
@@ -35,16 +39,19 @@ dotnet run
### Multi-Node (Peer-to-Peer)
Terminal 1:
```bash
dotnet run -- --node-id node1 --tcp-port 5001 --udp-port 6001
```
Terminal 2:
```bash
dotnet run -- --node-id node2 --tcp-port 5002 --udp-port 6002
```
Terminal 3:
```bash
dotnet run -- --node-id node3 --tcp-port 5003 --udp-port 6003
```
@@ -54,7 +61,7 @@ Changes made on any node will automatically sync to all peers!
## Available Commands
| Command | Description |
|---------|-------------|
|---------|----------------------------------------|
| `p` | Put Alice and Bob (auto-generated IDs) |
| `g` | Get user by ID (prompts for ID) |
| `d` | Delete user by ID (prompts for ID) |

View File

@@ -2,26 +2,11 @@
using BLite.Core.Metadata;
using BLite.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Sample.Console;
public partial class SampleDbContext : CBDDCDocumentDbContext
public class SampleDbContext : CBDDCDocumentDbContext
{
/// <summary>
/// Gets the users collection.
/// </summary>
public DocumentCollection<string, User> Users { get; private set; } = null!;
/// <summary>
/// Gets the todo lists collection.
/// </summary>
public DocumentCollection<string, TodoList> TodoLists { get; private set; } = null!;
/// <summary>
/// Initializes a new instance of the SampleDbContext class using the specified database file path.
/// </summary>
@@ -40,6 +25,16 @@ public partial class SampleDbContext : CBDDCDocumentDbContext
{
}
/// <summary>
/// Gets the users collection.
/// </summary>
public DocumentCollection<string, User> Users { get; private set; } = null!;
/// <summary>
/// Gets the todo lists collection.
/// </summary>
public DocumentCollection<string, TodoList> TodoLists { get; private set; } = null!;
/// <inheritdoc />
protected override void OnModelCreating(ModelBuilder modelBuilder)
{

View File

@@ -1,10 +1,9 @@
using ZB.MOM.WW.CBDDC.Core;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging;
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Sample.Console;
@@ -18,7 +17,7 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
private const string TodoListsCollection = "TodoLists";
/// <summary>
/// Initializes a new instance of the <see cref="SampleDocumentStore"/> class.
/// Initializes a new instance of the <see cref="SampleDocumentStore" /> class.
/// </summary>
/// <param name="context">The sample database context.</param>
/// <param name="configProvider">The peer node configuration provider.</param>
@@ -37,6 +36,16 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id);
}
#region Helper Methods
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
if (entity == null) return null;
return JsonSerializer.SerializeToElement(entity);
}
#endregion
#region Abstract Method Implementations
/// <inheritdoc />
@@ -49,12 +58,10 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
/// <inheritdoc />
protected override async Task ApplyContentToEntitiesBatchAsync(
IEnumerable<(string Collection, string Key, JsonElement Content)> documents, CancellationToken cancellationToken)
IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
CancellationToken cancellationToken)
{
foreach (var (collection, key, content) in documents)
{
UpsertEntity(collection, key, content);
}
foreach ((string collection, string key, var content) in documents) UpsertEntity(collection, key, content);
await _context.SaveChangesAsync(cancellationToken);
}
@@ -91,7 +98,7 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
protected override Task<JsonElement?> GetEntityAsJsonAsync(
string collection, string key, CancellationToken cancellationToken)
{
return Task.FromResult<JsonElement?>(collection switch
return Task.FromResult(collection switch
{
UsersCollection => SerializeEntity(_context.Users.Find(u => u.Id == key).FirstOrDefault()),
TodoListsCollection => SerializeEntity(_context.TodoLists.Find(t => t.Id == key).FirstOrDefault()),
@@ -111,10 +118,7 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
protected override async Task RemoveEntitiesBatchAsync(
IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken)
{
foreach (var (collection, key) in documents)
{
DeleteEntity(collection, key);
}
foreach ((string collection, string key) in documents) DeleteEntity(collection, key);
await _context.SaveChangesAsync(cancellationToken);
}
@@ -151,14 +155,4 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
}
#endregion
#region Helper Methods
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
if (entity == null) return null;
return JsonSerializer.SerializeToElement(entity);
}
#endregion
}

View File

@@ -1,4 +1,3 @@
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Sample.Console;

View File

@@ -1,24 +1,24 @@
<Project Sdk="Microsoft.NET.Sdk">
<ItemGroup>
<PackageReference Include="Lifter.Core" Version="1.1.0" />
<PackageReference Include="Lifter.Core" Version="1.1.0"/>
<PackageReference Include="BLite.SourceGenerators" Version="1.3.1">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
</PackageReference>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj" />
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj"/>
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="9.0.0" />
<PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="9.0.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="9.0.0" />
<PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.0" />
<PackageReference Include="Serilog" Version="4.2.0" />
<PackageReference Include="Serilog.Extensions.Hosting" Version="9.0.0" />
<PackageReference Include="Serilog.Sinks.Console" Version="6.0.0" />
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.0"/>
<PackageReference Include="Serilog" Version="4.2.0"/>
<PackageReference Include="Serilog.Extensions.Hosting" Version="9.0.0"/>
<PackageReference Include="Serilog.Sinks.Console" Version="6.0.0"/>
</ItemGroup>
<ItemGroup>

View File

@@ -1,5 +1,5 @@
{
"Logging": {
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft": "Warning",
@@ -9,7 +9,7 @@
"ZB.MOM.WW.CBDDC.Core.Storage.OplogCoordinator": "Warning",
"ZB.MOM.WW.CBDDC.Persistence": "Warning"
}
},
},
"CBDDC": {
"Network": {
"TcpPort": 5001,

View File

@@ -1,10 +1,8 @@
using System;
using System.Collections.Generic;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Core.Cache;
@@ -14,17 +12,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Cache;
internal class CacheEntry
{
/// <summary>
/// Gets the cached document.
/// </summary>
public Document Document { get; }
/// <summary>
/// Gets the linked-list node used for LRU tracking.
/// </summary>
public LinkedListNode<string> Node { get; }
/// <summary>
/// Initializes a new instance of the <see cref="CacheEntry"/> class.
/// Initializes a new instance of the <see cref="CacheEntry" /> class.
/// </summary>
/// <param name="document">The cached document.</param>
/// <param name="node">The linked-list node used for LRU tracking.</param>
@@ -33,6 +21,16 @@ internal class CacheEntry
Document = document;
Node = node;
}
/// <summary>
/// Gets the cached document.
/// </summary>
public Document Document { get; }
/// <summary>
/// Gets the linked-list node used for LRU tracking.
/// </summary>
public LinkedListNode<string> Node { get; }
}
/// <summary>
@@ -40,22 +38,23 @@ internal class CacheEntry
/// </summary>
public class DocumentCache : IDocumentCache
{
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly Dictionary<string, CacheEntry> _cache = new();
private readonly LinkedList<string> _lru = new();
private readonly ILogger<DocumentCache> _logger;
private readonly object _lock = new();
private readonly ILogger<DocumentCache> _logger;
private readonly LinkedList<string> _lru = new();
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
// Statistics
private long _hits = 0;
private long _misses = 0;
private long _hits;
private long _misses;
/// <summary>
/// Initializes a new instance of the <see cref="DocumentCache"/> class.
/// Initializes a new instance of the <see cref="DocumentCache" /> class.
/// </summary>
/// <param name="peerNodeConfigurationProvider">The configuration provider used for cache size limits.</param>
/// <param name="logger">The logger instance.</param>
public DocumentCache(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger<DocumentCache>? logger = null)
public DocumentCache(IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
ILogger<DocumentCache>? logger = null)
{
_peerNodeConfigurationProvider = peerNodeConfigurationProvider;
_logger = logger ?? NullLogger<DocumentCache>.Instance;
@@ -66,7 +65,7 @@ public class DocumentCache : IDocumentCache
/// </summary>
/// <param name="collection">The document collection name.</param>
/// <param name="key">The document key.</param>
/// <returns>A task whose result is the cached document, or <see langword="null"/> if not found.</returns>
/// <returns>A task whose result is the cached document, or <see langword="null" /> if not found.</returns>
public async Task<Document?> Get(string collection, string key)
{
lock (_lock)
@@ -118,7 +117,7 @@ public class DocumentCache : IDocumentCache
// Evict if full
if (_cache.Count >= peerConfig.MaxDocumentCacheSize)
{
var oldest = _lru.Last!.Value;
string oldest = _lru.Last!.Value;
_lru.RemoveLast();
_cache.Remove(oldest);
_logger.LogTrace("Evicted oldest cache entry {Key}", oldest);
@@ -157,7 +156,7 @@ public class DocumentCache : IDocumentCache
{
lock (_lock)
{
var count = _cache.Count;
int count = _cache.Count;
_cache.Clear();
_lru.Clear();
_logger.LogInformation("Cleared cache ({Count} entries)", count);
@@ -171,8 +170,8 @@ public class DocumentCache : IDocumentCache
{
lock (_lock)
{
var total = _hits + _misses;
var hitRate = total > 0 ? (double)_hits / total : 0;
long total = _hits + _misses;
double hitRate = total > 0 ? (double)_hits / total : 0;
return (_hits, _misses, _cache.Count, hitRate);
}
}

View File

@@ -1,12 +1,12 @@
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Cache
namespace ZB.MOM.WW.CBDDC.Core.Cache;
/// <summary>
/// Defines operations for caching documents by collection and key.
/// </summary>
public interface IDocumentCache
{
/// <summary>
/// Defines operations for caching documents by collection and key.
/// </summary>
public interface IDocumentCache
{
/// <summary>
/// Clears all cached documents.
/// </summary>
@@ -17,7 +17,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Cache
/// </summary>
/// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param>
/// <returns>The cached document, or <see langword="null"/> if not found.</returns>
/// <returns>The cached document, or <see langword="null" /> if not found.</returns>
Task<Document?> Get(string collection, string key);
/// <summary>
@@ -41,5 +41,4 @@ namespace ZB.MOM.WW.CBDDC.Core.Cache
/// <param name="document">The document to cache.</param>
/// <returns>A task that represents the asynchronous operation.</returns>
Task Set(string collection, string key, Document document);
}
}

View File

@@ -9,16 +9,16 @@ namespace ZB.MOM.WW.CBDDC.Core;
public class ChangesAppliedEventArgs : EventArgs
{
/// <summary>
/// Gets the changes that were applied.
/// </summary>
public IEnumerable<OplogEntry> Changes { get; }
/// <summary>
/// Initializes a new instance of the <see cref="ChangesAppliedEventArgs"/> class.
/// Initializes a new instance of the <see cref="ChangesAppliedEventArgs" /> class.
/// </summary>
/// <param name="changes">The changes that were applied.</param>
public ChangesAppliedEventArgs(IEnumerable<OplogEntry> changes)
{
Changes = changes;
}
/// <summary>
/// Gets the changes that were applied.
/// </summary>
public IEnumerable<OplogEntry> Changes { get; }
}

View File

@@ -1,5 +1,4 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
@@ -14,12 +13,12 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
/// </summary>
public class CBDDCHealthCheck : ICBDDCHealthCheck
{
private readonly ILogger<CBDDCHealthCheck> _logger;
private readonly IOplogStore _store;
private readonly ISyncStatusTracker _syncTracker;
private readonly ILogger<CBDDCHealthCheck> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCHealthCheck"/> class.
/// Initializes a new instance of the <see cref="CBDDCHealthCheck" /> class.
/// </summary>
/// <param name="store">The oplog store used for database health checks.</param>
/// <param name="syncTracker">The tracker that provides synchronization status.</param>
@@ -65,9 +64,7 @@ public class CBDDCHealthCheck : ICBDDCHealthCheck
// Add error messages from sync tracker
foreach (var error in syncStatus.SyncErrors.Take(5)) // Last 5 errors
{
status.Errors.Add($"{error.Timestamp:yyyy-MM-dd HH:mm:ss} - {error.Message}");
}
// Add metadata
status.Metadata["TotalDocumentsSynced"] = syncStatus.TotalDocumentsSynced;

View File

@@ -1,15 +1,14 @@
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Diagnostics
namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
public interface ICBDDCHealthCheck
{
public interface ICBDDCHealthCheck
{
/// <summary>
/// Performs a health check for the implementing component.
/// </summary>
/// <param name="cancellationToken">Cancellation token.</param>
/// <returns>The resulting health status.</returns>
Task<HealthStatus> CheckAsync(CancellationToken cancellationToken = default);
}
}

View File

@@ -1,12 +1,12 @@
using System;
namespace ZB.MOM.WW.CBDDC.Core.Diagnostics
namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
/// <summary>
/// Tracks synchronization status and peer health metrics.
/// </summary>
public interface ISyncStatusTracker
{
/// <summary>
/// Tracks synchronization status and peer health metrics.
/// </summary>
public interface ISyncStatusTracker
{
/// <summary>
/// Removes peer entries that have been inactive longer than the specified threshold.
/// </summary>
@@ -16,7 +16,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics
/// <summary>
/// Gets the current synchronization status snapshot.
/// </summary>
/// <returns>The current <see cref="SyncStatus"/>.</returns>
/// <returns>The current <see cref="SyncStatus" />.</returns>
SyncStatus GetStatus();
/// <summary>
@@ -59,5 +59,4 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics
/// <param name="address">The peer network address.</param>
/// <param name="isConnected">A value indicating whether the peer is connected.</param>
void UpdatePeer(string nodeId, string address, bool isConnected);
}
}

View File

@@ -11,19 +11,19 @@ namespace ZB.MOM.WW.CBDDC.Core.Diagnostics;
/// </summary>
public class SyncStatusTracker : ISyncStatusTracker
{
private readonly ILogger<SyncStatusTracker> _logger;
private readonly object _lock = new();
private bool _isOnline = false;
private DateTime? _lastSyncTime;
private readonly List<PeerInfo> _activePeers = new();
private readonly Queue<SyncError> _recentErrors = new();
private long _totalDocumentsSynced = 0;
private long _totalBytesTransferred = 0;
private const int MaxErrorHistory = 50;
private readonly List<PeerInfo> _activePeers = new();
private readonly object _lock = new();
private readonly ILogger<SyncStatusTracker> _logger;
private readonly Queue<SyncError> _recentErrors = new();
private bool _isOnline;
private DateTime? _lastSyncTime;
private long _totalBytesTransferred;
private long _totalDocumentsSynced;
/// <summary>
/// Initializes a new instance of the <see cref="SyncStatusTracker"/> class.
/// Initializes a new instance of the <see cref="SyncStatusTracker" /> class.
/// </summary>
/// <param name="logger">Optional logger instance.</param>
public SyncStatusTracker(ILogger<SyncStatusTracker>? logger = null)
@@ -84,10 +84,7 @@ public class SyncStatusTracker : ISyncStatusTracker
_recentErrors.Enqueue(error);
while (_recentErrors.Count > MaxErrorHistory)
{
_recentErrors.Dequeue();
}
while (_recentErrors.Count > MaxErrorHistory) _recentErrors.Dequeue();
_logger.LogWarning("Sync error recorded: {Message} (Peer: {Peer})", message, peerNodeId ?? "N/A");
}
@@ -135,10 +132,7 @@ public class SyncStatusTracker : ISyncStatusTracker
lock (_lock)
{
var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId);
if (peer != null)
{
peer.SuccessfulSyncs++;
}
if (peer != null) peer.SuccessfulSyncs++;
}
}
@@ -151,10 +145,7 @@ public class SyncStatusTracker : ISyncStatusTracker
lock (_lock)
{
var peer = _activePeers.FirstOrDefault(p => p.NodeId == nodeId);
if (peer != null)
{
peer.FailedSyncs++;
}
if (peer != null) peer.FailedSyncs++;
}
}
@@ -187,12 +178,9 @@ public class SyncStatusTracker : ISyncStatusTracker
lock (_lock)
{
var cutoff = DateTime.UtcNow - inactiveThreshold;
var removed = _activePeers.RemoveAll(p => p.LastSeen < cutoff);
int removed = _activePeers.RemoveAll(p => p.LastSeen < cutoff);
if (removed > 0)
{
_logger.LogInformation("Removed {Count} inactive peers", removed);
}
if (removed > 0) _logger.LogInformation("Removed {Count} inactive peers", removed);
}
}
}

View File

@@ -1,6 +1,5 @@
using ZB.MOM.WW.CBDDC.Core.Sync;
using System;
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Core;
@@ -9,15 +8,32 @@ namespace ZB.MOM.WW.CBDDC.Core;
/// </summary>
public class Document
{
/// <summary>
/// Initializes a new instance of the <see cref="Document" /> class.
/// </summary>
/// <param name="collection">The collection that contains the document.</param>
/// <param name="key">The document key.</param>
/// <param name="content">The document content.</param>
/// <param name="updatedAt">The timestamp of the latest applied update.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public Document(string collection, string key, JsonElement content, HlcTimestamp updatedAt, bool isDeleted)
{
Collection = collection;
Key = key;
Content = content;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
/// <summary>
/// Gets the collection that contains the document.
/// </summary>
public string Collection { get; private set; }
public string Collection { get; }
/// <summary>
/// Gets the document key.
/// </summary>
public string Key { get; private set; }
public string Key { get; }
/// <summary>
/// Gets the document content.
@@ -34,23 +50,6 @@ public class Document
/// </summary>
public bool IsDeleted { get; private set; }
/// <summary>
/// Initializes a new instance of the <see cref="Document"/> class.
/// </summary>
/// <param name="collection">The collection that contains the document.</param>
/// <param name="key">The document key.</param>
/// <param name="content">The document content.</param>
/// <param name="updatedAt">The timestamp of the latest applied update.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public Document(string collection, string key, JsonElement content, HlcTimestamp updatedAt, bool isDeleted)
{
Collection = collection;
Key = key;
Content = content;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
/// <summary>
/// Merges a remote operation into the current document using last-write-wins or a conflict resolver.
/// </summary>
@@ -70,8 +69,10 @@ public class Document
UpdatedAt = oplogEntry.Timestamp;
IsDeleted = oplogEntry.Operation == OperationType.Delete;
}
return;
}
var resolutionResult = resolver.Resolve(this, oplogEntry);
if (resolutionResult.ShouldApply && resolutionResult.MergedDocument != null)
{

View File

@@ -8,12 +8,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Exceptions;
public class CBDDCException : Exception
{
/// <summary>
/// Error code for programmatic error handling.
/// </summary>
public string ErrorCode { get; }
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCException"/> class.
/// Initializes a new instance of the <see cref="CBDDCException" /> class.
/// </summary>
/// <param name="errorCode">The application-specific error code.</param>
/// <param name="message">The exception message.</param>
@@ -24,7 +19,7 @@ public class CBDDCException : Exception
}
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCException"/> class.
/// Initializes a new instance of the <see cref="CBDDCException" /> class.
/// </summary>
/// <param name="errorCode">The application-specific error code.</param>
/// <param name="message">The exception message.</param>
@@ -34,6 +29,11 @@ public class CBDDCException : Exception
{
ErrorCode = errorCode;
}
/// <summary>
/// Error code for programmatic error handling.
/// </summary>
public string ErrorCode { get; }
}
/// <summary>
@@ -42,19 +42,23 @@ public class CBDDCException : Exception
public class NetworkException : CBDDCException
{
/// <summary>
/// Initializes a new instance of the <see cref="NetworkException"/> class.
/// Initializes a new instance of the <see cref="NetworkException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
public NetworkException(string message)
: base("NETWORK_ERROR", message) { }
: base("NETWORK_ERROR", message)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="NetworkException"/> class.
/// Initializes a new instance of the <see cref="NetworkException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param>
public NetworkException(string message, Exception innerException)
: base("NETWORK_ERROR", message, innerException) { }
: base("NETWORK_ERROR", message, innerException)
{
}
}
/// <summary>
@@ -63,19 +67,23 @@ public class NetworkException : CBDDCException
public class PersistenceException : CBDDCException
{
/// <summary>
/// Initializes a new instance of the <see cref="PersistenceException"/> class.
/// Initializes a new instance of the <see cref="PersistenceException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
public PersistenceException(string message)
: base("PERSISTENCE_ERROR", message) { }
: base("PERSISTENCE_ERROR", message)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="PersistenceException"/> class.
/// Initializes a new instance of the <see cref="PersistenceException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param>
public PersistenceException(string message, Exception innerException)
: base("PERSISTENCE_ERROR", message, innerException) { }
: base("PERSISTENCE_ERROR", message, innerException)
{
}
}
/// <summary>
@@ -84,19 +92,23 @@ public class PersistenceException : CBDDCException
public class SyncException : CBDDCException
{
/// <summary>
/// Initializes a new instance of the <see cref="SyncException"/> class.
/// Initializes a new instance of the <see cref="SyncException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
public SyncException(string message)
: base("SYNC_ERROR", message) { }
: base("SYNC_ERROR", message)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="SyncException"/> class.
/// Initializes a new instance of the <see cref="SyncException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param>
public SyncException(string message, Exception innerException)
: base("SYNC_ERROR", message, innerException) { }
: base("SYNC_ERROR", message, innerException)
{
}
}
/// <summary>
@@ -105,11 +117,13 @@ public class SyncException : CBDDCException
public class ConfigurationException : CBDDCException
{
/// <summary>
/// Initializes a new instance of the <see cref="ConfigurationException"/> class.
/// Initializes a new instance of the <see cref="ConfigurationException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
public ConfigurationException(string message)
: base("CONFIG_ERROR", message) { }
: base("CONFIG_ERROR", message)
{
}
}
/// <summary>
@@ -118,19 +132,23 @@ public class ConfigurationException : CBDDCException
public class DatabaseCorruptionException : PersistenceException
{
/// <summary>
/// Initializes a new instance of the <see cref="DatabaseCorruptionException"/> class.
/// Initializes a new instance of the <see cref="DatabaseCorruptionException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
public DatabaseCorruptionException(string message)
: base(message) { }
: base(message)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="DatabaseCorruptionException"/> class.
/// Initializes a new instance of the <see cref="DatabaseCorruptionException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
/// <param name="innerException">The exception that caused the current exception.</param>
public DatabaseCorruptionException(string message, Exception innerException)
: base(message, innerException) { }
: base(message, innerException)
{
}
}
/// <summary>
@@ -139,32 +157,23 @@ public class DatabaseCorruptionException : PersistenceException
public class TimeoutException : CBDDCException
{
/// <summary>
/// Initializes a new instance of the <see cref="TimeoutException"/> class.
/// Initializes a new instance of the <see cref="TimeoutException" /> class.
/// </summary>
/// <param name="operation">The operation that timed out.</param>
/// <param name="timeoutMs">The timeout in milliseconds.</param>
public TimeoutException(string operation, int timeoutMs)
: base("TIMEOUT_ERROR", $"Operation '{operation}' timed out after {timeoutMs}ms") { }
: base("TIMEOUT_ERROR", $"Operation '{operation}' timed out after {timeoutMs}ms")
{
}
}
/// <summary>
/// Exception thrown when a document is not found in a collection.
/// </summary>
public class DocumentNotFoundException : PersistenceException
{
/// <summary>
/// Gets the document key that was not found.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the collection where the document was searched.
/// </summary>
public string Collection { get; }
/// <summary>
/// Initializes a new instance of the <see cref="DocumentNotFoundException"/> class.
/// Initializes a new instance of the <see cref="DocumentNotFoundException" /> class.
/// </summary>
/// <param name="collection">The collection where the document was searched.</param>
/// <param name="key">The document key that was not found.</param>
@@ -174,6 +183,16 @@ public class DocumentNotFoundException : PersistenceException
Collection = collection;
Key = key;
}
/// <summary>
/// Gets the document key that was not found.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the collection where the document was searched.
/// </summary>
public string Collection { get; }
}
/// <summary>
@@ -182,8 +201,10 @@ public class DocumentNotFoundException : PersistenceException
public class CBDDCConcurrencyException : PersistenceException
{
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCConcurrencyException"/> class.
/// Initializes a new instance of the <see cref="CBDDCConcurrencyException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
public CBDDCConcurrencyException(string message) : base(message) { }
public CBDDCConcurrencyException(string message) : base(message)
{
}
}

View File

@@ -25,7 +25,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
public string NodeId { get; }
/// <summary>
/// Initializes a new instance of the <see cref="HlcTimestamp"/> struct.
/// Initializes a new instance of the <see cref="HlcTimestamp" /> struct.
/// </summary>
/// <param name="physicalTime">The physical time component.</param>
/// <param name="logicalCounter">The logical counter component.</param>
@@ -43,8 +43,8 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
/// </summary>
/// <param name="other">The other timestamp to compare with this instance.</param>
/// <returns>
/// A value less than zero if this instance is earlier than <paramref name="other"/>, zero if they are equal,
/// or greater than zero if this instance is later than <paramref name="other"/>.
/// A value less than zero if this instance is earlier than <paramref name="other" />, zero if they are equal,
/// or greater than zero if this instance is later than <paramref name="other" />.
/// </returns>
public int CompareTo(HlcTimestamp other)
{
@@ -63,7 +63,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
/// </summary>
/// <param name="obj">The object to compare with this instance.</param>
/// <returns>
/// A value less than zero if this instance is earlier than <paramref name="obj"/>, zero if equal, or greater
/// A value less than zero if this instance is earlier than <paramref name="obj" />, zero if equal, or greater
/// than zero if later.
/// </returns>
public int CompareTo(object? obj)
@@ -77,7 +77,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
/// Determines whether this instance and another timestamp are equal.
/// </summary>
/// <param name="other">The other timestamp to compare.</param>
/// <returns><see langword="true"/> if the timestamps are equal; otherwise, <see langword="false"/>.</returns>
/// <returns><see langword="true" /> if the timestamps are equal; otherwise, <see langword="false" />.</returns>
public bool Equals(HlcTimestamp other)
{
return PhysicalTime == other.PhysicalTime &&
@@ -96,7 +96,7 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
{
unchecked
{
var hashCode = PhysicalTime.GetHashCode();
int hashCode = PhysicalTime.GetHashCode();
hashCode = (hashCode * 397) ^ LogicalCounter;
// Ensure HashCode uses the same comparison logic as Equals/CompareTo
// Handle null NodeId gracefully (possible via default(HlcTimestamp))
@@ -105,33 +105,59 @@ public readonly struct HlcTimestamp : IComparable<HlcTimestamp>, IComparable, IE
}
}
public static bool operator ==(HlcTimestamp left, HlcTimestamp right) => left.Equals(right);
public static bool operator !=(HlcTimestamp left, HlcTimestamp right) => !left.Equals(right);
public static bool operator ==(HlcTimestamp left, HlcTimestamp right)
{
return left.Equals(right);
}
public static bool operator !=(HlcTimestamp left, HlcTimestamp right)
{
return !left.Equals(right);
}
// Standard comparison operators making usage in SyncOrchestrator cleaner (e.g., remote > local)
public static bool operator <(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) < 0;
public static bool operator <=(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) <= 0;
public static bool operator >(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) > 0;
public static bool operator >=(HlcTimestamp left, HlcTimestamp right) => left.CompareTo(right) >= 0;
public static bool operator <(HlcTimestamp left, HlcTimestamp right)
{
return left.CompareTo(right) < 0;
}
public static bool operator <=(HlcTimestamp left, HlcTimestamp right)
{
return left.CompareTo(right) <= 0;
}
public static bool operator >(HlcTimestamp left, HlcTimestamp right)
{
return left.CompareTo(right) > 0;
}
public static bool operator >=(HlcTimestamp left, HlcTimestamp right)
{
return left.CompareTo(right) >= 0;
}
/// <inheritdoc />
public override string ToString() => FormattableString.Invariant($"{PhysicalTime}:{LogicalCounter}:{NodeId}");
public override string ToString()
{
return FormattableString.Invariant($"{PhysicalTime}:{LogicalCounter}:{NodeId}");
}
/// <summary>
/// Parses a timestamp string.
/// </summary>
/// <param name="s">The string to parse, in the format "PhysicalTime:LogicalCounter:NodeId".</param>
/// <returns>The parsed <see cref="HlcTimestamp"/>.</returns>
/// <returns>The parsed <see cref="HlcTimestamp" />.</returns>
public static HlcTimestamp Parse(string s)
{
if (string.IsNullOrEmpty(s)) throw new ArgumentNullException(nameof(s));
var parts = s.Split(':');
if (parts.Length != 3) throw new FormatException("Invalid HlcTimestamp format. Expected 'PhysicalTime:LogicalCounter:NodeId'.");
if (!long.TryParse(parts[0], out var physicalTime))
string[] parts = s.Split(':');
if (parts.Length != 3)
throw new FormatException("Invalid HlcTimestamp format. Expected 'PhysicalTime:LogicalCounter:NodeId'.");
if (!long.TryParse(parts[0], out long physicalTime))
throw new FormatException("Invalid PhysicalTime component in HlcTimestamp.");
if (!int.TryParse(parts[1], out var logicalCounter))
if (!int.TryParse(parts[1], out int logicalCounter))
throw new FormatException("Invalid LogicalCounter component in HlcTimestamp.");
var nodeId = parts[2];
string nodeId = parts[2];
return new HlcTimestamp(physicalTime, logicalCounter, nodeId);
}
}

View File

@@ -2,26 +2,25 @@ using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Core.Management;
/// <summary>
/// Implementation of peer management service.
/// Provides CRUD operations for managing remote peer configurations.
///
/// Remote peer configurations are stored in a synchronized collection and automatically
/// replicated across all nodes in the cluster. Any change made on one node will be
/// synchronized to all other nodes through the normal CBDDC sync process.
/// </summary>
public class PeerManagementService : IPeerManagementService
{
private readonly IPeerConfigurationStore _store;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
private readonly ILogger<PeerManagementService> _logger;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
private readonly IPeerConfigurationStore _store;
/// <summary>
/// Initializes a new instance of the PeerManagementService class.
@@ -35,7 +34,8 @@ public class PeerManagementService : IPeerManagementService
ILogger<PeerManagementService>? logger = null)
{
_store = store ?? throw new ArgumentNullException(nameof(store));
_peerOplogConfirmationStore = peerOplogConfirmationStore ?? throw new ArgumentNullException(nameof(peerOplogConfirmationStore));
_peerOplogConfirmationStore = peerOplogConfirmationStore ??
throw new ArgumentNullException(nameof(peerOplogConfirmationStore));
_logger = logger ?? NullLogger<PeerManagementService>.Instance;
}
@@ -60,7 +60,8 @@ public class PeerManagementService : IPeerManagementService
};
await _store.SaveRemotePeerAsync(config, cancellationToken);
_logger.LogInformation("Added static remote peer: {NodeId} at {Address} (will sync to all cluster nodes)", nodeId, address);
_logger.LogInformation("Added static remote peer: {NodeId} at {Address} (will sync to all cluster nodes)",
nodeId, address);
}
/// <summary>
@@ -71,7 +72,7 @@ public class PeerManagementService : IPeerManagementService
/// <returns>A task that represents the asynchronous operation.</returns>
public async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
{
await RemovePeerTrackingAsync(nodeId, removeRemoteConfig: true, cancellationToken);
await RemovePeerTrackingAsync(nodeId, true, cancellationToken);
}
/// <summary>
@@ -93,7 +94,8 @@ public class PeerManagementService : IPeerManagementService
if (removeRemoteConfig)
{
await _store.RemoveRemotePeerAsync(nodeId, cancellationToken);
_logger.LogInformation("Removed remote peer and tracking: {NodeId} (will sync to all cluster nodes)", nodeId);
_logger.LogInformation("Removed remote peer and tracking: {NodeId} (will sync to all cluster nodes)",
nodeId);
return;
}
@@ -105,7 +107,8 @@ public class PeerManagementService : IPeerManagementService
/// </summary>
/// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains remote peer configurations.</returns>
public async Task<IEnumerable<RemotePeerConfiguration>> GetAllRemotePeersAsync(CancellationToken cancellationToken = default)
public async Task<IEnumerable<RemotePeerConfiguration>> GetAllRemotePeersAsync(
CancellationToken cancellationToken = default)
{
return await _store.GetRemotePeersAsync(cancellationToken);
}
@@ -122,10 +125,7 @@ public class PeerManagementService : IPeerManagementService
var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken);
if (peer == null)
{
return; // Peer not found, nothing to enable
}
if (peer == null) return; // Peer not found, nothing to enable
if (!peer.IsEnabled)
{
@@ -147,10 +147,7 @@ public class PeerManagementService : IPeerManagementService
var peer = await _store.GetRemotePeerAsync(nodeId, cancellationToken);
if (peer == null)
{
return; // Peer not found, nothing to disable
}
if (peer == null) return; // Peer not found, nothing to disable
if (peer.IsEnabled)
{
@@ -163,23 +160,16 @@ public class PeerManagementService : IPeerManagementService
private static void ValidateNodeId(string nodeId)
{
if (string.IsNullOrWhiteSpace(nodeId))
{
throw new ArgumentException("NodeId cannot be null or empty", nameof(nodeId));
}
}
private static void ValidateAddress(string address)
{
if (string.IsNullOrWhiteSpace(address))
{
throw new ArgumentException("Address cannot be null or empty", nameof(address));
}
// Basic format validation (should contain host:port)
if (!address.Contains(':'))
{
throw new ArgumentException("Address must be in format 'host:port'", nameof(address));
}
}
}

View File

@@ -1,4 +1,3 @@
using System;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Network;
@@ -13,9 +12,11 @@ public delegate void PeerNodeConfigurationChangedEventHandler(object? sender, Pe
/// <summary>
/// Defines a contract for retrieving and monitoring configuration settings for a peer node.
/// </summary>
/// <remarks>Implementations of this interface provide access to the current configuration and notify subscribers
/// <remarks>
/// Implementations of this interface provide access to the current configuration and notify subscribers
/// when configuration changes occur. This interface is typically used by components that require up-to-date
/// configuration information for peer-to-peer networking scenarios.</remarks>
/// configuration information for peer-to-peer networking scenarios.
/// </remarks>
public interface IPeerNodeConfigurationProvider
{
/// <summary>
@@ -23,16 +24,17 @@ public interface IPeerNodeConfigurationProvider
/// </summary>
/// <returns>
/// A task that represents the asynchronous operation. The task result contains the current
/// <see cref="PeerNodeConfiguration"/>.
/// <see cref="PeerNodeConfiguration" />.
/// </returns>
public Task<PeerNodeConfiguration> GetConfiguration();
/// <summary>
/// Occurs when the configuration of the peer node changes.
/// </summary>
/// <remarks>Subscribe to this event to be notified when any configuration settings for the peer node are
/// <remarks>
/// Subscribe to this event to be notified when any configuration settings for the peer node are
/// modified. Event handlers can use this notification to update dependent components or respond to configuration
/// changes as needed.</remarks>
/// changes as needed.
/// </remarks>
public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged;
}

View File

@@ -1,6 +1,5 @@
using System;
using System.Collections.Generic;
using System.Linq;
namespace ZB.MOM.WW.CBDDC.Core.Network;
@@ -10,6 +9,35 @@ namespace ZB.MOM.WW.CBDDC.Core.Network;
/// </summary>
public class PeerNode
{
/// <summary>
/// Initializes a new instance of the PeerNode class with the specified node identifier, network address, and last
/// seen timestamp.
/// </summary>
/// <param name="nodeId">The unique identifier for the peer node. Cannot be null or empty.</param>
/// <param name="address">The network address of the peer node. Cannot be null or empty.</param>
/// <param name="lastSeen">The date and time when the peer node was last seen, expressed as a DateTimeOffset.</param>
/// <param name="type">The type of the peer node. Defaults to LanDiscovered.</param>
/// <param name="role">The role of the peer node. Defaults to Member.</param>
/// <param name="configuration">The peer node configuration</param>
/// <param name="interestingCollections">The list of collections this peer is interested in.</param>
public PeerNode(
string nodeId,
string address,
DateTimeOffset lastSeen,
PeerType type = PeerType.LanDiscovered,
NodeRole role = NodeRole.Member,
PeerNodeConfiguration? configuration = null,
IEnumerable<string>? interestingCollections = null)
{
NodeId = nodeId;
Address = address;
LastSeen = lastSeen;
Type = type;
Role = role;
Configuration = configuration;
InterestingCollections = new List<string>(interestingCollections ?? []).AsReadOnly();
}
/// <summary>
/// Gets the unique identifier for the node.
/// </summary>
@@ -43,34 +71,5 @@ public class PeerNode
/// <summary>
/// Gets the list of collections this peer is interested in.
/// </summary>
public System.Collections.Generic.IReadOnlyList<string> InterestingCollections { get; }
/// <summary>
/// Initializes a new instance of the PeerNode class with the specified node identifier, network address, and last
/// seen timestamp.
/// </summary>
/// <param name="nodeId">The unique identifier for the peer node. Cannot be null or empty.</param>
/// <param name="address">The network address of the peer node. Cannot be null or empty.</param>
/// <param name="lastSeen">The date and time when the peer node was last seen, expressed as a DateTimeOffset.</param>
/// <param name="type">The type of the peer node. Defaults to LanDiscovered.</param>
/// <param name="role">The role of the peer node. Defaults to Member.</param>
/// <param name="configuration">The peer node configuration</param>
/// <param name="interestingCollections">The list of collections this peer is interested in.</param>
public PeerNode(
string nodeId,
string address,
DateTimeOffset lastSeen,
PeerType type = PeerType.LanDiscovered,
NodeRole role = NodeRole.Member,
PeerNodeConfiguration? configuration = null,
IEnumerable<string>? interestingCollections = null)
{
NodeId = nodeId;
Address = address;
LastSeen = lastSeen;
Type = type;
Role = role;
Configuration = configuration;
InterestingCollections = new List<string>(interestingCollections ?? []).AsReadOnly();
}
public IReadOnlyList<string> InterestingCollections { get; }
}

View File

@@ -1,13 +1,16 @@
using System;
using System.Collections.Generic;
namespace ZB.MOM.WW.CBDDC.Core.Network;
/// <summary>
/// Represents the configuration settings for a peer node in a distributed network.
/// </summary>
/// <remarks>Use this class to specify identification, network port, and authentication details required for a
/// peer node to participate in a cluster or peer-to-peer environment. The <see cref="Default"/> property provides a
/// basic configuration suitable for development or testing scenarios.</remarks>
/// <remarks>
/// Use this class to specify identification, network port, and authentication details required for a
/// peer node to participate in a cluster or peer-to-peer environment. The <see cref="Default" /> property provides a
/// basic configuration suitable for development or testing scenarios.
/// </remarks>
public class PeerNodeConfiguration
{
/// <summary>
@@ -58,15 +61,17 @@ public class PeerNodeConfiguration
/// <summary>
/// Gets or sets a list of known peers to connect to directly, bypassing discovery.
/// </summary>
public System.Collections.Generic.List<KnownPeerConfiguration> KnownPeers { get; set; } = new();
public List<KnownPeerConfiguration> KnownPeers { get; set; } = new();
/// <summary>
/// Gets the default configuration settings for a peer node.
/// </summary>
/// <remarks>Each access returns a new instance of the configuration with a unique node identifier. The
/// <remarks>
/// Each access returns a new instance of the configuration with a unique node identifier. The
/// default settings use TCP port 9000 and a generated authentication token. Modify the returned instance as needed
/// before use.</remarks>
public static PeerNodeConfiguration Default => new PeerNodeConfiguration
/// before use.
/// </remarks>
public static PeerNodeConfiguration Default => new()
{
NodeId = Guid.NewGuid().ToString(),
TcpPort = 9000,

View File

@@ -1,3 +1,4 @@
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Core.Network;
@@ -34,5 +35,5 @@ public class RemotePeerConfiguration
/// Gets or sets the list of collections this peer is interested in.
/// If empty, the peer is interested in all collections.
/// </summary>
public System.Collections.Generic.List<string> InterestingCollections { get; set; } = new();
public List<string> InterestingCollections { get; set; } = new();
}

View File

@@ -9,6 +9,15 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide
{
private PeerNodeConfiguration _configuration = new();
/// <summary>
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider" /> class.
/// </summary>
/// <param name="configuration">The initial peer node configuration.</param>
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
{
Configuration = configuration;
}
/// <summary>
/// Gets or sets the current peer node configuration.
/// </summary>
@@ -25,15 +34,6 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide
}
}
/// <summary>
/// Initializes a new instance of the <see cref="StaticPeerNodeConfigurationProvider"/> class.
/// </summary>
/// <param name="configuration">The initial peer node configuration.</param>
public StaticPeerNodeConfigurationProvider(PeerNodeConfiguration configuration)
{
Configuration = configuration;
}
/// <summary>
/// Occurs when the peer node configuration changes.
/// </summary>
@@ -49,7 +49,7 @@ public class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvide
}
/// <summary>
/// Raises the <see cref="ConfigurationChanged"/> event.
/// Raises the <see cref="ConfigurationChanged" /> event.
/// </summary>
/// <param name="newConfig">The new peer node configuration.</param>
protected virtual void OnConfigurationChanged(PeerNodeConfiguration newConfig)

View File

@@ -1,5 +1,7 @@
using System;
using System.ComponentModel.DataAnnotations;
using System.Globalization;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Core;
@@ -19,15 +21,15 @@ public static class OplogEntryExtensions
/// <returns>The lowercase hexadecimal SHA-256 hash of the entry.</returns>
public static string ComputeHash(this OplogEntry entry)
{
using var sha256 = System.Security.Cryptography.SHA256.Create();
var sb = new System.Text.StringBuilder();
using var sha256 = SHA256.Create();
var sb = new StringBuilder();
sb.Append(entry.Collection);
sb.Append('|');
sb.Append(entry.Key);
sb.Append('|');
// Ensure stable string representation for Enum (integer value)
sb.Append(((int)entry.Operation).ToString(System.Globalization.CultureInfo.InvariantCulture));
sb.Append(((int)entry.Operation).ToString(CultureInfo.InvariantCulture));
sb.Append('|');
// Payload excluded from hash to avoid serialization non-determinism
// sb.Append(entry.Payload...);
@@ -37,8 +39,8 @@ public static class OplogEntryExtensions
sb.Append('|');
sb.Append(entry.PreviousHash);
var bytes = System.Text.Encoding.UTF8.GetBytes(sb.ToString());
var hashBytes = sha256.ComputeHash(bytes);
byte[] bytes = Encoding.UTF8.GetBytes(sb.ToString());
byte[] hashBytes = sha256.ComputeHash(bytes);
// Convert to hex string
return BitConverter.ToString(hashBytes).Replace("-", "").ToLowerInvariant();
@@ -48,36 +50,7 @@ public static class OplogEntryExtensions
public class OplogEntry
{
/// <summary>
/// Gets the collection name associated with this entry.
/// </summary>
public string Collection { get; }
/// <summary>
/// Gets the document key associated with this entry.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the operation represented by this entry.
/// </summary>
public OperationType Operation { get; }
/// <summary>
/// Gets the serialized payload for the operation.
/// </summary>
public JsonElement? Payload { get; }
/// <summary>
/// Gets the logical timestamp for this entry.
/// </summary>
public HlcTimestamp Timestamp { get; }
/// <summary>
/// Gets the hash of this entry.
/// </summary>
public string Hash { get; }
/// <summary>
/// Gets the hash of the previous entry in the chain.
/// </summary>
public string PreviousHash { get; }
/// <summary>
/// Initializes a new instance of the <see cref="OplogEntry"/> class.
/// Initializes a new instance of the <see cref="OplogEntry" /> class.
/// </summary>
/// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param>
@@ -86,7 +59,8 @@ public class OplogEntry
/// <param name="timestamp">The logical timestamp.</param>
/// <param name="previousHash">The previous entry hash.</param>
/// <param name="hash">The current entry hash. If null, it is computed.</param>
public OplogEntry(string collection, string key, OperationType operation, JsonElement? payload, HlcTimestamp timestamp, string previousHash, string? hash = null)
public OplogEntry(string collection, string key, OperationType operation, JsonElement? payload,
HlcTimestamp timestamp, string previousHash, string? hash = null)
{
Collection = collection;
Key = key;
@@ -97,6 +71,41 @@ public class OplogEntry
Hash = hash ?? this.ComputeHash();
}
/// <summary>
/// Gets the collection name associated with this entry.
/// </summary>
public string Collection { get; }
/// <summary>
/// Gets the document key associated with this entry.
/// </summary>
public string Key { get; }
/// <summary>
/// Gets the operation represented by this entry.
/// </summary>
public OperationType Operation { get; }
/// <summary>
/// Gets the serialized payload for the operation.
/// </summary>
public JsonElement? Payload { get; }
/// <summary>
/// Gets the logical timestamp for this entry.
/// </summary>
public HlcTimestamp Timestamp { get; }
/// <summary>
/// Gets the hash of this entry.
/// </summary>
public string Hash { get; }
/// <summary>
/// Gets the hash of the previous entry in the chain.
/// </summary>
public string PreviousHash { get; }
/// <summary>
/// Verifies if the stored Hash matches the content.
/// </summary>

View File

@@ -1,111 +1,22 @@
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Core;
public abstract class QueryNode { }
public abstract class QueryNode
{
}
public class Eq : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the value to compare against.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new equality query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The value to compare against.</param>
public Eq(string field, object value) { Field = field; Value = value; }
}
public Eq(string field, object value)
{
Field = field;
Value = value;
}
public class Gt : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new greater-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gt(string field, object value) { Field = field; Value = value; }
}
public class Lt : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new less-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lt(string field, object value) { Field = field; Value = value; }
}
public class Gte : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new greater-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gte(string field, object value) { Field = field; Value = value; }
}
public class Lte : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
/// <summary>
/// Initializes a new less-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lte(string field, object value) { Field = field; Value = value; }
}
public class Neq : QueryNode
{
/// <summary>
/// Gets the field name to compare.
/// </summary>
@@ -115,17 +26,141 @@ public class Neq : QueryNode
/// Gets the value to compare against.
/// </summary>
public object Value { get; }
}
public class Gt : QueryNode
{
/// <summary>
/// Initializes a new greater-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gt(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Lt : QueryNode
{
/// <summary>
/// Initializes a new less-than query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lt(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Gte : QueryNode
{
/// <summary>
/// Initializes a new greater-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Gte(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Lte : QueryNode
{
/// <summary>
/// Initializes a new less-than-or-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The threshold value.</param>
public Lte(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the threshold value.
/// </summary>
public object Value { get; }
}
public class Neq : QueryNode
{
/// <summary>
/// Initializes a new not-equal query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The value to compare against.</param>
public Neq(string field, object value) { Field = field; Value = value; }
public Neq(string field, object value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
public string Field { get; }
/// <summary>
/// Gets the value to compare against.
/// </summary>
public object Value { get; }
}
public class In : QueryNode
{
/// <summary>
/// Initializes a new in-list query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="values">The set of values to compare against.</param>
public In(string field, object[] values)
{
Field = field;
Values = values;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
@@ -135,17 +170,21 @@ public class In : QueryNode
/// Gets the set of values to compare against.
/// </summary>
public object[] Values { get; }
/// <summary>
/// Initializes a new in-list query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="values">The set of values to compare against.</param>
public In(string field, object[] values) { Field = field; Values = values; }
}
public class Contains : QueryNode
{
/// <summary>
/// Initializes a new contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to search for.</param>
public Contains(string field, string value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
@@ -155,17 +194,21 @@ public class Contains : QueryNode
/// Gets the substring value to search for.
/// </summary>
public string Value { get; }
/// <summary>
/// Initializes a new contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to search for.</param>
public Contains(string field, string value) { Field = field; Value = value; }
}
public class NotContains : QueryNode
{
/// <summary>
/// Initializes a new not-contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to exclude.</param>
public NotContains(string field, string value)
{
Field = field;
Value = value;
}
/// <summary>
/// Gets the field name to compare.
/// </summary>
@@ -175,17 +218,21 @@ public class NotContains : QueryNode
/// Gets the substring value to exclude.
/// </summary>
public string Value { get; }
/// <summary>
/// Initializes a new not-contains query node.
/// </summary>
/// <param name="field">The field name to compare.</param>
/// <param name="value">The substring value to exclude.</param>
public NotContains(string field, string value) { Field = field; Value = value; }
}
public class And : QueryNode
{
/// <summary>
/// Initializes a new logical AND query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public And(QueryNode left, QueryNode right)
{
Left = left;
Right = right;
}
/// <summary>
/// Gets the left side of the logical operation.
/// </summary>
@@ -195,17 +242,21 @@ public class And : QueryNode
/// Gets the right side of the logical operation.
/// </summary>
public QueryNode Right { get; }
/// <summary>
/// Initializes a new logical AND query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public And(QueryNode left, QueryNode right) { Left = left; Right = right; }
}
public class Or : QueryNode
{
/// <summary>
/// Initializes a new logical OR query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public Or(QueryNode left, QueryNode right)
{
Left = left;
Right = right;
}
/// <summary>
/// Gets the left side of the logical operation.
/// </summary>
@@ -215,11 +266,4 @@ public class Or : QueryNode
/// Gets the right side of the logical operation.
/// </summary>
public QueryNode Right { get; }
/// <summary>
/// Initializes a new logical OR query node.
/// </summary>
/// <param name="left">The left query node.</param>
/// <param name="right">The right query node.</param>
public Or(QueryNode left, QueryNode right) { Left = left; Right = right; }
}

View File

@@ -4,7 +4,9 @@ Core abstractions and logic for **CBDDC**, a peer-to-peer data synchronization m
## What Is CBDDC?
CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (BLite) and enables automatic P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC handles synchronization in the background.
CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (BLite) and enables automatic
P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC
handles synchronization in the background.
## What's In This Package
@@ -66,8 +68,8 @@ builder.Services.AddCBDDCCore()
## Key Concepts
| Concept | Description |
|---------|-------------|
| **CDC** | Change Data Capture watches collections registered via `WatchCollection()` |
|-------------------|------------------------------------------------------------------------------|
| **CDC**           | Change Data Capture — watches collections registered via `WatchCollection()`  |
| **Oplog** | Append-only hash-chained journal of changes per node |
| **VectorClock** | Tracks causal ordering across the mesh |
| **DocumentStore** | Your bridge between entities and the sync engine |
@@ -92,14 +94,15 @@ Your App ? DbContext.SaveChangesAsync()
## Related Packages
- **ZB.MOM.WW.CBDDC.Persistence** — BLite embedded provider (.NET 10+)
- **ZB.MOM.WW.CBDDC.Network** — P2P networking (UDP discovery, TCP sync, Gossip)
- **ZB.MOM.WW.CBDDC.Network** — P2P networking (UDP discovery, TCP sync, Gossip)
## Documentation
- **[Complete Documentation](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net)**
- **[Sample Application](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/tree/main/samples/ZB.MOM.WW.CBDDC.Sample.Console)**
- **[Sample Application](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/tree/main/samples/ZB.MOM.WW.CBDDC.Sample.Console)**
- **[Integration Guide](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net#integrating-with-your-database)**
## License
MIT — see [LICENSE](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/blob/main/LICENSE)
MIT — see [LICENSE](https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net/blob/main/LICENSE)

View File

@@ -2,10 +2,10 @@
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Resilience
namespace ZB.MOM.WW.CBDDC.Core.Resilience;
public interface IRetryPolicy
{
public interface IRetryPolicy
{
/// <summary>
/// Executes an asynchronous operation with retry handling.
/// </summary>
@@ -14,6 +14,7 @@ namespace ZB.MOM.WW.CBDDC.Core.Resilience
/// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous execution.</returns>
Task ExecuteAsync(Func<Task> operation, string operationName, CancellationToken cancellationToken = default);
/// <summary>
/// Executes an asynchronous operation with retry handling and returns a result.
/// </summary>
@@ -22,6 +23,6 @@ namespace ZB.MOM.WW.CBDDC.Core.Resilience
/// <param name="operationName">The operation name used for diagnostics.</param>
/// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous execution and yields the operation result.</returns>
Task<T> ExecuteAsync<T>(Func<Task<T>> operation, string operationName, CancellationToken cancellationToken = default);
}
Task<T> ExecuteAsync<T>(Func<Task<T>> operation, string operationName,
CancellationToken cancellationToken = default);
}

View File

@@ -1,10 +1,13 @@
using System;
using System.IO;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Exceptions;
using ZB.MOM.WW.CBDDC.Core.Network;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Exceptions;
using ZB.MOM.WW.CBDDC.Core.Network;
using TimeoutException = ZB.MOM.WW.CBDDC.Core.Exceptions.TimeoutException;
namespace ZB.MOM.WW.CBDDC.Core.Resilience;
@@ -13,15 +16,16 @@ namespace ZB.MOM.WW.CBDDC.Core.Resilience;
/// </summary>
public class RetryPolicy : IRetryPolicy
{
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly ILogger<RetryPolicy> _logger;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
/// <summary>
/// Initializes a new instance of the <see cref="RetryPolicy"/> class.
/// Initializes a new instance of the <see cref="RetryPolicy" /> class.
/// </summary>
/// <param name="peerNodeConfigurationProvider">The provider for retry configuration values.</param>
/// <param name="logger">The logger instance.</param>
public RetryPolicy(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger<RetryPolicy>? logger = null)
public RetryPolicy(IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
ILogger<RetryPolicy>? logger = null)
{
_logger = logger ?? NullLogger<RetryPolicy>.Instance;
_peerNodeConfigurationProvider = peerNodeConfigurationProvider
@@ -43,8 +47,7 @@ public class RetryPolicy : IRetryPolicy
var config = await _peerNodeConfigurationProvider.GetConfiguration();
Exception? lastException = null;
for (int attempt = 1; attempt <= config.RetryAttempts; attempt++)
{
for (var attempt = 1; attempt <= config.RetryAttempts; attempt++)
try
{
_logger.LogDebug("Executing {Operation} (attempt {Attempt}/{Max})",
@@ -55,7 +58,7 @@ public class RetryPolicy : IRetryPolicy
catch (Exception ex) when (attempt < config.RetryAttempts && IsTransient(ex))
{
lastException = ex;
var delay = config.RetryDelayMs * attempt; // Exponential backoff
int delay = config.RetryDelayMs * attempt; // Exponential backoff
_logger.LogWarning(ex,
"Operation {Operation} failed (attempt {Attempt}/{Max}). Retrying in {Delay}ms...",
@@ -63,20 +66,15 @@ public class RetryPolicy : IRetryPolicy
await Task.Delay(delay, cancellationToken);
}
}
if (lastException != null)
{
_logger.LogError(lastException,
"Operation {Operation} failed after {Attempts} attempts",
operationName, config.RetryAttempts);
}
else
{
_logger.LogError(
"Operation {Operation} failed after {Attempts} attempts",
operationName, config.RetryAttempts);
}
throw new CBDDCException("RETRY_EXHAUSTED",
$"Operation '{operationName}' failed after {config.RetryAttempts} attempts",
@@ -104,11 +102,11 @@ public class RetryPolicy : IRetryPolicy
private static bool IsTransient(Exception ex)
{
// Network errors are typically transient
if (ex is NetworkException or System.Net.Sockets.SocketException or System.IO.IOException)
if (ex is NetworkException or SocketException or IOException)
return true;
// Timeout errors are transient
if (ex is Exceptions.TimeoutException or OperationCanceledException)
if (ex is TimeoutException or OperationCanceledException)
return true;
return false;

View File

@@ -6,14 +6,17 @@ public class SnapshotMetadata
/// Gets or sets the node identifier associated with the snapshot.
/// </summary>
public string NodeId { get; set; } = "";
/// <summary>
/// Gets or sets the physical time component of the snapshot timestamp.
/// </summary>
public long TimestampPhysicalTime { get; set; }
/// <summary>
/// Gets or sets the logical counter component of the snapshot timestamp.
/// </summary>
public int TimestampLogicalCounter { get; set; }
/// <summary>
/// Gets or sets the snapshot hash.
/// </summary>

View File

@@ -8,9 +8,11 @@ namespace ZB.MOM.WW.CBDDC.Core.Storage;
public class CorruptDatabaseException : Exception
{
/// <summary>
/// Initializes a new instance of the <see cref="CorruptDatabaseException"/> class.
/// Initializes a new instance of the <see cref="CorruptDatabaseException" /> class.
/// </summary>
/// <param name="message">The exception message.</param>
/// <param name="innerException">The underlying exception that caused this error.</param>
public CorruptDatabaseException(string message, Exception innerException) : base(message, innerException) { }
public CorruptDatabaseException(string message, Exception innerException) : base(message, innerException)
{
}
}

View File

@@ -17,7 +17,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">A cancellation token.</param>
/// <returns>The document metadata if found; otherwise null.</returns>
Task<DocumentMetadata?> GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default);
Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets metadata for all documents in a collection.
@@ -25,7 +26,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="collection">The collection name.</param>
/// <param name="cancellationToken">A cancellation token.</param>
/// <returns>Enumerable of document metadata for the collection.</returns>
Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default);
Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
CancellationToken cancellationToken = default);
/// <summary>
/// Upserts (inserts or updates) metadata for a document.
@@ -39,7 +41,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// </summary>
/// <param name="metadatas">The metadata items to upsert.</param>
/// <param name="cancellationToken">A cancellation token.</param>
Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas, CancellationToken cancellationToken = default);
Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
CancellationToken cancellationToken = default);
/// <summary>
/// Marks a document as deleted by setting IsDeleted=true and updating the timestamp.
@@ -48,7 +51,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="key">The document key.</param>
/// <param name="timestamp">The HLC timestamp of the deletion.</param>
/// <param name="cancellationToken">A cancellation token.</param>
Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default);
Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
CancellationToken cancellationToken = default);
/// <summary>
/// Gets all document metadata with timestamps after the specified timestamp.
@@ -58,7 +62,8 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// <param name="collections">Optional collection filter.</param>
/// <param name="cancellationToken">A cancellation token.</param>
/// <returns>Documents modified after the specified timestamp.</returns>
Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
}
/// <summary>
@@ -66,6 +71,28 @@ public interface IDocumentMetadataStore : ISnapshotable<DocumentMetadata>
/// </summary>
public class DocumentMetadata
{
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata" /> class.
/// </summary>
public DocumentMetadata()
{
}
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata" /> class.
/// </summary>
/// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="updatedAt">The last update timestamp.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public DocumentMetadata(string collection, string key, HlcTimestamp updatedAt, bool isDeleted = false)
{
Collection = collection;
Key = key;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
/// <summary>
/// Gets or sets the collection name.
/// </summary>
@@ -85,24 +112,4 @@ public class DocumentMetadata
/// Gets or sets whether this document is marked as deleted (tombstone).
/// </summary>
public bool IsDeleted { get; set; }
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata"/> class.
/// </summary>
public DocumentMetadata() { }
/// <summary>
/// Initializes a new instance of the <see cref="DocumentMetadata"/> class.
/// </summary>
/// <param name="collection">The collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="updatedAt">The last update timestamp.</param>
/// <param name="isDeleted">Whether the document is marked as deleted.</param>
public DocumentMetadata(string collection, string key, HlcTimestamp updatedAt, bool isDeleted = false)
{
Collection = collection;
Key = key;
UpdatedAt = updatedAt;
IsDeleted = isDeleted;
}
}

View File

@@ -1,4 +1,3 @@
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
@@ -21,7 +20,10 @@ public interface IDocumentStore : ISnapshotable<Document>
/// <param name="collection">The name of the collection containing the incoming to retrieve. Cannot be null or empty.</param>
/// <param name="key">The unique key identifying the incoming within the collection. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains the incoming if found; otherwise, null.</returns>
/// <returns>
/// A task that represents the asynchronous operation. The task result contains the incoming if found; otherwise,
/// null.
/// </returns>
Task<Document?> GetDocumentAsync(string collection, string key, CancellationToken cancellationToken = default);
/// <summary>
@@ -29,26 +31,34 @@ public interface IDocumentStore : ISnapshotable<Document>
/// </summary>
/// <param name="collection">The name of the collection from which to retrieve documents. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains an enumerable collection of
/// documents in the specified collection. The collection is empty if no documents are found.</returns>
Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection, CancellationToken cancellationToken = default);
/// <returns>
/// A task that represents the asynchronous operation. The task result contains an enumerable collection of
/// documents in the specified collection. The collection is empty if no documents are found.
/// </returns>
Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously inserts a batch of documents into the data store.
/// </summary>
/// <param name="documents">The collection of documents to insert. Cannot be null or contain null elements.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result is <see langword="true"/> if all documents
/// were inserted successfully; otherwise, <see langword="false"/>.</returns>
Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default);
/// <returns>
/// A task that represents the asynchronous operation. The task result is <see langword="true" /> if all documents
/// were inserted successfully; otherwise, <see langword="false" />.
/// </returns>
Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously updates the specified incoming in the data store.
/// </summary>
/// <param name="document">The incoming to update. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the update operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result is <see langword="true"/> if the incoming was
/// successfully updated; otherwise, <see langword="false"/>.</returns>
/// <returns>
/// A task that represents the asynchronous operation. The task result is <see langword="true" /> if the incoming was
/// successfully updated; otherwise, <see langword="false" />.
/// </returns>
Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default);
/// <summary>
@@ -56,9 +66,12 @@ public interface IDocumentStore : ISnapshotable<Document>
/// </summary>
/// <param name="documents">The collection of documents to update. Cannot be null or contain null elements.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result is <see langword="true"/> if all documents
/// were updated successfully; otherwise, <see langword="false"/>.</returns>
Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default);
/// <returns>
/// A task that represents the asynchronous operation. The task result is <see langword="true" /> if all documents
/// were updated successfully; otherwise, <see langword="false" />.
/// </returns>
Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously deletes a incoming identified by the specified key from the given collection.
@@ -66,23 +79,31 @@ public interface IDocumentStore : ISnapshotable<Document>
/// <param name="collection">The name of the collection containing the document to delete. Cannot be null or empty.</param>
/// <param name="key">The unique key identifying the document to delete. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the delete operation.</param>
/// <returns>A task that represents the asynchronous delete operation. The task result is <see langword="true"/> if the
/// document was successfully deleted; otherwise, <see langword="false"/>.</returns>
/// <returns>
/// A task that represents the asynchronous delete operation. The task result is <see langword="true" /> if the
/// document was successfully deleted; otherwise, <see langword="false" />.
/// </returns>
Task<bool> DeleteDocumentAsync(string collection, string key, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously deletes a batch of documents identified by their keys.
/// </summary>
/// <remarks>
/// If any of the specified documents cannot be deleted, the method returns <see langword="false"/> but does not
/// throw an exception. The operation is performed asynchronously and may complete partially if cancellation is requested.
/// If any of the specified documents cannot be deleted, the method returns <see langword="false" /> but does not
/// throw an exception. The operation is performed asynchronously and may complete partially if cancellation is
/// requested.
/// </remarks>
/// <param name="documentKeys">A collection of document keys that specify the documents to delete. Cannot be null or contain null or empty
/// values.</param>
/// <param name="documentKeys">
/// A collection of document keys that specify the documents to delete. Cannot be null or contain null or empty
/// values.
/// </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the delete operation.</param>
/// <returns>A task that represents the asynchronous delete operation. The task result is <see langword="true"/> if all
/// specified documents were successfully deleted; otherwise, <see langword="false"/>.</returns>
Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys, CancellationToken cancellationToken = default);
/// <returns>
/// A task that represents the asynchronous delete operation. The task result is <see langword="true" /> if all
/// specified documents were successfully deleted; otherwise, <see langword="false" />.
/// </returns>
Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously merges the specified document with existing data and returns the updated document.
@@ -95,9 +116,12 @@ public interface IDocumentStore : ISnapshotable<Document>
/// <summary>
/// Asynchronously retrieves documents identified by the specified collection and key pairs.
/// </summary>
/// <param name="documentKeys">A list of tuples, each containing the collection name and the document key that uniquely identify the documents
/// to retrieve. Cannot be null or empty.</param>
/// <param name="documentKeys">
/// A list of tuples, each containing the collection name and the document key that uniquely identify the documents
/// to retrieve. Cannot be null or empty.
/// </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous retrieval operation.</returns>
Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, CancellationToken cancellationToken);
Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys,
CancellationToken cancellationToken);
}

View File

@@ -1,5 +1,4 @@
using System;
using System.Buffers;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
@@ -31,7 +30,8 @@ public interface IOplogStore : ISnapshotable<OplogEntry>
/// <param name="collections">An optional collection of collection names to filter the results.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation containing matching oplog entries.</returns>
Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves the latest observed hybrid logical clock (HLC) timestamp.
@@ -55,24 +55,30 @@ public interface IOplogStore : ISnapshotable<OplogEntry>
/// <param name="collections">An optional collection of collection names to filter the oplog entries.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation containing oplog entries for the specified node.</returns>
Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves the hash of the most recent entry for the specified node.
/// </summary>
/// <param name="nodeId">The unique identifier of the node for which to retrieve the last entry hash. Cannot be null or empty.</param>
/// <param name="nodeId">
/// The unique identifier of the node for which to retrieve the last entry hash. Cannot be null or
/// empty.
/// </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation containing the hash string of the last entry or null.</returns>
Task<string?> GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves a sequence of oplog entries representing the chain between the specified start and end hashes.
/// Asynchronously retrieves a sequence of oplog entries representing the chain between the specified start and end
/// hashes.
/// </summary>
/// <param name="startHash">The hash of the first entry in the chain range. Cannot be null or empty.</param>
/// <param name="endHash">The hash of the last entry in the chain range. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation containing OplogEntry objects in chain order.</returns>
Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default);
Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves the oplog entry associated with the specified hash value.
@@ -97,5 +103,4 @@ public interface IOplogStore : ISnapshotable<OplogEntry>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the prune operation.</param>
/// <returns>A task that represents the asynchronous prune operation.</returns>
Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default);
}

View File

@@ -9,10 +9,15 @@ public interface ISnapshotMetadataStore : ISnapshotable<SnapshotMetadata>
/// <summary>
/// Asynchronously retrieves the snapshot metadata associated with the specified node identifier.
/// </summary>
/// <param name="nodeId">The unique identifier of the node for which to retrieve snapshot metadata. Cannot be null or empty.</param>
/// <param name="nodeId">
/// The unique identifier of the node for which to retrieve snapshot metadata. Cannot be null or
/// empty.
/// </param>
/// <param name="cancellationToken">A token to monitor for cancellation requests.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains the <see cref="SnapshotMetadata"/>
/// for the specified node if found; otherwise, <see langword="null"/>.</returns>
/// <returns>
/// A task that represents the asynchronous operation. The task result contains the <see cref="SnapshotMetadata" />
/// for the specified node if found; otherwise, <see langword="null" />.
/// </returns>
Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default);
/// <summary>

View File

@@ -10,8 +10,10 @@ public interface ISnapshotable<T>
/// Asynchronously deletes the underlying data store and all of its contents.
/// </summary>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the drop operation.</param>
/// <remarks>After calling this method, the data store and all stored data will be permanently removed.
/// This operation cannot be undone. Any further operations on the data store may result in errors.</remarks>
/// <remarks>
/// After calling this method, the data store and all stored data will be permanently removed.
/// This operation cannot be undone. Any further operations on the data store may result in errors.
/// </remarks>
/// <returns>A task that represents the asynchronous drop operation.</returns>
Task DropAsync(CancellationToken cancellationToken = default);
@@ -19,8 +21,10 @@ public interface ISnapshotable<T>
/// Asynchronously exports a collection of items of type T.
/// </summary>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the export operation.</param>
/// <returns>A task that represents the asynchronous export operation. The task result contains an enumerable collection of
/// exported items of type T.</returns>
/// <returns>
/// A task that represents the asynchronous export operation. The task result contains an enumerable collection of
/// exported items of type T.
/// </returns>
Task<IEnumerable<T>> ExportAsync(CancellationToken cancellationToken = default);
/// <summary>
@@ -34,9 +38,11 @@ public interface ISnapshotable<T>
/// <summary>
/// Merges the specified collection of items into the target data store asynchronously.
/// </summary>
/// <remarks>If the operation is canceled via the provided cancellation token, the returned task will be
/// <remarks>
/// If the operation is canceled via the provided cancellation token, the returned task will be
/// in a canceled state. The merge operation may update existing items or add new items, depending on the
/// implementation.</remarks>
/// implementation.
/// </remarks>
/// <param name="items">The collection of items to merge into the data store. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the merge operation.</param>
/// <returns>A task that represents the asynchronous merge operation.</returns>

View File

@@ -11,7 +11,7 @@ public interface IVectorClockService
{
/// <summary>
/// Indicates whether the cache has been populated with initial data.
/// Reset to false by <see cref="Invalidate"/>.
/// Reset to false by <see cref="Invalidate" />.
/// </summary>
bool IsInitialized { get; set; }
@@ -51,7 +51,7 @@ public interface IVectorClockService
void UpdateNode(string nodeId, HlcTimestamp timestamp, string hash);
/// <summary>
/// Clears the cache and resets <see cref="IsInitialized"/> to false,
/// Clears the cache and resets <see cref="IsInitialized" /> to false,
/// forcing re-initialization on next access.
/// </summary>
void Invalidate();

View File

@@ -1,21 +1,9 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Sync;
public class ConflictResolutionResult
{
/// <summary>
/// Gets a value indicating whether the remote change should be applied.
/// </summary>
public bool ShouldApply { get; }
/// <summary>
/// Gets the merged document to apply when conflict resolution produced one.
/// </summary>
public Document? MergedDocument { get; }
/// <summary>
/// Initializes a new instance of the <see cref="ConflictResolutionResult"/> class.
/// Initializes a new instance of the <see cref="ConflictResolutionResult" /> class.
/// </summary>
/// <param name="shouldApply">Indicates whether the change should be applied.</param>
/// <param name="mergedDocument">The merged document produced by resolution, if any.</param>
@@ -25,17 +13,34 @@ public class ConflictResolutionResult
MergedDocument = mergedDocument;
}
/// <summary>
/// Gets a value indicating whether the remote change should be applied.
/// </summary>
public bool ShouldApply { get; }
/// <summary>
/// Gets the merged document to apply when conflict resolution produced one.
/// </summary>
public Document? MergedDocument { get; }
/// <summary>
/// Creates a result indicating that the resolved document should be applied.
/// </summary>
/// <param name="document">The merged document to apply.</param>
/// <returns>A resolution result that applies the provided document.</returns>
public static ConflictResolutionResult Apply(Document document) => new(true, document);
public static ConflictResolutionResult Apply(Document document)
{
return new ConflictResolutionResult(true, document);
}
/// <summary>
/// Creates a result indicating that the remote change should be ignored.
/// </summary>
/// <returns>A resolution result that skips applying the remote change.</returns>
public static ConflictResolutionResult Ignore() => new(false, null);
public static ConflictResolutionResult Ignore()
{
return new ConflictResolutionResult(false, null);
}
}
public interface IConflictResolver

View File

@@ -2,13 +2,13 @@
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Sync
namespace ZB.MOM.WW.CBDDC.Core.Sync;
/// <summary>
/// Represents a queue for operations that should be executed when connectivity is restored.
/// </summary>
public interface IOfflineQueue
{
/// <summary>
/// Represents a queue for operations that should be executed when connectivity is restored.
/// </summary>
public interface IOfflineQueue
{
/// <summary>
/// Gets the number of pending operations in the queue.
/// </summary>
@@ -35,6 +35,6 @@ namespace ZB.MOM.WW.CBDDC.Core.Sync
/// <returns>
/// A task that returns a tuple containing the number of successful and failed operations.
/// </returns>
Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor, CancellationToken cancellationToken = default);
}
Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor,
CancellationToken cancellationToken = default);
}

View File

@@ -1,6 +1,3 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Core.Sync;
public class LastWriteWinsConflictResolver : IConflictResolver
@@ -18,7 +15,8 @@ public class LastWriteWinsConflictResolver : IConflictResolver
{
// Construct new document from oplog entry
var content = remote.Payload ?? default;
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete);
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp,
remote.Operation == OperationType.Delete);
return ConflictResolutionResult.Apply(newDoc);
}
@@ -27,7 +25,8 @@ public class LastWriteWinsConflictResolver : IConflictResolver
{
// Remote is newer, apply it
var content = remote.Payload ?? default;
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete);
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp,
remote.Operation == OperationType.Delete);
return ConflictResolutionResult.Apply(newDoc);
}

View File

@@ -1,11 +1,11 @@
using ZB.MOM.WW.CBDDC.Core.Network;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Core.Sync;
@@ -14,17 +14,18 @@ namespace ZB.MOM.WW.CBDDC.Core.Sync;
/// </summary>
public class OfflineQueue : IOfflineQueue
{
private readonly object _lock = new();
private readonly ILogger<OfflineQueue> _logger;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly Queue<PendingOperation> _queue = new();
private readonly ILogger<OfflineQueue> _logger;
private readonly object _lock = new();
/// <summary>
/// Initializes a new instance of the <see cref="OfflineQueue"/> class.
/// Initializes a new instance of the <see cref="OfflineQueue" /> class.
/// </summary>
/// <param name="peerNodeConfigurationProvider">The configuration provider used for queue limits.</param>
/// <param name="logger">The logger instance.</param>
public OfflineQueue(IPeerNodeConfigurationProvider peerNodeConfigurationProvider, ILogger<OfflineQueue>? logger = null)
public OfflineQueue(IPeerNodeConfigurationProvider peerNodeConfigurationProvider,
ILogger<OfflineQueue>? logger = null)
{
_peerNodeConfigurationProvider = peerNodeConfigurationProvider;
_logger = logger ?? NullLogger<OfflineQueue>.Instance;
@@ -73,7 +74,8 @@ public class OfflineQueue : IOfflineQueue
/// <param name="executor">The delegate that executes each pending operation.</param>
/// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task whose result contains the number of successful and failed operations.</returns>
public async Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor, CancellationToken cancellationToken = default)
public async Task<(int Successful, int Failed)> FlushAsync(Func<PendingOperation, Task> executor,
CancellationToken cancellationToken = default)
{
List<PendingOperation> operations;
@@ -91,11 +93,10 @@ public class OfflineQueue : IOfflineQueue
_logger.LogInformation("Flushing {Count} pending operations", operations.Count);
int successful = 0;
int failed = 0;
var successful = 0;
var failed = 0;
foreach (var op in operations)
{
try
{
await executor(op);
@@ -107,7 +108,6 @@ public class OfflineQueue : IOfflineQueue
_logger.LogError(ex, "Failed to execute pending {Type} operation for {Collection}:{Key}",
op.Type, op.Collection, op.Key);
}
}
_logger.LogInformation("Flush completed: {Successful} successful, {Failed} failed",
successful, failed);
@@ -122,7 +122,7 @@ public class OfflineQueue : IOfflineQueue
{
lock (_lock)
{
var count = _queue.Count;
int count = _queue.Count;
_queue.Clear();
_logger.LogInformation("Cleared {Count} pending operations", count);
}

View File

@@ -1,6 +1,4 @@
using System;
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Core.Sync;
@@ -13,18 +11,22 @@ public class PendingOperation
/// Gets or sets the operation type.
/// </summary>
public string Type { get; set; } = "";
/// <summary>
/// Gets or sets the collection targeted by the operation.
/// </summary>
public string Collection { get; set; } = "";
/// <summary>
/// Gets or sets the document key targeted by the operation.
/// </summary>
public string Key { get; set; } = "";
/// <summary>
/// Gets or sets the payload associated with the operation.
/// </summary>
public object? Data { get; set; }
/// <summary>
/// Gets or sets the UTC time when the operation was queued.
/// </summary>

View File

@@ -1,7 +1,5 @@
using System;
using System.Buffers;
using System.Collections.Generic;
using System.IO;
using System.Text.Json;
namespace ZB.MOM.WW.CBDDC.Core.Sync;
@@ -14,7 +12,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
/// <summary>
/// Resolves a conflict between a local document and a remote operation.
/// </summary>
/// <param name="local">The local document, or <see langword="null"/> if none exists.</param>
/// <param name="local">The local document, or <see langword="null" /> if none exists.</param>
/// <param name="remote">The remote operation to apply.</param>
/// <returns>The conflict resolution result indicating whether and what to apply.</returns>
public ConflictResolutionResult Resolve(Document? local, OplogEntry remote)
@@ -22,7 +20,8 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
if (local == null)
{
var content = remote.Payload ?? default;
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp, remote.Operation == OperationType.Delete);
var newDoc = new Document(remote.Collection, remote.Key, content, remote.Timestamp,
remote.Operation == OperationType.Delete);
return ConflictResolutionResult.Apply(newDoc);
}
@@ -33,6 +32,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
var newDoc = new Document(remote.Collection, remote.Key, default, remote.Timestamp, true);
return ConflictResolutionResult.Apply(newDoc);
}
return ConflictResolutionResult.Ignore();
}
@@ -41,7 +41,9 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
var localTs = local.UpdatedAt;
var remoteTs = remote.Timestamp;
if (localJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Apply(new Document(remote.Collection, remote.Key, remoteJson, remoteTs, false));
if (localJson.ValueKind == JsonValueKind.Undefined)
return ConflictResolutionResult.Apply(new Document(remote.Collection, remote.Key, remoteJson, remoteTs,
false));
if (remoteJson.ValueKind == JsonValueKind.Undefined) return ConflictResolutionResult.Ignore();
// Optimization: Use ArrayBufferWriter (Net6.0) or MemoryStream (NS2.0)
@@ -55,6 +57,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
{
MergeJson(writer, localJson, localTs, remoteJson, remoteTs);
}
mergedDocJson = JsonDocument.Parse(bufferWriter.WrittenMemory).RootElement;
#else
using (var ms = new MemoryStream())
@@ -73,7 +76,8 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
return ConflictResolutionResult.Apply(mergedDoc);
}
private void MergeJson(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs)
private void MergeJson(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote,
HlcTimestamp remoteTs)
{
if (local.ValueKind != remote.ValueKind)
{
@@ -102,11 +106,13 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
if (remoteTs.CompareTo(localTs) > 0) remote.WriteTo(writer);
else local.WriteTo(writer);
}
break;
}
}
private void MergeObjects(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs)
private void MergeObjects(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote,
HlcTimestamp remoteTs)
{
writer.WriteStartObject();
@@ -119,37 +125,32 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
foreach (var prop in local.EnumerateObject())
{
var key = prop.Name;
string key = prop.Name;
processedKeys.Add(key); // Mark as processed
writer.WritePropertyName(key);
if (remote.TryGetProperty(key, out var remoteVal))
{
// Collision -> Merge
MergeJson(writer, prop.Value, localTs, remoteVal, remoteTs);
}
else
{
// Only local
prop.Value.WriteTo(writer);
}
}
foreach (var prop in remote.EnumerateObject())
{
if (!processedKeys.Contains(prop.Name))
{
// New from remote
writer.WritePropertyName(prop.Name);
prop.Value.WriteTo(writer);
}
}
writer.WriteEndObject();
}
private void MergeArrays(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote, HlcTimestamp remoteTs)
private void MergeArrays(Utf8JsonWriter writer, JsonElement local, HlcTimestamp localTs, JsonElement remote,
HlcTimestamp remoteTs)
{
// Heuristic check
bool localIsObj = HasObjects(local);
@@ -198,30 +199,22 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
// 1. Process Local Items (Merge or Write)
foreach (var kvp in localMap)
{
var id = kvp.Key;
string id = kvp.Key;
var localItem = kvp.Value;
processedIds.Add(id);
if (remoteMap.TryGetValue(id, out var remoteItem))
{
// Merge recursively
MergeJson(writer, localItem, localTs, remoteItem, remoteTs);
}
else
{
// Keep local item
localItem.WriteTo(writer);
}
}
// 2. Process New Remote Items
foreach (var kvp in remoteMap)
{
if (!processedIds.Contains(kvp.Key))
{
kvp.Value.WriteTo(writer);
}
}
writer.WriteEndArray();
}
@@ -249,6 +242,7 @@ public class RecursiveNodeMergeConflictResolver : IConflictResolver
map[id] = item;
}
return map;
}
}

View File

@@ -59,15 +59,13 @@ public class VectorClock
/// <param name="other">The vector clock to merge from.</param>
public void Merge(VectorClock other)
{
foreach (var nodeId in other.NodeIds)
foreach (string nodeId in other.NodeIds)
{
var otherTs = other.GetTimestamp(nodeId);
if (!_clock.TryGetValue(nodeId, out var currentTs) || otherTs.CompareTo(currentTs) > 0)
{
_clock[nodeId] = otherTs;
}
}
}
/// <summary>
/// Compares this vector clock with another to determine causality.
@@ -79,12 +77,12 @@ public class VectorClock
/// <param name="other">The vector clock to compare with.</param>
public CausalityRelation CompareTo(VectorClock other)
{
bool thisAhead = false;
bool otherAhead = false;
var thisAhead = false;
var otherAhead = false;
var allNodes = new HashSet<string>(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal);
foreach (var nodeId in allNodes)
foreach (string nodeId in allNodes)
{
var thisTs = GetTimestamp(nodeId);
var otherTs = other.GetTimestamp(nodeId);
@@ -92,19 +90,11 @@ public class VectorClock
int cmp = thisTs.CompareTo(otherTs);
if (cmp > 0)
{
thisAhead = true;
}
else if (cmp < 0)
{
otherAhead = true;
}
else if (cmp < 0) otherAhead = true;
// Early exit if concurrent
if (thisAhead && otherAhead)
{
return CausalityRelation.Concurrent;
}
if (thisAhead && otherAhead) return CausalityRelation.Concurrent;
}
if (thisAhead && !otherAhead)
@@ -123,20 +113,14 @@ public class VectorClock
public IEnumerable<string> GetNodesWithUpdates(VectorClock other)
{
var allNodes = new HashSet<string>(_clock.Keys, StringComparer.Ordinal);
foreach (var nodeId in other._clock.Keys)
{
allNodes.Add(nodeId);
}
foreach (string nodeId in other._clock.Keys) allNodes.Add(nodeId);
foreach (var nodeId in allNodes)
foreach (string nodeId in allNodes)
{
var thisTs = GetTimestamp(nodeId);
var otherTs = other.GetTimestamp(nodeId);
if (otherTs.CompareTo(thisTs) > 0)
{
yield return nodeId;
}
if (otherTs.CompareTo(thisTs) > 0) yield return nodeId;
}
}
@@ -149,15 +133,12 @@ public class VectorClock
{
var allNodes = new HashSet<string>(_clock.Keys.Union(other._clock.Keys), StringComparer.Ordinal);
foreach (var nodeId in allNodes)
foreach (string nodeId in allNodes)
{
var thisTs = GetTimestamp(nodeId);
var otherTs = other.GetTimestamp(nodeId);
if (thisTs.CompareTo(otherTs) > 0)
{
yield return nodeId;
}
if (thisTs.CompareTo(otherTs) > 0) yield return nodeId;
}
}
@@ -187,10 +168,13 @@ public enum CausalityRelation
{
/// <summary>Both vector clocks are equal.</summary>
Equal,
/// <summary>This vector clock is strictly ahead (dominates).</summary>
StrictlyAhead,
/// <summary>This vector clock is strictly behind (dominated).</summary>
StrictlyBehind,
/// <summary>Vector clocks are concurrent (neither dominates).</summary>
Concurrent
}

View File

@@ -19,15 +19,15 @@
</PropertyGroup>
<ItemGroup>
<None Include="README.md" Pack="true" PackagePath="\" />
<None Include="README.md" Pack="true" PackagePath="\"/>
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
</ItemGroup>
<ItemGroup>
<Folder Include="Storage\Events\" />
<Folder Include="Storage\Events\"/>
</ItemGroup>
</Project>

View File

@@ -1,12 +1,12 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Diagnostics.HealthChecks;
using Microsoft.Extensions.Hosting;
using ZB.MOM.WW.CBDDC.Hosting.Configuration;
using ZB.MOM.WW.CBDDC.Hosting.HealthChecks;
using ZB.MOM.WW.CBDDC.Hosting.HostedServices;
using ZB.MOM.WW.CBDDC.Hosting.Services;
using ZB.MOM.WW.CBDDC.Network;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Diagnostics.HealthChecks;
using Microsoft.Extensions.Hosting;
namespace ZB.MOM.WW.CBDDC.Hosting;
@@ -51,10 +51,7 @@ public static class CBDDCHostingExtensions
this IServiceCollection services,
Action<ClusterOptions>? configure = null)
{
return services.AddCBDDCHosting(options =>
{
configure?.Invoke(options.Cluster);
});
return services.AddCBDDCHosting(options => { configure?.Invoke(options.Cluster); });
}
private static void RegisterSingleClusterServices(
@@ -81,12 +78,10 @@ public static class CBDDCHostingExtensions
{
// Health checks
if (options.EnableHealthChecks)
{
services.AddHealthChecks()
.AddCheck<CBDDCHealthCheck>(
"cbddc",
failureStatus: HealthStatus.Unhealthy,
tags: new[] { "db", "ready" });
}
HealthStatus.Unhealthy,
new[] { "db", "ready" });
}
}

View File

@@ -1,5 +1,3 @@
using System;
namespace ZB.MOM.WW.CBDDC.Hosting.Configuration;
/// <summary>
@@ -32,7 +30,8 @@ public class ClusterOptions
public long PeerConfirmationLagThresholdMs { get; set; } = 30_000;
/// <summary>
/// Gets or sets the critical lag threshold (in milliseconds) used to determine when a tracked peer causes unhealthy status.
/// Gets or sets the critical lag threshold (in milliseconds) used to determine when a tracked peer causes unhealthy
/// status.
/// Peers above this threshold mark health as unhealthy.
/// Default: 120,000 ms.
/// </summary>

View File

@@ -1,8 +1,3 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Diagnostics.HealthChecks;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Hosting.Configuration;
@@ -16,11 +11,11 @@ namespace ZB.MOM.WW.CBDDC.Hosting.HealthChecks;
public class CBDDCHealthCheck : IHealthCheck
{
private readonly IOplogStore _oplogStore;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
private readonly CBDDCHostingOptions _options;
private readonly IPeerOplogConfirmationStore _peerOplogConfirmationStore;
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCHealthCheck"/> class.
/// Initializes a new instance of the <see cref="CBDDCHealthCheck" /> class.
/// </summary>
/// <param name="oplogStore">The oplog store used to verify persistence health.</param>
/// <param name="peerOplogConfirmationStore">The peer confirmation store used for confirmation lag health checks.</param>
@@ -31,7 +26,8 @@ public class CBDDCHealthCheck : IHealthCheck
CBDDCHostingOptions options)
{
_oplogStore = oplogStore ?? throw new ArgumentNullException(nameof(oplogStore));
_peerOplogConfirmationStore = peerOplogConfirmationStore ?? throw new ArgumentNullException(nameof(peerOplogConfirmationStore));
_peerOplogConfirmationStore = peerOplogConfirmationStore ??
throw new ArgumentNullException(nameof(peerOplogConfirmationStore));
_options = options ?? throw new ArgumentNullException(nameof(options));
}
@@ -40,7 +36,7 @@ public class CBDDCHealthCheck : IHealthCheck
/// </summary>
/// <param name="context">The health check execution context.</param>
/// <param name="cancellationToken">A token used to cancel the health check.</param>
/// <returns>A <see cref="HealthCheckResult"/> describing the health status.</returns>
/// <returns>A <see cref="HealthCheckResult" /> describing the health status.</returns>
public async Task<HealthCheckResult> CheckHealthAsync(
HealthCheckContext context,
CancellationToken cancellationToken = default)
@@ -58,15 +54,18 @@ public class CBDDCHealthCheck : IHealthCheck
var peersWithNoConfirmation = new List<string>();
var laggingPeers = new List<string>();
var criticalLaggingPeers = new List<string>();
var lastSuccessfulConfirmationUpdateByPeer = new Dictionary<string, DateTimeOffset?>(StringComparer.Ordinal);
var lastSuccessfulConfirmationUpdateByPeer =
new Dictionary<string, DateTimeOffset?>(StringComparer.Ordinal);
var maxLagMs = 0L;
var lagThresholdMs = Math.Max(0, _options.Cluster.PeerConfirmationLagThresholdMs);
var criticalLagThresholdMs = Math.Max(lagThresholdMs, _options.Cluster.PeerConfirmationCriticalLagThresholdMs);
long lagThresholdMs = Math.Max(0, _options.Cluster.PeerConfirmationLagThresholdMs);
long criticalLagThresholdMs =
Math.Max(lagThresholdMs, _options.Cluster.PeerConfirmationCriticalLagThresholdMs);
foreach (var peerNodeId in trackedPeers)
foreach (string peerNodeId in trackedPeers)
{
var confirmations = (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken))
var confirmations =
(await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken))
.Where(confirmation => confirmation.IsActive)
.ToList();
@@ -83,19 +82,14 @@ public class CBDDCHealthCheck : IHealthCheck
.ThenBy(confirmation => confirmation.ConfirmedLogic)
.First();
var lagMs = Math.Max(0, localHead.PhysicalTime - oldestConfirmation.ConfirmedWall);
long lagMs = Math.Max(0, localHead.PhysicalTime - oldestConfirmation.ConfirmedWall);
maxLagMs = Math.Max(maxLagMs, lagMs);
lastSuccessfulConfirmationUpdateByPeer[peerNodeId] = confirmations.Max(confirmation => confirmation.LastConfirmedUtc);
lastSuccessfulConfirmationUpdateByPeer[peerNodeId] =
confirmations.Max(confirmation => confirmation.LastConfirmedUtc);
if (lagMs > lagThresholdMs)
{
laggingPeers.Add(peerNodeId);
}
if (lagMs > lagThresholdMs) laggingPeers.Add(peerNodeId);
if (lagMs > criticalLagThresholdMs)
{
criticalLaggingPeers.Add(peerNodeId);
}
if (lagMs > criticalLagThresholdMs) criticalLaggingPeers.Add(peerNodeId);
}
var payload = new Dictionary<string, object>
@@ -108,18 +102,14 @@ public class CBDDCHealthCheck : IHealthCheck
};
if (criticalLaggingPeers.Count > 0)
{
return HealthCheckResult.Unhealthy(
$"CBDDC is unhealthy. Critical lag detected for {criticalLaggingPeers.Count} tracked peer(s).",
data: payload);
}
if (peersWithNoConfirmation.Count > 0 || laggingPeers.Count > 0)
{
return HealthCheckResult.Degraded(
$"CBDDC is degraded. Lagging peers: {laggingPeers.Count}, unconfirmed peers: {peersWithNoConfirmation.Count}.",
data: payload);
}
return HealthCheckResult.Healthy(
$"CBDDC is healthy. Latest timestamp: {localHead.PhysicalTime}.",
@@ -129,7 +119,7 @@ public class CBDDCHealthCheck : IHealthCheck
{
return HealthCheckResult.Unhealthy(
"CBDDC persistence layer is unavailable",
exception: ex);
ex);
}
}
}

View File

@@ -1,5 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Serilog.Context;
@@ -16,7 +14,7 @@ public class DiscoveryServiceHostedService : IHostedService
private readonly ILogger<DiscoveryServiceHostedService> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="DiscoveryServiceHostedService"/> class.
/// Initializes a new instance of the <see cref="DiscoveryServiceHostedService" /> class.
/// </summary>
/// <param name="discoveryService">The discovery service to manage.</param>
/// <param name="logger">The logger used for service lifecycle events.</param>

View File

@@ -1,5 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Serilog.Context;
@@ -12,11 +10,11 @@ namespace ZB.MOM.WW.CBDDC.Hosting.HostedServices;
/// </summary>
public class TcpSyncServerHostedService : IHostedService
{
private readonly ISyncServer _syncServer;
private readonly ILogger<TcpSyncServerHostedService> _logger;
private readonly ISyncServer _syncServer;
/// <summary>
/// Initializes a new instance of the <see cref="TcpSyncServerHostedService"/> class.
/// Initializes a new instance of the <see cref="TcpSyncServerHostedService" /> class.
/// </summary>
/// <param name="syncServer">The sync server to start and stop.</param>
/// <param name="logger">The logger instance.</param>

View File

@@ -41,6 +41,7 @@ app.Run();
## Health Checks
CBDDC registers health checks that verify:
- Database connectivity
- Latest timestamp retrieval
@@ -53,6 +54,7 @@ curl http://localhost:5000/health
### Cluster
Best for:
- Dedicated database servers
- Simple deployments
- Development/testing environments
@@ -60,6 +62,7 @@ Best for:
## Server Behavior
CBDDC servers operate in respond-only mode:
- Accept incoming sync connections
- Respond to sync requests
- Do not initiate outbound sync
@@ -70,7 +73,7 @@ CBDDC servers operate in respond-only mode:
### ClusterOptions
| Property | Type | Default | Description |
|----------|------|---------|-------------|
|--------------------|--------|-------------|------------------------|
| NodeId | string | MachineName | Unique node identifier |
| TcpPort | int | 5001 | TCP port for sync |
| EnableUdpDiscovery | bool | false | Enable UDP discovery |

View File

@@ -1,5 +1,3 @@
using System;
using System.Collections.Generic;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using Serilog.Context;
@@ -17,9 +15,9 @@ public class NoOpDiscoveryService : IDiscoveryService
private readonly ILogger<NoOpDiscoveryService> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="NoOpDiscoveryService"/> class.
/// Initializes a new instance of the <see cref="NoOpDiscoveryService" /> class.
/// </summary>
/// <param name="logger">The logger instance to use, or <see langword="null"/> to use a no-op logger.</param>
/// <param name="logger">The logger instance to use, or <see langword="null" /> to use a no-op logger.</param>
public NoOpDiscoveryService(ILogger<NoOpDiscoveryService>? logger = null)
{
_logger = logger ?? NullLogger<NoOpDiscoveryService>.Instance;

View File

@@ -1,4 +1,3 @@
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using Serilog.Context;
@@ -15,9 +14,9 @@ public class NoOpSyncOrchestrator : ISyncOrchestrator
private readonly ILogger<NoOpSyncOrchestrator> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="NoOpSyncOrchestrator"/> class.
/// Initializes a new instance of the <see cref="NoOpSyncOrchestrator" /> class.
/// </summary>
/// <param name="logger">The logger instance to use, or <see langword="null"/> for a no-op logger.</param>
/// <param name="logger">The logger instance to use, or <see langword="null" /> for a no-op logger.</param>
public NoOpSyncOrchestrator(ILogger<NoOpSyncOrchestrator>? logger = null)
{
_logger = logger ?? NullLogger<NoOpSyncOrchestrator>.Instance;

View File

@@ -1,14 +1,14 @@
<Project Sdk="Microsoft.NET.Sdk">
<ItemGroup>
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj" />
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj"/>
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0" />
<PackageReference Include="Serilog" Version="4.2.0" />
<PackageReference Include="Microsoft.Extensions.Diagnostics.HealthChecks" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
<PackageReference Include="Serilog" Version="4.2.0"/>
</ItemGroup>
<PropertyGroup>
@@ -31,7 +31,7 @@
</PropertyGroup>
<ItemGroup>
<None Include="README.md" Pack="true" PackagePath="\" />
<None Include="README.md" Pack="true" PackagePath="\"/>
</ItemGroup>
</Project>

View File

@@ -1,8 +1,7 @@
using System;
using System.Net;
using System.Net.NetworkInformation;
using System.Net.Sockets;
using Microsoft.Extensions.Logging;
using System.Linq;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network;
@@ -14,24 +13,9 @@ public class CBDDCNode : ICBDDCNode
{
private readonly ILogger<CBDDCNode> _logger;
/// <summary>
/// Gets the Sync Server instance.
/// </summary>
public ISyncServer Server { get; }
/// <summary>
/// Gets the Discovery Service instance.
/// </summary>
public IDiscoveryService Discovery { get; }
/// <summary>
/// Gets the Synchronization Orchestrator instance.
/// </summary>
public ISyncOrchestrator Orchestrator { get; }
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCNode"/> class.
/// Initializes a new instance of the <see cref="CBDDCNode" /> class.
/// </summary>
/// <param name="server">The TCP server for handling incoming sync requests.</param>
/// <param name="discovery">The UDP service for peer discovery.</param>
@@ -49,6 +33,21 @@ public class CBDDCNode : ICBDDCNode
_logger = logger;
}
/// <summary>
/// Gets the Sync Server instance.
/// </summary>
public ISyncServer Server { get; }
/// <summary>
/// Gets the Discovery Service instance.
/// </summary>
public IDiscoveryService Discovery { get; }
/// <summary>
/// Gets the Synchronization Orchestrator instance.
/// </summary>
public ISyncOrchestrator Orchestrator { get; }
/// <summary>
/// Starts all node components (Server, Discovery, Orchestrator).
/// </summary>
@@ -93,12 +92,11 @@ public class CBDDCNode : ICBDDCNode
{
// If the server is listening on "Any" (0.0.0.0), we cannot advertise that as a connectable address.
// We must resolve the actual machine IP address that peers can reach.
if (Equals(ep.Address, System.Net.IPAddress.Any) || Equals(ep.Address, System.Net.IPAddress.IPv6Any))
{
if (Equals(ep.Address, IPAddress.Any) || Equals(ep.Address, IPAddress.IPv6Any))
return new NodeAddress(GetLocalIpAddress(), ep.Port);
}
return new NodeAddress(ep.Address.ToString(), ep.Port);
}
return new NodeAddress("Unknown", 0);
}
}
@@ -107,20 +105,17 @@ public class CBDDCNode : ICBDDCNode
{
try
{
var interfaces = System.Net.NetworkInformation.NetworkInterface.GetAllNetworkInterfaces()
.Where(i => i.OperationalStatus == System.Net.NetworkInformation.OperationalStatus.Up
&& i.NetworkInterfaceType != System.Net.NetworkInformation.NetworkInterfaceType.Loopback);
var interfaces = NetworkInterface.GetAllNetworkInterfaces()
.Where(i => i.OperationalStatus == OperationalStatus.Up
&& i.NetworkInterfaceType != NetworkInterfaceType.Loopback);
foreach (var i in interfaces)
{
var props = i.GetIPProperties();
var ipInfo = props.UnicastAddresses
.FirstOrDefault(u => u.Address.AddressFamily == System.Net.Sockets.AddressFamily.InterNetwork); // Prefer IPv4
.FirstOrDefault(u => u.Address.AddressFamily == AddressFamily.InterNetwork); // Prefer IPv4
if (ipInfo != null)
{
return ipInfo.Address.ToString();
}
if (ipInfo != null) return ipInfo.Address.ToString();
}
return "127.0.0.1";
@@ -136,16 +131,7 @@ public class CBDDCNode : ICBDDCNode
public class NodeAddress
{
/// <summary>
/// Gets the host portion of the node address.
/// </summary>
public string Host { get; }
/// <summary>
/// Gets the port portion of the node address.
/// </summary>
public int Port { get; }
/// <summary>
/// Initializes a new instance of the <see cref="NodeAddress"/> class.
/// Initializes a new instance of the <see cref="NodeAddress" /> class.
/// </summary>
/// <param name="host">The host name or IP address.</param>
/// <param name="port">The port number.</param>
@@ -155,6 +141,19 @@ public class NodeAddress
Port = port;
}
/// <summary>
/// Gets the host portion of the node address.
/// </summary>
public string Host { get; }
/// <summary>
/// Gets the port portion of the node address.
/// </summary>
public int Port { get; }
/// <inheritdoc />
public override string ToString() => $"{Host}:{Port}";
public override string ToString()
{
return $"{Host}:{Port}";
}
}

View File

@@ -1,9 +1,6 @@
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Serilog.Context;
using System;
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network;
@@ -12,11 +9,11 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary>
public class CBDDCNodeService : IHostedService
{
private readonly ICBDDCNode _node;
private readonly ILogger<CBDDCNodeService> _logger;
private readonly ICBDDCNode _node;
/// <summary>
/// Initializes a new instance of the <see cref="CBDDCNodeService"/> class.
/// Initializes a new instance of the <see cref="CBDDCNodeService" /> class.
/// </summary>
/// <param name="node">The CBDDC node to manage.</param>
/// <param name="logger">The logger instance.</param>

View File

@@ -16,7 +16,7 @@ public static class CBDDCServiceCollectionExtensions
/// Registers core CBDDC service dependencies.
/// </summary>
/// <param name="services">The service collection to update.</param>
/// <returns>The same <see cref="IServiceCollection"/> instance for chaining.</returns>
/// <returns>The same <see cref="IServiceCollection" /> instance for chaining.</returns>
public static IServiceCollection AddCBDDCCore(this IServiceCollection services)
{
ArgumentNullException.ThrowIfNull(services);

View File

@@ -1,35 +1,30 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network;
/// <summary>
/// Composite discovery service that combines UDP LAN discovery with persistent remote peers from the peerConfigurationStore.
/// Composite discovery service that combines UDP LAN discovery with persistent remote peers from the
/// peerConfigurationStore.
/// Periodically refreshes the remote peer list and merges with actively discovered LAN peers.
///
/// Remote peer configurations are stored in a synchronized collection that is automatically
/// replicated across all nodes in the cluster. Any node that adds a remote peer will have
/// it synchronized to all other nodes automatically.
/// </summary>
public class CompositeDiscoveryService : IDiscoveryService
{
private readonly IDiscoveryService _udpDiscovery;
private readonly IPeerConfigurationStore _peerConfigurationStore;
private readonly ILogger<CompositeDiscoveryService> _logger;
private readonly TimeSpan _refreshInterval;
private const string RemotePeersCollectionName = "_system_remote_peers";
private readonly ILogger<CompositeDiscoveryService> _logger;
private readonly IPeerConfigurationStore _peerConfigurationStore;
private readonly TimeSpan _refreshInterval;
private readonly ConcurrentDictionary<string, PeerNode> _remotePeers = new();
private readonly object _startStopLock = new();
private readonly IDiscoveryService _udpDiscovery;
private CancellationTokenSource? _cts;
private readonly ConcurrentDictionary<string, PeerNode> _remotePeers = new();
private readonly object _startStopLock = new object();
/// <summary>
/// Initializes a new instance of the CompositeDiscoveryService class.
@@ -45,7 +40,8 @@ public class CompositeDiscoveryService : IDiscoveryService
TimeSpan? refreshInterval = null)
{
_udpDiscovery = udpDiscovery ?? throw new ArgumentNullException(nameof(udpDiscovery));
_peerConfigurationStore = peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore));
_peerConfigurationStore =
peerConfigurationStore ?? throw new ArgumentNullException(nameof(peerConfigurationStore));
_logger = logger ?? NullLogger<CompositeDiscoveryService>.Instance;
_refreshInterval = refreshInterval ?? TimeSpan.FromMinutes(5);
}
@@ -76,6 +72,7 @@ public class CompositeDiscoveryService : IDiscoveryService
_logger.LogWarning("Composite discovery service already started");
return;
}
_cts = new CancellationTokenSource();
}
@@ -143,7 +140,6 @@ public class CompositeDiscoveryService : IDiscoveryService
private async Task RefreshLoopAsync(CancellationToken cancellationToken)
{
while (!cancellationToken.IsCancellationRequested)
{
try
{
await Task.Delay(_refreshInterval, cancellationToken);
@@ -159,7 +155,6 @@ public class CompositeDiscoveryService : IDiscoveryService
_logger.LogError(ex, "Error during remote peer refresh");
}
}
}
private async Task RefreshRemotePeersAsync()
{
@@ -178,14 +173,14 @@ public class CompositeDiscoveryService : IDiscoveryService
config.NodeId,
config.Address,
now, // LastSeen is now for persistent peers (always considered active)
config.Type,
NodeRole.Member // Remote peers are always members, never gateways
config.Type // Remote peers are always members, never gateways
);
_remotePeers[config.NodeId] = peerNode;
}
_logger.LogInformation("Refreshed remote peers: {Count} enabled peers loaded from synchronized collection", _remotePeers.Count);
_logger.LogInformation("Refreshed remote peers: {Count} enabled peers loaded from synchronized collection",
_remotePeers.Count);
}
catch (Exception ex)
{

View File

@@ -1,5 +1,3 @@
using System;
using System.IO;
using System.IO.Compression;
namespace ZB.MOM.WW.CBDDC.Network;
@@ -38,6 +36,7 @@ public static class CompressionHelper
{
brotli.Write(data, 0, data.Length);
}
return output.ToArray();
#else
return data;
@@ -58,6 +57,7 @@ public static class CompressionHelper
{
brotli.CopyTo(output);
}
return output.ToArray();
#else
throw new NotSupportedException("Brotli decompression not supported on this platform.");

View File

@@ -1,21 +1,22 @@
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network;
namespace ZB.MOM.WW.CBDDC.Network
public interface ICBDDCNode
{
public interface ICBDDCNode
{
/// <summary>
/// Gets the node address.
/// </summary>
NodeAddress Address { get; }
/// <summary>
/// Gets the discovery service.
/// </summary>
IDiscoveryService Discovery { get; }
/// <summary>
/// Gets the synchronization orchestrator.
/// </summary>
ISyncOrchestrator Orchestrator { get; }
/// <summary>
/// Gets the synchronization server.
/// </summary>
@@ -26,10 +27,10 @@ namespace ZB.MOM.WW.CBDDC.Network
/// </summary>
/// <returns>A task that represents the asynchronous start operation.</returns>
Task Start();
/// <summary>
/// Stops the node services.
/// </summary>
/// <returns>A task that represents the asynchronous stop operation.</returns>
Task Stop();
}
}

View File

@@ -1,14 +1,12 @@
using ZB.MOM.WW.CBDDC.Core.Network;
using System.Collections.Generic;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network
namespace ZB.MOM.WW.CBDDC.Network;
/// <summary>
/// Defines peer discovery operations.
/// </summary>
public interface IDiscoveryService
{
/// <summary>
/// Defines peer discovery operations.
/// </summary>
public interface IDiscoveryService
{
/// <summary>
/// Gets the currently active peers.
/// </summary>
@@ -26,5 +24,4 @@ namespace ZB.MOM.WW.CBDDC.Network
/// </summary>
/// <returns>A task that represents the asynchronous operation.</returns>
Task Stop();
}
}

View File

@@ -1,5 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Network;

View File

@@ -1,12 +1,10 @@
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network;
namespace ZB.MOM.WW.CBDDC.Network
/// <summary>
/// Defines lifecycle operations for synchronization orchestration.
/// </summary>
public interface ISyncOrchestrator
{
/// <summary>
/// Defines lifecycle operations for synchronization orchestration.
/// </summary>
public interface ISyncOrchestrator
{
/// <summary>
/// Starts synchronization orchestration.
/// </summary>
@@ -18,5 +16,4 @@ namespace ZB.MOM.WW.CBDDC.Network
/// </summary>
/// <returns>A task that represents the asynchronous stop operation.</returns>
Task Stop();
}
}

View File

@@ -1,17 +1,24 @@
using System.Net;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network;
/// <summary>
/// Defines the contract for a server that supports starting, stopping, and reporting its listening network endpoint for
/// Defines the contract for a server that supports starting, stopping, and reporting its listening network endpoint
/// for
/// synchronization operations.
/// </summary>
/// <remarks>Implementations of this interface are expected to provide asynchronous methods for starting and
/// <remarks>
/// Implementations of this interface are expected to provide asynchronous methods for starting and
/// stopping the server. The listening endpoint may be null if the server is not currently active or has not been
/// started.</remarks>
/// started.
/// </remarks>
public interface ISyncServer
{
/// <summary>
/// Gets the network endpoint currently used by the server for listening.
/// </summary>
IPEndPoint? ListeningEndpoint { get; }
/// <summary>
/// Starts the synchronization server.
/// </summary>
@@ -23,9 +30,4 @@ public interface ISyncServer
/// </summary>
/// <returns>A task that represents the asynchronous operation.</returns>
Task Stop();
/// <summary>
/// Gets the network endpoint currently used by the server for listening.
/// </summary>
IPEndPoint? ListeningEndpoint { get; }
}

View File

@@ -1,11 +1,6 @@
using System;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Network.Leadership;
@@ -16,30 +11,13 @@ namespace ZB.MOM.WW.CBDDC.Network.Leadership;
/// </summary>
public class BullyLeaderElectionService : ILeaderElectionService
{
private readonly IDiscoveryService _discoveryService;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly ILogger<BullyLeaderElectionService> _logger;
private readonly IDiscoveryService _discoveryService;
private readonly TimeSpan _electionInterval;
private readonly ILogger<BullyLeaderElectionService> _logger;
private CancellationTokenSource? _cts;
private string? _localNodeId;
private string? _currentGatewayNodeId;
private bool _isCloudGateway;
/// <summary>
/// Gets a value indicating whether this node is currently the cloud gateway leader.
/// </summary>
public bool IsCloudGateway => _isCloudGateway;
/// <summary>
/// Gets the current gateway node identifier.
/// </summary>
public string? CurrentGatewayNodeId => _currentGatewayNodeId;
/// <summary>
/// Occurs when leadership changes.
/// </summary>
public event EventHandler<LeadershipChangedEventArgs>? LeadershipChanged;
/// <summary>
/// Initializes a new instance of the BullyLeaderElectionService class.
@@ -60,6 +38,21 @@ public class BullyLeaderElectionService : ILeaderElectionService
_electionInterval = electionInterval ?? TimeSpan.FromSeconds(5);
}
/// <summary>
/// Gets a value indicating whether this node is currently the cloud gateway leader.
/// </summary>
public bool IsCloudGateway { get; private set; }
/// <summary>
/// Gets the current gateway node identifier.
/// </summary>
public string? CurrentGatewayNodeId { get; private set; }
/// <summary>
/// Occurs when leadership changes.
/// </summary>
public event EventHandler<LeadershipChangedEventArgs>? LeadershipChanged;
/// <summary>
/// Starts the leader election loop.
/// </summary>
@@ -100,7 +93,6 @@ public class BullyLeaderElectionService : ILeaderElectionService
private async Task ElectionLoopAsync(CancellationToken cancellationToken)
{
while (!cancellationToken.IsCancellationRequested)
{
try
{
await Task.Delay(_electionInterval, cancellationToken);
@@ -116,7 +108,6 @@ public class BullyLeaderElectionService : ILeaderElectionService
_logger.LogError(ex, "Error during leader election");
}
}
}
private void RunElection()
{
@@ -132,34 +123,30 @@ public class BullyLeaderElectionService : ILeaderElectionService
lanPeers.Add(_localNodeId);
// Bully algorithm: smallest NodeId wins (lexicographic comparison)
var newLeader = lanPeers.OrderBy(id => id, StringComparer.Ordinal).FirstOrDefault();
string? newLeader = lanPeers.OrderBy(id => id, StringComparer.Ordinal).FirstOrDefault();
if (newLeader == null)
{
// No peers available, local node is leader by default
newLeader = _localNodeId;
}
// Check if leadership changed
if (newLeader != _currentGatewayNodeId)
if (newLeader != CurrentGatewayNodeId)
{
var wasLeader = _isCloudGateway;
_currentGatewayNodeId = newLeader;
_isCloudGateway = newLeader == _localNodeId;
bool wasLeader = IsCloudGateway;
CurrentGatewayNodeId = newLeader;
IsCloudGateway = newLeader == _localNodeId;
if (wasLeader != _isCloudGateway)
if (wasLeader != IsCloudGateway)
{
if (_isCloudGateway)
{
_logger.LogInformation("🔐 This node is now the CLOUD GATEWAY (Leader) - Will sync with remote cloud nodes");
}
if (IsCloudGateway)
_logger.LogInformation(
"🔐 This node is now the CLOUD GATEWAY (Leader) - Will sync with remote cloud nodes");
else
{
_logger.LogInformation("👤 This node is now a MEMBER - Cloud sync handled by gateway: {Gateway}", _currentGatewayNodeId);
}
_logger.LogInformation("👤 This node is now a MEMBER - Cloud sync handled by gateway: {Gateway}",
CurrentGatewayNodeId);
// Raise event
LeadershipChanged?.Invoke(this, new LeadershipChangedEventArgs(_currentGatewayNodeId, _isCloudGateway));
LeadershipChanged?.Invoke(this, new LeadershipChangedEventArgs(CurrentGatewayNodeId, IsCloudGateway));
}
}
}

View File

@@ -1,6 +1,3 @@
using System;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Leadership;
/// <summary>
@@ -8,6 +5,20 @@ namespace ZB.MOM.WW.CBDDC.Network.Leadership;
/// </summary>
public class LeadershipChangedEventArgs : EventArgs
{
/// <summary>
/// Initializes a new instance of the LeadershipChangedEventArgs class.
/// </summary>
/// <param name="currentGatewayNodeId">
/// The NodeId of the current gateway node, or <see langword="null" /> when none is
/// elected.
/// </param>
/// <param name="isLocalNodeGateway">A value indicating whether the local node is the gateway.</param>
public LeadershipChangedEventArgs(string? currentGatewayNodeId, bool isLocalNodeGateway)
{
CurrentGatewayNodeId = currentGatewayNodeId;
IsLocalNodeGateway = isLocalNodeGateway;
}
/// <summary>
/// Gets the NodeId of the current cloud gateway (leader).
/// Null if no leader is elected.
@@ -18,17 +29,6 @@ public class LeadershipChangedEventArgs : EventArgs
/// Gets whether the local node is now the cloud gateway.
/// </summary>
public bool IsLocalNodeGateway { get; }
/// <summary>
/// Initializes a new instance of the LeadershipChangedEventArgs class.
/// </summary>
/// <param name="currentGatewayNodeId">The NodeId of the current gateway node, or <see langword="null"/> when none is elected.</param>
/// <param name="isLocalNodeGateway">A value indicating whether the local node is the gateway.</param>
public LeadershipChangedEventArgs(string? currentGatewayNodeId, bool isLocalNodeGateway)
{
CurrentGatewayNodeId = currentGatewayNodeId;
IsLocalNodeGateway = isLocalNodeGateway;
}
}
/// <summary>

View File

@@ -1,8 +1,3 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
@@ -18,7 +13,7 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore;
/// <summary>
/// Initializes a new instance of the <see cref="OplogPruneCutoffCalculator"/> class.
/// Initializes a new instance of the <see cref="OplogPruneCutoffCalculator" /> class.
/// </summary>
/// <param name="oplogStore">The oplog store.</param>
/// <param name="peerOplogConfirmationStore">The optional peer confirmation store.</param>
@@ -39,23 +34,19 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
var retentionCutoff = BuildRetentionCutoff(configuration);
if (_peerOplogConfirmationStore == null)
{
return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff,
confirmationCutoff: null,
effectiveCutoff: retentionCutoff,
reason: "Confirmation tracking is not configured.");
}
null,
retentionCutoff,
"Confirmation tracking is not configured.");
var relevantSources = await GetRelevantSourceNodesAsync(cancellationToken);
if (relevantSources.Count == 0)
{
return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff,
confirmationCutoff: null,
effectiveCutoff: retentionCutoff,
reason: "No local non-default oplog/vector-clock sources were found.");
}
null,
retentionCutoff,
"No local non-default oplog/vector-clock sources were found.");
var activeTrackedPeers = (await _peerOplogConfirmationStore.GetActiveTrackedPeersAsync(cancellationToken))
.Where(peerNodeId => !string.IsNullOrWhiteSpace(peerNodeId))
@@ -63,19 +54,18 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
.ToList();
if (activeTrackedPeers.Count == 0)
{
return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff,
confirmationCutoff: null,
effectiveCutoff: retentionCutoff,
reason: "No active tracked peers found for confirmation gating.");
}
null,
retentionCutoff,
"No active tracked peers found for confirmation gating.");
HlcTimestamp? confirmationCutoff = null;
foreach (var peerNodeId in activeTrackedPeers)
foreach (string peerNodeId in activeTrackedPeers)
{
var confirmationsForPeer = (await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken))
var confirmationsForPeer =
(await _peerOplogConfirmationStore.GetConfirmationsForPeerAsync(peerNodeId, cancellationToken))
.Where(confirmation => confirmation.IsActive)
.Where(confirmation => !string.IsNullOrWhiteSpace(confirmation.SourceNodeId))
.GroupBy(confirmation => confirmation.SourceNodeId, StringComparer.Ordinal)
@@ -87,30 +77,25 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
.Last(),
StringComparer.Ordinal);
foreach (var sourceNodeId in relevantSources)
{
if (!confirmationsForPeer.TryGetValue(sourceNodeId, out var confirmedTimestamp) || confirmedTimestamp == default)
foreach (string sourceNodeId in relevantSources)
{
if (!confirmationsForPeer.TryGetValue(sourceNodeId, out var confirmedTimestamp) ||
confirmedTimestamp == default)
return OplogPruneCutoffDecision.NoCutoff(
retentionCutoff,
$"Active tracked peer '{peerNodeId}' is missing confirmation for source '{sourceNodeId}'.");
}
if (!confirmationCutoff.HasValue || confirmedTimestamp < confirmationCutoff.Value)
{
confirmationCutoff = confirmedTimestamp;
}
}
}
if (!confirmationCutoff.HasValue)
{
return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff,
confirmationCutoff: null,
effectiveCutoff: retentionCutoff,
reason: "No confirmation cutoff could be determined.");
}
null,
retentionCutoff,
"No confirmation cutoff could be determined.");
var effectiveCutoff = retentionCutoff <= confirmationCutoff.Value
? retentionCutoff
@@ -124,7 +109,7 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
private static HlcTimestamp BuildRetentionCutoff(PeerNodeConfiguration configuration)
{
var retentionTimestamp = DateTimeOffset.UtcNow
long retentionTimestamp = DateTimeOffset.UtcNow
.AddHours(-configuration.OplogRetentionHours)
.ToUnixTimeMilliseconds();
@@ -135,18 +120,12 @@ public class OplogPruneCutoffCalculator : IOplogPruneCutoffCalculator
{
var localVectorClock = await _oplogStore.GetVectorClockAsync(cancellationToken);
var sourceNodes = new HashSet<string>(StringComparer.Ordinal);
foreach (var sourceNodeId in localVectorClock.NodeIds)
foreach (string sourceNodeId in localVectorClock.NodeIds)
{
if (string.IsNullOrWhiteSpace(sourceNodeId))
{
continue;
}
if (string.IsNullOrWhiteSpace(sourceNodeId)) continue;
var timestamp = localVectorClock.GetTimestamp(sourceNodeId);
if (timestamp == default)
{
continue;
}
if (timestamp == default) continue;
sourceNodes.Add(sourceNodeId);
}

View File

@@ -37,7 +37,7 @@ public sealed class OplogPruneCutoffDecision
public HlcTimestamp? ConfirmationCutoff { get; }
/// <summary>
/// Gets the effective cutoff to use for pruning when <see cref="HasCutoff"/> is true.
/// Gets the effective cutoff to use for pruning when <see cref="HasCutoff" /> is true.
/// </summary>
public HlcTimestamp? EffectiveCutoff { get; }
@@ -60,11 +60,11 @@ public sealed class OplogPruneCutoffDecision
string reason = "")
{
return new OplogPruneCutoffDecision(
hasCutoff: true,
retentionCutoff: retentionCutoff,
confirmationCutoff: confirmationCutoff,
effectiveCutoff: effectiveCutoff,
reason: reason);
true,
retentionCutoff,
confirmationCutoff,
effectiveCutoff,
reason);
}
/// <summary>
@@ -75,10 +75,10 @@ public sealed class OplogPruneCutoffDecision
public static OplogPruneCutoffDecision NoCutoff(HlcTimestamp retentionCutoff, string reason)
{
return new OplogPruneCutoffDecision(
hasCutoff: false,
retentionCutoff: retentionCutoff,
confirmationCutoff: null,
effectiveCutoff: null,
reason: reason);
false,
retentionCutoff,
null,
null,
reason);
}
}

View File

@@ -1,12 +1,10 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network; // For IMeshNetwork if we implement it
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Hosting;
using System;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
// For IMeshNetwork if we implement it
namespace ZB.MOM.WW.CBDDC.Network;
@@ -17,7 +15,10 @@ public static class CBDDCNetworkExtensions
/// </summary>
/// <typeparam name="TPeerNodeConfigurationProvider">The peer node configuration provider implementation type.</typeparam>
/// <param name="services">The service collection to register services into.</param>
/// <param name="useHostedService">If true, registers CBDDCNodeService as IHostedService to automatically start/stop the node.</param>
/// <param name="useHostedService">
/// If true, registers CBDDCNodeService as IHostedService to automatically start/stop the
/// node.
/// </param>
public static IServiceCollection AddCBDDCNetwork<TPeerNodeConfigurationProvider>(
this IServiceCollection services,
bool useHostedService = true)
@@ -31,11 +32,11 @@ public static class CBDDCNetworkExtensions
services.TryAddSingleton<IDiscoveryService, UdpDiscoveryService>();
services.TryAddSingleton<ZB.MOM.WW.CBDDC.Network.Telemetry.INetworkTelemetryService>(sp =>
services.TryAddSingleton<INetworkTelemetryService>(sp =>
{
var logger = sp.GetRequiredService<ILogger<ZB.MOM.WW.CBDDC.Network.Telemetry.NetworkTelemetryService>>();
var path = System.IO.Path.Combine(System.AppContext.BaseDirectory, "cbddc_metrics.bin");
return new ZB.MOM.WW.CBDDC.Network.Telemetry.NetworkTelemetryService(logger, path);
var logger = sp.GetRequiredService<ILogger<NetworkTelemetryService>>();
string path = Path.Combine(AppContext.BaseDirectory, "cbddc_metrics.bin");
return new NetworkTelemetryService(logger, path);
});
services.TryAddSingleton<ISyncServer, TcpSyncServer>();
@@ -47,10 +48,7 @@ public static class CBDDCNetworkExtensions
services.TryAddSingleton<ICBDDCNode, CBDDCNode>();
// Optionally register hosted service for automatic node lifecycle management
if (useHostedService)
{
services.AddHostedService<CBDDCNodeService>();
}
if (useHostedService) services.AddHostedService<CBDDCNodeService>();
return services;
}

View File

@@ -1,28 +1,24 @@
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using Google.Protobuf;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
namespace ZB.MOM.WW.CBDDC.Network.Protocol
namespace ZB.MOM.WW.CBDDC.Network.Protocol;
/// <summary>
/// Handles the low-level framing, compression, encryption, and serialization of CBDDC messages.
/// Encapsulates the wire format: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)]
/// </summary>
internal class ProtocolHandler
{
/// <summary>
/// Handles the low-level framing, compression, encryption, and serialization of CBDDC messages.
/// Encapsulates the wire format: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)]
/// </summary>
internal class ProtocolHandler
{
private readonly ILogger<ProtocolHandler> _logger;
private readonly SemaphoreSlim _readLock = new(1, 1);
private readonly INetworkTelemetryService? _telemetry;
private readonly SemaphoreSlim _writeLock = new SemaphoreSlim(1, 1);
private readonly SemaphoreSlim _readLock = new SemaphoreSlim(1, 1);
private readonly SemaphoreSlim _writeLock = new(1, 1);
/// <summary>
/// Initializes a new instance of the <see cref="ProtocolHandler"/> class.
/// Initializes a new instance of the <see cref="ProtocolHandler" /> class.
/// </summary>
/// <param name="logger">The logger used for protocol diagnostics.</param>
/// <param name="telemetry">An optional telemetry service used to record network metrics.</param>
@@ -33,7 +29,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
}
/// <summary>
/// Initializes a new instance of the <see cref="ProtocolHandler"/> class using a non-generic logger.
/// Initializes a new instance of the <see cref="ProtocolHandler" /> class using a non-generic logger.
/// </summary>
/// <param name="logger">The logger used for protocol diagnostics.</param>
/// <param name="telemetry">An optional telemetry service used to record network metrics.</param>
@@ -52,7 +48,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
/// <param name="cipherState">Optional cipher state used to encrypt outgoing payloads.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous send operation.</returns>
public async Task SendMessageAsync(Stream stream, MessageType type, IMessage message, bool useCompression, CipherState? cipherState, CancellationToken token = default)
public async Task SendMessageAsync(Stream stream, MessageType type, IMessage message, bool useCompression,
CipherState? cipherState, CancellationToken token = default)
{
if (stream == null) throw new ArgumentNullException(nameof(stream));
@@ -87,7 +84,6 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
// 3. Encrypt
if (cipherState != null)
{
using (_telemetry?.StartMetric(MetricType.EncryptionTime))
{
// Inner data: [Type (1)] [Compression (1)] [Payload (N)]
@@ -96,7 +92,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
dataToEncrypt[1] = compressionFlag;
Buffer.BlockCopy(payloadBytes, 0, dataToEncrypt, 2, payloadBytes.Length);
var (ciphertext, iv, tag) = CryptoHelper.Encrypt(dataToEncrypt, cipherState.EncryptKey);
(byte[] ciphertext, byte[] iv, byte[] tag) =
CryptoHelper.Encrypt(dataToEncrypt, cipherState.EncryptKey);
var env = new SecureEnvelope
{
@@ -109,16 +106,16 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
type = MessageType.SecureEnv;
compressionFlag = 0x00; // Outer envelope is not compressed
}
}
// 4. Thread-Safe Write
await _writeLock.WaitAsync(token);
try
{
_logger.LogDebug("Sending Message {Type}, OrgSize: {Org}, WireSize: {Wire}", type, originalSize, payloadBytes.Length);
_logger.LogDebug("Sending Message {Type}, OrgSize: {Org}, WireSize: {Wire}", type, originalSize,
payloadBytes.Length);
// Framing: [Length (4)] [Type (1)] [Compression (1)] [Payload (N)]
var lengthBytes = BitConverter.GetBytes(payloadBytes.Length);
byte[] lengthBytes = BitConverter.GetBytes(payloadBytes.Length);
await stream.WriteAsync(lengthBytes, 0, 4, token);
stream.WriteByte((byte)type);
stream.WriteByte(compressionFlag);
@@ -138,7 +135,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
/// <param name="cipherState">Optional cipher state used to decrypt incoming payloads.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A tuple containing the decoded message type and payload bytes.</returns>
public async Task<(MessageType, byte[])> ReadMessageAsync(Stream stream, CipherState? cipherState, CancellationToken token = default)
public async Task<(MessageType, byte[])> ReadMessageAsync(Stream stream, CipherState? cipherState,
CancellationToken token = default)
{
await _readLock.WaitAsync(token);
try
@@ -147,7 +145,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
int read = await ReadExactAsync(stream, lenBuf, 0, 4, token);
if (read == 0) return (MessageType.Unknown, null!);
int length = BitConverter.ToInt32(lenBuf, 0);
var length = BitConverter.ToInt32(lenBuf, 0);
int typeByte = stream.ReadByte();
if (typeByte == -1) throw new EndOfStreamException("Connection closed abruptly (type byte)");
@@ -163,7 +161,8 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
// Handle Secure Envelope
if (msgType == MessageType.SecureEnv)
{
if (cipherState == null) throw new InvalidOperationException("Received encrypted message but no cipher state established");
if (cipherState == null)
throw new InvalidOperationException("Received encrypted message but no cipher state established");
byte[] decrypted;
using (_telemetry?.StartMetric(MetricType.DecryptionTime))
@@ -184,19 +183,13 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
var innerPayload = new byte[decrypted.Length - 2];
Buffer.BlockCopy(decrypted, 2, innerPayload, 0, innerPayload.Length);
if (innerComp == 0x01)
{
innerPayload = CompressionHelper.Decompress(innerPayload);
}
if (innerComp == 0x01) innerPayload = CompressionHelper.Decompress(innerPayload);
return (msgType, innerPayload);
}
// Handle Unencrypted Compression
if (compByte == 0x01)
{
payload = CompressionHelper.Decompress(payload);
}
if (compByte == 0x01) payload = CompressionHelper.Decompress(payload);
_logger.LogDebug("Read Message {Type}, Size: {Size}", msgType, payload.Length);
return (msgType, payload);
@@ -209,13 +202,14 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
private async Task<int> ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token)
{
int total = 0;
var total = 0;
while (total < count)
{
int read = await stream.ReadAsync(buffer, offset + total, count - total, token);
if (read == 0) return 0; // EOF
total += read;
}
return total;
}
@@ -224,7 +218,7 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
private readonly ILogger _inner;
/// <summary>
/// Initializes a new instance of the <see cref="ForwardingLogger"/> class.
/// Initializes a new instance of the <see cref="ForwardingLogger" /> class.
/// </summary>
/// <param name="inner">The underlying logger instance.</param>
public ForwardingLogger(ILogger inner)
@@ -255,5 +249,4 @@ namespace ZB.MOM.WW.CBDDC.Network.Protocol
_inner.Log(logLevel, eventId, state, exception, formatter);
}
}
}
}

View File

@@ -48,12 +48,15 @@ node.Start();
## Features
### Automatic Discovery
Nodes broadcast their presence via UDP and automatically connect to peers on the same network.
### Secure Synchronization
All nodes must share the same authentication token to sync data.
### Scalable Gossip
Updates propagate exponentially - each node tells multiple peers, ensuring fast network-wide propagation.
## Documentation

View File

@@ -1,7 +1,6 @@
using ZB.MOM.WW.CBDDC.Core.Network;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Network.Security;
@@ -14,7 +13,7 @@ public class ClusterKeyAuthenticator : IAuthenticator
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
/// <summary>
/// Initializes a new instance of the <see cref="ClusterKeyAuthenticator"/> class.
/// Initializes a new instance of the <see cref="ClusterKeyAuthenticator" /> class.
/// </summary>
/// <param name="peerNodeConfigurationProvider">The provider for peer node configuration.</param>
public ClusterKeyAuthenticator(IPeerNodeConfigurationProvider peerNodeConfigurationProvider)
@@ -26,8 +25,8 @@ public class ClusterKeyAuthenticator : IAuthenticator
public async Task<bool> ValidateAsync(string nodeId, string token)
{
var config = await _peerNodeConfigurationProvider.GetConfiguration();
var configuredHash = SHA256.HashData(Encoding.UTF8.GetBytes(config.AuthToken ?? string.Empty));
var presentedHash = SHA256.HashData(Encoding.UTF8.GetBytes(token ?? string.Empty));
byte[] configuredHash = SHA256.HashData(Encoding.UTF8.GetBytes(config.AuthToken ?? string.Empty));
byte[] presentedHash = SHA256.HashData(Encoding.UTF8.GetBytes(token ?? string.Empty));
return CryptographicOperations.FixedTimeEquals(configuredHash, presentedHash);
}
}

View File

@@ -1,5 +1,3 @@
using System;
using System.IO;
using System.Security.Cryptography;
namespace ZB.MOM.WW.CBDDC.Network.Security;
@@ -21,10 +19,10 @@ public static class CryptoHelper
using var aes = Aes.Create();
aes.Key = key;
aes.GenerateIV();
var iv = aes.IV;
byte[] iv = aes.IV;
using var encryptor = aes.CreateEncryptor();
var ciphertext = encryptor.TransformFinalBlock(plaintext, 0, plaintext.Length);
byte[] ciphertext = encryptor.TransformFinalBlock(plaintext, 0, plaintext.Length);
// Compute HMAC
using var hmac = new HMACSHA256(key);
@@ -32,7 +30,7 @@ public static class CryptoHelper
var toSign = new byte[iv.Length + ciphertext.Length];
Buffer.BlockCopy(iv, 0, toSign, 0, iv.Length);
Buffer.BlockCopy(ciphertext, 0, toSign, iv.Length, ciphertext.Length);
var tag = hmac.ComputeHash(toSign);
byte[] tag = hmac.ComputeHash(toSign);
return (ciphertext, iv, tag);
}
@@ -52,12 +50,10 @@ public static class CryptoHelper
var toVerify = new byte[iv.Length + ciphertext.Length];
Buffer.BlockCopy(iv, 0, toVerify, 0, iv.Length);
Buffer.BlockCopy(ciphertext, 0, toVerify, iv.Length, ciphertext.Length);
var computedTag = hmac.ComputeHash(toVerify);
byte[] computedTag = hmac.ComputeHash(toVerify);
if (!FixedTimeEquals(tag, computedTag))
{
throw new CryptographicException("Authentication failed (HMAC mismatch)");
}
using var aes = Aes.Create();
aes.Key = key;

View File

@@ -1,5 +1,3 @@
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Security;
public interface IAuthenticator
@@ -9,6 +7,6 @@ public interface IAuthenticator
/// </summary>
/// <param name="nodeId">The node identifier to validate.</param>
/// <param name="token">The authentication token to validate.</param>
/// <returns><see langword="true"/> if the token is valid for the node; otherwise <see langword="false"/>.</returns>
/// <returns><see langword="true" /> if the token is valid for the node; otherwise <see langword="false" />.</returns>
Task<bool> ValidateAsync(string nodeId, string token);
}

View File

@@ -1,6 +1,3 @@
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Security;
public interface IPeerHandshakeService
@@ -13,24 +10,16 @@ public interface IPeerHandshakeService
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A CipherState if encryption is established, or null if plaintext.</returns>
Task<CipherState?> HandshakeAsync(System.IO.Stream stream, bool isInitiator, string myNodeId, CancellationToken token);
Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token);
}
public class CipherState
{
/// <summary>
/// Gets the key used to encrypt outgoing messages.
/// </summary>
public byte[] EncryptKey { get; }
/// <summary>
/// Gets the key used to decrypt incoming messages.
/// </summary>
public byte[] DecryptKey { get; }
// For simplicity using IV chaining or explicit IVs.
// We'll store just the keys here and let the encryption helper handle IVs.
/// <summary>
/// Initializes a new instance of the <see cref="CipherState"/> class.
/// Initializes a new instance of the <see cref="CipherState" /> class.
/// </summary>
/// <param name="encryptKey">The key used for encrypting outgoing payloads.</param>
/// <param name="decryptKey">The key used for decrypting incoming payloads.</param>
@@ -39,4 +28,14 @@ public class CipherState
EncryptKey = encryptKey;
DecryptKey = decryptKey;
}
/// <summary>
/// Gets the key used to encrypt outgoing messages.
/// </summary>
public byte[] EncryptKey { get; }
/// <summary>
/// Gets the key used to decrypt incoming messages.
/// </summary>
public byte[] DecryptKey { get; }
}

View File

@@ -1,15 +1,13 @@
using System.IO;
using System.Threading;
using System.Threading.Tasks;
namespace ZB.MOM.WW.CBDDC.Network.Security;
/// <summary>
/// Provides a no-operation implementation of the peer handshake service that performs no handshake and always returns
/// null.
/// </summary>
/// <remarks>This class can be used in scenarios where a handshake is not required or for testing purposes. All
/// handshake attempts using this service will result in no cipher state being established.</remarks>
/// <remarks>
/// This class can be used in scenarios where a handshake is not required or for testing purposes. All
/// handshake attempts using this service will result in no cipher state being established.
/// </remarks>
public class NoOpHandshakeService : IPeerHandshakeService
{
/// <summary>
@@ -17,11 +15,16 @@ public class NoOpHandshakeService : IPeerHandshakeService
/// asynchronously.
/// </summary>
/// <param name="stream">The stream used for exchanging handshake messages between nodes. Must be readable and writable.</param>
/// <param name="isInitiator">true to initiate the handshake as the local node; otherwise, false to respond as the remote node.</param>
/// <param name="isInitiator">
/// true to initiate the handshake as the local node; otherwise, false to respond as the remote
/// node.
/// </param>
/// <param name="myNodeId">The unique identifier of the local node participating in the handshake. Cannot be null.</param>
/// <param name="token">A cancellation token that can be used to cancel the handshake operation.</param>
/// <returns>A task that represents the asynchronous handshake operation. The task result contains a CipherState if the
/// handshake succeeds; otherwise, null.</returns>
/// <returns>
/// A task that represents the asynchronous handshake operation. The task result contains a CipherState if the
/// handshake succeeds; otherwise, null.
/// </returns>
public Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token)
{
return Task.FromResult<CipherState?>(null);

View File

@@ -1,8 +1,4 @@
using System;
using System.IO;
using System.Security.Cryptography;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
namespace ZB.MOM.WW.CBDDC.Network.Security;
@@ -12,7 +8,7 @@ public class SecureHandshakeService : IPeerHandshakeService
private readonly ILogger<SecureHandshakeService>? _logger;
/// <summary>
/// Initializes a new instance of the <see cref="SecureHandshakeService"/> class.
/// Initializes a new instance of the <see cref="SecureHandshakeService" /> class.
/// </summary>
/// <param name="logger">The optional logger instance.</param>
public SecureHandshakeService(ILogger<SecureHandshakeService>? logger = null)
@@ -33,17 +29,18 @@ public class SecureHandshakeService : IPeerHandshakeService
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="token">A token used to cancel the handshake.</param>
/// <returns>
/// A task that returns the negotiated <see cref="CipherState"/>, or <see langword="null"/> if unavailable.
/// A task that returns the negotiated <see cref="CipherState" />, or <see langword="null" /> if unavailable.
/// </returns>
public async Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId, CancellationToken token)
public async Task<CipherState?> HandshakeAsync(Stream stream, bool isInitiator, string myNodeId,
CancellationToken token)
{
#if NET6_0_OR_GREATER
using var ecdh = ECDiffieHellman.Create();
ecdh.KeySize = 256;
// 1. ExportAsync & Send Public Key
var myPublicKey = ecdh.ExportSubjectPublicKeyInfo();
var lenBytes = BitConverter.GetBytes(myPublicKey.Length);
byte[] myPublicKey = ecdh.ExportSubjectPublicKeyInfo();
byte[] lenBytes = BitConverter.GetBytes(myPublicKey.Length);
await stream.WriteAsync(lenBytes, 0, 4, token);
await stream.WriteAsync(myPublicKey, 0, myPublicKey.Length, token);
await stream.FlushAsync(token); // CRITICAL: Ensure data is sent immediately
@@ -51,13 +48,10 @@ public class SecureHandshakeService : IPeerHandshakeService
// 2. Receive Peer Public Key
var peerLenBuf = new byte[4];
await ReadExactAsync(stream, peerLenBuf, 0, 4, token);
int peerLen = BitConverter.ToInt32(peerLenBuf, 0);
var peerLen = BitConverter.ToInt32(peerLenBuf, 0);
// Validate peer key length to prevent DoS
if (peerLen <= 0 || peerLen > 10000)
{
throw new InvalidOperationException($"Invalid peer key length: {peerLen}");
}
if (peerLen <= 0 || peerLen > 10000) throw new InvalidOperationException($"Invalid peer key length: {peerLen}");
var peerKeyBytes = new byte[peerLen];
await ReadExactAsync(stream, peerKeyBytes, 0, peerLen, token);
@@ -77,18 +71,18 @@ public class SecureHandshakeService : IPeerHandshakeService
var k1Input = new byte[sharedSecret.Length + 1];
Buffer.BlockCopy(sharedSecret, 0, k1Input, 0, sharedSecret.Length);
k1Input[sharedSecret.Length] = 0; // "0"
var key1 = sha.ComputeHash(k1Input);
byte[] key1 = sha.ComputeHash(k1Input);
var k2Input = new byte[sharedSecret.Length + 1];
Buffer.BlockCopy(sharedSecret, 0, k2Input, 0, sharedSecret.Length);
k2Input[sharedSecret.Length] = 1; // "1"
var key2 = sha.ComputeHash(k2Input);
byte[] key2 = sha.ComputeHash(k2Input);
// If initiator: Encrypt with Key1, Decrypt with Key2
// If responder: Encrypt with Key2, Decrypt with Key1
var encryptKey = isInitiator ? key1 : key2;
var decryptKey = isInitiator ? key2 : key1;
byte[] encryptKey = isInitiator ? key1 : key2;
byte[] decryptKey = isInitiator ? key2 : key1;
return new CipherState(encryptKey, decryptKey);
#else
@@ -100,13 +94,14 @@ public class SecureHandshakeService : IPeerHandshakeService
private async Task<int> ReadExactAsync(Stream stream, byte[] buffer, int offset, int count, CancellationToken token)
{
int total = 0;
var total = 0;
while (total < count)
{
int read = await stream.ReadAsync(buffer, offset + total, count - total, token);
if (read == 0) throw new EndOfStreamException();
total += read;
}
return total;
}
}

View File

@@ -1,18 +1,12 @@
using System.Collections.Concurrent;
using System.Net.Sockets;
using Microsoft.Extensions.Logging;
using Serilog.Context;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using Serilog.Context;
namespace ZB.MOM.WW.CBDDC.Network;
@@ -22,43 +16,30 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary>
public class SyncOrchestrator : ISyncOrchestrator
{
private readonly IDiscoveryService _discovery;
private readonly IOplogStore _oplogStore;
private readonly IOplogPruneCutoffCalculator? _oplogPruneCutoffCalculator;
private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore;
private readonly IDocumentStore _documentStore;
private readonly ISnapshotMetadataStore _snapshotMetadataStore;
private readonly ISnapshotService _snapshotService;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly ILogger<SyncOrchestrator> _logger;
private readonly ILoggerFactory _loggerFactory;
private CancellationTokenSource? _cts;
private readonly Random _random = new Random();
private readonly object _startStopLock = new object();
// Persistent clients pool
private readonly ConcurrentDictionary<string, TcpPeerClient> _clients = new();
private readonly ConcurrentDictionary<string, PeerStatus> _peerStates = new();
private readonly IDiscoveryService _discovery;
private readonly IDocumentStore _documentStore;
private readonly IPeerHandshakeService? _handshakeService;
private readonly ILogger<SyncOrchestrator> _logger;
private readonly ILoggerFactory _loggerFactory;
private readonly IOplogPruneCutoffCalculator? _oplogPruneCutoffCalculator;
private readonly IOplogStore _oplogStore;
private readonly IPeerNodeConfigurationProvider _peerNodeConfigurationProvider;
private readonly IPeerOplogConfirmationStore? _peerOplogConfirmationStore;
private readonly ConcurrentDictionary<string, PeerStatus> _peerStates = new();
private readonly Random _random = new();
private readonly ISnapshotMetadataStore _snapshotMetadataStore;
private readonly ISnapshotService _snapshotService;
private readonly object _startStopLock = new();
private readonly INetworkTelemetryService? _telemetry;
private class PeerStatus
{
/// <summary>
/// Gets or sets the number of consecutive failures for the peer.
/// </summary>
public int FailureCount { get; set; }
/// <summary>
/// Gets or sets the next time a retry attempt is allowed.
/// </summary>
public DateTime NextRetryTime { get; set; }
}
private CancellationTokenSource? _cts;
private DateTime _lastMaintenanceTime = DateTime.MinValue;
/// <summary>
/// Initializes a new instance of the <see cref="SyncOrchestrator"/> class.
/// Initializes a new instance of the <see cref="SyncOrchestrator" /> class.
/// </summary>
/// <param name="discovery">The discovery service.</param>
/// <param name="oplogStore">The oplog store.</param>
@@ -111,6 +92,7 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogWarning("Sync Orchestrator already started");
return;
}
_cts = new CancellationTokenSource();
}
@@ -165,7 +147,6 @@ public class SyncOrchestrator : ISyncOrchestrator
// Cleanup clients
foreach (var client in _clients.Values)
{
try
{
client.Dispose();
@@ -174,7 +155,7 @@ public class SyncOrchestrator : ISyncOrchestrator
{
_logger.LogWarning(ex, "Error disposing client during shutdown");
}
}
_clients.Clear();
await Task.CompletedTask;
@@ -207,10 +188,7 @@ public class SyncOrchestrator : ISyncOrchestrator
var now = DateTime.UtcNow;
var eligiblePeers = allPeers.Where(p =>
{
if (_peerStates.TryGetValue(p.NodeId, out var status))
{
return status.NextRetryTime <= now;
}
if (_peerStates.TryGetValue(p.NodeId, out var status)) return status.NextRetryTime <= now;
return true;
}).ToList();
@@ -259,10 +237,7 @@ public class SyncOrchestrator : ISyncOrchestrator
internal async Task RunMaintenanceIfDueAsync(PeerNodeConfiguration config, DateTime now, CancellationToken token)
{
var maintenanceInterval = TimeSpan.FromMinutes(config.MaintenanceIntervalMinutes);
if ((now - _lastMaintenanceTime) < maintenanceInterval)
{
return;
}
if (now - _lastMaintenanceTime < maintenanceInterval) return;
_logger.LogInformation("Running periodic maintenance (Oplog pruning)...");
try
@@ -271,7 +246,7 @@ public class SyncOrchestrator : ISyncOrchestrator
if (!cutoffDecision.HasCutoff || !cutoffDecision.EffectiveCutoff.HasValue)
{
_lastMaintenanceTime = now;
var reason = string.IsNullOrWhiteSpace(cutoffDecision.Reason)
string reason = string.IsNullOrWhiteSpace(cutoffDecision.Reason)
? "No effective cutoff was produced."
: cutoffDecision.Reason;
_logger.LogInformation("Skipping oplog prune for this maintenance cycle: {Reason}", reason);
@@ -282,28 +257,25 @@ public class SyncOrchestrator : ISyncOrchestrator
_lastMaintenanceTime = now;
if (cutoffDecision.ConfirmationCutoff.HasValue)
{
_logger.LogInformation(
"Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}, ConfirmationCutoff: {ConfirmationCutoff}).",
config.OplogRetentionHours,
cutoffDecision.EffectiveCutoff.Value,
cutoffDecision.ConfirmationCutoff.Value);
}
else
{
_logger.LogInformation(
"Maintenance completed successfully (Retention: {RetentionHours}h, EffectiveCutoff: {EffectiveCutoff}).",
config.OplogRetentionHours,
cutoffDecision.EffectiveCutoff.Value);
}
}
catch (Exception maintenanceEx)
{
_logger.LogError(maintenanceEx, "Maintenance failed.");
}
}
private async Task<OplogPruneCutoffDecision> CalculatePruneCutoffAsync(PeerNodeConfiguration config, CancellationToken token)
private async Task<OplogPruneCutoffDecision> CalculatePruneCutoffAsync(PeerNodeConfiguration config,
CancellationToken token)
{
if (_oplogPruneCutoffCalculator == null)
{
@@ -314,9 +286,9 @@ public class SyncOrchestrator : ISyncOrchestrator
return OplogPruneCutoffDecision.WithCutoff(
retentionCutoff,
confirmationCutoff: null,
effectiveCutoff: retentionCutoff,
reason: "Oplog prune cutoff calculator is not configured.");
null,
retentionCutoff,
"Oplog prune cutoff calculator is not configured.");
}
return await _oplogPruneCutoffCalculator.CalculateEffectiveCutoffAsync(config, token);
@@ -334,8 +306,8 @@ public class SyncOrchestrator : ISyncOrchestrator
using var peerAddressContext = LogContext.PushProperty("PeerAddress", peer.Address);
TcpPeerClient? client = null;
bool shouldRemoveClient = false;
bool syncSuccessful = false;
var shouldRemoveClient = false;
var syncSuccessful = false;
try
{
@@ -349,13 +321,11 @@ public class SyncOrchestrator : ISyncOrchestrator
_telemetry));
// Reconnect if disconnected
if (!client.IsConnected)
{
await client.ConnectAsync(token);
}
if (!client.IsConnected) await client.ConnectAsync(token);
// Handshake (idempotent)
if (!await client.HandshakeAsync(config.NodeId, config.AuthToken, _documentStore.InterestedCollection, token))
if (!await client.HandshakeAsync(config.NodeId, config.AuthToken, _documentStore.InterestedCollection,
token))
{
_logger.LogWarning("Handshake rejected by {NodeId}", peer.NodeId);
shouldRemoveClient = true;
@@ -381,7 +351,7 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogInformation("Pulling changes from {PeerNodeId} for {Count} nodes: {Nodes}",
peer.NodeId, nodesToPull.Count, string.Join(", ", nodesToPull));
foreach (var nodeId in nodesToPull)
foreach (string nodeId in nodesToPull)
{
var localTs = localVectorClock.GetTimestamp(nodeId);
var remoteTs = remoteVectorClock.GetTimestamp(nodeId);
@@ -390,13 +360,16 @@ public class SyncOrchestrator : ISyncOrchestrator
nodeId, localTs, remoteTs);
// PASS LOCAL INTERESTS TO PULL
var changes = await client.PullChangesFromNodeAsync(nodeId, localTs, _documentStore.InterestedCollection, token);
var changes = await client.PullChangesFromNodeAsync(nodeId, localTs,
_documentStore.InterestedCollection, token);
if (changes != null && changes.Count > 0)
{
var result = await ProcessInboundBatchAsync(client, peer.NodeId, changes, token);
if (result != SyncBatchResult.Success)
{
_logger.LogWarning("Inbound batch processing failed with status {Status}. Aborting sync for this session.", result);
_logger.LogWarning(
"Inbound batch processing failed with status {Status}. Aborting sync for this session.",
result);
RecordFailure(peer.NodeId);
return;
}
@@ -410,13 +383,15 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogInformation("Pushing changes to {PeerNodeId} for {Count} nodes: {Nodes}",
peer.NodeId, nodesToPush.Count, string.Join(", ", nodesToPush));
foreach (var nodeId in nodesToPush)
foreach (string nodeId in nodesToPush)
{
var remoteTs = remoteVectorClock.GetTimestamp(nodeId);
// PUSH FILTERING: Pass remote receiver's interests to oplogStore for efficient retrieval
var remoteInterests = client.RemoteInterests;
var changes = (await _oplogStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, remoteInterests, token)).ToList();
var changes =
(await _oplogStore.GetOplogForNodeAfterAsync(nodeId, remoteTs, remoteInterests, token))
.ToList();
if (changes.Any())
{
@@ -429,13 +404,10 @@ public class SyncOrchestrator : ISyncOrchestrator
// 5. Handle Concurrent/Equal cases
if (causality == CausalityRelation.Equal)
{
_logger.LogDebug("Vector clocks are equal with {PeerNodeId}. No sync needed.", peer.NodeId);
}
else if (causality == CausalityRelation.Concurrent && !nodesToPull.Any() && !nodesToPush.Any())
{
_logger.LogDebug("Vector clocks are concurrent with {PeerNodeId}, but no divergence detected.", peer.NodeId);
}
_logger.LogDebug("Vector clocks are concurrent with {PeerNodeId}, but no divergence detected.",
peer.NodeId);
syncSuccessful = true;
RecordSuccess(peer.NodeId);
@@ -466,7 +438,9 @@ public class SyncOrchestrator : ISyncOrchestrator
}
catch (CorruptDatabaseException cex)
{
_logger.LogCritical(cex, "Local database corruption detected during sync with {NodeId}. Initiating EMERGENCY SNAPSHOT RECOVERY.", peer.NodeId);
_logger.LogCritical(cex,
"Local database corruption detected during sync with {NodeId}. Initiating EMERGENCY SNAPSHOT RECOVERY.",
peer.NodeId);
if (client != null && client.IsConnected)
{
try
@@ -498,7 +472,8 @@ public class SyncOrchestrator : ISyncOrchestrator
}
catch (SocketException sex)
{
_logger.LogWarning("Network error syncing with {NodeId}: {Message}. Will retry later.", peer.NodeId, sex.Message);
_logger.LogWarning("Network error syncing with {NodeId}: {Message}. Will retry later.", peer.NodeId,
sex.Message);
shouldRemoveClient = true;
RecordFailure(peer.NodeId);
}
@@ -511,18 +486,18 @@ public class SyncOrchestrator : ISyncOrchestrator
finally
{
if (shouldRemoveClient && client != null)
{
if (_clients.TryRemove(peer.NodeId, out var removedClient))
try
{
try { removedClient.Dispose(); } catch { /* Ignore disposal errors */ }
removedClient.Dispose();
}
catch
{
/* Ignore disposal errors */
}
// Log successful sync outcome (failures are already logged in catch blocks)
if (syncSuccessful)
{
_logger.LogInformation("Sync with {NodeId} completed successfully.", peer.NodeId);
}
if (syncSuccessful) _logger.LogInformation("Sync with {NodeId} completed successfully.", peer.NodeId);
}
}
@@ -530,7 +505,12 @@ public class SyncOrchestrator : ISyncOrchestrator
{
_peerStates.AddOrUpdate(nodeId,
new PeerStatus { FailureCount = 0, NextRetryTime = DateTime.MinValue },
(k, v) => { v.FailureCount = 0; v.NextRetryTime = DateTime.MinValue; return v; });
(k, v) =>
{
v.FailureCount = 0;
v.NextRetryTime = DateTime.MinValue;
return v;
});
}
/// <summary>
@@ -560,23 +540,19 @@ public class SyncOrchestrator : ISyncOrchestrator
/// <param name="localNodeId">The local node identifier used to skip self-registration.</param>
/// <param name="token">The cancellation token.</param>
/// <returns>A task that represents the asynchronous registration operation.</returns>
internal async Task EnsurePeersRegisteredAsync(IEnumerable<PeerNode> peers, string localNodeId, CancellationToken token)
internal async Task EnsurePeersRegisteredAsync(IEnumerable<PeerNode> peers, string localNodeId,
CancellationToken token)
{
if (_peerOplogConfirmationStore == null)
{
return;
}
if (_peerOplogConfirmationStore == null) return;
foreach (var peer in peers)
{
if (string.Equals(peer.NodeId, localNodeId, StringComparison.Ordinal))
{
continue;
}
if (string.Equals(peer.NodeId, localNodeId, StringComparison.Ordinal)) continue;
try
{
await _peerOplogConfirmationStore.EnsurePeerRegisteredAsync(peer.NodeId, peer.Address, peer.Type, token);
await _peerOplogConfirmationStore.EnsurePeerRegisteredAsync(peer.NodeId, peer.Address, peer.Type,
token);
}
catch (OperationCanceledException) when (token.IsCancellationRequested)
{
@@ -603,30 +579,18 @@ public class SyncOrchestrator : ISyncOrchestrator
VectorClock remoteVectorClock,
CancellationToken token)
{
if (_peerOplogConfirmationStore == null)
{
return;
}
if (_peerOplogConfirmationStore == null) return;
var nodeIds = new HashSet<string>(localVectorClock.NodeIds, StringComparer.Ordinal);
foreach (var nodeId in remoteVectorClock.NodeIds)
{
nodeIds.Add(nodeId);
}
foreach (string nodeId in remoteVectorClock.NodeIds) nodeIds.Add(nodeId);
foreach (var sourceNodeId in nodeIds)
foreach (string sourceNodeId in nodeIds)
{
var localTimestamp = localVectorClock.GetTimestamp(sourceNodeId);
if (localTimestamp == default)
{
continue;
}
if (localTimestamp == default) continue;
var remoteTimestamp = remoteVectorClock.GetTimestamp(sourceNodeId);
if (remoteTimestamp < localTimestamp)
{
continue;
}
if (remoteTimestamp < localTimestamp) continue;
await UpdatePeerConfirmationAsync(peerNodeId, sourceNodeId, localTimestamp, token);
}
@@ -646,10 +610,7 @@ public class SyncOrchestrator : ISyncOrchestrator
IReadOnlyCollection<OplogEntry> pushedChanges,
CancellationToken token)
{
if (_peerOplogConfirmationStore == null || pushedChanges.Count == 0)
{
return;
}
if (_peerOplogConfirmationStore == null || pushedChanges.Count == 0) return;
var maxPushed = pushedChanges
.OrderBy(entry => entry.Timestamp)
@@ -682,15 +643,12 @@ public class SyncOrchestrator : ISyncOrchestrator
HlcTimestamp timestamp,
CancellationToken token)
{
if (_peerOplogConfirmationStore == null)
{
return;
}
if (_peerOplogConfirmationStore == null) return;
try
{
// Best-effort hash lookup: IOplogStore exposes latest hash per source node.
var hash = await _oplogStore.GetLastEntryHashAsync(sourceNodeId, token) ?? string.Empty;
string hash = await _oplogStore.GetLastEntryHashAsync(sourceNodeId, token) ?? string.Empty;
await _peerOplogConfirmationStore.UpdateConfirmationAsync(peerNodeId, sourceNodeId, timestamp, hash, token);
}
catch (OperationCanceledException) when (token.IsCancellationRequested)
@@ -713,7 +671,7 @@ public class SyncOrchestrator : ISyncOrchestrator
{
v.FailureCount++;
// Exponential backoff: 1s, 2s, 4s... max 60s
var delaySeconds = Math.Min(Math.Pow(2, v.FailureCount), 60);
double delaySeconds = Math.Min(Math.Pow(2, v.FailureCount), 60);
v.NextRetryTime = DateTime.UtcNow.AddSeconds(delaySeconds);
return v;
});
@@ -723,66 +681,51 @@ public class SyncOrchestrator : ISyncOrchestrator
/// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore.
/// Extracted to enforce Single Responsibility Principle.
/// </summary>
private enum SyncBatchResult
{
Success,
GapDetected,
IntegrityError,
ChainBroken
}
/// <summary>
/// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore.
/// Extracted to enforce Single Responsibility Principle.
/// </summary>
private async Task<SyncBatchResult> ProcessInboundBatchAsync(TcpPeerClient client, string peerNodeId, IList<OplogEntry> changes, CancellationToken token)
private async Task<SyncBatchResult> ProcessInboundBatchAsync(TcpPeerClient client, string peerNodeId,
IList<OplogEntry> changes, CancellationToken token)
{
_logger.LogInformation("Received {Count} changes from {NodeId}", changes.Count, peerNodeId);
// 1. Validate internal integrity of the batch (Hash check)
foreach (var entry in changes)
{
if (!entry.IsValid())
{
// CHANGED: Log Critical Error but ACCEPT the entry to allow sync to progress (Soft Validation).
// Throwing here would cause an unrecoverable state where this batch blocks sync forever.
_logger.LogError("Integrity Check Failed for Entry {Hash} (Node: {NodeId}). Expected: {computedHash}. ACCEPTING payload despite mismatch to maintain availability.",
_logger.LogError(
"Integrity Check Failed for Entry {Hash} (Node: {NodeId}). Expected: {computedHash}. ACCEPTING payload despite mismatch to maintain availability.",
entry.Hash, entry.Timestamp.NodeId, entry.ComputeHash());
}
}
// 2. Group changes by Author Node to validate Source Chains independently
var changesByNode = changes.GroupBy(c => c.Timestamp.NodeId);
foreach (var group in changesByNode)
{
var authorNodeId = group.Key;
string authorNodeId = group.Key;
// FIX: Order by the full Timestamp (Physical + Logical), not just LogicalCounter.
// LogicalCounter resets when PhysicalTime advances, so sorting by Counter alone breaks chronological order.
var authorChain = group.OrderBy(c => c.Timestamp).ToList();
// Check linkage within the batch
for (int i = 1; i < authorChain.Count; i++)
{
for (var i = 1; i < authorChain.Count; i++)
if (authorChain[i].PreviousHash != authorChain[i - 1].Hash)
{
_logger.LogError("Chain Broken in Batch for Node {AuthorId}", authorNodeId);
return SyncBatchResult.ChainBroken;
}
}
// Check linkage with Local State
var firstEntry = authorChain[0];
var localHeadHash = await _oplogStore.GetLastEntryHashAsync(authorNodeId, token);
string? localHeadHash = await _oplogStore.GetLastEntryHashAsync(authorNodeId, token);
_logger.LogDebug("Processing chain for Node {AuthorId}: FirstEntry.PrevHash={PrevHash}, FirstEntry.Hash={Hash}, LocalHeadHash={LocalHead}",
_logger.LogDebug(
"Processing chain for Node {AuthorId}: FirstEntry.PrevHash={PrevHash}, FirstEntry.Hash={Hash}, LocalHeadHash={LocalHead}",
authorNodeId, firstEntry.PreviousHash, firstEntry.Hash, localHeadHash ?? "(null)");
if (localHeadHash != null && firstEntry.PreviousHash != localHeadHash)
{
// Check if entry starts from snapshot boundary (valid case after pruning)
var snapshotHash = await _snapshotMetadataStore.GetSnapshotHashAsync(authorNodeId, token);
string? snapshotHash = await _snapshotMetadataStore.GetSnapshotHashAsync(authorNodeId, token);
if (snapshotHash != null && firstEntry.PreviousHash == snapshotHash)
{
@@ -797,7 +740,8 @@ public class SyncOrchestrator : ISyncOrchestrator
else
{
// GAP DETECTED (not a snapshot boundary case)
_logger.LogWarning("Gap Detected for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Initiating Recovery.",
_logger.LogWarning(
"Gap Detected for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Initiating Recovery.",
authorNodeId, localHeadHash, firstEntry.PreviousHash);
// Gap Recovery (Range Sync)
@@ -821,10 +765,11 @@ public class SyncOrchestrator : ISyncOrchestrator
_logger.LogInformation("Gap Recovery: Retrieved {Count} missing entries.", missingChain.Count);
// Validate Recovery Chain Linkage
bool linkValid = true;
var linkValid = true;
if (missingChain[0].PreviousHash != localHeadHash) linkValid = false;
for (int i = 1; i < missingChain.Count; i++)
if (missingChain[i].PreviousHash != missingChain[i - 1].Hash) linkValid = false;
for (var i = 1; i < missingChain.Count; i++)
if (missingChain[i].PreviousHash != missingChain[i - 1].Hash)
linkValid = false;
if (missingChain.Last().Hash != firstEntry.PreviousHash) linkValid = false;
if (!linkValid)
@@ -846,7 +791,8 @@ public class SyncOrchestrator : ISyncOrchestrator
// DECISION: Accept the entries anyway but log a warning
// This allows forward progress even with partial history
_logger.LogWarning("Could not recover gap for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Accepting entries anyway (partial sync).",
_logger.LogWarning(
"Could not recover gap for Node {AuthorId}. Local Head: {Local}, Remote Prev: {Prev}. Accepting entries anyway (partial sync).",
authorNodeId, localHeadHash, firstEntry.PreviousHash);
// Optionally: Mark this as a partial sync in metadata
@@ -857,7 +803,8 @@ public class SyncOrchestrator : ISyncOrchestrator
else if (localHeadHash == null && !string.IsNullOrEmpty(firstEntry.PreviousHash))
{
// Implicit Accept / Partial Sync warning
_logger.LogWarning("First contact with Node {AuthorId} at explicit state (Not Genesis). Accepting.", authorNodeId);
_logger.LogWarning("First contact with Node {AuthorId} at explicit state (Not Genesis). Accepting.",
authorNodeId);
}
// Apply original batch (grouped by node for clarity, but oplogStore usually handles bulk)
@@ -871,7 +818,7 @@ public class SyncOrchestrator : ISyncOrchestrator
{
_logger.LogInformation(mergeOnly ? "Starting Snapshot Merge..." : "Starting Full Database Replacement...");
var tempFile = Path.GetTempFileName();
string tempFile = Path.GetTempFileName();
try
{
_logger.LogInformation("Downloading snapshot to {TempFile}...", tempFile);
@@ -885,14 +832,10 @@ public class SyncOrchestrator : ISyncOrchestrator
using (var fs = File.OpenRead(tempFile))
{
if (mergeOnly)
{
await _snapshotService.MergeSnapshotAsync(fs, token);
}
else
{
await _snapshotService.ReplaceDatabaseAsync(fs, token);
}
}
_logger.LogInformation("Snapshot applied successfully.");
}
@@ -904,7 +847,6 @@ public class SyncOrchestrator : ISyncOrchestrator
finally
{
if (File.Exists(tempFile))
{
try
{
File.Delete(tempFile);
@@ -915,5 +857,29 @@ public class SyncOrchestrator : ISyncOrchestrator
}
}
}
private class PeerStatus
{
/// <summary>
/// Gets or sets the number of consecutive failures for the peer.
/// </summary>
public int FailureCount { get; set; }
/// <summary>
/// Gets or sets the next time a retry attempt is allowed.
/// </summary>
public DateTime NextRetryTime { get; set; }
}
/// <summary>
/// Validates an inbound batch of changes, checks for gaps, performs recovery if needed, and applies to oplogStore.
/// Extracted to enforce Single Responsibility Principle.
/// </summary>
private enum SyncBatchResult
{
Success,
GapDetected,
IntegrityError,
ChainBroken
}
}

View File

@@ -1,16 +1,10 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using Google.Protobuf;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
namespace ZB.MOM.WW.CBDDC.Network;
@@ -20,20 +14,42 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary>
public class TcpPeerClient : IDisposable
{
private readonly TcpClient _client;
private readonly string _peerAddress;
private readonly ILogger<TcpPeerClient> _logger;
private readonly IPeerHandshakeService? _handshakeService;
private NetworkStream? _stream;
private CipherState? _cipherState;
private readonly object _connectionLock = new object();
private bool _disposed = false;
private const int ConnectionTimeoutMs = 5000;
private const int OperationTimeoutMs = 30000;
private readonly TcpClient _client;
private readonly object _connectionLock = new();
private readonly IPeerHandshakeService? _handshakeService;
private readonly ILogger<TcpPeerClient> _logger;
private readonly string _peerAddress;
private readonly ProtocolHandler _protocol;
private readonly INetworkTelemetryService? _telemetry;
private CipherState? _cipherState;
private bool _disposed;
private List<string> _remoteInterests = new();
private NetworkStream? _stream;
private bool _useCompression; // Negotiated after handshake
/// <summary>
/// Initializes a new instance of the <see cref="TcpPeerClient" /> class.
/// </summary>
/// <param name="peerAddress">The remote peer address in <c>host:port</c> format.</param>
/// <param name="logger">The logger used for connection and protocol events.</param>
/// <param name="handshakeService">The optional handshake service used to establish secure sessions.</param>
/// <param name="telemetry">The optional telemetry service for network metrics.</param>
public TcpPeerClient(string peerAddress, ILogger<TcpPeerClient> logger,
IPeerHandshakeService? handshakeService = null, INetworkTelemetryService? telemetry = null)
{
_client = new TcpClient();
_peerAddress = peerAddress;
_logger = logger;
_handshakeService = handshakeService;
_telemetry = telemetry;
_protocol = new ProtocolHandler(logger, telemetry);
}
/// <summary>
/// Gets a value indicating whether the client currently has an active connection.
/// </summary>
@@ -53,412 +69,10 @@ public class TcpPeerClient : IDisposable
/// </summary>
public bool HasHandshaked { get; private set; }
private readonly INetworkTelemetryService? _telemetry;
/// <summary>
/// Initializes a new instance of the <see cref="TcpPeerClient"/> class.
/// </summary>
/// <param name="peerAddress">The remote peer address in <c>host:port</c> format.</param>
/// <param name="logger">The logger used for connection and protocol events.</param>
/// <param name="handshakeService">The optional handshake service used to establish secure sessions.</param>
/// <param name="telemetry">The optional telemetry service for network metrics.</param>
public TcpPeerClient(string peerAddress, ILogger<TcpPeerClient> logger, IPeerHandshakeService? handshakeService = null, INetworkTelemetryService? telemetry = null)
{
_client = new TcpClient();
_peerAddress = peerAddress;
_logger = logger;
_handshakeService = handshakeService;
_telemetry = telemetry;
_protocol = new ProtocolHandler(logger, telemetry);
}
/// <summary>
/// Connects to the configured remote peer.
/// </summary>
/// <param name="token">A token used to cancel the connection attempt.</param>
/// <returns>A task that represents the asynchronous connect operation.</returns>
public async Task ConnectAsync(CancellationToken token)
{
lock (_connectionLock)
{
if (_disposed)
{
throw new ObjectDisposedException(nameof(TcpPeerClient));
}
if (IsConnected) return;
}
var parts = _peerAddress.Split(':');
if (parts.Length != 2)
{
throw new ArgumentException($"Invalid address format: {_peerAddress}. Expected format: host:port");
}
if (!int.TryParse(parts[1], out int port) || port <= 0 || port > 65535)
{
throw new ArgumentException($"Invalid port number: {parts[1]}");
}
// Connect with timeout
using var timeoutCts = new CancellationTokenSource(ConnectionTimeoutMs);
using var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutCts.Token);
try
{
await _client.ConnectAsync(parts[0], port);
lock (_connectionLock)
{
if (_disposed)
{
throw new ObjectDisposedException(nameof(TcpPeerClient));
}
_stream = _client.GetStream();
// CRITICAL for Android: Disable Nagle's algorithm to prevent buffering delays
// This ensures immediate packet transmission for handshake data
_client.NoDelay = true;
// Configure TCP keepalive
_client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true);
// Set read/write timeouts
_stream.ReadTimeout = OperationTimeoutMs;
_stream.WriteTimeout = OperationTimeoutMs;
}
_logger.LogDebug("Connected to peer: {Address} (NoDelay=true for immediate send)", _peerAddress);
}
catch (OperationCanceledException) when (timeoutCts.IsCancellationRequested)
{
throw new TimeoutException($"Connection to {_peerAddress} timed out after {ConnectionTimeoutMs}ms");
}
}
/// <summary>
/// Gets the list of collections the remote peer is interested in.
/// </summary>
public System.Collections.Generic.IReadOnlyList<string> RemoteInterests => _remoteInterests.AsReadOnly();
private List<string> _remoteInterests = new();
/// <summary>
/// Performs authentication handshake with the remote peer.
/// </summary>
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="authToken">The authentication token.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>True if handshake was accepted, false otherwise.</returns>
public async Task<bool> HandshakeAsync(string myNodeId, string authToken, CancellationToken token)
{
return await HandshakeAsync(myNodeId, authToken, null, token);
}
/// <summary>
/// Performs authentication handshake with the remote peer, including collection interests.
/// </summary>
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="authToken">The authentication token.</param>
/// <param name="interestingCollections">Optional collection names this node is interested in receiving.</param>
/// <param name="token">Cancellation token.</param>
/// <returns><see langword="true"/> if handshake was accepted; otherwise <see langword="false"/>.</returns>
public async Task<bool> HandshakeAsync(string myNodeId, string authToken, IEnumerable<string>? interestingCollections, CancellationToken token)
{
if (HasHandshaked) return true;
if (_handshakeService != null)
{
// Perform secure handshake if service is available
// We assume we are initiator here
_cipherState = await _handshakeService.HandshakeAsync(_stream!, true, myNodeId, token);
}
var req = new HandshakeRequest { NodeId = myNodeId, AuthToken = authToken ?? "" };
if (interestingCollections != null)
{
foreach (var coll in interestingCollections)
{
req.InterestingCollections.Add(coll);
}
}
if (CompressionHelper.IsBrotliSupported)
{
req.SupportedCompression.Add("brotli");
}
_logger.LogDebug("Sending HandshakeReq to {Address}", _peerAddress);
await _protocol.SendMessageAsync(_stream!, MessageType.HandshakeReq, req, false, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
_logger.LogDebug("Received Handshake response type: {Type}", type);
if (type != MessageType.HandshakeRes) return false;
var res = HandshakeResponse.Parser.ParseFrom(payload);
// Store remote interests
_remoteInterests = res.InterestingCollections.ToList();
// Negotiation Result
if (res.SelectedCompression == "brotli")
{
_useCompression = true;
_logger.LogInformation("Brotli compression negotiated.");
}
HasHandshaked = res.Accepted;
return res.Accepted;
}
/// <summary>
/// Retrieves the remote peer's latest HLC timestamp.
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The latest remote hybrid logical clock timestamp.</returns>
public async Task<HlcTimestamp> GetClockAsync(CancellationToken token)
{
using (_telemetry?.StartMetric(MetricType.RoundTripTime))
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetClockReq, new GetClockRequest(), _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ClockRes) throw new Exception("Unexpected response");
var res = ClockResponse.Parser.ParseFrom(payload);
return new HlcTimestamp(res.HlcWall, res.HlcLogic, res.HlcNode);
}
}
/// <summary>
/// Retrieves the remote peer's vector clock (latest timestamp per node).
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The remote vector clock.</returns>
public async Task<VectorClock> GetVectorClockAsync(CancellationToken token)
{
using (_telemetry?.StartMetric(MetricType.RoundTripTime))
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetVectorClockReq, new GetVectorClockRequest(), _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.VectorClockRes) throw new Exception("Unexpected response");
var res = VectorClockResponse.Parser.ParseFrom(payload);
var vectorClock = new VectorClock();
foreach (var entry in res.Entries)
{
vectorClock.SetTimestamp(entry.NodeId, new HlcTimestamp(entry.HlcWall, entry.HlcLogic, entry.NodeId));
}
return vectorClock;
}
}
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, CancellationToken token)
{
return await PullChangesAsync(since, null, token);
}
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp, filtered by collections.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, IEnumerable<string>? collections, CancellationToken token)
{
var req = new PullChangesRequest
{
SinceWall = since.PhysicalTime,
SinceLogic = since.LogicalCounter,
// Empty SinceNode indicates a global pull (not source-node filtered).
SinceNode = string.Empty
};
if (collections != null)
{
foreach (var coll in collections)
{
req.Collections.Add(coll);
}
}
await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response");
var res = ChangeSetResponse.Parser.ParseFrom(payload);
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash // Pass the received hash to preserve integrity reference
)).ToList();
}
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified timestamp.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, CancellationToken token)
{
return await PullChangesFromNodeAsync(nodeId, since, null, token);
}
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified timestamp, filtered by collections.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections, CancellationToken token)
{
var req = new PullChangesRequest
{
SinceNode = nodeId,
SinceWall = since.PhysicalTime,
SinceLogic = since.LogicalCounter
};
if (collections != null)
{
foreach (var coll in collections)
{
req.Collections.Add(coll);
}
}
await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChangeSetRes) throw new Exception("Unexpected response");
var res = ChangeSetResponse.Parser.ParseFrom(payload);
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash
)).ToList();
}
/// <summary>
/// Retrieves a range of oplog entries connecting two hashes (Gap Recovery).
/// </summary>
/// <param name="startHash">The starting hash in the chain.</param>
/// <param name="endHash">The ending hash in the chain.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The chain entries connecting the requested hash range.</returns>
public virtual async Task<List<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken token)
{
var req = new GetChainRangeRequest { StartHash = startHash, EndHash = endHash };
await _protocol.SendMessageAsync(_stream!, MessageType.GetChainRangeReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.ChainRangeRes) throw new Exception($"Unexpected response for ChainRange: {type}");
var res = ChainRangeResponse.Parser.ParseFrom(payload);
if (res.SnapshotRequired) throw new SnapshotRequiredException();
return res.Entries.Select(e => new OplogEntry(
e.Collection,
e.Key,
ParseOp(e.Operation),
string.IsNullOrEmpty(e.JsonData) ? default : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash,
e.Hash
)).ToList();
}
/// <summary>
/// Pushes local oplog changes to the remote peer.
/// </summary>
/// <param name="entries">The oplog entries to push.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous push operation.</returns>
public async Task PushChangesAsync(IEnumerable<OplogEntry> entries, CancellationToken token)
{
var req = new PushChangesRequest();
var entryList = entries.ToList();
if (entryList.Count == 0) return;
foreach (var e in entryList)
{
req.Entries.Add(new ProtoOplogEntry
{
Collection = e.Collection,
Key = e.Key,
Operation = e.Operation.ToString(),
JsonData = e.Payload?.GetRawText() ?? "",
HlcWall = e.Timestamp.PhysicalTime,
HlcLogic = e.Timestamp.LogicalCounter,
HlcNode = e.Timestamp.NodeId,
Hash = e.Hash,
PreviousHash = e.PreviousHash
});
}
await _protocol.SendMessageAsync(_stream!, MessageType.PushChangesReq, req, _useCompression, _cipherState, token);
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.AckRes) throw new Exception("Push failed");
var res = AckResponse.Parser.ParseFrom(payload);
if (res.SnapshotRequired) throw new SnapshotRequiredException();
if (!res.Success) throw new Exception("Push failed");
}
private bool _useCompression = false; // Negotiated after handshake
private OperationType ParseOp(string op) => Enum.TryParse<OperationType>(op, out var val) ? val : OperationType.Put;
/// <summary>
/// Downloads a full snapshot from the remote peer to the provided destination stream.
/// </summary>
/// <param name="destination">The stream that receives snapshot bytes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous snapshot transfer operation.</returns>
public async Task GetSnapshotAsync(Stream destination, CancellationToken token)
{
await _protocol.SendMessageAsync(_stream!, MessageType.GetSnapshotReq, new GetSnapshotRequest(), _useCompression, _cipherState, token);
while (true)
{
var (type, payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
if (type != MessageType.SnapshotChunkMsg) throw new Exception($"Unexpected message type during snapshot: {type}");
var chunk = SnapshotChunk.Parser.ParseFrom(payload);
if (chunk.Data.Length > 0)
{
await destination.WriteAsync(chunk.Data.ToByteArray(), 0, chunk.Data.Length, token);
}
if (chunk.IsLast) break;
}
}
public IReadOnlyList<string> RemoteInterests => _remoteInterests.AsReadOnly();
/// <summary>
/// Releases resources used by the peer client.
@@ -491,12 +105,380 @@ public class TcpPeerClient : IDisposable
_logger.LogDebug("Disposed connection to peer: {Address}", _peerAddress);
}
/// <summary>
/// Connects to the configured remote peer.
/// </summary>
/// <param name="token">A token used to cancel the connection attempt.</param>
/// <returns>A task that represents the asynchronous connect operation.</returns>
/// <exception cref="ObjectDisposedException">The client has already been disposed.</exception>
/// <exception cref="ArgumentException">The configured peer address is not in <c>host:port</c> form.</exception>
/// <exception cref="TimeoutException">The connection did not complete within <c>ConnectionTimeoutMs</c>.</exception>
public async Task ConnectAsync(CancellationToken token)
{
    lock (_connectionLock)
    {
        if (_disposed) throw new ObjectDisposedException(nameof(TcpPeerClient));
        if (IsConnected) return;
    }

    // NOTE(review): Split(':') does not handle bracketed IPv6 literals — confirm peers are always host:port.
    string[] parts = _peerAddress.Split(':');
    if (parts.Length != 2)
        throw new ArgumentException($"Invalid address format: {_peerAddress}. Expected format: host:port");
    if (!int.TryParse(parts[1], out int port) || port <= 0 || port > 65535)
        throw new ArgumentException($"Invalid port number: {parts[1]}");

    // Connect with a hard timeout, linked to the caller's token so either can abort the attempt.
    using var timeoutCts = new CancellationTokenSource(ConnectionTimeoutMs);
    using var linkedCts = CancellationTokenSource.CreateLinkedTokenSource(token, timeoutCts.Token);
    try
    {
        // BUGFIX: the linked token was created but never passed to ConnectAsync, so neither the
        // timeout nor caller cancellation could actually interrupt the connect, and the
        // OperationCanceledException filter below was unreachable.
        await _client.ConnectAsync(parts[0], port, linkedCts.Token);
        lock (_connectionLock)
        {
            if (_disposed) throw new ObjectDisposedException(nameof(TcpPeerClient));
            _stream = _client.GetStream();
            // CRITICAL for Android: Disable Nagle's algorithm to prevent buffering delays
            // This ensures immediate packet transmission for handshake data
            _client.NoDelay = true;
            // Configure TCP keepalive
            _client.Client.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true);
            // Set read/write timeouts (apply to synchronous stream operations)
            _stream.ReadTimeout = OperationTimeoutMs;
            _stream.WriteTimeout = OperationTimeoutMs;
        }
        _logger.LogDebug("Connected to peer: {Address} (NoDelay=true for immediate send)", _peerAddress);
    }
    catch (OperationCanceledException) when (timeoutCts.IsCancellationRequested)
    {
        // Translate the internal timeout into a caller-friendly TimeoutException; a caller-initiated
        // cancellation is NOT caught here and propagates as OperationCanceledException.
        throw new TimeoutException($"Connection to {_peerAddress} timed out after {ConnectionTimeoutMs}ms");
    }
}
/// <summary>
/// Performs the authentication handshake with the remote peer without declaring
/// any collection interests.
/// </summary>
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="authToken">The authentication token.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>True if handshake was accepted, false otherwise.</returns>
public async Task<bool> HandshakeAsync(string myNodeId, string authToken, CancellationToken token)
    => await HandshakeAsync(myNodeId, authToken, null, token);
/// <summary>
/// Performs the authentication handshake with the remote peer, optionally declaring
/// the collections this node wants to receive.
/// </summary>
/// <param name="myNodeId">The local node identifier.</param>
/// <param name="authToken">The authentication token.</param>
/// <param name="interestingCollections">Optional collection names this node is interested in receiving.</param>
/// <param name="token">Cancellation token.</param>
/// <returns><see langword="true" /> if handshake was accepted; otherwise <see langword="false" />.</returns>
public async Task<bool> HandshakeAsync(string myNodeId, string authToken,
    IEnumerable<string>? interestingCollections, CancellationToken token)
{
    // Idempotent: a completed handshake is never repeated on this connection.
    if (HasHandshaked) return true;

    // If a secure handshake service is configured, establish the cipher state first.
    // This side always acts as the initiator.
    if (_handshakeService != null)
    {
        _cipherState = await _handshakeService.HandshakeAsync(_stream!, true, myNodeId, token);
    }

    var request = new HandshakeRequest { NodeId = myNodeId, AuthToken = authToken ?? "" };
    if (interestingCollections != null)
    {
        foreach (string collection in interestingCollections)
        {
            request.InterestingCollections.Add(collection);
        }
    }

    // Advertise compression support so the server can negotiate it.
    if (CompressionHelper.IsBrotliSupported) request.SupportedCompression.Add("brotli");

    _logger.LogDebug("Sending HandshakeReq to {Address}", _peerAddress);
    // The handshake itself is always sent uncompressed.
    await _protocol.SendMessageAsync(_stream!, MessageType.HandshakeReq, request, false, _cipherState, token);

    (var responseType, byte[] responsePayload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
    _logger.LogDebug("Received Handshake response type: {Type}", responseType);
    if (responseType != MessageType.HandshakeRes) return false;

    var reply = HandshakeResponse.Parser.ParseFrom(responsePayload);

    // Remember which collections the remote side wants pushed to it.
    _remoteInterests = reply.InterestingCollections.ToList();

    // Apply the negotiated compression choice for subsequent messages.
    if (reply.SelectedCompression == "brotli")
    {
        _useCompression = true;
        _logger.LogInformation("Brotli compression negotiated.");
    }

    HasHandshaked = reply.Accepted;
    return reply.Accepted;
}
/// <summary>
/// Retrieves the remote peer's latest HLC timestamp.
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The latest remote hybrid logical clock timestamp.</returns>
/// <exception cref="InvalidOperationException">The peer replied with an unexpected message type.</exception>
public async Task<HlcTimestamp> GetClockAsync(CancellationToken token)
{
    // Measure the full request/response round trip when telemetry is enabled.
    using (_telemetry?.StartMetric(MetricType.RoundTripTime))
    {
        await _protocol.SendMessageAsync(_stream!, MessageType.GetClockReq, new GetClockRequest(), _useCompression,
            _cipherState, token);
        (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
        // Specific exception type (was a bare Exception) and the offending message type for
        // diagnosability; callers catching Exception still work.
        if (type != MessageType.ClockRes)
            throw new InvalidOperationException($"Unexpected response for GetClock: {type}");
        var res = ClockResponse.Parser.ParseFrom(payload);
        return new HlcTimestamp(res.HlcWall, res.HlcLogic, res.HlcNode);
    }
}
/// <summary>
/// Retrieves the remote peer's vector clock (latest timestamp per node).
/// </summary>
/// <param name="token">Cancellation token.</param>
/// <returns>The remote vector clock.</returns>
/// <exception cref="InvalidOperationException">The peer replied with an unexpected message type.</exception>
public async Task<VectorClock> GetVectorClockAsync(CancellationToken token)
{
    // Measure the full request/response round trip when telemetry is enabled.
    using (_telemetry?.StartMetric(MetricType.RoundTripTime))
    {
        await _protocol.SendMessageAsync(_stream!, MessageType.GetVectorClockReq, new GetVectorClockRequest(),
            _useCompression, _cipherState, token);
        (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
        // Specific exception type (was a bare Exception) and the offending message type for diagnosability.
        if (type != MessageType.VectorClockRes)
            throw new InvalidOperationException($"Unexpected response for GetVectorClock: {type}");
        var res = VectorClockResponse.Parser.ParseFrom(payload);
        var vectorClock = new VectorClock();
        // Each wire entry carries the HLC components; the node id doubles as the HLC node field.
        foreach (var entry in res.Entries)
            vectorClock.SetTimestamp(entry.NodeId, new HlcTimestamp(entry.HlcWall, entry.HlcLogic, entry.NodeId));
        return vectorClock;
    }
}
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp,
/// without any collection filter.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, CancellationToken token)
    => await PullChangesAsync(since, null, token);
/// <summary>
/// Pulls oplog changes from the remote peer since the specified timestamp, filtered by collections.
/// </summary>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
/// <exception cref="InvalidOperationException">The peer replied with an unexpected message type.</exception>
public async Task<List<OplogEntry>> PullChangesAsync(HlcTimestamp since, IEnumerable<string>? collections,
    CancellationToken token)
{
    var req = new PullChangesRequest
    {
        SinceWall = since.PhysicalTime,
        SinceLogic = since.LogicalCounter,
        // Empty SinceNode indicates a global pull (not source-node filtered).
        SinceNode = string.Empty
    };
    if (collections != null)
        foreach (string coll in collections)
            req.Collections.Add(coll);
    await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState,
        token);
    (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
    // Specific exception type (was a bare Exception) and the offending message type for diagnosability.
    if (type != MessageType.ChangeSetRes)
        throw new InvalidOperationException($"Unexpected response for PullChanges: {type}");
    var res = ChangeSetResponse.Parser.ParseFrom(payload);
    // Map wire entries to domain oplog entries; an empty JsonData means a payload-less operation.
    return res.Entries.Select(e => new OplogEntry(
        e.Collection,
        e.Key,
        ParseOp(e.Operation),
        string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize<JsonElement>(e.JsonData),
        new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
        e.PreviousHash,
        e.Hash // Pass the received hash to preserve integrity reference
    )).ToList();
}
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified
/// timestamp, without any collection filter.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since,
    CancellationToken token)
    => await PullChangesFromNodeAsync(nodeId, since, null, token);
/// <summary>
/// Pulls oplog changes for a specific node from the remote peer since the specified timestamp, filtered by
/// collections.
/// </summary>
/// <param name="nodeId">The node identifier to filter changes by.</param>
/// <param name="since">The starting timestamp for requested changes.</param>
/// <param name="collections">Optional collection names used to filter the returned entries.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The list of oplog entries returned by the remote peer.</returns>
/// <exception cref="InvalidOperationException">The peer replied with an unexpected message type.</exception>
public async Task<List<OplogEntry>> PullChangesFromNodeAsync(string nodeId, HlcTimestamp since,
    IEnumerable<string>? collections, CancellationToken token)
{
    // A non-empty SinceNode restricts the pull to changes originating from that node.
    var req = new PullChangesRequest
    {
        SinceNode = nodeId,
        SinceWall = since.PhysicalTime,
        SinceLogic = since.LogicalCounter
    };
    if (collections != null)
        foreach (string coll in collections)
            req.Collections.Add(coll);
    await _protocol.SendMessageAsync(_stream!, MessageType.PullChangesReq, req, _useCompression, _cipherState,
        token);
    (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
    // Specific exception type (was a bare Exception) and the offending message type for diagnosability.
    if (type != MessageType.ChangeSetRes)
        throw new InvalidOperationException($"Unexpected response for PullChangesFromNode: {type}");
    var res = ChangeSetResponse.Parser.ParseFrom(payload);
    // Map wire entries to domain oplog entries; an empty JsonData means a payload-less operation.
    return res.Entries.Select(e => new OplogEntry(
        e.Collection,
        e.Key,
        ParseOp(e.Operation),
        string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize<JsonElement>(e.JsonData),
        new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
        e.PreviousHash,
        e.Hash
    )).ToList();
}
/// <summary>
/// Retrieves a range of oplog entries connecting two hashes (Gap Recovery).
/// </summary>
/// <param name="startHash">The starting hash in the chain.</param>
/// <param name="endHash">The ending hash in the chain.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>The chain entries connecting the requested hash range.</returns>
/// <exception cref="InvalidOperationException">The peer replied with an unexpected message type.</exception>
/// <exception cref="SnapshotRequiredException">The peer cannot fill the gap and requires a full snapshot sync.</exception>
public virtual async Task<List<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
    CancellationToken token)
{
    var req = new GetChainRangeRequest { StartHash = startHash, EndHash = endHash };
    await _protocol.SendMessageAsync(_stream!, MessageType.GetChainRangeReq, req, _useCompression, _cipherState,
        token);
    (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
    // Specific exception type (was a bare Exception); message format preserved.
    if (type != MessageType.ChainRangeRes)
        throw new InvalidOperationException($"Unexpected response for ChainRange: {type}");
    var res = ChainRangeResponse.Parser.ParseFrom(payload);
    // The peer signals an unfillable gap (e.g. pruned history) by requesting a snapshot.
    if (res.SnapshotRequired) throw new SnapshotRequiredException();
    // Map wire entries to domain oplog entries; an empty JsonData means a payload-less operation.
    return res.Entries.Select(e => new OplogEntry(
        e.Collection,
        e.Key,
        ParseOp(e.Operation),
        string.IsNullOrEmpty(e.JsonData) ? default : JsonSerializer.Deserialize<JsonElement>(e.JsonData),
        new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
        e.PreviousHash,
        e.Hash
    )).ToList();
}
/// <summary>
/// Pushes local oplog changes to the remote peer. A no-op when <paramref name="entries" /> is empty.
/// </summary>
/// <param name="entries">The oplog entries to push.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous push operation.</returns>
/// <exception cref="InvalidOperationException">The peer replied with an unexpected message type or rejected the push.</exception>
/// <exception cref="SnapshotRequiredException">The peer requires a full snapshot sync before accepting pushes.</exception>
public async Task PushChangesAsync(IEnumerable<OplogEntry> entries, CancellationToken token)
{
    var entryList = entries.ToList();
    // Nothing to send — avoid an empty round trip.
    if (entryList.Count == 0) return;
    var req = new PushChangesRequest();
    foreach (var e in entryList)
        req.Entries.Add(new ProtoOplogEntry
        {
            Collection = e.Collection,
            Key = e.Key,
            Operation = e.Operation.ToString(),
            JsonData = e.Payload?.GetRawText() ?? "",
            HlcWall = e.Timestamp.PhysicalTime,
            HlcLogic = e.Timestamp.LogicalCounter,
            HlcNode = e.Timestamp.NodeId,
            Hash = e.Hash,
            PreviousHash = e.PreviousHash
        });
    await _protocol.SendMessageAsync(_stream!, MessageType.PushChangesReq, req, _useCompression, _cipherState,
        token);
    (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
    // Specific exception types (was bare Exception), and distinct messages so a protocol
    // mismatch is distinguishable from an explicit rejection by the peer.
    if (type != MessageType.AckRes)
        throw new InvalidOperationException($"Unexpected response for PushChanges: {type}");
    var res = AckResponse.Parser.ParseFrom(payload);
    if (res.SnapshotRequired) throw new SnapshotRequiredException();
    if (!res.Success) throw new InvalidOperationException("Push failed");
}
/// <summary>
/// Maps a wire-format operation name to an <see cref="OperationType" /> value,
/// defaulting to <see cref="OperationType.Put" /> for unrecognized names.
/// </summary>
private OperationType ParseOp(string op)
{
    if (Enum.TryParse<OperationType>(op, out var parsed))
        return parsed;
    return OperationType.Put;
}
/// <summary>
/// Downloads a full snapshot from the remote peer to the provided destination stream.
/// Chunks are written in arrival order until the peer marks the final chunk.
/// </summary>
/// <param name="destination">The stream that receives snapshot bytes.</param>
/// <param name="token">Cancellation token.</param>
/// <returns>A task that represents the asynchronous snapshot transfer operation.</returns>
/// <exception cref="InvalidOperationException">The peer sent a non-chunk message mid-transfer.</exception>
public async Task GetSnapshotAsync(Stream destination, CancellationToken token)
{
    await _protocol.SendMessageAsync(_stream!, MessageType.GetSnapshotReq, new GetSnapshotRequest(),
        _useCompression, _cipherState, token);
    while (true)
    {
        (var type, byte[] payload) = await _protocol.ReadMessageAsync(_stream!, _cipherState, token);
        // Specific exception type (was a bare Exception); message preserved.
        if (type != MessageType.SnapshotChunkMsg)
            throw new InvalidOperationException($"Unexpected message type during snapshot: {type}");
        var chunk = SnapshotChunk.Parser.ParseFrom(payload);
        if (chunk.Data.Length > 0)
            // Write the ByteString's backing memory directly instead of ToByteArray(),
            // avoiding a full copy of every chunk.
            await destination.WriteAsync(chunk.Data.Memory, token);
        // The terminating chunk may be empty; IsLast is the sole end-of-stream signal.
        if (chunk.IsLast) break;
    }
}
}
/// <summary>
/// Exception thrown when a peer indicates that incremental synchronization cannot
/// proceed and a full snapshot transfer is required instead.
/// </summary>
public class SnapshotRequiredException : Exception
{
    /// <summary>
    /// Initializes a new instance of the <see cref="SnapshotRequiredException" /> class.
    /// </summary>
    public SnapshotRequiredException() : base("Peer requires a full snapshot sync.")
    {
    }
}

View File

@@ -1,22 +1,16 @@
using System.Net;
using System.Net.Sockets;
using System.Text.Json;
using Google.Protobuf;
using Microsoft.Extensions.Logging;
using Serilog.Context;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Network.Telemetry;
using Google.Protobuf;
using Microsoft.Extensions.Logging;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Threading;
using System.Threading.Tasks;
using Serilog.Context;
namespace ZB.MOM.WW.CBDDC.Network;
@@ -25,35 +19,39 @@ namespace ZB.MOM.WW.CBDDC.Network;
/// </summary>
internal class TcpSyncServer : ISyncServer
{
private readonly IOplogStore _oplogStore;
private readonly IDocumentStore _documentStore;
private readonly ISnapshotService _snapshotStore;
private readonly ILogger<TcpSyncServer> _logger;
private readonly IPeerNodeConfigurationProvider _configProvider;
private CancellationTokenSource? _cts;
private TcpListener? _listener;
private readonly object _startStopLock = new object();
private int _activeConnections = 0;
internal int MaxConnections = 100;
private const int ClientOperationTimeoutMs = 60000;
private readonly IAuthenticator _authenticator;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly IDocumentStore _documentStore;
private readonly IPeerHandshakeService _handshakeService;
private readonly ILogger<TcpSyncServer> _logger;
private readonly IOplogStore _oplogStore;
private readonly ISnapshotService _snapshotStore;
private readonly object _startStopLock = new();
private readonly INetworkTelemetryService? _telemetry;
private int _activeConnections;
private CancellationTokenSource? _cts;
private TcpListener? _listener;
internal int MaxConnections = 100;
/// <summary>
/// Initializes a new instance of the TcpSyncServer class with the specified peer oplogStore, configuration provider,
/// logger, and authenticator.
/// </summary>
/// <remarks>The server automatically restarts when the configuration provided by
/// <remarks>
/// The server automatically restarts when the configuration provided by
/// peerNodeConfigurationProvider changes. This ensures that configuration updates are applied without requiring
/// manual intervention.</remarks>
/// manual intervention.
/// </remarks>
/// <param name="oplogStore">The peer oplogStore used to manage and persist peer information for the server.</param>
/// <param name="documentStore">The document store used to read and apply synchronized documents.</param>
/// <param name="snapshotStore">The snapshot store used to create and manage database snapshots for synchronization.</param>
/// <param name="peerNodeConfigurationProvider">The provider that supplies configuration settings for the peer node and notifies the server of configuration
/// changes.</param>
/// <param name="peerNodeConfigurationProvider">
/// The provider that supplies configuration settings for the peer node and notifies the server of configuration
/// changes.
/// </param>
/// <param name="logger">The logger used to record informational and error messages for the server instance.</param>
/// <param name="authenticator">The authenticator responsible for validating peer connections to the server.</param>
/// <param name="handshakeService">The service used to perform secure handshake (optional).</param>
@@ -84,11 +82,18 @@ internal class TcpSyncServer : ISyncServer
};
}
/// <summary>
/// Gets the port on which the server is listening.
/// </summary>
public int? ListeningPort => ListeningEndpoint?.Port;
/// <summary>
/// Starts the TCP synchronization server and begins listening for incoming connections asynchronously.
/// </summary>
/// <remarks>If the server is already running, this method returns immediately without starting a new
/// listener. The server will listen on the TCP port specified in the current configuration.</remarks>
/// <remarks>
/// If the server is already running, this method returns immediately without starting a new
/// listener. The server will listen on the TCP port specified in the current configuration.
/// </remarks>
/// <returns>A task that represents the asynchronous start operation.</returns>
public async Task Start()
{
@@ -101,6 +106,7 @@ internal class TcpSyncServer : ISyncServer
_logger.LogWarning("TCP Sync Server already started");
return;
}
_cts = new CancellationTokenSource();
}
@@ -128,9 +134,11 @@ internal class TcpSyncServer : ISyncServer
/// <summary>
/// Stops the listener and cancels any pending operations.
/// </summary>
/// <remarks>After calling this method, the listener will no longer accept new connections or process
/// <remarks>
/// After calling this method, the listener will no longer accept new connections or process
/// requests. This method is safe to call multiple times; subsequent calls have no effect if the listener is already
/// stopped.</remarks>
/// stopped.
/// </remarks>
/// <returns>A task that represents the asynchronous stop operation.</returns>
public async Task Stop()
{
@@ -174,15 +182,9 @@ internal class TcpSyncServer : ISyncServer
/// </summary>
public IPEndPoint? ListeningEndpoint => _listener?.LocalEndpoint as IPEndPoint;
/// <summary>
/// Gets the port on which the server is listening.
/// </summary>
public int? ListeningPort => ListeningEndpoint?.Port;
private async Task ListenAsync(CancellationToken token)
{
while (!token.IsCancellationRequested)
{
try
{
if (_listener == null) break;
@@ -209,13 +211,15 @@ internal class TcpSyncServer : ISyncServer
}
}, token);
}
catch (ObjectDisposedException) { break; }
catch (ObjectDisposedException)
{
break;
}
catch (Exception ex)
{
_logger.LogError(ex, "TCP Accept Error");
}
}
}
private async Task HandleClientAsync(TcpClient client, CancellationToken token)
{
@@ -241,14 +245,13 @@ internal class TcpSyncServer : ISyncServer
var protocol = new ProtocolHandler(_logger, _telemetry);
bool useCompression = false;
var useCompression = false;
CipherState? cipherState = null;
List<string> remoteInterests = new();
// Perform Secure Handshake (if service is available)
var config = await _configProvider.GetConfiguration();
if (_handshakeService != null)
{
try
{
// We are NOT initiator
@@ -261,14 +264,13 @@ internal class TcpSyncServer : ISyncServer
_logger.LogError(ex, "Secure Handshake failed check logic");
return;
}
}
while (client.Connected && !token.IsCancellationRequested)
{
// Re-fetch config if needed, though usually stable
config = await _configProvider.GetConfiguration();
var (type, payload) = await protocol.ReadMessageAsync(stream, cipherState, token);
(var type, byte[] payload) = await protocol.ReadMessageAsync(stream, cipherState, token);
if (type == MessageType.Unknown) break; // EOF or Error
// Handshake Loop
@@ -284,17 +286,17 @@ internal class TcpSyncServer : ISyncServer
if (!valid)
{
_logger.LogWarning("Authentication failed for Node {NodeId}", hReq.NodeId);
await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, new HandshakeResponse { NodeId = config.NodeId, Accepted = false }, false, cipherState, token);
await protocol.SendMessageAsync(stream, MessageType.HandshakeRes,
new HandshakeResponse { NodeId = config.NodeId, Accepted = false }, false, cipherState,
token);
return;
}
var hRes = new HandshakeResponse { NodeId = config.NodeId, Accepted = true };
// Include local interests from IDocumentStore in response for push filtering
foreach (var coll in _documentStore.InterestedCollection)
{
foreach (string coll in _documentStore.InterestedCollection)
hRes.InterestingCollections.Add(coll);
}
if (CompressionHelper.IsBrotliSupported && hReq.SupportedCompression.Contains("brotli"))
{
@@ -302,12 +304,13 @@ internal class TcpSyncServer : ISyncServer
useCompression = true;
}
await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, hRes, false, cipherState, token);
await protocol.SendMessageAsync(stream, MessageType.HandshakeRes, hRes, false, cipherState,
token);
continue;
}
IMessage? response = null;
MessageType resType = MessageType.Unknown;
var resType = MessageType.Unknown;
switch (type)
{
@@ -325,7 +328,7 @@ internal class TcpSyncServer : ISyncServer
case MessageType.GetVectorClockReq:
var vectorClock = await _oplogStore.GetVectorClockAsync(token);
var vcRes = new VectorClockResponse();
foreach (var nodeId in vectorClock.NodeIds)
foreach (string nodeId in vectorClock.NodeIds)
{
var ts = vectorClock.GetTimestamp(nodeId);
vcRes.Entries.Add(new VectorClockEntry
@@ -335,6 +338,7 @@ internal class TcpSyncServer : ISyncServer
HlcLogic = ts.LogicalCounter
});
}
response = vcRes;
resType = MessageType.VectorClockRes;
break;
@@ -351,7 +355,6 @@ internal class TcpSyncServer : ISyncServer
var csRes = new ChangeSetResponse();
foreach (var e in oplog)
{
csRes.Entries.Add(new ProtoOplogEntry
{
Collection = e.Collection,
@@ -364,7 +367,6 @@ internal class TcpSyncServer : ISyncServer
Hash = e.Hash,
PreviousHash = e.PreviousHash
});
}
response = csRes;
resType = MessageType.ChangeSetRes;
break;
@@ -375,7 +377,9 @@ internal class TcpSyncServer : ISyncServer
e.Collection,
e.Key,
(OperationType)Enum.Parse(typeof(OperationType), e.Operation),
string.IsNullOrEmpty(e.JsonData) ? (System.Text.Json.JsonElement?)null : System.Text.Json.JsonSerializer.Deserialize<System.Text.Json.JsonElement>(e.JsonData),
string.IsNullOrEmpty(e.JsonData)
? null
: JsonSerializer.Deserialize<JsonElement>(e.JsonData),
new HlcTimestamp(e.HlcWall, e.HlcLogic, e.HlcNode),
e.PreviousHash, // Restore PreviousHash
e.Hash // Restore Hash
@@ -389,18 +393,15 @@ internal class TcpSyncServer : ISyncServer
case MessageType.GetChainRangeReq:
var rangeReq = GetChainRangeRequest.Parser.ParseFrom(payload);
var rangeEntries = await _oplogStore.GetChainRangeAsync(rangeReq.StartHash, rangeReq.EndHash, token);
var rangeEntries =
await _oplogStore.GetChainRangeAsync(rangeReq.StartHash, rangeReq.EndHash, token);
var rangeRes = new ChainRangeResponse();
if (!rangeEntries.Any() && rangeReq.StartHash != rangeReq.EndHash)
{
// Gap cannot be filled (likely pruned or unknown branch)
rangeRes.SnapshotRequired = true;
}
else
{
foreach (var e in rangeEntries)
{
rangeRes.Entries.Add(new ProtoOplogEntry
{
Collection = e.Collection,
@@ -413,15 +414,14 @@ internal class TcpSyncServer : ISyncServer
Hash = e.Hash,
PreviousHash = e.PreviousHash
});
}
}
response = rangeRes;
resType = MessageType.ChainRangeRes;
break;
case MessageType.GetSnapshotReq:
_logger.LogInformation("Processing GetSnapshotReq from {Endpoint}", remoteEp);
var tempFile = Path.GetTempFileName();
string tempFile = Path.GetTempFileName();
try
{
// Create backup
@@ -432,7 +432,7 @@ internal class TcpSyncServer : ISyncServer
using (var fs = File.OpenRead(tempFile))
{
byte[] buffer = new byte[80 * 1024]; // 80KB chunks
var buffer = new byte[80 * 1024]; // 80KB chunks
int bytesRead;
while ((bytesRead = await fs.ReadAsync(buffer, 0, buffer.Length, token)) > 0)
{
@@ -441,27 +441,28 @@ internal class TcpSyncServer : ISyncServer
Data = ByteString.CopyFrom(buffer, 0, bytesRead),
IsLast = false
};
await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, chunk, false, cipherState, token);
await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, chunk,
false, cipherState, token);
}
// Send End of Snapshot
await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg, new SnapshotChunk { IsLast = true }, false, cipherState, token);
await protocol.SendMessageAsync(stream, MessageType.SnapshotChunkMsg,
new SnapshotChunk { IsLast = true }, false, cipherState, token);
}
}
finally
{
if (File.Exists(tempFile)) File.Delete(tempFile);
}
break;
}
if (response != null)
{
await protocol.SendMessageAsync(stream, resType, response, useCompression, cipherState, token);
}
}
}
}
catch (Exception ex)
{
_logger.LogWarning("Client Handler Error from {Endpoint}: {Message}", remoteEp, ex.Message);

View File

@@ -1,4 +1,3 @@
using System;
using System.Diagnostics;
namespace ZB.MOM.WW.CBDDC.Network.Telemetry;
@@ -23,7 +22,7 @@ public interface INetworkTelemetryService
/// Gets a snapshot of all recorded metric values.
/// </summary>
/// <returns>A dictionary of metric values grouped by metric type and bucket.</returns>
System.Collections.Generic.Dictionary<MetricType, System.Collections.Generic.Dictionary<int, double>> GetSnapshot();
Dictionary<MetricType, Dictionary<int, double>> GetSnapshot();
}
public readonly struct MetricTimer : IDisposable
@@ -49,7 +48,7 @@ public readonly struct MetricTimer : IDisposable
/// </summary>
public void Dispose()
{
var elapsed = Stopwatch.GetTimestamp() - _startTimestamp;
long elapsed = Stopwatch.GetTimestamp() - _startTimestamp;
// Convert ticks to milliseconds? Or keep as ticks?
// Plan said "latency", usually ms.
// Stopwatch.Frequency depends on hardware.

View File

@@ -1,37 +1,28 @@
using System;
using System.Buffers;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Channels;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;
namespace ZB.MOM.WW.CBDDC.Network.Telemetry;
public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{
private readonly Channel<(MetricType Type, double Value)> _metricChannel;
private readonly CancellationTokenSource _cts;
private readonly ILogger<NetworkTelemetryService> _logger;
private readonly string _persistencePath;
// Aggregation State
// We keep 30m of history with 1s resolution = 1800 buckets.
private const int MaxHistorySeconds = 1800;
private readonly object _lock = new object();
private readonly MetricBucket[] _history;
private int _headIndex = 0; // Points to current second
private long _currentSecondTimestamp; // Unix timestamp of current bucket
// Rolling Averages (Last calculated)
private readonly Dictionary<string, double> _averages = new Dictionary<string, double>();
private readonly Dictionary<string, double> _averages = new();
private readonly CancellationTokenSource _cts;
private readonly MetricBucket[] _history;
private readonly object _lock = new();
private readonly ILogger<NetworkTelemetryService> _logger;
private readonly Channel<(MetricType Type, double Value)> _metricChannel;
private readonly string _persistencePath;
private long _currentSecondTimestamp; // Unix timestamp of current bucket
private int _headIndex; // Points to current second
/// <summary>
/// Initializes a new instance of the <see cref="NetworkTelemetryService"/> class.
/// Initializes a new instance of the <see cref="NetworkTelemetryService" /> class.
/// </summary>
/// <param name="logger">The logger used to report telemetry processing and persistence errors.</param>
/// <param name="persistencePath">The file path where persisted telemetry snapshots are written.</param>
@@ -47,7 +38,7 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
_cts = new CancellationTokenSource();
_history = new MetricBucket[MaxHistorySeconds];
for (int i = 0; i < MaxHistorySeconds; i++) _history[i] = new MetricBucket();
for (var i = 0; i < MaxHistorySeconds; i++) _history[i] = new MetricBucket();
_currentSecondTimestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
@@ -55,6 +46,15 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
_ = Task.Run(PersistenceLoop);
}
/// <summary>
/// Releases resources used by the telemetry service.
/// </summary>
public void Dispose()
{
_cts.Cancel();
_cts.Dispose();
}
/// <summary>
/// Records a metric value for the specified metric type.
/// </summary>
@@ -89,13 +89,11 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
foreach (var type in Enum.GetValues(typeof(MetricType)).Cast<MetricType>())
{
var typeDict = new Dictionary<int, double>();
foreach (var w in windows)
{
typeDict[w] = CalculateAverage(type, w);
}
foreach (int w in windows) typeDict[w] = CalculateAverage(type, w);
snapshot[type] = typeDict;
}
}
return snapshot;
}
@@ -103,24 +101,21 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{
var reader = _metricChannel.Reader;
while (!_cts.IsCancellationRequested)
{
try
{
if (await reader.WaitToReadAsync(_cts.Token))
{
while (reader.TryRead(out var item))
{
AddMetricToCurrentBucket(item.Type, item.Value);
}
catch (OperationCanceledException)
{
break;
}
}
catch (OperationCanceledException) { break; }
catch (Exception ex)
{
_logger.LogError(ex, "Error processing metrics");
}
}
}
private void AddMetricToCurrentBucket(MetricType type, double value)
{
@@ -133,11 +128,12 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{
long diff = now - _currentSecondTimestamp;
// Move head forward, clearing buckets in between if gap > 1s
for (int i = 0; i < diff && i < MaxHistorySeconds; i++)
for (var i = 0; i < diff && i < MaxHistorySeconds; i++)
{
_headIndex = (_headIndex + 1) % MaxHistorySeconds;
_history[_headIndex].Reset();
}
_currentSecondTimestamp = now;
}
@@ -148,19 +144,20 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
private async Task PersistenceLoop()
{
while (!_cts.IsCancellationRequested)
{
try
{
await Task.Delay(TimeSpan.FromMinutes(1), _cts.Token);
CalculateAndPersist();
}
catch (OperationCanceledException) { break; }
catch (OperationCanceledException)
{
break;
}
catch (Exception ex)
{
_logger.LogError(ex, "Error persisting metrics");
}
}
}
private void CalculateAndPersist()
{
@@ -179,7 +176,7 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
foreach (var type in Enum.GetValues(typeof(MetricType)).Cast<MetricType>())
{
bw.Write((int)type);
foreach (var w in windows)
foreach (int w in windows)
{
double avg = CalculateAverage(type, w);
bw.Write(w); // Window Seconds
@@ -201,8 +198,8 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
{
// Go backwards from head
double sum = 0;
int count = 0;
int scanned = 0;
var count = 0;
var scanned = 0;
int idx = _headIndex;
@@ -219,30 +216,22 @@ public class NetworkTelemetryService : INetworkTelemetryService, IDisposable
return count == 0 ? 0 : sum / count;
}
/// <summary>
/// Releases resources used by the telemetry service.
/// </summary>
public void Dispose()
{
_cts.Cancel();
_cts.Dispose();
}
}
internal class MetricBucket
{
private readonly int[] _counts;
// Simple lock-free or locked accumulation? Global lock handles it for now.
// Storing Sum and Count for each type
private readonly double[] _sums;
private readonly int[] _counts;
/// <summary>
/// Initializes a new instance of the <see cref="MetricBucket"/> class.
/// Initializes a new instance of the <see cref="MetricBucket" /> class.
/// </summary>
public MetricBucket()
{
var typeCount = Enum.GetValues(typeof(MetricType)).Length;
int typeCount = Enum.GetValues(typeof(MetricType)).Length;
_sums = new double[typeCount];
_counts = new int[typeCount];
}
@@ -263,7 +252,7 @@ internal class MetricBucket
/// <param name="value">The value to accumulate.</param>
public void Add(MetricType type, double value)
{
int idx = (int)type;
var idx = (int)type;
_sums[idx] += value;
_counts[idx]++;
}
@@ -273,11 +262,18 @@ internal class MetricBucket
/// </summary>
/// <param name="type">The metric category to read.</param>
/// <returns>The accumulated sum for the specified metric type.</returns>
public double GetSum(MetricType type) => _sums[(int)type];
public double GetSum(MetricType type)
{
return _sums[(int)type];
}
/// <summary>
/// Gets the accumulated count for a metric type.
/// </summary>
/// <param name="type">The metric category to read.</param>
/// <returns>The accumulated sample count for the specified metric type.</returns>
public int GetCount(MetricType type) => _counts[(int)type];
public int GetCount(MetricType type)
{
return _counts[(int)type];
}
}

View File

@@ -1,17 +1,13 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
using ZB.MOM.WW.CBDDC.Core.Storage;
using System.Text.Json.Serialization;
using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network;
@@ -22,15 +18,15 @@ namespace ZB.MOM.WW.CBDDC.Network;
internal class UdpDiscoveryService : IDiscoveryService
{
private const int DiscoveryPort = 25000;
private readonly ILogger<UdpDiscoveryService> _logger;
private readonly ConcurrentDictionary<string, PeerNode> _activePeers = new();
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly IDocumentStore _documentStore;
private readonly ILogger<UdpDiscoveryService> _logger;
private readonly object _startStopLock = new();
private CancellationTokenSource? _cts;
private readonly ConcurrentDictionary<string, PeerNode> _activePeers = new();
private readonly object _startStopLock = new object();
/// <summary>
/// Initializes a new instance of the <see cref="UdpDiscoveryService"/> class.
/// Initializes a new instance of the <see cref="UdpDiscoveryService" /> class.
/// </summary>
/// <param name="peerNodeConfigurationProvider">Provider for peer node configuration.</param>
/// <param name="documentStore">Document store used to obtain collection interests.</param>
@@ -40,7 +36,8 @@ internal class UdpDiscoveryService : IDiscoveryService
IDocumentStore documentStore,
ILogger<UdpDiscoveryService> logger)
{
_configProvider = peerNodeConfigurationProvider ?? throw new ArgumentNullException(nameof(peerNodeConfigurationProvider));
_configProvider = peerNodeConfigurationProvider ??
throw new ArgumentNullException(nameof(peerNodeConfigurationProvider));
_documentStore = documentStore ?? throw new ArgumentNullException(nameof(documentStore));
_logger = logger;
}
@@ -57,6 +54,7 @@ internal class UdpDiscoveryService : IDiscoveryService
_logger.LogWarning("UDP Discovery Service already started");
return;
}
_cts = new CancellationTokenSource();
}
@@ -101,55 +99,6 @@ internal class UdpDiscoveryService : IDiscoveryService
await Task.CompletedTask;
}
// ... Stop ...
private async Task CleanupAsync(CancellationToken token)
{
while (!token.IsCancellationRequested)
{
try
{
await Task.Delay(10000, token); // Check every 10s
var now = DateTimeOffset.UtcNow;
var expired = new List<string>();
foreach (var pair in _activePeers)
{
// Expiry: 15 seconds (broadcast is every 5s, so 3 missed beats = dead)
if ((now - pair.Value.LastSeen).TotalSeconds > 15)
{
expired.Add(pair.Key);
}
}
foreach (var id in expired)
{
if (_activePeers.TryRemove(id, out var removed))
{
_logger.LogInformation("Peer Expired: {NodeId} at {Endpoint}", removed.NodeId, removed.Address);
}
}
}
catch (OperationCanceledException) { break; }
catch (Exception ex)
{
_logger.LogError(ex, "Cleanup Loop Error");
}
}
}
// ... Listen ...
private void HandleBeacon(DiscoveryBeacon beacon, IPAddress address)
{
var peerId = beacon.NodeId;
var endpoint = $"{address}:{beacon.TcpPort}";
var peer = new PeerNode(peerId, endpoint, DateTimeOffset.UtcNow, interestingCollections: beacon.InterestingCollections);
_activePeers.AddOrUpdate(peerId, peer, (key, old) => peer);
}
/// <summary>
/// Stops the discovery service.
/// </summary>
@@ -190,7 +139,53 @@ internal class UdpDiscoveryService : IDiscoveryService
/// Gets the currently active peers discovered on the network.
/// </summary>
/// <returns>The collection of active peers.</returns>
public IEnumerable<PeerNode> GetActivePeers() => _activePeers.Values;
public IEnumerable<PeerNode> GetActivePeers()
{
return _activePeers.Values;
}
// ... Stop ...
private async Task CleanupAsync(CancellationToken token)
{
while (!token.IsCancellationRequested)
try
{
await Task.Delay(10000, token); // Check every 10s
var now = DateTimeOffset.UtcNow;
var expired = new List<string>();
foreach (var pair in _activePeers)
// Expiry: 15 seconds (broadcast is every 5s, so 3 missed beats = dead)
if ((now - pair.Value.LastSeen).TotalSeconds > 15)
expired.Add(pair.Key);
foreach (string id in expired)
if (_activePeers.TryRemove(id, out var removed))
_logger.LogInformation("Peer Expired: {NodeId} at {Endpoint}", removed.NodeId, removed.Address);
}
catch (OperationCanceledException)
{
break;
}
catch (Exception ex)
{
_logger.LogError(ex, "Cleanup Loop Error");
}
}
// ... Listen ...
private void HandleBeacon(DiscoveryBeacon beacon, IPAddress address)
{
string peerId = beacon.NodeId;
var endpoint = $"{address}:{beacon.TcpPort}";
var peer = new PeerNode(peerId, endpoint, DateTimeOffset.UtcNow,
interestingCollections: beacon.InterestingCollections);
_activePeers.AddOrUpdate(peerId, peer, (key, old) => peer);
}
private async Task ListenAsync(CancellationToken token)
{
@@ -201,17 +196,16 @@ internal class UdpDiscoveryService : IDiscoveryService
_logger.LogInformation("UDP Discovery Listening on port {Port}", DiscoveryPort);
while (!token.IsCancellationRequested)
{
try
{
var result = await udp.ReceiveAsync();
var json = Encoding.UTF8.GetString(result.Buffer);
string json = Encoding.UTF8.GetString(result.Buffer);
try
{
var config = await _configProvider.GetConfiguration();
var _nodeId = config.NodeId;
var localClusterHash = ComputeClusterHash(config.AuthToken);
string _nodeId = config.NodeId;
string localClusterHash = ComputeClusterHash(config.AuthToken);
var beacon = JsonSerializer.Deserialize<DiscoveryBeacon>(json);
@@ -219,10 +213,8 @@ internal class UdpDiscoveryService : IDiscoveryService
{
// Filter by ClusterHash to reduce congestion from different clusters
if (!string.Equals(beacon.ClusterHash, localClusterHash, StringComparison.Ordinal))
{
// Optional: Log trace if needed, but keeping it silent avoids flooding logs during congestion
continue;
}
HandleBeacon(beacon, result.RemoteEndPoint.Address);
}
@@ -232,13 +224,15 @@ internal class UdpDiscoveryService : IDiscoveryService
_logger.LogWarning(ex, "Failed to parse beacon from {Address}", result.RemoteEndPoint.Address);
}
}
catch (ObjectDisposedException) { break; }
catch (ObjectDisposedException)
{
break;
}
catch (Exception ex)
{
_logger.LogError(ex, "UDP Listener Error");
}
}
}
private async Task BroadcastAsync(CancellationToken token)
{
@@ -262,8 +256,8 @@ internal class UdpDiscoveryService : IDiscoveryService
InterestingCollections = _documentStore.InterestedCollection.ToList()
};
var json = JsonSerializer.Serialize(beacon);
var bytes = Encoding.UTF8.GetBytes(json);
string json = JsonSerializer.Serialize(beacon);
byte[] bytes = Encoding.UTF8.GetBytes(json);
await udp.SendAsync(bytes, bytes.Length, endpoint);
}
@@ -279,39 +273,38 @@ internal class UdpDiscoveryService : IDiscoveryService
private string ComputeClusterHash(string authToken)
{
if (string.IsNullOrEmpty(authToken)) return "";
using var sha256 = System.Security.Cryptography.SHA256.Create();
var bytes = Encoding.UTF8.GetBytes(authToken);
var hash = sha256.ComputeHash(bytes);
using var sha256 = SHA256.Create();
byte[] bytes = Encoding.UTF8.GetBytes(authToken);
byte[] hash = sha256.ComputeHash(bytes);
// Return first 8 chars (4 bytes hex) is enough for filtering
return BitConverter.ToString(hash).Replace("-", "").Substring(0, 8);
}
private class DiscoveryBeacon
{
/// <summary>
/// Gets or sets the broadcasting node identifier.
/// </summary>
[System.Text.Json.Serialization.JsonPropertyName("node_id")]
[JsonPropertyName("node_id")]
public string NodeId { get; set; } = "";
/// <summary>
/// Gets or sets the TCP port used by the broadcasting node.
/// </summary>
[System.Text.Json.Serialization.JsonPropertyName("tcp_port")]
[JsonPropertyName("tcp_port")]
public int TcpPort { get; set; }
/// <summary>
/// Gets or sets the cluster hash used for discovery filtering.
/// </summary>
[System.Text.Json.Serialization.JsonPropertyName("cluster_hash")]
[JsonPropertyName("cluster_hash")]
public string ClusterHash { get; set; } = "";
/// <summary>
/// Gets or sets the collections the node is interested in.
/// </summary>
[System.Text.Json.Serialization.JsonPropertyName("interests")]
[JsonPropertyName("interests")]
public List<string> InterestingCollections { get; set; } = new();
}
}

View File

@@ -1,23 +1,23 @@
<Project Sdk="Microsoft.NET.Sdk">
<ItemGroup>
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" />
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
</ItemGroup>
<ItemGroup>
<PackageReference Include="Google.Protobuf" Version="3.25.1" />
<PackageReference Include="Google.Protobuf" Version="3.25.1"/>
<PackageReference Include="Grpc.Tools" Version="2.76.0">
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
<PrivateAssets>all</PrivateAssets>
</PackageReference>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0" />
<PackageReference Include="Serilog" Version="4.2.0" />
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Hosting.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
<PackageReference Include="Serilog" Version="4.2.0"/>
</ItemGroup>
<ItemGroup>
<Protobuf Include="sync.proto" GrpcServices="None" />
<Protobuf Include="sync.proto" GrpcServices="None"/>
</ItemGroup>
<PropertyGroup>
@@ -40,7 +40,7 @@
</PropertyGroup>
<ItemGroup>
<None Include="README.md" Pack="true" PackagePath="\" />
<None Include="README.md" Pack="true" PackagePath="\"/>
</ItemGroup>
<ItemGroup>

View File

@@ -1,8 +1,8 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -16,18 +16,20 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
private readonly ILogger<BLiteDocumentMetadataStore<TDbContext>> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteDocumentMetadataStore{TDbContext}"/> class.
/// Initializes a new instance of the <see cref="BLiteDocumentMetadataStore{TDbContext}" /> class.
/// </summary>
/// <param name="context">The BLite document database context.</param>
/// <param name="logger">The optional logger instance.</param>
public BLiteDocumentMetadataStore(TDbContext context, ILogger<BLiteDocumentMetadataStore<TDbContext>>? logger = null)
public BLiteDocumentMetadataStore(TDbContext context,
ILogger<BLiteDocumentMetadataStore<TDbContext>>? logger = null)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLiteDocumentMetadataStore<TDbContext>>.Instance;
}
/// <inheritdoc />
public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default)
public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
CancellationToken cancellationToken = default)
{
var entity = _context.DocumentMetadatas
.Find(m => m.Collection == collection && m.Key == key)
@@ -37,7 +39,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
}
/// <inheritdoc />
public override async Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default)
public override async Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
CancellationToken cancellationToken = default)
{
return _context.DocumentMetadatas
.Find(m => m.Collection == collection)
@@ -46,7 +49,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
}
/// <inheritdoc />
public override async Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default)
public override async Task UpsertMetadataAsync(DocumentMetadata metadata,
CancellationToken cancellationToken = default)
{
var existing = _context.DocumentMetadatas
.Find(m => m.Collection == metadata.Collection && m.Key == metadata.Key)
@@ -69,7 +73,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
}
/// <inheritdoc />
public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas, CancellationToken cancellationToken = default)
public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
CancellationToken cancellationToken = default)
{
foreach (var metadata in metadatas)
{
@@ -95,7 +100,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
}
/// <inheritdoc />
public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default)
public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
CancellationToken cancellationToken = default)
{
var existing = _context.DocumentMetadatas
.Find(m => m.Collection == collection && m.Key == key)
@@ -127,10 +133,11 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
}
/// <inheritdoc />
public override async Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
public override async Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
{
var query = _context.DocumentMetadatas.AsQueryable()
.Where(m => (m.HlcPhysicalTime > since.PhysicalTime) ||
.Where(m => m.HlcPhysicalTime > since.PhysicalTime ||
(m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter));
if (collections != null)
@@ -161,17 +168,16 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
}
/// <inheritdoc />
public override async Task ImportAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default)
public override async Task ImportAsync(IEnumerable<DocumentMetadata> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
}
foreach (var item in items) await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override async Task MergeAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default)
public override async Task MergeAsync(IEnumerable<DocumentMetadata> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
@@ -186,7 +192,8 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
else
{
// Update only if incoming is newer
var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter, existing.HlcNodeId);
var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter,
existing.HlcNodeId);
if (item.UpdatedAt.CompareTo(existingTs) > 0)
{
existing.HlcPhysicalTime = item.UpdatedAt.PhysicalTime;
@@ -197,6 +204,7 @@ public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore wher
}
}
}
await _context.SaveChangesAsync(cancellationToken);
}

View File

@@ -2,7 +2,8 @@
## Overview
`BLiteDocumentStore<TDbContext>` is an abstract base class that simplifies creating document stores for CBDDC with BLite persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods.
`BLiteDocumentStore<TDbContext>` is an abstract base class that simplifies creating document stores for CBDDC with BLite
persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods.
## Key Features
@@ -27,7 +28,8 @@ Remote Sync ? OplogStore.ApplyBatchAsync()
??? _context.OplogEntries (skip - already exists)
```
**Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries` collection.
**Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries`
collection.
## Implementation Example
@@ -180,6 +182,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation
## Migration from Old CDC-based Approach
### Before (with CDC Events)
```csharp
// SampleDocumentStore subscribes to BLite CDC
// CDC emits events ? OplogCoordinator creates Oplog
@@ -187,6 +190,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation
```
### After (with BLiteDocumentStore)
```csharp
// Direct Oplog management in DocumentStore
// AsyncLocal flag prevents duplicates during sync
@@ -203,6 +207,7 @@ using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation
## Next Steps
After implementing your DocumentStore:
1. Remove CDC subscriptions from your code
2. Remove `OplogCoordinator` from DI (no longer needed)
3. Test local operations create Oplog entries

View File

@@ -1,20 +1,14 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
using BLite.Core.CDC;
using BLite.Core.Collections;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using BLiteOperationType = BLite.Core.Transactions.OperationType;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -27,30 +21,30 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposable
where TDbContext : CBDDCDocumentDbContext
{
protected readonly TDbContext _context;
private readonly List<IDisposable> _cdcWatchers = new();
private readonly object _clockLock = new();
protected readonly IPeerNodeConfigurationProvider _configProvider;
protected readonly IConflictResolver _conflictResolver;
protected readonly IVectorClockService _vectorClock;
protected readonly TDbContext _context;
protected readonly ILogger<BLiteDocumentStore<TDbContext>> _logger;
private readonly HashSet<string> _registeredCollections = new();
/// <summary>
/// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync.
/// CurrentCount == 0 ? sync in progress, CDC must skip.
/// CurrentCount == 1 ? no sync, CDC creates OplogEntry.
/// </summary>
private readonly SemaphoreSlim _remoteSyncGuard = new SemaphoreSlim(1, 1);
private readonly ConcurrentDictionary<string, int> _suppressedCdcEvents = new(StringComparer.Ordinal);
private readonly SemaphoreSlim _remoteSyncGuard = new(1, 1);
private readonly List<IDisposable> _cdcWatchers = new();
private readonly HashSet<string> _registeredCollections = new();
private readonly ConcurrentDictionary<string, int> _suppressedCdcEvents = new(StringComparer.Ordinal);
protected readonly IVectorClockService _vectorClock;
// HLC state for generating timestamps for local changes
private long _lastPhysicalTime;
private int _logicalCounter;
private readonly object _clockLock = new object();
/// <summary>
/// Initializes a new instance of the <see cref="BLiteDocumentStore{TDbContext}"/> class.
/// Initializes a new instance of the <see cref="BLiteDocumentStore{TDbContext}" /> class.
/// </summary>
/// <param name="context">The BLite database context.</param>
/// <param name="configProvider">The peer node configuration provider.</param>
@@ -74,18 +68,30 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
_logicalCounter = 0;
}
private static ILogger<BLiteDocumentStore<TDbContext>> CreateTypedLogger(ILogger? logger)
/// <summary>
/// Releases managed resources used by this document store.
/// </summary>
public virtual void Dispose()
{
if (logger is null)
foreach (var watcher in _cdcWatchers)
try
{
watcher.Dispose();
}
catch
{
return NullLogger<BLiteDocumentStore<TDbContext>>.Instance;
}
if (logger is ILogger<BLiteDocumentStore<TDbContext>> typedLogger)
{
return typedLogger;
_cdcWatchers.Clear();
_remoteSyncGuard.Dispose();
}
private static ILogger<BLiteDocumentStore<TDbContext>> CreateTypedLogger(ILogger? logger)
{
if (logger is null) return NullLogger<BLiteDocumentStore<TDbContext>>.Instance;
if (logger is ILogger<BLiteDocumentStore<TDbContext>> typedLogger) return typedLogger;
return new ForwardingLogger(logger);
}
@@ -94,7 +100,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private readonly ILogger _inner;
/// <summary>
/// Initializes a new instance of the <see cref="ForwardingLogger"/> class.
/// Initializes a new instance of the <see cref="ForwardingLogger" /> class.
/// </summary>
/// <param name="inner">The underlying logger instance.</param>
public ForwardingLogger(ILogger inner)
@@ -135,29 +141,20 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType)
{
var suppressionKey = BuildSuppressionKey(collection, key, operationType);
string suppressionKey = BuildSuppressionKey(collection, key, operationType);
_suppressedCdcEvents.AddOrUpdate(suppressionKey, 1, (_, current) => current + 1);
}
private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType)
{
var suppressionKey = BuildSuppressionKey(collection, key, operationType);
string suppressionKey = BuildSuppressionKey(collection, key, operationType);
while (true)
{
if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out var current))
{
return false;
}
if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out int current)) return false;
if (current <= 1)
{
return _suppressedCdcEvents.TryRemove(suppressionKey, out _);
}
if (current <= 1) return _suppressedCdcEvents.TryRemove(suppressionKey, out _);
if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current))
{
return true;
}
if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) return true;
}
}
@@ -177,7 +174,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{
_registeredCollections.Add(collectionName);
var watcher = collection.Watch(capturePayload: true)
var watcher = collection.Watch(true)
.Subscribe(new CdcObserver<TEntity>(collectionName, keySelector, this));
_cdcWatchers.Add(watcher);
}
@@ -194,7 +191,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private readonly BLiteDocumentStore<TDbContext> _store;
/// <summary>
/// Initializes a new instance of the <see cref="CdcObserver{TEntity}"/> class.
/// Initializes a new instance of the <see cref="CdcObserver{TEntity}" /> class.
/// </summary>
/// <param name="collectionName">The logical collection name.</param>
/// <param name="keySelector">The key selector for observed entities.</param>
@@ -215,18 +212,15 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="changeEvent">The change event payload.</param>
public void OnNext(ChangeStreamEvent<string, TEntity> changeEvent)
{
var operationType = changeEvent.Type == BLiteOperationType.Delete ? OperationType.Delete : OperationType.Put;
var operationType = changeEvent.Type == BLiteOperationType.Delete
? OperationType.Delete
: OperationType.Put;
var entityId = changeEvent.DocumentId?.ToString() ?? "";
string entityId = changeEvent.DocumentId ?? "";
if (operationType == OperationType.Put && changeEvent.Entity != null)
{
entityId = _keySelector(changeEvent.Entity);
}
if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType))
{
return;
}
if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) return;
if (_store._remoteSyncGuard.CurrentCount == 0) return;
@@ -238,7 +232,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
else if (changeEvent.Entity != null)
{
var content = JsonSerializer.SerializeToElement(changeEvent.Entity);
var key = _keySelector(changeEvent.Entity);
string key = _keySelector(changeEvent.Entity);
_store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content)
.GetAwaiter().GetResult();
}
@@ -248,12 +242,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// Handles CDC observer errors.
/// </summary>
/// <param name="error">The observed exception.</param>
public void OnError(Exception error) { }
public void OnError(Exception error)
{
}
/// <summary>
/// Handles completion of the CDC stream.
/// </summary>
public void OnCompleted() { }
public void OnCompleted()
{
}
}
#endregion
@@ -278,7 +276,8 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="documents">The documents to apply in one batch.</param>
/// <param name="cancellationToken">The cancellation token.</param>
protected abstract Task ApplyContentToEntitiesBatchAsync(
IEnumerable<(string Collection, string Key, JsonElement Content)> documents, CancellationToken cancellationToken);
IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
CancellationToken cancellationToken);
/// <summary>
/// Reads an entity from the DbContext and returns it as JsonElement.
@@ -329,8 +328,9 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The matching document, or <see langword="null"/> when not found.</returns>
public async Task<Document?> GetDocumentAsync(string collection, string key, CancellationToken cancellationToken = default)
/// <returns>The matching document, or <see langword="null" /> when not found.</returns>
public async Task<Document?> GetDocumentAsync(string collection, string key,
CancellationToken cancellationToken = default)
{
var content = await GetEntityAsJsonAsync(collection, key, cancellationToken);
if (content == null) return null;
@@ -345,7 +345,8 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="collection">The logical collection name.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents in the specified collection.</returns>
public async Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection, CancellationToken cancellationToken = default)
public async Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection,
CancellationToken cancellationToken = default)
{
var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
var timestamp = new HlcTimestamp(0, 0, "");
@@ -358,17 +359,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="documentKeys">The collection and key pairs to resolve.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents that were found.</returns>
public async Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, CancellationToken cancellationToken)
public async Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys,
CancellationToken cancellationToken)
{
var documents = new List<Document>();
foreach (var (collection, key) in documentKeys)
foreach ((string collection, string key) in documentKeys)
{
var doc = await GetDocumentAsync(collection, key, cancellationToken);
if (doc != null)
{
documents.Add(doc);
}
if (doc != null) documents.Add(doc);
}
return documents;
}
@@ -377,7 +377,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary>
/// <param name="document">The document to persist.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default)
{
await _remoteSyncGuard.WaitAsync(cancellationToken);
@@ -389,6 +389,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{
_remoteSyncGuard.Release();
}
return true;
}
@@ -403,17 +404,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary>
/// <param name="documents">The documents to update.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns>
public async Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default)
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default)
{
var documentList = documents.ToList();
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var document in documentList)
{
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
}
await ApplyContentToEntitiesBatchAsync(
documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
@@ -422,6 +422,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{
_remoteSyncGuard.Release();
}
return true;
}
@@ -430,17 +431,16 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary>
/// <param name="documents">The documents to insert.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns>
public async Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents, CancellationToken cancellationToken = default)
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default)
{
var documentList = documents.ToList();
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var document in documentList)
{
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
}
await ApplyContentToEntitiesBatchAsync(
documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
@@ -449,6 +449,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{
_remoteSyncGuard.Release();
}
return true;
}
@@ -458,8 +459,9 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns>
public async Task<bool> DeleteDocumentAsync(string collection, string key, CancellationToken cancellationToken = default)
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteDocumentAsync(string collection, string key,
CancellationToken cancellationToken = default)
{
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
@@ -470,6 +472,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{
_remoteSyncGuard.Release();
}
return true;
}
@@ -484,32 +487,27 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// </summary>
/// <param name="documentKeys">The document keys in collection/key format.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true"/> when the operation succeeds.</returns>
public async Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys, CancellationToken cancellationToken = default)
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys,
CancellationToken cancellationToken = default)
{
var parsedKeys = new List<(string Collection, string Key)>();
foreach (var key in documentKeys)
foreach (string key in documentKeys)
{
var parts = key.Split('/');
string[] parts = key.Split('/');
if (parts.Length == 2)
{
parsedKeys.Add((parts[0], parts[1]));
}
else
{
_logger.LogWarning("Invalid document key format: {Key}", key);
}
}
if (parsedKeys.Count == 0) return true;
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var (collection, key) in parsedKeys)
{
foreach ((string collection, string key) in parsedKeys)
RegisterSuppressedCdcEvent(collection, key, OperationType.Delete);
}
await RemoveEntitiesBatchAsync(parsedKeys, cancellationToken);
}
@@ -517,6 +515,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{
_remoteSyncGuard.Release();
}
return true;
}
@@ -565,13 +564,10 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
/// <param name="cancellationToken">The cancellation token.</param>
public async Task DropAsync(CancellationToken cancellationToken = default)
{
foreach (var collection in InterestedCollection)
foreach (string collection in InterestedCollection)
{
var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
foreach (var (key, _) in entities)
{
await RemoveEntityAsync(collection, key, cancellationToken);
}
foreach ((string key, var _) in entities) await RemoveEntityAsync(collection, key, cancellationToken);
}
}
@@ -583,11 +579,12 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
public async Task<IEnumerable<Document>> ExportAsync(CancellationToken cancellationToken = default)
{
var documents = new List<Document>();
foreach (var collection in InterestedCollection)
foreach (string collection in InterestedCollection)
{
var collectionDocs = await GetDocumentsByCollectionAsync(collection, cancellationToken);
documents.AddRange(collectionDocs);
}
return documents;
}
@@ -603,9 +600,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
try
{
foreach (var document in documents)
{
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
}
await ApplyContentToEntitiesBatchAsync(
documents.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
@@ -627,10 +622,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var document in items)
{
await MergeAsync(document, cancellationToken);
}
foreach (var document in items) await MergeAsync(document, cancellationToken);
}
finally
{
@@ -673,7 +665,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
{
lock (_clockLock)
{
var now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (now > _lastPhysicalTime)
{
@@ -697,7 +689,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
CancellationToken cancellationToken)
{
var config = await _configProvider.GetConfiguration();
var nodeId = config.NodeId;
string nodeId = config.NodeId;
// Get last hash from OplogEntries collection directly
var lastEntry = _context.OplogEntries
@@ -706,7 +698,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
.ThenByDescending(e => e.TimestampLogicalCounter)
.FirstOrDefault();
var previousHash = lastEntry?.Hash ?? string.Empty;
string previousHash = lastEntry?.Hash ?? string.Empty;
var timestamp = GenerateTimestamp(nodeId);
var oplogEntry = new OplogEntry(
@@ -725,7 +717,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
collection,
key,
timestamp,
isDeleted: operationType == OperationType.Delete);
operationType == OperationType.Delete);
var existingMetadata = _context.DocumentMetadatas
.Find(m => m.Collection == collection && m.Key == key)
@@ -770,7 +762,7 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
private readonly SemaphoreSlim _guard;
/// <summary>
/// Initializes a new instance of the <see cref="RemoteSyncScope"/> class.
/// Initializes a new instance of the <see cref="RemoteSyncScope" /> class.
/// </summary>
/// <param name="guard">The semaphore guarding remote sync operations.</param>
public RemoteSyncScope(SemaphoreSlim guard)
@@ -788,17 +780,4 @@ public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposab
}
#endregion
/// <summary>
/// Releases managed resources used by this document store.
/// </summary>
public virtual void Dispose()
{
foreach (var watcher in _cdcWatchers)
{
try { watcher.Dispose(); } catch { }
}
_cdcWatchers.Clear();
_remoteSyncGuard.Dispose();
}
}

View File

@@ -1,9 +1,9 @@
using ZB.MOM.WW.CBDDC.Core;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -13,7 +13,7 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
protected readonly ILogger<BLiteOplogStore<TDbContext>> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteOplogStore{TDbContext}"/> class.
/// Initializes a new instance of the <see cref="BLiteOplogStore{TDbContext}" /> class.
/// </summary>
/// <param name="dbContext">The BLite database context.</param>
/// <param name="documentStore">The document store used by the oplog store.</param>
@@ -27,14 +27,16 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
IConflictResolver conflictResolver,
IVectorClockService vectorClockService,
ISnapshotMetadataStore? snapshotMetadataStore = null,
ILogger<BLiteOplogStore<TDbContext>>? logger = null) : base(documentStore, conflictResolver, vectorClockService, snapshotMetadataStore)
ILogger<BLiteOplogStore<TDbContext>>? logger = null) : base(documentStore, conflictResolver, vectorClockService,
snapshotMetadataStore)
{
_context = dbContext ?? throw new ArgumentNullException(nameof(dbContext));
_logger = logger ?? NullLogger<BLiteOplogStore<TDbContext>>.Instance;
}
/// <inheritdoc />
public override async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries, CancellationToken cancellationToken = default)
public override async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries,
CancellationToken cancellationToken = default)
{
// BLite transactions are committed by each SaveChangesAsync internally.
// Wrapping in an explicit transaction causes "Cannot rollback committed transaction"
@@ -58,22 +60,25 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
}
/// <inheritdoc />
public override async Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default)
public override async Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
CancellationToken cancellationToken = default)
{
var startRow = _context.OplogEntries.Find(o => o.Hash == startHash).FirstOrDefault();
var endRow = _context.OplogEntries.Find(o => o.Hash == endHash).FirstOrDefault();
if (startRow == null || endRow == null) return [];
var nodeId = startRow.TimestampNodeId;
string nodeId = startRow.TimestampNodeId;
// 2. Fetch range (Start < Entry <= End)
var entities = _context.OplogEntries
.Find(o => o.TimestampNodeId == nodeId &&
((o.TimestampPhysicalTime > startRow.TimestampPhysicalTime) ||
(o.TimestampPhysicalTime == startRow.TimestampPhysicalTime && o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
((o.TimestampPhysicalTime < endRow.TimestampPhysicalTime) ||
(o.TimestampPhysicalTime == endRow.TimestampPhysicalTime && o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
(o.TimestampPhysicalTime > startRow.TimestampPhysicalTime ||
(o.TimestampPhysicalTime == startRow.TimestampPhysicalTime &&
o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
(o.TimestampPhysicalTime < endRow.TimestampPhysicalTime ||
(o.TimestampPhysicalTime == endRow.TimestampPhysicalTime &&
o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
.OrderBy(o => o.TimestampPhysicalTime)
.ThenBy(o => o.TimestampLogicalCounter)
.ToList();
@@ -82,23 +87,27 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
}
/// <inheritdoc />
public override async Task<OplogEntry?> GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default)
public override async Task<OplogEntry?> GetEntryByHashAsync(string hash,
CancellationToken cancellationToken = default)
{
// Hash is now a regular indexed property, not the Key
return _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault()?.ToDomain();
}
/// <inheritdoc />
public override async Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
public override async Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
{
var query = _context.OplogEntries
.Find(o => (o.TimestampPhysicalTime > timestamp.PhysicalTime) ||
(o.TimestampPhysicalTime == timestamp.PhysicalTime && o.TimestampLogicalCounter > timestamp.LogicalCounter));
.Find(o => o.TimestampPhysicalTime > timestamp.PhysicalTime ||
(o.TimestampPhysicalTime == timestamp.PhysicalTime &&
o.TimestampLogicalCounter > timestamp.LogicalCounter));
if (collections != null)
{
var collectionSet = new HashSet<string>(collections);
query = query.Where(o => collectionSet.Contains(o.Collection));
}
return query
.OrderBy(o => o.TimestampPhysicalTime)
.ThenBy(o => o.TimestampLogicalCounter)
@@ -107,17 +116,20 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
}
/// <inheritdoc />
public override async Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
public override async Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
{
var query = _context.OplogEntries.AsQueryable()
.Where(o => o.TimestampNodeId == nodeId &&
((o.TimestampPhysicalTime > since.PhysicalTime) ||
(o.TimestampPhysicalTime == since.PhysicalTime && o.TimestampLogicalCounter > since.LogicalCounter)));
(o.TimestampPhysicalTime > since.PhysicalTime ||
(o.TimestampPhysicalTime == since.PhysicalTime &&
o.TimestampLogicalCounter > since.LogicalCounter)));
if (collections != null)
{
var collectionSet = new HashSet<string>(collections);
query = query.Where(o => collectionSet.Contains(o.Collection));
}
return query
.OrderBy(o => o.TimestampPhysicalTime)
.ThenBy(o => o.TimestampLogicalCounter)
@@ -128,10 +140,7 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
/// <inheritdoc />
public override async Task ImportAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
await _context.OplogEntries.InsertAsync(item.ToEntity());
}
foreach (var item in items) await _context.OplogEntries.InsertAsync(item.ToEntity());
await _context.SaveChangesAsync(cancellationToken);
}
@@ -142,11 +151,9 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
{
// Hash is now a regular indexed property, not the Key
var existing = _context.OplogEntries.Find(o => o.Hash == item.Hash).FirstOrDefault();
if (existing == null)
{
await _context.OplogEntries.InsertAsync(item.ToEntity());
}
if (existing == null) await _context.OplogEntries.InsertAsync(item.ToEntity());
}
await _context.SaveChangesAsync(cancellationToken);
}
@@ -154,8 +161,9 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default)
{
var toDelete = _context.OplogEntries.AsQueryable()
.Where(o => (o.TimestampPhysicalTime < cutoff.PhysicalTime) ||
(o.TimestampPhysicalTime == cutoff.PhysicalTime && o.TimestampLogicalCounter <= cutoff.LogicalCounter))
.Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime ||
(o.TimestampPhysicalTime == cutoff.PhysicalTime &&
o.TimestampLogicalCounter <= cutoff.LogicalCounter))
.Select(o => o.Hash)
.ToList();
await _context.OplogEntries.DeleteBulkAsync(toDelete);
@@ -175,23 +183,20 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
// Step 1: Load from SnapshotMetadata FIRST (base state after prune)
if (_snapshotMetadataStore != null)
{
try
{
var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult();
foreach (var snapshot in snapshots)
{
_vectorClock.UpdateNode(
snapshot.NodeId,
new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter, snapshot.NodeId),
new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter,
snapshot.NodeId),
snapshot.Hash ?? "");
}
}
catch
{
// Ignore errors during initialization - oplog data will be used as fallback
}
}
// Step 2: Load from Oplog (Latest State - Overrides Snapshot if newer)
var latestPerNode = _context.OplogEntries.AsQueryable()
@@ -208,15 +213,12 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
.ToList();
foreach (var node in latestPerNode)
{
if (node.MaxEntry != null)
{
_vectorClock.UpdateNode(
node.NodeId,
new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter, node.MaxEntry.TimestampNodeId),
new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter,
node.MaxEntry.TimestampNodeId),
node.MaxEntry.Hash ?? "");
}
}
_vectorClock.IsInitialized = true;
}
@@ -228,7 +230,8 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
}
/// <inheritdoc />
protected override async Task<string?> QueryLastHashForNodeAsync(string nodeId, CancellationToken cancellationToken = default)
protected override async Task<string?> QueryLastHashForNodeAsync(string nodeId,
CancellationToken cancellationToken = default)
{
var lastEntry = _context.OplogEntries.AsQueryable()
.Where(o => o.TimestampNodeId == nodeId)
@@ -239,7 +242,8 @@ public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDo
}
/// <inheritdoc />
protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, CancellationToken cancellationToken = default)
protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
CancellationToken cancellationToken = default)
{
// Hash is now a regular indexed property, not the Key
var entry = _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault();

View File

@@ -1,7 +1,8 @@
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -9,11 +10,15 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations.
/// </summary>
/// <remarks>This class enables storage, retrieval, and management of remote peer configurations using the provided
/// <remarks>
/// This class enables storage, retrieval, and management of remote peer configurations using the provided
/// database context. It is typically used in scenarios where peer configurations need to be persisted in a document
/// database.</remarks>
/// <typeparam name="TDbContext">The type of the document database context used for accessing and managing peer configurations. Must inherit from
/// CBDDCDocumentDbContext.</typeparam>
/// database.
/// </remarks>
/// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing peer configurations. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore where TDbContext : CBDDCDocumentDbContext
{
/// <summary>
@@ -33,7 +38,8 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
/// <param name="context">The database context used to access and manage peer configuration data. Cannot be null.</param>
/// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param>
/// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception>
public BLitePeerConfigurationStore(TDbContext context, ILogger<BLitePeerConfigurationStore<TDbContext>>? logger = null)
public BLitePeerConfigurationStore(TDbContext context,
ILogger<BLitePeerConfigurationStore<TDbContext>>? logger = null)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLitePeerConfigurationStore<TDbContext>>.Instance;
@@ -42,29 +48,36 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
/// <inheritdoc />
public override async Task DropAsync(CancellationToken cancellationToken = default)
{
_logger.LogWarning("Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
_logger.LogWarning(
"Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
// Use Id (technical key) for deletion, not NodeId (business key)
var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(), cancellationToken);
var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(),
cancellationToken);
await _context.RemotePeerConfigurations.DeleteBulkAsync(allIds);
await _context.SaveChangesAsync(cancellationToken);
_logger.LogInformation("Peer configuration store dropped successfully.");
}
/// <inheritdoc />
public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(CancellationToken cancellationToken = default)
public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
CancellationToken cancellationToken = default)
{
return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken);
}
/// <inheritdoc />
public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId, CancellationToken cancellationToken)
public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
CancellationToken cancellationToken)
{
// NodeId is now a regular indexed property, not the Key
return await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(), cancellationToken);
return await Task.Run(
() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(),
cancellationToken);
}
/// <inheritdoc />
public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(CancellationToken cancellationToken = default)
public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
CancellationToken cancellationToken = default)
{
return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken);
}
@@ -73,7 +86,8 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
{
// NodeId is now a regular indexed property, not the Key
var peer = await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken);
var peer = await Task.Run(
() => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken);
if (peer != null)
{
await _context.RemotePeerConfigurations.DeleteAsync(peer.Id);
@@ -87,10 +101,13 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
}
/// <inheritdoc />
public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer, CancellationToken cancellationToken = default)
public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
CancellationToken cancellationToken = default)
{
// NodeId is now a regular indexed property, not the Key
var existing = await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(), cancellationToken);
var existing =
await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(),
cancellationToken);
if (existing == null)
{
@@ -103,7 +120,7 @@ public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore wh
existing.Type = (int)peer.Type;
existing.IsEnabled = peer.IsEnabled;
existing.InterestsJson = peer.InterestingCollections.Count > 0
? System.Text.Json.JsonSerializer.Serialize(peer.InterestingCollections)
? JsonSerializer.Serialize(peer.InterestingCollections)
: "";
await _context.RemotePeerConfigurations.UpdateAsync(existing);
}

View File

@@ -10,7 +10,8 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// BLite-backed peer oplog confirmation store.
/// </summary>
/// <typeparam name="TDbContext">The BLite context type.</typeparam>
public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmationStore where TDbContext : CBDDCDocumentDbContext
public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmationStore
where TDbContext : CBDDCDocumentDbContext
{
internal const string RegistrationSourceNodeId = "__peer_registration__";
@@ -18,7 +19,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
private readonly ILogger<BLitePeerOplogConfirmationStore<TDbContext>> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="BLitePeerOplogConfirmationStore{TDbContext}"/> class.
/// Initializes a new instance of the <see cref="BLitePeerOplogConfirmationStore{TDbContext}" /> class.
/// </summary>
/// <param name="context">The BLite context.</param>
/// <param name="logger">An optional logger.</param>
@@ -38,9 +39,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == RegistrationSourceNodeId)
@@ -61,7 +60,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
});
await _context.SaveChangesAsync(cancellationToken);
_logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId, address, type);
_logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId,
address, type);
return;
}
@@ -83,20 +83,16 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
if (string.IsNullOrWhiteSpace(sourceNodeId))
{
throw new ArgumentException("Source node id is required.", nameof(sourceNodeId));
}
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == sourceNodeId)
.FirstOrDefault();
var nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (existing == null)
{
@@ -115,15 +111,12 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
return;
}
var isNewer = IsIncomingTimestampNewer(timestamp, existing);
var samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
bool isNewer = IsIncomingTimestampNewer(timestamp, existing);
bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
timestamp.LogicalCounter == existing.ConfirmedLogic &&
!string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal);
if (!isNewer && !samePointHashChanged && existing.IsActive)
{
return;
}
if (!isNewer && !samePointHashChanged && existing.IsActive) return;
existing.ConfirmedWall = timestamp.PhysicalTime;
existing.ConfirmedLogic = timestamp.LogicalCounter;
@@ -136,7 +129,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
}
/// <inheritdoc />
public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(CancellationToken cancellationToken = default)
public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(
CancellationToken cancellationToken = default)
{
var confirmations = _context.PeerOplogConfirmations
.Find(c => c.SourceNodeId != RegistrationSourceNodeId)
@@ -152,9 +146,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
var confirmations = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId != RegistrationSourceNodeId)
@@ -168,26 +160,18 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
{
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
}
var matches = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId)
.ToList();
if (matches.Count == 0)
{
return;
}
if (matches.Count == 0) return;
var nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
foreach (var match in matches)
{
if (!match.IsActive)
{
continue;
}
if (!match.IsActive) continue;
match.IsActive = false;
match.LastConfirmedUtcMs = nowMs;
@@ -229,7 +213,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
}
/// <inheritdoc />
public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default)
public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
@@ -255,7 +240,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
}
/// <inheritdoc />
public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default)
public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
@@ -271,7 +257,8 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
var changed = false;
var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId);
var existingTimestamp = new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId);
var existingTimestamp =
new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId);
if (incomingTimestamp > existingTimestamp)
{
@@ -281,7 +268,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
changed = true;
}
var incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs)
{
existing.LastConfirmedUtcMs = incomingLastConfirmedMs;
@@ -294,10 +281,7 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
changed = true;
}
if (changed)
{
await _context.PeerOplogConfirmations.UpdateAsync(existing);
}
if (changed) await _context.PeerOplogConfirmations.UpdateAsync(existing);
}
await _context.SaveChangesAsync(cancellationToken);
@@ -305,16 +289,11 @@ public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmation
private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, PeerOplogConfirmationEntity existing)
{
if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall)
{
return true;
}
if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true;
if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall &&
incomingTimestamp.LogicalCounter > existing.ConfirmedLogic)
{
return true;
}
return false;
}

View File

@@ -1,7 +1,7 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
@@ -9,26 +9,34 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations.
/// </summary>
/// <remarks>This class enables storage, retrieval, and management of snapshot metadata using the provided
/// <remarks>
/// This class enables storage, retrieval, and management of snapshot metadata using the provided
/// database context. It is typically used in scenarios where snapshot metadata needs to be persisted in a document
/// database. The class supports bulk operations and incremental updates, and can be extended for custom database
/// contexts. Thread safety depends on the underlying context implementation.</remarks>
/// <typeparam name="TDbContext">The type of the document database context used for accessing and managing snapshot metadata. Must inherit from
/// CBDDCDocumentDbContext.</typeparam>
/// contexts. Thread safety depends on the underlying context implementation.
/// </remarks>
/// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing snapshot metadata. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore where TDbContext : CBDDCDocumentDbContext
{
/// <summary>
/// Represents the database context used for data access operations within the derived class.
/// </summary>
/// <remarks>Intended for use by derived classes to interact with the underlying database. The context
/// should be properly disposed of according to the application's lifetime management strategy.</remarks>
/// <remarks>
/// Intended for use by derived classes to interact with the underlying database. The context
/// should be properly disposed of according to the application's lifetime management strategy.
/// </remarks>
protected readonly TDbContext _context;
/// <summary>
/// Provides logging capabilities for the BLiteSnapshotMetadataStore operations.
/// </summary>
/// <remarks>Intended for use by derived classes to record diagnostic and operational information. The
/// logger instance is specific to the BLiteSnapshotMetadataStore<TDbContext> type.</remarks>
/// <remarks>
/// Intended for use by derived classes to record diagnostic and operational information. The
/// logger instance is specific to the BLiteSnapshotMetadataStore<TDbContext> type.
/// </remarks>
protected readonly ILogger<BLiteSnapshotMetadataStore<TDbContext>> _logger;
/// <summary>
@@ -38,7 +46,8 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
/// <param name="context">The database context to be used for accessing snapshot metadata. Cannot be null.</param>
/// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param>
/// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception>
public BLiteSnapshotMetadataStore(TDbContext context, ILogger<BLiteSnapshotMetadataStore<TDbContext>>? logger = null)
public BLiteSnapshotMetadataStore(TDbContext context,
ILogger<BLiteSnapshotMetadataStore<TDbContext>>? logger = null)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLiteSnapshotMetadataStore<TDbContext>>.Instance;
@@ -48,7 +57,8 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
public override async Task DropAsync(CancellationToken cancellationToken = default)
{
// Use Id (technical key) for deletion, not NodeId (business key)
var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(), cancellationToken);
var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(),
cancellationToken);
await _context.SnapshotMetadatas.DeleteBulkAsync(allIds);
await _context.SaveChangesAsync(cancellationToken);
}
@@ -60,37 +70,41 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
}
/// <inheritdoc />
public override async Task<string?> GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default)
public override async Task<string?> GetSnapshotHashAsync(string nodeId,
CancellationToken cancellationToken = default)
{
// NodeId is now a regular indexed property, not the Key
var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(), cancellationToken);
var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(),
cancellationToken);
return snapshot?.Hash;
}
/// <inheritdoc />
public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items, CancellationToken cancellationToken = default)
public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items,
CancellationToken cancellationToken = default)
{
foreach (var metadata in items)
{
await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
}
foreach (var metadata in items) await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, CancellationToken cancellationToken = default)
public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata,
CancellationToken cancellationToken = default)
{
await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items, CancellationToken cancellationToken = default)
public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items,
CancellationToken cancellationToken = default)
{
foreach (var metadata in items)
{
// NodeId is now a regular indexed property, not the Key
var existing = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(), cancellationToken);
var existing =
await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(),
cancellationToken);
if (existing == null)
{
@@ -111,14 +125,18 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
}
}
}
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, CancellationToken cancellationToken)
public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta,
CancellationToken cancellationToken)
{
// NodeId is now a regular indexed property, not the Key - find existing by NodeId
var existing = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(), cancellationToken);
var existing =
await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(),
cancellationToken);
if (existing != null)
{
existing.NodeId = existingMeta.NodeId;
@@ -131,14 +149,18 @@ public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore wher
}
/// <inheritdoc />
public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId, CancellationToken cancellationToken = default)
public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId,
CancellationToken cancellationToken = default)
{
// NodeId is now a regular indexed property, not the Key
return await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(), cancellationToken);
return await Task.Run(
() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(),
cancellationToken);
}
/// <inheritdoc />
public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(CancellationToken cancellationToken = default)
public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(
CancellationToken cancellationToken = default)
{
return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken);
}

View File

@@ -1,7 +1,7 @@
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;

View File

@@ -8,12 +8,38 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
public partial class CBDDCDocumentDbContext : DocumentDbContext
{
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path.
/// </summary>
/// <param name="databasePath">
/// The file system path to the database file to be used by the context. Cannot be null or
/// empty.
/// </param>
public CBDDCDocumentDbContext(string databasePath) : base(databasePath)
{
}
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file
/// configuration.
/// </summary>
/// <param name="databasePath">The file system path to the database file. This value cannot be null or empty.</param>
/// <param name="config">
/// The configuration settings for the page file. Specifies options that control how the database
/// pages are managed.
/// </param>
public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config)
{
}
/// <summary>
/// Gets the collection of operation log entries associated with this instance.
/// </summary>
/// <remarks>The collection provides access to all recorded operation log (oplog) entries, which can be
/// <remarks>
/// The collection provides access to all recorded operation log (oplog) entries, which can be
/// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed
/// directly through this property.</remarks>
/// directly through this property.
/// </remarks>
public DocumentCollection<string, OplogEntity> OplogEntries { get; private set; } = null!;
/// <summary>
@@ -24,16 +50,20 @@ public partial class CBDDCDocumentDbContext : DocumentDbContext
/// <summary>
/// Gets the collection of remote peer configurations associated with this instance.
/// </summary>
/// <remarks>Use this collection to access or enumerate the configuration settings for each remote peer.
/// <remarks>
/// Use this collection to access or enumerate the configuration settings for each remote peer.
/// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the
/// containing class.</remarks>
/// containing class.
/// </remarks>
public DocumentCollection<string, RemotePeerEntity> RemotePeerConfigurations { get; private set; } = null!;
/// <summary>
/// Gets the collection of document metadata for sync tracking.
/// </summary>
/// <remarks>Stores HLC timestamps and deleted state for each document without modifying application entities.
/// Used to track document versions for incremental sync instead of full snapshots.</remarks>
/// <remarks>
/// Stores HLC timestamps and deleted state for each document without modifying application entities.
/// Used to track document versions for incremental sync instead of full snapshots.
/// </remarks>
public DocumentCollection<string, DocumentMetadataEntity> DocumentMetadatas { get; private set; } = null!;
/// <summary>
@@ -41,24 +71,6 @@ public partial class CBDDCDocumentDbContext : DocumentDbContext
/// </summary>
public DocumentCollection<string, PeerOplogConfirmationEntity> PeerOplogConfirmations { get; private set; } = null!;
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path.
/// </summary>
/// <param name="databasePath">The file system path to the database file to be used by the context. Cannot be null or empty.</param>
public CBDDCDocumentDbContext(string databasePath) : base(databasePath)
{
}
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file
/// configuration.
/// </summary>
/// <param name="databasePath">The file system path to the database file. This value cannot be null or empty.</param>
/// <param name="config">The configuration settings for the page file. Specifies options that control how the database pages are managed.</param>
public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config)
{
}
/// <inheritdoc />
protected override void OnModelCreating(ModelBuilder modelBuilder)
{

View File

@@ -9,6 +9,33 @@ namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// </summary>
public static class EntityMappers
{
#region DocumentMetadataEntity Helpers
/// <summary>
/// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state.
/// Used for tracking document sync state.
/// </summary>
/// <param name="collection">The collection name that owns the document.</param>
/// <param name="key">The document key within the collection.</param>
/// <param name="timestamp">The hybrid logical clock timestamp for the document state.</param>
/// <param name="isDeleted">Indicates whether the document is marked as deleted.</param>
public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp,
bool isDeleted = false)
{
return new DocumentMetadataEntity
{
Id = Guid.NewGuid().ToString(),
Collection = collection,
Key = key,
HlcPhysicalTime = timestamp.PhysicalTime,
HlcLogicalCounter = timestamp.LogicalCounter,
HlcNodeId = timestamp.NodeId,
IsDeleted = isDeleted
};
}
#endregion
#region OplogEntity Mappers
/// <summary>
@@ -42,9 +69,7 @@ public static class EntityMappers
JsonElement? payload = null;
// Treat empty string as null payload (Delete operations)
if (!string.IsNullOrEmpty(entity.PayloadJson))
{
payload = JsonSerializer.Deserialize<JsonElement>(entity.PayloadJson);
}
return new OplogEntry(
entity.Collection,
@@ -147,9 +172,7 @@ public static class EntityMappers
};
if (!string.IsNullOrEmpty(entity.InterestsJson))
{
config.InterestingCollections = JsonSerializer.Deserialize<List<string>>(entity.InterestsJson) ?? [];
}
return config;
}
@@ -214,30 +237,4 @@ public static class EntityMappers
}
#endregion
#region DocumentMetadataEntity Helpers
/// <summary>
/// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state.
/// Used for tracking document sync state.
/// </summary>
/// <param name="collection">The collection name that owns the document.</param>
/// <param name="key">The document key within the collection.</param>
/// <param name="timestamp">The hybrid logical clock timestamp for the document state.</param>
/// <param name="isDeleted">Indicates whether the document is marked as deleted.</param>
public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp, bool isDeleted = false)
{
return new DocumentMetadataEntity
{
Id = Guid.NewGuid().ToString(),
Collection = collection,
Key = key,
HlcPhysicalTime = timestamp.PhysicalTime,
HlcLogicalCounter = timestamp.LogicalCounter,
HlcNodeId = timestamp.NodeId,
IsDeleted = isDeleted
};
}
#endregion
}

View File

@@ -10,22 +10,27 @@ namespace ZB.MOM.WW.CBDDC.Persistence;
public abstract class DocumentMetadataStore : IDocumentMetadataStore
{
/// <inheritdoc />
public abstract Task<DocumentMetadata?> GetMetadataAsync(string collection, string key, CancellationToken cancellationToken = default);
public abstract Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection, CancellationToken cancellationToken = default);
public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task UpsertMetadataAsync(DocumentMetadata metadata, CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas, CancellationToken cancellationToken = default);
public abstract Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, CancellationToken cancellationToken = default);
public abstract Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
public abstract Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task DropAsync(CancellationToken cancellationToken = default);
@@ -34,7 +39,8 @@ public abstract class DocumentMetadataStore : IDocumentMetadataStore
public abstract Task<IEnumerable<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task ImportAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default);
public abstract Task ImportAsync(IEnumerable<DocumentMetadata> items,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task MergeAsync(IEnumerable<DocumentMetadata> items, CancellationToken cancellationToken = default);

View File

@@ -8,9 +8,9 @@ public class NodeCacheEntry
/// Gets or sets the latest known timestamp for the node.
/// </summary>
public HlcTimestamp Timestamp { get; set; }
/// <summary>
/// Gets or sets the latest known hash for the node.
/// </summary>
public string Hash { get; set; } = "";
}

View File

@@ -6,27 +6,13 @@ namespace ZB.MOM.WW.CBDDC.Persistence;
public abstract class OplogStore : IOplogStore
{
protected readonly IDocumentStore _documentStore;
protected readonly IConflictResolver _conflictResolver;
protected readonly IDocumentStore _documentStore;
protected readonly ISnapshotMetadataStore? _snapshotMetadataStore;
protected readonly IVectorClockService _vectorClock;
/// <summary>
/// Occurs after a set of oplog entries has been applied.
/// </summary>
public event EventHandler<ChangesAppliedEventArgs>? ChangesApplied;
/// <summary>
/// Raises the <see cref="ChangesApplied"/> event.
/// </summary>
/// <param name="appliedEntries">The entries that were applied.</param>
public virtual void OnChangesApplied(IEnumerable<OplogEntry> appliedEntries)
{
ChangesApplied?.Invoke(this, new ChangesAppliedEventArgs(appliedEntries));
}
/// <summary>
/// Initializes a new instance of the <see cref="OplogStore"/> class.
/// Initializes a new instance of the <see cref="OplogStore" /> class.
/// </summary>
/// <param name="documentStore">The backing document store.</param>
/// <param name="conflictResolver">The conflict resolver used during merges.</param>
@@ -46,20 +32,9 @@ public abstract class OplogStore : IOplogStore
}
/// <summary>
/// Initializes the VectorClockService with existing oplog/snapshot data.
/// Called once at construction time.
/// Occurs after a set of oplog entries has been applied.
/// </summary>
protected abstract void InitializeVectorClock();
/// <summary>
/// Asynchronously inserts an operation log entry into the underlying data store.
/// </summary>
/// <remarks>Implementations should ensure that the entry is persisted reliably. If the operation is
/// cancelled, the entry may not be inserted.</remarks>
/// <param name="entry">The operation log entry to insert. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the insert operation.</param>
/// <returns>A task that represents the asynchronous insert operation.</returns>
protected abstract Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default);
public event EventHandler<ChangesAppliedEventArgs>? ChangesApplied;
/// <inheritdoc />
public async Task AppendOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default)
@@ -69,13 +44,11 @@ public abstract class OplogStore : IOplogStore
}
/// <inheritdoc />
public async virtual Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries, CancellationToken cancellationToken = default)
public virtual async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries,
CancellationToken cancellationToken = default)
{
var entries = oplogEntries.ToList();
if (entries.Count == 0)
{
return;
}
if (entries.Count == 0) return;
var documentKeys = entries.Select(e => (e.Collection, e.Key)).Distinct().ToList();
var documentsToFetch = await _documentStore.GetDocumentsAsync(documentKeys, cancellationToken);
@@ -88,7 +61,8 @@ public abstract class OplogStore : IOplogStore
foreach (var entry in orderedEntriesPerCollectionKey)
{
var existingDocument = documentsToFetch.FirstOrDefault(d => d.Collection == entry.Key.Collection && d.Key == entry.Key.Key);
var existingDocument =
documentsToFetch.FirstOrDefault(d => d.Collection == entry.Key.Collection && d.Key == entry.Key.Key);
var document = existingDocument;
var sawDelete = false;
var sawPut = false;
@@ -106,34 +80,25 @@ public abstract class OplogStore : IOplogStore
{
sawPut = true;
if (document == null)
{
document = new Document(
oplogEntry.Collection,
oplogEntry.Key,
oplogEntry.Payload.Value,
oplogEntry.Timestamp,
isDeleted: false);
}
false);
else
{
document.Merge(oplogEntry, _conflictResolver);
}
}
}
if (document == null)
{
if (sawDelete && existingDocument != null)
{
await _documentStore.DeleteDocumentAsync(entry.Key.Collection, entry.Key.Key, cancellationToken);
}
continue;
}
if (sawPut || existingDocument == null)
{
await _documentStore.PutDocumentAsync(document, cancellationToken);
}
if (sawPut || existingDocument == null) await _documentStore.PutDocumentAsync(document, cancellationToken);
}
//insert all oplog entries after processing documents to ensure oplog reflects the actual state of documents
@@ -145,41 +110,21 @@ public abstract class OplogStore : IOplogStore
}
/// <inheritdoc />
public abstract Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash, CancellationToken cancellationToken = default);
public abstract Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task<OplogEntry?> GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves the most recent hash value associated with the specified node.
/// </summary>
/// <param name="nodeId">The unique identifier of the node for which to query the last hash. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains the last hash value for the node, or
/// null if no hash is available.</returns>
protected abstract Task<string?> QueryLastHashForNodeAsync(string nodeId, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously queries the oplog for the most recent timestamp associated with the specified hash.
/// </summary>
/// <remarks>This method is intended to be implemented by derived classes to provide access to the oplog.
/// The returned timestamps can be used to track the last occurrence of a hash in the oplog for synchronization or
/// auditing purposes.</remarks>
/// <param name="hash">The hash value to search for in the oplog. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>A task that represents the asynchronous operation. The task result contains a tuple with the wall clock
/// timestamp and logical timestamp if the hash is found; otherwise, null.</returns>
protected abstract Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, CancellationToken cancellationToken = default);
/// <inheritdoc />
public async Task<string?> GetLastEntryHashAsync(string nodeId, CancellationToken cancellationToken = default)
{
// Try cache first
var cachedHash = _vectorClock.GetLastHash(nodeId);
string? cachedHash = _vectorClock.GetLastHash(nodeId);
if (cachedHash != null) return cachedHash;
// Cache miss - query database (Oplog first)
var hash = await QueryLastHashForNodeAsync(nodeId, cancellationToken);
string? hash = await QueryLastHashForNodeAsync(nodeId, cancellationToken);
// FALLBACK: If not in oplog, check SnapshotMetadata (important after prune!)
if (hash == null && _snapshotMetadataStore != null)
@@ -190,11 +135,10 @@ public abstract class OplogStore : IOplogStore
{
var snapshotMeta = await _snapshotMetadataStore.GetSnapshotMetadataAsync(nodeId, cancellationToken);
if (snapshotMeta != null)
{
_vectorClock.UpdateNode(nodeId,
new HlcTimestamp(snapshotMeta.TimestampPhysicalTime, snapshotMeta.TimestampLogicalCounter, nodeId),
new HlcTimestamp(snapshotMeta.TimestampPhysicalTime, snapshotMeta.TimestampLogicalCounter,
nodeId),
hash);
}
return hash;
}
}
@@ -204,12 +148,10 @@ public abstract class OplogStore : IOplogStore
{
var row = await QueryLastHashTimestampFromOplogAsync(hash, cancellationToken);
if (row.HasValue)
{
_vectorClock.UpdateNode(nodeId,
new HlcTimestamp(row.Value.Wall, row.Value.Logic, nodeId),
hash);
}
}
return hash;
}
@@ -221,10 +163,12 @@ public abstract class OplogStore : IOplogStore
}
/// <inheritdoc />
public abstract Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
public abstract Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
public abstract Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
IEnumerable<string>? collections = null, CancellationToken cancellationToken = default);
/// <inheritdoc />
public Task<VectorClock> GetVectorClockAsync(CancellationToken cancellationToken = default)
@@ -264,4 +208,60 @@ public abstract class OplogStore : IOplogStore
/// <param name="cancellationToken">A token used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation.</returns>
public abstract Task MergeAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default);
/// <summary>
/// Raises the <see cref="ChangesApplied" /> event.
/// </summary>
/// <param name="appliedEntries">The entries that were applied.</param>
public virtual void OnChangesApplied(IEnumerable<OplogEntry> appliedEntries)
{
ChangesApplied?.Invoke(this, new ChangesAppliedEventArgs(appliedEntries));
}
/// <summary>
/// Initializes the VectorClockService with existing oplog/snapshot data.
/// Called once at construction time.
/// </summary>
protected abstract void InitializeVectorClock();
/// <summary>
/// Asynchronously inserts an operation log entry into the underlying data store.
/// </summary>
/// <remarks>
/// Implementations should ensure that the entry is persisted reliably. If the operation is
/// cancelled, the entry may not be inserted.
/// </remarks>
/// <param name="entry">The operation log entry to insert. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the insert operation.</param>
/// <returns>A task that represents the asynchronous insert operation.</returns>
protected abstract Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously retrieves the most recent hash value associated with the specified node.
/// </summary>
/// <param name="nodeId">The unique identifier of the node for which to query the last hash. Cannot be null or empty.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>
/// A task that represents the asynchronous operation. The task result contains the last hash value for the node, or
/// null if no hash is available.
/// </returns>
protected abstract Task<string?> QueryLastHashForNodeAsync(string nodeId,
CancellationToken cancellationToken = default);
/// <summary>
/// Asynchronously queries the oplog for the most recent timestamp associated with the specified hash.
/// </summary>
/// <remarks>
/// This method is intended to be implemented by derived classes to provide access to the oplog.
/// The returned timestamps can be used to track the last occurrence of a hash in the oplog for synchronization or
/// auditing purposes.
/// </remarks>
/// <param name="hash">The hash value to search for in the oplog. Cannot be null.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the asynchronous operation.</param>
/// <returns>
/// A task that represents the asynchronous operation. The task result contains a tuple with the wall clock
/// timestamp and logical timestamp if the hash is found; otherwise, null.
/// </returns>
protected abstract Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
CancellationToken cancellationToken = default);
}

View File

@@ -6,44 +6,43 @@ namespace ZB.MOM.WW.CBDDC.Persistence;
public abstract class PeerConfigurationStore : IPeerConfigurationStore
{
/// <inheritdoc />
public abstract Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(CancellationToken cancellationToken = default);
public abstract Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId, CancellationToken cancellationToken);
public abstract Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
CancellationToken cancellationToken);
/// <inheritdoc />
public abstract Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task SaveRemotePeerAsync(RemotePeerConfiguration peer, CancellationToken cancellationToken = default);
public abstract Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task DropAsync(CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(CancellationToken cancellationToken = default);
public abstract Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
CancellationToken cancellationToken = default);
/// <inheritdoc />
public virtual async Task ImportAsync(IEnumerable<RemotePeerConfiguration> items, CancellationToken cancellationToken = default)
public virtual async Task ImportAsync(IEnumerable<RemotePeerConfiguration> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
await SaveRemotePeerAsync(item, cancellationToken);
}
foreach (var item in items) await SaveRemotePeerAsync(item, cancellationToken);
}
/// <inheritdoc />
public virtual async Task MergeAsync(IEnumerable<RemotePeerConfiguration> items, CancellationToken cancellationToken = default)
public virtual async Task MergeAsync(IEnumerable<RemotePeerConfiguration> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
var existing = await GetRemotePeerAsync(item.NodeId, cancellationToken);
if (existing == null)
{
await SaveRemotePeerAsync(item, cancellationToken);
}
if (existing == null) await SaveRemotePeerAsync(item, cancellationToken);
// If exists, keep existing (simple merge strategy)
}
}
}

View File

@@ -49,8 +49,10 @@ public abstract class PeerOplogConfirmationStore : IPeerOplogConfirmationStore
public abstract Task<IEnumerable<PeerOplogConfirmation>> ExportAsync(CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task ImportAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default);
public abstract Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default);
/// <inheritdoc />
public abstract Task MergeAsync(IEnumerable<PeerOplogConfirmation> items, CancellationToken cancellationToken = default);
public abstract Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default);
}

Some files were not shown because too many files have changed in this diff Show More