Replace BLite with Surreal embedded persistence
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m21s

This commit is contained in:
Joseph Doherty
2026-02-22 05:21:53 -05:00
parent 7ebc2cb567
commit 9c2a77dc3c
56 changed files with 6613 additions and 3177 deletions

122
README.md
View File

@@ -76,7 +76,7 @@ Your application continues to read and write to its database as usual. CBDDC wor
+---------------------------------------------------+
| uses your DbContext directly
+---------------------------------------------------+
| Your Database (BLite) |
| Your Database (Surreal embedded RocksDB) |
| +---------------------------------------------+ |
| | Users | Orders | Products | ... |
| +---------------------------------------------+ |
@@ -155,7 +155,7 @@ Nodes advertise which collections they sync. The orchestrator prioritizes peers
### [Cloud] Cloud Infrastructure
- ASP.NET Core hosting (single-cluster mode)
- BLite embedded persistence
- Surreal embedded RocksDB persistence
- shared-token authentication
---
@@ -167,11 +167,11 @@ Nodes advertise which collections they sync. The orchestrator prioritizes peers
| Package | Purpose |
|---------|---------|
| `ZB.MOM.WW.CBDDC.Core` | Interfaces, models, conflict resolution (.NET Standard 2.0+) |
| `ZB.MOM.WW.CBDDC.Persistence` | BLite persistence provider, OplogStore, VectorClockService (.NET 10+) |
| `ZB.MOM.WW.CBDDC.Persistence` | Surreal embedded RocksDB provider, OplogStore, VectorClockService (.NET 10+) |
| `CBDDC.Network` | TCP sync, UDP discovery, Protobuf protocol (.NET Standard 2.0+) |
```bash
# BLite (embedded document DB)
# Surreal embedded (RocksDB)
dotnet add package ZB.MOM.WW.CBDDC.Core
dotnet add package ZB.MOM.WW.CBDDC.Persistence
dotnet add package CBDDC.Network
@@ -285,12 +285,25 @@ High-priority troubleshooting topics:
### 1. Define Your Database Context
```csharp
public class MyDbContext : CBDDCDocumentDbContext
public sealed class MyDbContext
{
public DocumentCollection<string, Customer> Customers { get; private set; }
public DocumentCollection<string, Order> Orders { get; private set; }
public MyDbContext(
ICBDDCSurrealEmbeddedClient embeddedClient,
ICBDDCSurrealSchemaInitializer schemaInitializer)
{
EmbeddedClient = embeddedClient;
SchemaInitializer = schemaInitializer;
Customers = new SampleSurrealCollection<Customer>("customers", c => c.Id, embeddedClient, schemaInitializer);
Orders = new SampleSurrealCollection<Order>("orders", o => o.Id, embeddedClient, schemaInitializer);
}
public MyDbContext(string dbPath) : base(dbPath) { }
public ICBDDCSurrealEmbeddedClient EmbeddedClient { get; }
public ICBDDCSurrealSchemaInitializer SchemaInitializer { get; }
public SampleSurrealCollection<Customer> Customers { get; }
public SampleSurrealCollection<Order> Orders { get; }
public Task SaveChangesAsync(CancellationToken ct = default)
=> SchemaInitializer.EnsureInitializedAsync(ct);
}
```
@@ -299,14 +312,20 @@ public class MyDbContext : CBDDCDocumentDbContext
This is where you tell CBDDC **which collections to sync** and **how to map** between your entities and the sync engine:
```csharp
public class MyDocumentStore : BLiteDocumentStore<MyDbContext>
public sealed class MyDocumentStore : SurrealDocumentStore<MyDbContext>
{
public MyDocumentStore(
MyDbContext context,
IPeerNodeConfigurationProvider configProvider,
IVectorClockService vectorClockService,
ILogger<MyDocumentStore>? logger = null)
: base(context, configProvider, vectorClockService, logger: logger)
: base(
context,
context.EmbeddedClient,
context.SchemaInitializer,
configProvider,
vectorClockService,
logger: logger)
{
// Register collections for CDC - only these will be synced
WatchCollection("Customers", context.Customers, c => c.Id);
@@ -322,34 +341,31 @@ public class MyDocumentStore : BLiteDocumentStore<MyDbContext>
case "Customers":
var customer = content.Deserialize<Customer>()!;
customer.Id = key;
var existing = _context.Customers
.Find(c => c.Id == key).FirstOrDefault();
if (existing != null) _context.Customers.Update(customer);
else _context.Customers.Insert(customer);
var existing = await _context.Customers.FindByIdAsync(key, ct);
if (existing != null) await _context.Customers.UpdateAsync(customer, ct);
else await _context.Customers.InsertAsync(customer, ct);
break;
case "Orders":
var order = content.Deserialize<Order>()!;
order.Id = key;
var existingOrder = _context.Orders
.Find(o => o.Id == key).FirstOrDefault();
if (existingOrder != null) _context.Orders.Update(order);
else _context.Orders.Insert(order);
var existingOrder = await _context.Orders.FindByIdAsync(key, ct);
if (existingOrder != null) await _context.Orders.UpdateAsync(order, ct);
else await _context.Orders.InsertAsync(order, ct);
break;
}
await _context.SaveChangesAsync(ct);
}
protected override Task<JsonElement?> GetEntityAsJsonAsync(
protected override async Task<JsonElement?> GetEntityAsJsonAsync(
string collection, string key, CancellationToken ct)
{
object? entity = collection switch
{
"Customers" => _context.Customers.Find(c => c.Id == key).FirstOrDefault(),
"Orders" => _context.Orders.Find(o => o.Id == key).FirstOrDefault(),
"Customers" => await _context.Customers.FindByIdAsync(key, ct),
"Orders" => await _context.Orders.FindByIdAsync(key, ct),
_ => null
};
return Task.FromResult(entity != null
? (JsonElement?)JsonSerializer.SerializeToElement(entity) : null);
return entity != null ? JsonSerializer.SerializeToElement(entity) : null;
}
protected override async Task RemoveEntityAsync(
@@ -357,8 +373,8 @@ public class MyDocumentStore : BLiteDocumentStore<MyDbContext>
{
switch (collection)
{
case "Customers": _context.Customers.Delete(key); break;
case "Orders": _context.Orders.Delete(key); break;
case "Customers": await _context.Customers.DeleteAsync(key, ct); break;
case "Orders": await _context.Orders.DeleteAsync(key, ct); break;
}
await _context.SaveChangesAsync(ct);
}
@@ -395,9 +411,15 @@ builder.Services.AddSingleton<IPeerNodeConfigurationProvider>(
// Register CBDDC services
builder.Services
.AddSingleton<MyDbContext>()
.AddCBDDCCore()
.AddCBDDCBLite<MyDbContext, MyDocumentStore>(
sp => new MyDbContext("mydata.blite"))
.AddCBDDCSurrealEmbedded<MyDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = "data/mydata.rocksdb",
Namespace = "myapp",
Database = "main"
})
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>();
await builder.Build().RunAsync();
@@ -442,31 +464,40 @@ If you have an **existing database** and want to add P2P sync:
### Step 1 - Wrap your context
Create a `DbContext` extending `CBDDCDocumentDbContext`. This can wrap your existing collections/tables.
Create a context that exposes your collections and holds the Surreal embedded services.
```csharp
public class MyExistingDbContext : CBDDCDocumentDbContext
public sealed class MyExistingDbContext
{
// Your existing collections
public DocumentCollection<string, Product> Products { get; private set; }
public DocumentCollection<string, Inventory> Inventory { get; private set; }
public MyExistingDbContext(
ICBDDCSurrealEmbeddedClient embeddedClient,
ICBDDCSurrealSchemaInitializer schemaInitializer)
{
EmbeddedClient = embeddedClient;
SchemaInitializer = schemaInitializer;
Products = new SampleSurrealCollection<Product>("products", p => p.Id, embeddedClient, schemaInitializer);
Inventory = new SampleSurrealCollection<Inventory>("inventory", i => i.Id, embeddedClient, schemaInitializer);
}
public MyExistingDbContext(string dbPath) : base(dbPath) { }
public ICBDDCSurrealEmbeddedClient EmbeddedClient { get; }
public ICBDDCSurrealSchemaInitializer SchemaInitializer { get; }
public SampleSurrealCollection<Product> Products { get; }
public SampleSurrealCollection<Inventory> Inventory { get; }
}
```
### Step 2 - Create a DocumentStore
Extend `BLiteDocumentStore<T>`. This is the **bridge** between your data model and the sync engine.
Extend `SurrealDocumentStore<T>`. This is the **bridge** between your data model and the sync engine.
```csharp
public class MyDocumentStore : BLiteDocumentStore<MyExistingDbContext>
public class MyDocumentStore : SurrealDocumentStore<MyExistingDbContext>
{
public MyDocumentStore(MyExistingDbContext ctx,
IPeerNodeConfigurationProvider cfg,
IVectorClockService vc,
ILogger<MyDocumentStore>? log = null)
: base(ctx, cfg, vc, logger: log)
: base(ctx, ctx.EmbeddedClient, ctx.SchemaInitializer, cfg, vc, logger: log)
{
// Continue to next step...
}
@@ -481,7 +512,7 @@ Call `WatchCollection()` in the constructor for each collection you want to repl
```csharp
public MyDocumentStore(...)
: base(ctx, cfg, vc, logger: log)
: base(ctx, ctx.EmbeddedClient, ctx.SchemaInitializer, cfg, vc, logger: log)
{
// Only these 2 collections will be synced across the mesh
WatchCollection("Products", ctx.Products, p => p.Id);
@@ -585,7 +616,7 @@ protected override async Task ApplyContentToEntitiesBatchAsync(
Your Code: db.Users.InsertAsync(user)
|
v
BLite: SaveChangesAsync()
Surreal context write committed
|
| CDC fires (WatchCollection observer)
DocumentStore: CreateOplogEntryAsync()
@@ -610,15 +641,22 @@ DocumentStore: CreateOplogEntryAsync()
## Cloud Deployment
CBDDC supports ASP.NET Core hosting with BLite persistence for cloud deployments.
CBDDC supports ASP.NET Core hosting with Surreal embedded RocksDB persistence for cloud deployments.
### Example: ASP.NET Core with BLite
### Example: ASP.NET Core with Surreal embedded
```csharp
var builder = WebApplication.CreateBuilder(args);
builder.Services.AddSingleton<MyDbContext>();
builder.Services.AddCBDDCCore()
.AddCBDDCBLite<MyDbContext, MyDocumentStore>(sp => new MyDbContext("cbddc.blite"))
.AddCBDDCSurrealEmbedded<MyDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = "data/cbddc.rocksdb",
Namespace = "cbddc",
Database = "main"
})
.AddCBDDCNetwork<MyPeerConfigProvider>();
builder.Services.AddCBDDCHostingSingleCluster(options =>
@@ -737,7 +775,7 @@ Console.WriteLine($"Peers: {status.ConnectedPeers}");
### API
- **[API Reference](docs/api-reference.md)** - Complete API documentation
- **[Persistence Providers](docs/persistence-providers.md)** - BLite, custom
- **[Persistence Providers](docs/persistence-providers.md)** - Surreal embedded RocksDB, custom
---

View File

@@ -9,6 +9,7 @@ CBDDC supports multiple persistence backends to suit different deployment scenar
| **SQLite (Direct)** | Embedded apps, single-node | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐⭐ | ✅ Yes |
| **EF Core (Generic)** | Multi-DB support, migrations | ⭐⭐⭐ | ⭐⭐⭐ | ✅ Yes |
| **PostgreSQL** | Production, high load, JSON queries | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | ✅ Yes |
| **Surreal Embedded (RocksDB)** | Embedded multi-peer sync with local CDC | ⭐⭐⭐⭐ | ⭐⭐⭐⭐ | ✅ Yes |
## SQLite (Direct)
@@ -179,19 +180,68 @@ Host=prod-db.example.com;Database=CBDDC;Username=admin;Password=secret;SSL Mode=
Host=localhost;Database=CBDDC;Username=admin;Password=secret;Pooling=true;Minimum Pool Size=5;Maximum Pool Size=100
```
## Surreal Embedded (RocksDB)
**Package:** `ZB.MOM.WW.CBDDC.Persistence`
### Characteristics
- **Embedded + durable**: Uses local RocksDB storage via Surreal embedded endpoint
- **CDC-native workflow**: Collection watches emit oplog entries and metadata updates
- **Durable checkpointing**: CDC cursor state is persisted per consumer id
- **Restart recovery**: Oplog + checkpoint data survive process restart and resume catch-up
- **Loopback suppression**: Remote apply path suppresses local CDC re-emission
- **Idempotent merge window**: Duplicate remote entries are merged by deterministic hash
### When to Use
- Embedded deployments that still need multi-peer replication
- Edge nodes where local durability is required without an external DB server
- CDC-heavy sync topologies that need restart-safe cursor tracking
- Environments that benefit from document-style storage and local operation logs
### Configuration
```csharp
services.AddCBDDCCore()
.AddCBDDCSurrealEmbedded<SampleDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = "/var/lib/cbddc/node-a.rocksdb",
Namespace = "cbddc",
Database = "node_a",
Cdc = new CBDDCSurrealCdcOptions
{
Enabled = true,
ConsumerId = "sync-main",
CheckpointTable = "cbddc_cdc_checkpoint",
EnableLiveSelectAccelerator = true,
LiveSelectReconnectDelay = TimeSpan.FromSeconds(2)
}
});
```
### CDC Durability Notes
1. **Checkpoint semantics**: each consumer id has an independent durable cursor (`timestamp + hash`).
2. **Catch-up on restart**: read checkpoint, then request oplog entries strictly after the stored timestamp.
3. **Duplicate-window safety**: replayed windows are deduplicated by oplog hash merge semantics.
4. **Delete durability**: deletes persist as oplog delete operations plus tombstone metadata.
5. **Remote apply behavior**: remote sync applies documents without generating local loopback CDC entries.
## Feature Comparison
| Feature | SQLite (Direct) | EF Core | PostgreSQL |
|---------|----------------|---------|------------|
| **Storage Format** | File-based | Varies | Server-based |
| **JSON Storage** | TEXT | NVARCHAR/TEXT | JSONB |
| **JSON Indexing** | Standard | Standard | GIN/GIST |
| **JSON Queries** | `json_extract()` | In-Memory | Native (future) |
| **Concurrent Writes** | Good (WAL) | Varies | Excellent |
| **Horizontal Scaling** | No | Limited | Yes (replication) |
| **Migrations** | Manual SQL | EF Migrations | EF Migrations |
| **Connection Pooling** | N/A | Built-in | Built-in |
| **Cloud Support** | N/A | Varies | Excellent |
| Feature | SQLite (Direct) | EF Core | PostgreSQL | Surreal Embedded |
|---------|----------------|---------|------------|------------------|
| **Storage Format** | File-based | Varies | Server-based | File-based (RocksDB) |
| **JSON Storage** | TEXT | NVARCHAR/TEXT | JSONB | Native document records |
| **JSON Indexing** | Standard | Standard | GIN/GIST | Table/index schema controls |
| **JSON Queries** | `json_extract()` | In-Memory | Native (future) | Native document querying |
| **Concurrent Writes** | Good (WAL) | Varies | Excellent | Good (embedded engine limits apply) |
| **Horizontal Scaling** | No | Limited | Yes (replication) | Peer replication via CBDDC sync |
| **Migrations** | Manual SQL | EF Migrations | EF Migrations | Schema initializer + scripts |
| **Connection Pooling** | N/A | Built-in | Built-in | N/A (embedded) |
| **Cloud Support** | N/A | Varies | Excellent | Excellent for edge/embedded nodes |
## Performance Benchmarks
@@ -251,6 +301,10 @@ _*Benchmarks vary based on hardware, network, and configuration_
- **Use**: PostgreSQL
- **Why**: Best performance, scalability, reliability
### Production (Edge / Embedded Mesh)
- **Use**: Surreal Embedded (RocksDB)
- **Why**: Durable local CDC, restart-safe checkpoint resume, no external DB dependency
### Enterprise
- **Use**: EF Core with SQL Server or PostgreSQL
- **Why**: Enterprise support, compliance, familiarity
@@ -272,6 +326,15 @@ _*Benchmarks vary based on hardware, network, and configuration_
- Check for connection leaks
- Consider connection pooler (PgBouncer)
### Surreal Embedded: "CDC replay after restart"
- Ensure `Cdc.Enabled=true` and a stable `Cdc.ConsumerId` is configured
- Verify checkpoint table contains cursor state for the consumer
- Resume from checkpoint timestamp before requesting new oplog window
### Surreal Embedded: "Unexpected loopback oplog on remote sync"
- Apply remote entries through CBDDC sync/orchestrator paths (not local collection writes)
- Keep remote sync guards enabled in document store implementations
## Future Enhancements
- **JSONB Query Translation**: Native PostgreSQL JSON queries from QueryNode

View File

@@ -3,10 +3,13 @@ using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using Serilog;
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Persistence.Snapshot;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console;
@@ -16,6 +19,8 @@ internal class Program
{
private static async Task Main(string[] args)
{
if (await TryRunMigrationAsync(args)) return;
var builder = Host.CreateApplicationBuilder(args);
// Configuration
@@ -55,11 +60,20 @@ internal class Program
// Database path
string dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data");
Directory.CreateDirectory(dataPath);
string databasePath = Path.Combine(dataPath, $"{nodeId}.blite");
string databasePath = Path.Combine(dataPath, $"{nodeId}.rocksdb");
string surrealDatabase = nodeId.Replace("-", "_", StringComparison.Ordinal);
// Register CBDDC Services using Fluent Extensions with BLite, SampleDbContext, and SampleDocumentStore
// Register CBDDC services with embedded Surreal (RocksDB).
builder.Services.AddSingleton<ICBDDCSurrealSchemaInitializer, SampleSurrealSchemaInitializer>();
builder.Services.AddSingleton<SampleDbContext>();
builder.Services.AddCBDDCCore()
.AddCBDDCBLite<SampleDbContext, SampleDocumentStore>(sp => new SampleDbContext(databasePath))
.AddCBDDCSurrealEmbedded<SampleDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = databasePath,
Namespace = "cbddc_sample",
Database = surrealDatabase
})
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(); // useHostedService = true by default
builder.Services.AddHostedService<ConsoleInteractiveService>(); // Runs the Input Loop
@@ -73,6 +87,107 @@ internal class Program
await host.RunAsync();
}
/// <summary>
/// Runs the one-shot snapshot-migration mode when <c>--migrate-snapshot</c> is present:
/// builds an isolated DI container backed by embedded Surreal (RocksDB), replaces the
/// target database contents with the snapshot, then verifies parity against the snapshot.
/// </summary>
/// <param name="args">Raw command-line arguments, scanned for <c>--migrate-snapshot</c> and <c>--target-db</c>.</param>
/// <returns><c>true</c> when migration mode handled this invocation (caller should skip normal startup); otherwise <c>false</c>.</returns>
/// <exception cref="FileNotFoundException">The snapshot file does not exist.</exception>
private static async Task<bool> TryRunMigrationAsync(string[] args)
{
    int migrateIndex = Array.IndexOf(args, "--migrate-snapshot");
    if (migrateIndex < 0) return false;

    string snapshotPath = GetRequiredArgumentValue(args, migrateIndex, "--migrate-snapshot");
    if (!File.Exists(snapshotPath))
        throw new FileNotFoundException("Snapshot file not found.", snapshotPath);

    // Default target lives under ./data when --target-db is not supplied.
    string targetPath = GetOptionalArgumentValue(args, "--target-db")
        ?? Path.Combine(Directory.GetCurrentDirectory(), "data", "migration.rocksdb");
    Directory.CreateDirectory(Path.GetDirectoryName(Path.GetFullPath(targetPath))!);

    // Minimal static peer configuration: TcpPort 0 and a placeholder token —
    // no networking takes place during a local migration.
    string nodeId = "migration-node";
    var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration
    {
        NodeId = nodeId,
        TcpPort = 0,
        AuthToken = "migration"
    });

    // Unique Surreal database name per run so repeated migrations never collide.
    string databaseName = $"migration_{DateTimeOffset.UtcNow.ToUnixTimeSeconds()}";

    var services = new ServiceCollection();
    services.AddLogging();
    services.AddSingleton<IPeerNodeConfigurationProvider>(configProvider);
    services.AddSingleton<ICBDDCSurrealSchemaInitializer, SampleSurrealSchemaInitializer>();
    services.AddSingleton<SampleDbContext>();
    services.AddCBDDCCore()
        .AddCBDDCSurrealEmbedded<SampleDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
        {
            Endpoint = "rocksdb://local",
            DatabasePath = targetPath,
            Namespace = "cbddc_migration",
            Database = databaseName
        });

    using var provider = services.BuildServiceProvider();
    var snapshotService = provider.GetRequiredService<ISnapshotService>();

    // Stream the snapshot straight into the store; the stream is disposed before verification re-reads the file.
    await using (var snapshotStream = File.OpenRead(snapshotPath))
    {
        await snapshotService.ReplaceDatabaseAsync(snapshotStream);
    }

    await VerifyMigrationAsync(provider, snapshotPath);
    System.Console.WriteLine($"Migration completed successfully to: {targetPath}");
    return true;
}
/// <summary>
/// Verifies that the migrated store matches the source snapshot: compares record counts
/// per store (documents, oplog, peers, confirmations) and spot-checks the first and last
/// oplog entries by hash.
/// </summary>
/// <param name="provider">DI container configured by the migration run; supplies the destination stores.</param>
/// <param name="snapshotPath">Path of the source snapshot file, re-read for comparison.</param>
/// <exception cref="InvalidOperationException">Counts diverge or an oplog hash lookup fails.</exception>
private static async Task VerifyMigrationAsync(IServiceProvider provider, string snapshotPath)
{
    await using var snapshotStream = File.OpenRead(snapshotPath);
    var source = await JsonSerializer.DeserializeAsync<SnapshotDto>(snapshotStream)
        ?? throw new InvalidOperationException("Unable to deserialize source snapshot.");

    var documentStore = provider.GetRequiredService<IDocumentStore>();
    var oplogStore = provider.GetRequiredService<IOplogStore>();
    var peerStore = provider.GetRequiredService<IPeerConfigurationStore>();
    // Confirmation store is optional; treat its absence as zero migrated confirmations.
    var confirmationStore = provider.GetService<IPeerOplogConfirmationStore>();

    int destinationDocuments = (await documentStore.ExportAsync()).Count();
    int destinationOplog = (await oplogStore.ExportAsync()).Count();
    int destinationPeers = (await peerStore.ExportAsync()).Count();
    int destinationConfirmations = confirmationStore == null
        ? 0
        : (await confirmationStore.ExportAsync()).Count();

    // Count parity across every migrated store.
    if (destinationDocuments != source.Documents.Count ||
        destinationOplog != source.Oplog.Count ||
        destinationPeers != source.RemotePeers.Count ||
        destinationConfirmations != source.PeerConfirmations.Count)
        throw new InvalidOperationException("Snapshot parity verification failed after migration.");

    // Spot-check content (not just counts): the first and last source oplog hashes
    // must resolve to entries in the destination store.
    if (source.Oplog.Count > 0)
    {
        string firstHash = source.Oplog[0].Hash;
        string lastHash = source.Oplog[^1].Hash;

        var firstEntry = await oplogStore.GetEntryByHashAsync(firstHash);
        var lastEntry = await oplogStore.GetEntryByHashAsync(lastHash);

        if (firstEntry == null || lastEntry == null)
            throw new InvalidOperationException("Oplog hash spot-check failed after migration.");
    }
}
/// <summary>
/// Returns the value token immediately following the option at <paramref name="optionIndex"/>.
/// </summary>
/// <param name="args">Raw command-line arguments.</param>
/// <param name="optionIndex">Index of the option token itself, or negative when absent.</param>
/// <param name="optionName">Option name, used only for the error message.</param>
/// <returns>The value that follows the option.</returns>
/// <exception cref="ArgumentException">The option is absent, has no following token, or the next token is another option.</exception>
private static string GetRequiredArgumentValue(string[] args, int optionIndex, string optionName)
{
    // A following token that itself starts with "--" is another option, not a value.
    // Ordinal comparison: option prefixes are machine syntax, not linguistic text (CA1310).
    if (optionIndex < 0 || optionIndex + 1 >= args.Length || args[optionIndex + 1].StartsWith("--", StringComparison.Ordinal))
        throw new ArgumentException($"Missing value for {optionName}.");
    return args[optionIndex + 1];
}
/// <summary>
/// Returns the value of an optional command-line option, or <c>null</c> when the option is not present.
/// </summary>
/// <param name="args">Raw command-line arguments.</param>
/// <param name="optionName">Option token to search for (e.g. <c>--target-db</c>).</param>
/// <returns>The value following the option, or <c>null</c> when the option is absent.</returns>
/// <exception cref="ArgumentException">The option is present but has no value token after it.</exception>
private static string? GetOptionalArgumentValue(string[] args, string optionName)
{
    int index = Array.IndexOf(args, optionName);
    if (index < 0) return null;

    // Ordinal comparison for the "--" option prefix — machine syntax, not linguistic text (CA1310).
    if (index + 1 >= args.Length || args[index + 1].StartsWith("--", StringComparison.Ordinal))
        throw new ArgumentException($"Missing value for {optionName}.");
    return args[index + 1];
}
private class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider
{
/// <summary>

View File

@@ -58,6 +58,14 @@ dotnet run -- --node-id node3 --tcp-port 5003 --udp-port 6003
Changes made on any node will automatically sync to all peers!
### Import Snapshot Into Surreal (Migration Utility)
```bash
dotnet run -- --migrate-snapshot /path/to/snapshot.json --target-db /path/to/data.rocksdb
```
This imports a CBDDC snapshot into embedded Surreal RocksDB and validates parity (counts plus oplog hash spot checks).
## Available Commands
| Command | Description |
@@ -149,7 +157,7 @@ var page = await users.Find(u => true, skip: 10, take: 5);
## Architecture
- **Storage**: SQLite with HLC timestamps
- **Storage**: Surreal embedded RocksDB with HLC timestamps
- **Sync**: TCP for data transfer, UDP for discovery
- **Conflict Resolution**: Last-Write-Wins based on Hybrid Logical Clocks
- **Serialization**: System.Text.Json

View File

@@ -1,50 +1,299 @@
using BLite.Core.Collections;
using BLite.Core.Metadata;
using BLite.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using System.Text.Json.Serialization;
using System.Security.Cryptography;
using System.Text;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console;
public class SampleDbContext : CBDDCDocumentDbContext
public class SampleDbContext : IDisposable
{
/// <summary>
/// Initializes a new instance of the SampleDbContext class using the specified database file path.
/// </summary>
/// <param name="databasePath">The file system path to the database file. Cannot be null or empty.</param>
public SampleDbContext(string databasePath) : base(databasePath)
private const string UsersTable = "sample_users";
private const string TodoListsTable = "sample_todo_lists";
private readonly bool _ownsClient;
public SampleDbContext(
ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
ICBDDCSurrealSchemaInitializer schemaInitializer)
{
SurrealEmbeddedClient = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
SchemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
Users = new SampleSurrealCollection<User>(UsersTable, u => u.Id, SurrealEmbeddedClient, SchemaInitializer);
TodoLists = new SampleSurrealCollection<TodoList>(TodoListsTable, t => t.Id, SurrealEmbeddedClient, SchemaInitializer);
OplogEntries = new SampleSurrealReadOnlyCollection<SampleOplogEntry>(
CBDDCSurrealSchemaNames.OplogEntriesTable,
SurrealEmbeddedClient,
SchemaInitializer);
}
/// <summary>
/// Initializes a new instance of the SampleDbContext class using the specified database file path and page file
/// configuration.
/// </summary>
/// <param name="databasePath">The file system path to the database file. Cannot be null or empty.</param>
/// <param name="config">The configuration settings for the page file. Cannot be null.</param>
public SampleDbContext(string databasePath, PageFileConfig config) : base(databasePath, config)
public SampleDbContext(string databasePath)
{
}
/// <summary>
/// Gets the users collection.
/// </summary>
public DocumentCollection<string, User> Users { get; private set; } = null!;
/// <summary>
/// Gets the todo lists collection.
/// </summary>
public DocumentCollection<string, TodoList> TodoLists { get; private set; } = null!;
/// <inheritdoc />
protected override void OnModelCreating(ModelBuilder modelBuilder)
string normalizedPath = NormalizeDatabasePath(databasePath);
string suffix = ComputeDeterministicSuffix(normalizedPath);
var options = new CBDDCSurrealEmbeddedOptions
{
base.OnModelCreating(modelBuilder);
modelBuilder.Entity<User>()
.ToCollection("Users")
.HasKey(u => u.Id);
Endpoint = "rocksdb://local",
DatabasePath = normalizedPath,
Namespace = $"cbddc_sample_{suffix}",
Database = $"main_{suffix}"
};
modelBuilder.Entity<TodoList>()
.ToCollection("TodoLists")
.HasKey(t => t.Id);
SurrealEmbeddedClient = new CBDDCSurrealEmbeddedClient(options);
_ownsClient = true;
SchemaInitializer = new SampleSurrealSchemaInitializer(SurrealEmbeddedClient);
Users = new SampleSurrealCollection<User>(UsersTable, u => u.Id, SurrealEmbeddedClient, SchemaInitializer);
TodoLists = new SampleSurrealCollection<TodoList>(TodoListsTable, t => t.Id, SurrealEmbeddedClient, SchemaInitializer);
OplogEntries = new SampleSurrealReadOnlyCollection<SampleOplogEntry>(
CBDDCSurrealSchemaNames.OplogEntriesTable,
SurrealEmbeddedClient,
SchemaInitializer);
}
public ICBDDCSurrealEmbeddedClient SurrealEmbeddedClient { get; }
public ICBDDCSurrealSchemaInitializer SchemaInitializer { get; private set; }
public SampleSurrealCollection<User> Users { get; private set; }
public SampleSurrealCollection<TodoList> TodoLists { get; private set; }
public SampleSurrealReadOnlyCollection<SampleOplogEntry> OplogEntries { get; private set; }
public async Task SaveChangesAsync(CancellationToken cancellationToken = default)
{
await SchemaInitializer.EnsureInitializedAsync(cancellationToken);
}
public void Dispose()
{
Users.Dispose();
TodoLists.Dispose();
if (_ownsClient) SurrealEmbeddedClient.Dispose();
}
private static string NormalizeDatabasePath(string databasePath)
{
if (string.IsNullOrWhiteSpace(databasePath))
throw new ArgumentException("Database path is required.", nameof(databasePath));
return Path.GetFullPath(databasePath);
}
private static string ComputeDeterministicSuffix(string value)
{
byte[] bytes = SHA256.HashData(Encoding.UTF8.GetBytes(value));
return Convert.ToHexString(bytes).ToLowerInvariant()[..12];
}
}
/// <summary>
/// Lazily initializes the embedded Surreal client and defines the sample tables.
/// Initialization is single-flight: concurrent first callers are serialized so the
/// client is initialized and the schema DDL executed exactly once per instance.
/// (The original flag check raced — multiple concurrent callers could all pass the
/// fast-path test and run InitializeAsync/RawQueryAsync simultaneously.)
/// </summary>
public sealed class SampleSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer
{
    private const string SampleSchemaSql = """
        DEFINE TABLE OVERWRITE sample_users SCHEMALESS CHANGEFEED 7d;
        DEFINE TABLE OVERWRITE sample_todo_lists SCHEMALESS CHANGEFEED 7d;
        """;

    private readonly ICBDDCSurrealEmbeddedClient _client;
    private readonly SemaphoreSlim _gate = new(1, 1);
    private volatile bool _initialized;

    public SampleSurrealSchemaInitializer(ICBDDCSurrealEmbeddedClient client)
    {
        _client = client ?? throw new ArgumentNullException(nameof(client));
    }

    /// <summary>
    /// Ensures the client is initialized and the sample schema is defined.
    /// Safe to call repeatedly and from multiple threads; after the first successful
    /// run every subsequent call returns on the lock-free fast path.
    /// </summary>
    public async Task EnsureInitializedAsync(CancellationToken cancellationToken = default)
    {
        if (_initialized) return;

        await _gate.WaitAsync(cancellationToken);
        try
        {
            // Double-check inside the gate: a concurrent caller may have finished first.
            if (_initialized) return;

            await _client.InitializeAsync(cancellationToken);
            await _client.RawQueryAsync(SampleSchemaSql, cancellationToken: cancellationToken);

            // Only mark done after both steps succeed, so a failed attempt is retried.
            _initialized = true;
        }
        finally
        {
            _gate.Release();
        }
    }
}
/// <summary>
/// Watchable CRUD wrapper over a single Surreal table. Entities are stored inside a
/// <see cref="SampleEntityRecord{TEntity}"/> envelope keyed by a caller-supplied string id,
/// and every successful write is published to subscribers through an internal change feed.
/// Schema initialization is ensured lazily before each data operation.
/// </summary>
public sealed class SampleSurrealCollection<TEntity> : ISurrealWatchableCollection<TEntity>, IDisposable
    where TEntity : class
{
    private readonly string _tableName;
    private readonly Func<TEntity, string> _keySelector;
    private readonly ISurrealDbClient _client;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly SurrealCollectionChangeFeed<TEntity> _changeFeed = new();

    public SampleSurrealCollection(
        string tableName,
        Func<TEntity, string> keySelector,
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer)
    {
        if (string.IsNullOrWhiteSpace(tableName))
            throw new ArgumentException("Table name is required.", nameof(tableName));

        _tableName = tableName;
        _keySelector = keySelector ?? throw new ArgumentNullException(nameof(keySelector));
        _client = (surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient))).Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
    }

    /// <summary>Subscribes an observer to put/delete notifications for this collection.</summary>
    public IDisposable Subscribe(IObserver<SurrealCollectionChange<TEntity>> observer)
        => _changeFeed.Subscribe(observer);

    /// <summary>Inserts an entity; implemented as an upsert on the entity's key.</summary>
    public Task InsertAsync(TEntity entity, CancellationToken cancellationToken = default)
        => UpsertAsync(entity, cancellationToken);

    /// <summary>Updates an entity; implemented as an upsert on the entity's key.</summary>
    public Task UpdateAsync(TEntity entity, CancellationToken cancellationToken = default)
        => UpsertAsync(entity, cancellationToken);

    /// <summary>Deletes the record with the given id and publishes a delete change.</summary>
    public async Task DeleteAsync(string id, CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(id))
            throw new ArgumentException("Document id is required.", nameof(id));

        await EnsureReadyAsync(cancellationToken);
        await _client.Delete(RecordId.From(_tableName, id), cancellationToken);
        _changeFeed.PublishDelete(id);
    }

    /// <summary>Synchronous bridge over <see cref="FindByIdAsync"/>.</summary>
    public TEntity? FindById(string id)
        => FindByIdAsync(id).GetAwaiter().GetResult();

    /// <summary>Loads the entity stored under <paramref name="id"/>, or null when absent.</summary>
    public async Task<TEntity?> FindByIdAsync(string id, CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(id))
            throw new ArgumentException("Document id is required.", nameof(id));

        await EnsureReadyAsync(cancellationToken);
        var row = await _client.Select<SampleEntityRecord<TEntity>>(RecordId.From(_tableName, id), cancellationToken);
        return row?.Entity;
    }

    /// <summary>Synchronous bridge over <see cref="FindAllAsync"/>.</summary>
    public IEnumerable<TEntity> FindAll()
        => FindAllAsync().GetAwaiter().GetResult();

    /// <summary>Reads every record in the table, skipping envelopes with a null payload.</summary>
    public async Task<IReadOnlyList<TEntity>> FindAllAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);

        var rows = await _client.Select<SampleEntityRecord<TEntity>>(_tableName, cancellationToken);
        if (rows is null) return [];

        List<TEntity> entities = [];
        foreach (var row in rows)
        {
            if (row.Entity is not null) entities.Add(row.Entity);
        }
        return entities;
    }

    /// <summary>Loads all entities and filters them in memory with <paramref name="predicate"/>.</summary>
    public IEnumerable<TEntity> Find(Func<TEntity, bool> predicate)
    {
        ArgumentNullException.ThrowIfNull(predicate);
        return FindAll().Where(predicate);
    }

    public void Dispose() => _changeFeed.Dispose();

    // Shared write path for insert/update: upsert the envelope, then publish a put change.
    private async Task UpsertAsync(TEntity entity, CancellationToken cancellationToken)
    {
        ArgumentNullException.ThrowIfNull(entity);

        string recordKey = _keySelector(entity);
        if (string.IsNullOrWhiteSpace(recordKey))
            throw new InvalidOperationException("Entity key cannot be null or empty.");

        await EnsureReadyAsync(cancellationToken);

        var envelope = new SampleEntityRecord<TEntity> { Entity = entity };
        await _client.Upsert<SampleEntityRecord<TEntity>, SampleEntityRecord<TEntity>>(
            RecordId.From(_tableName, recordKey),
            envelope,
            cancellationToken);

        _changeFeed.PublishPut(entity, recordKey);
    }

    // Every data operation funnels through here so schema setup happens exactly when needed.
    private Task EnsureReadyAsync(CancellationToken cancellationToken)
        => _schemaInitializer.EnsureInitializedAsync(cancellationToken);
}
/// <summary>
/// Read-only view over a Surreal table: full-table enumeration (sync and async) plus
/// in-memory predicate filtering. Schema initialization is ensured lazily before reads.
/// </summary>
public sealed class SampleSurrealReadOnlyCollection<TEntity>
    where TEntity : class
{
    private readonly string _tableName;
    private readonly ISurrealDbClient _client;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;

    public SampleSurrealReadOnlyCollection(
        string tableName,
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer)
    {
        if (string.IsNullOrWhiteSpace(tableName))
            throw new ArgumentException("Table name is required.", nameof(tableName));

        _tableName = tableName;
        _client = (surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient))).Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
    }

    /// <summary>Synchronous bridge over <see cref="FindAllAsync"/>.</summary>
    public IEnumerable<TEntity> FindAll()
        => FindAllAsync().GetAwaiter().GetResult();

    /// <summary>Reads every row of the table after ensuring the schema exists.</summary>
    public async Task<IReadOnlyList<TEntity>> FindAllAsync(CancellationToken cancellationToken = default)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);

        var rows = await _client.Select<TEntity>(_tableName, cancellationToken);
        if (rows is null) return [];

        List<TEntity> results = [];
        foreach (var row in rows) results.Add(row);
        return results;
    }

    /// <summary>Loads all rows and filters them in memory with <paramref name="predicate"/>.</summary>
    public IEnumerable<TEntity> Find(Func<TEntity, bool> predicate)
    {
        ArgumentNullException.ThrowIfNull(predicate);
        return FindAll().Where(predicate);
    }
}
/// <summary>
/// Surreal row wrapper that stores an application entity under a single
/// "entity" field; used by the writable sample collection.
/// </summary>
/// <typeparam name="TEntity">The entity type persisted in the record.</typeparam>
public sealed class SampleEntityRecord<TEntity> : Record
    where TEntity : class
{
    // Wrapped payload; null when the stored document carries no "entity" field.
    [JsonPropertyName("entity")]
    public TEntity? Entity { get; set; }
}
/// <summary>
/// Surreal record shape for a single oplog entry, with the timestamp flattened
/// into node-id / physical-time / logical-counter fields (hybrid-logical-clock
/// style, matching HlcTimestamp elsewhere in the codebase).
/// </summary>
public sealed class SampleOplogEntry : Record
{
    // Logical collection the operation applies to.
    [JsonPropertyName("collection")]
    public string Collection { get; set; } = "";
    // Key of the affected document within the collection.
    [JsonPropertyName("key")]
    public string Key { get; set; } = "";
    // Operation discriminator stored as its integer enum value.
    [JsonPropertyName("operation")]
    public int Operation { get; set; }
    // Originating node of the timestamp.
    [JsonPropertyName("timestampNodeId")]
    public string TimestampNodeId { get; set; } = "";
    // Physical (wall-clock) component of the timestamp.
    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }
    // Logical counter that breaks ties within one physical tick.
    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }
    // Content hash of the entry; semantics defined by the oplog producer.
    [JsonPropertyName("hash")]
    public string Hash { get; set; } = "";
}

View File

@@ -3,90 +3,125 @@ using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console;
/// <summary>
/// Document store implementation for CBDDC Sample using BLite persistence.
/// Extends BLiteDocumentStore to automatically handle Oplog creation via CDC.
/// Surreal-backed document store for the sample app.
/// </summary>
public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
public class SampleDocumentStore : SurrealDocumentStore<SampleDbContext>
{
private const string UsersCollection = "Users";
private const string TodoListsCollection = "TodoLists";
/// <summary>
/// Initializes a new instance of the <see cref="SampleDocumentStore" /> class.
/// </summary>
/// <param name="context">The sample database context.</param>
/// <param name="configProvider">The peer node configuration provider.</param>
/// <param name="vectorClockService">The vector clock service.</param>
/// <param name="logger">The optional logger instance.</param>
public SampleDocumentStore(
SampleDbContext context,
IPeerNodeConfigurationProvider configProvider,
IVectorClockService vectorClockService,
ILogger<SampleDocumentStore>? logger = null)
: base(context, configProvider, vectorClockService, new LastWriteWinsConflictResolver(), logger)
: base(
context,
context.SurrealEmbeddedClient,
context.SchemaInitializer,
configProvider,
vectorClockService,
new LastWriteWinsConflictResolver(),
null,
null,
logger)
{
// Register CDC watchers for local change detection
// InterestedCollection is automatically populated
WatchCollection(UsersCollection, context.Users, u => u.Id);
WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id);
}
#region Helper Methods
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
if (entity == null) return null;
return JsonSerializer.SerializeToElement(entity);
}
#endregion
#region Abstract Method Implementations
/// <inheritdoc />
protected override async Task ApplyContentToEntityAsync(
string collection, string key, JsonElement content, CancellationToken cancellationToken)
string collection,
string key,
JsonElement content,
CancellationToken cancellationToken)
{
UpsertEntity(collection, key, content);
await _context.SaveChangesAsync(cancellationToken);
await UpsertEntityAsync(collection, key, content, cancellationToken);
}
/// <inheritdoc />
protected override async Task ApplyContentToEntitiesBatchAsync(
IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
CancellationToken cancellationToken)
{
foreach ((string collection, string key, var content) in documents) UpsertEntity(collection, key, content);
await _context.SaveChangesAsync(cancellationToken);
foreach ((string collection, string key, var content) in documents)
await UpsertEntityAsync(collection, key, content, cancellationToken);
}
private void UpsertEntity(string collection, string key, JsonElement content)
protected override async Task<JsonElement?> GetEntityAsJsonAsync(
string collection,
string key,
CancellationToken cancellationToken)
{
return collection switch
{
UsersCollection => SerializeEntity(await _context.Users.FindByIdAsync(key, cancellationToken)),
TodoListsCollection => SerializeEntity(await _context.TodoLists.FindByIdAsync(key, cancellationToken)),
_ => null
};
}
protected override async Task RemoveEntityAsync(
string collection,
string key,
CancellationToken cancellationToken)
{
await DeleteEntityAsync(collection, key, cancellationToken);
}
protected override async Task RemoveEntitiesBatchAsync(
IEnumerable<(string Collection, string Key)> documents,
CancellationToken cancellationToken)
{
foreach ((string collection, string key) in documents)
await DeleteEntityAsync(collection, key, cancellationToken);
}
protected override async Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
string collection,
CancellationToken cancellationToken)
{
return collection switch
{
UsersCollection => (await _context.Users.FindAllAsync(cancellationToken))
.Select(u => (u.Id, SerializeEntity(u)!.Value))
.ToList(),
TodoListsCollection => (await _context.TodoLists.FindAllAsync(cancellationToken))
.Select(t => (t.Id, SerializeEntity(t)!.Value))
.ToList(),
_ => []
};
}
private async Task UpsertEntityAsync(
string collection,
string key,
JsonElement content,
CancellationToken cancellationToken)
{
switch (collection)
{
case UsersCollection:
var user = content.Deserialize<User>()!;
var user = content.Deserialize<User>() ?? throw new InvalidOperationException("Failed to deserialize user.");
user.Id = key;
var existingUser = _context.Users.Find(u => u.Id == key).FirstOrDefault();
if (existingUser != null)
_context.Users.Update(user);
if (await _context.Users.FindByIdAsync(key, cancellationToken) == null)
await _context.Users.InsertAsync(user, cancellationToken);
else
_context.Users.Insert(user);
await _context.Users.UpdateAsync(user, cancellationToken);
break;
case TodoListsCollection:
var todoList = content.Deserialize<TodoList>()!;
todoList.Id = key;
var existingTodoList = _context.TodoLists.Find(t => t.Id == key).FirstOrDefault();
if (existingTodoList != null)
_context.TodoLists.Update(todoList);
var todo = content.Deserialize<TodoList>() ??
throw new InvalidOperationException("Failed to deserialize todo list.");
todo.Id = key;
if (await _context.TodoLists.FindByIdAsync(key, cancellationToken) == null)
await _context.TodoLists.InsertAsync(todo, cancellationToken);
else
_context.TodoLists.Insert(todoList);
await _context.TodoLists.UpdateAsync(todo, cancellationToken);
break;
default:
@@ -94,43 +129,15 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
}
}
/// <inheritdoc />
protected override Task<JsonElement?> GetEntityAsJsonAsync(
string collection, string key, CancellationToken cancellationToken)
{
return Task.FromResult(collection switch
{
UsersCollection => SerializeEntity(_context.Users.Find(u => u.Id == key).FirstOrDefault()),
TodoListsCollection => SerializeEntity(_context.TodoLists.Find(t => t.Id == key).FirstOrDefault()),
_ => null
});
}
/// <inheritdoc />
protected override async Task RemoveEntityAsync(
string collection, string key, CancellationToken cancellationToken)
{
DeleteEntity(collection, key);
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
protected override async Task RemoveEntitiesBatchAsync(
IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken)
{
foreach ((string collection, string key) in documents) DeleteEntity(collection, key);
await _context.SaveChangesAsync(cancellationToken);
}
private void DeleteEntity(string collection, string key)
private async Task DeleteEntityAsync(string collection, string key, CancellationToken cancellationToken)
{
switch (collection)
{
case UsersCollection:
_context.Users.Delete(key);
await _context.Users.DeleteAsync(key, cancellationToken);
break;
case TodoListsCollection:
_context.TodoLists.Delete(key);
await _context.TodoLists.DeleteAsync(key, cancellationToken);
break;
default:
_logger.LogWarning("Attempted to remove entity from unsupported collection: {Collection}", collection);
@@ -138,21 +145,8 @@ public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
}
}
/// <inheritdoc />
protected override async Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
string collection, CancellationToken cancellationToken)
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
return await Task.Run(() => collection switch
{
UsersCollection => _context.Users.FindAll()
.Select(u => (u.Id, SerializeEntity(u)!.Value)),
TodoListsCollection => _context.TodoLists.FindAll()
.Select(t => (t.Id, SerializeEntity(t)!.Value)),
_ => Enumerable.Empty<(string, JsonElement)>()
}, cancellationToken);
return entity == null ? null : JsonSerializer.SerializeToElement(entity);
}
#endregion
}

View File

@@ -2,20 +2,16 @@
<ItemGroup>
<PackageReference Include="Lifter.Core" Version="1.1.0"/>
<PackageReference Include="BLite.SourceGenerators" Version="1.3.1">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
</PackageReference>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Network\ZB.MOM.WW.CBDDC.Network.csproj"/>
<ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj"/>
</ItemGroup>
<ItemGroup>
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.0"/>
<PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="9.0.4"/>
<PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="9.0.4"/>
<PackageReference Include="Microsoft.Extensions.DependencyInjection" Version="9.0.4"/>
<PackageReference Include="Microsoft.Extensions.Hosting" Version="9.0.4"/>
<PackageReference Include="Serilog" Version="4.2.0"/>
<PackageReference Include="Serilog.Extensions.Hosting" Version="9.0.0"/>
<PackageReference Include="Serilog.Sinks.Console" Version="6.0.0"/>

View File

@@ -4,7 +4,7 @@ Core abstractions and logic for **CBDDC**, a peer-to-peer data synchronization m
## What Is CBDDC?
CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (BLite) and enables automatic
CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (for example SurrealDB) and enables automatic
P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC
handles synchronization in the background.
@@ -28,19 +28,18 @@ dotnet add package ZB.MOM.WW.CBDDC.Network
## Quick Start
```csharp
// 1. Define your DbContext
public class MyDbContext : CBDDCDocumentDbContext
// 1. Define your context exposing watchable collections
public class MyDbContext
{
public DocumentCollection<string, User> Users { get; private set; }
public MyDbContext(string path) : base(path) { }
public MySurrealCollection<User> Users { get; }
}
// 2. Create your DocumentStore (the sync bridge)
public class MyDocumentStore : BLiteDocumentStore<MyDbContext>
public class MyDocumentStore : SurrealDocumentStore<MyDbContext>
{
public MyDocumentStore(MyDbContext ctx, IPeerNodeConfigurationProvider cfg,
IVectorClockService vc, ILogger<MyDocumentStore>? log = null)
: base(ctx, cfg, vc, logger: log)
: base(ctx, ctx.SurrealEmbeddedClient, ctx.SchemaInitializer, cfg, vc, logger: log)
{
WatchCollection("Users", ctx.Users, u => u.Id);
}
@@ -50,18 +49,23 @@ public class MyDocumentStore : BLiteDocumentStore<MyDbContext>
{
var user = content.Deserialize<User>()!;
user.Id = key;
var existing = _context.Users.Find(u => u.Id == key).FirstOrDefault();
if (existing != null) _context.Users.Update(user);
else _context.Users.Insert(user);
await _context.SaveChangesAsync(ct);
if (await _context.Users.FindByIdAsync(key, ct) is null)
await _context.Users.InsertAsync(user, ct);
else
await _context.Users.UpdateAsync(user, ct);
}
// ... implement other abstract methods
}
// 3. Register and use
builder.Services.AddCBDDCCore()
.AddCBDDCBLite<MyDbContext, MyDocumentStore>(
sp => new MyDbContext("data.blite"))
.AddCBDDCSurrealEmbedded<MyDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = "data/cbddc.rocksdb",
Namespace = "cbddc",
Database = "main"
})
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>();
```
@@ -93,7 +97,7 @@ Your App ? DbContext.SaveChangesAsync()
## Related Packages
- **ZB.MOM.WW.CBDDC.Persistence** — BLite embedded provider (.NET 10+)
- **ZB.MOM.WW.CBDDC.Persistence** — Surreal embedded RocksDB provider (.NET 10+)
- **ZB.MOM.WW.CBDDC.Network** — P2P networking (UDP discovery, TCP sync, Gossip)
## Documentation

View File

@@ -20,10 +20,15 @@ dotnet add package ZB.MOM.WW.CBDDC.Hosting
```csharp
var builder = WebApplication.CreateBuilder(args);
// Add CBDDC core + BLite persistence (custom DbContext + DocumentStore required)
// Add CBDDC core + Surreal embedded persistence (custom DocumentStore required)
builder.Services.AddCBDDCCore()
.AddCBDDCBLite<MyDbContext, MyDocumentStore>(
sp => new MyDbContext("/var/lib/cbddc/data.blite"));
.AddCBDDCSurrealEmbedded<MyDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = "/var/lib/cbddc/data.rocksdb",
Namespace = "cbddc",
Database = "main"
});
// Add ASP.NET integration (cluster mode)
builder.Services.AddCBDDCHosting(options =>
@@ -80,10 +85,10 @@ CBDDC servers operate in respond-only mode:
## Production Checklist
- Store BLite database files on durable storage in production
- Store Surreal RocksDB data files on durable storage in production
- Configure health checks for load balancer
- Set up proper logging and monitoring
- Configure backup/restore for BLite database files
- Configure backup/restore for Surreal RocksDB data files
- Configure proper firewall rules for TCP port
- Set unique NodeId per instance
- Test failover scenarios

View File

@@ -1,238 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// BLite implementation of document metadata storage for sync tracking.
/// Reads are synchronous BLite queries surfaced as completed tasks; writes go
/// through the context and are flushed with SaveChangesAsync.
/// </summary>
/// <typeparam name="TDbContext">The type of CBDDCDocumentDbContext.</typeparam>
public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore where TDbContext : CBDDCDocumentDbContext
{
    private readonly TDbContext _context;
    private readonly ILogger<BLiteDocumentMetadataStore<TDbContext>> _logger;

    /// <summary>
    /// Initializes a new instance of the <see cref="BLiteDocumentMetadataStore{TDbContext}" /> class.
    /// </summary>
    /// <param name="context">The BLite document database context.</param>
    /// <param name="logger">The optional logger instance.</param>
    public BLiteDocumentMetadataStore(TDbContext context,
        ILogger<BLiteDocumentMetadataStore<TDbContext>>? logger = null)
    {
        _context = context ?? throw new ArgumentNullException(nameof(context));
        _logger = logger ?? NullLogger<BLiteDocumentMetadataStore<TDbContext>>.Instance;
    }

    /// <inheritdoc />
    public override Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
        CancellationToken cancellationToken = default)
    {
        // BLite lookups are synchronous; return a completed task instead of an
        // 'async' method with no 'await' (avoids CS1998 and the state-machine cost).
        var entity = _context.DocumentMetadatas
            .Find(m => m.Collection == collection && m.Key == key)
            .FirstOrDefault();
        return Task.FromResult<DocumentMetadata?>(entity != null ? ToDomain(entity) : null);
    }

    /// <inheritdoc />
    public override Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
        CancellationToken cancellationToken = default)
    {
        IEnumerable<DocumentMetadata> results = _context.DocumentMetadatas
            .Find(m => m.Collection == collection)
            .Select(ToDomain)
            .ToList();
        return Task.FromResult(results);
    }

    /// <inheritdoc />
    public override async Task UpsertMetadataAsync(DocumentMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        await UpsertCoreAsync(metadata);
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
        CancellationToken cancellationToken = default)
    {
        foreach (var metadata in metadatas) await UpsertCoreAsync(metadata);
        // Single flush for the whole batch.
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <summary>
    /// Inserts or updates one metadata row without flushing the context.
    /// Shared by the single and batch upsert paths.
    /// </summary>
    private async Task UpsertCoreAsync(DocumentMetadata metadata)
    {
        var existing = _context.DocumentMetadatas
            .Find(m => m.Collection == metadata.Collection && m.Key == metadata.Key)
            .FirstOrDefault();
        if (existing == null)
        {
            await _context.DocumentMetadatas.InsertAsync(ToEntity(metadata));
        }
        else
        {
            existing.HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime;
            existing.HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter;
            existing.HlcNodeId = metadata.UpdatedAt.NodeId;
            existing.IsDeleted = metadata.IsDeleted;
            await _context.DocumentMetadatas.UpdateAsync(existing);
        }
    }

    /// <inheritdoc />
    public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
        CancellationToken cancellationToken = default)
    {
        var existing = _context.DocumentMetadatas
            .Find(m => m.Collection == collection && m.Key == key)
            .FirstOrDefault();
        if (existing == null)
        {
            // No row yet: insert a tombstone so peers still learn about the delete.
            await _context.DocumentMetadatas.InsertAsync(new DocumentMetadataEntity
            {
                Id = Guid.NewGuid().ToString(),
                Collection = collection,
                Key = key,
                HlcPhysicalTime = timestamp.PhysicalTime,
                HlcLogicalCounter = timestamp.LogicalCounter,
                HlcNodeId = timestamp.NodeId,
                IsDeleted = true
            });
        }
        else
        {
            existing.HlcPhysicalTime = timestamp.PhysicalTime;
            existing.HlcLogicalCounter = timestamp.LogicalCounter;
            existing.HlcNodeId = timestamp.NodeId;
            existing.IsDeleted = true;
            await _context.DocumentMetadatas.UpdateAsync(existing);
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        // HLC ordering: strictly-greater physical time, or equal physical time
        // with a strictly-greater logical counter.
        var query = _context.DocumentMetadatas.AsQueryable()
            .Where(m => m.HlcPhysicalTime > since.PhysicalTime ||
                        (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter));
        if (collections != null)
        {
            var collectionSet = new HashSet<string>(collections);
            query = query.Where(m => collectionSet.Contains(m.Collection));
        }
        IEnumerable<DocumentMetadata> results = query
            .OrderBy(m => m.HlcPhysicalTime)
            .ThenBy(m => m.HlcLogicalCounter)
            .Select(ToDomain)
            .ToList();
        return Task.FromResult(results);
    }

    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        var allIds = _context.DocumentMetadatas.FindAll().Select(m => m.Id).ToList();
        await _context.DocumentMetadatas.DeleteBulkAsync(allIds);
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override Task<IEnumerable<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        IEnumerable<DocumentMetadata> snapshot = _context.DocumentMetadatas.FindAll().Select(ToDomain).ToList();
        return Task.FromResult(snapshot);
    }

    /// <inheritdoc />
    public override async Task ImportAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        // Import inserts blindly; use MergeAsync when existing rows must be reconciled.
        foreach (var item in items) await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task MergeAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = _context.DocumentMetadatas
                .Find(m => m.Collection == item.Collection && m.Key == item.Key)
                .FirstOrDefault();
            if (existing == null)
            {
                await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
            }
            else
            {
                // Update only if incoming is newer per HLC comparison.
                var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter,
                    existing.HlcNodeId);
                if (item.UpdatedAt.CompareTo(existingTs) > 0)
                {
                    existing.HlcPhysicalTime = item.UpdatedAt.PhysicalTime;
                    existing.HlcLogicalCounter = item.UpdatedAt.LogicalCounter;
                    existing.HlcNodeId = item.UpdatedAt.NodeId;
                    existing.IsDeleted = item.IsDeleted;
                    await _context.DocumentMetadatas.UpdateAsync(existing);
                }
            }
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    #region Mappers

    /// <summary>Maps a persisted entity to the domain metadata model.</summary>
    private static DocumentMetadata ToDomain(DocumentMetadataEntity entity)
    {
        return new DocumentMetadata(
            entity.Collection,
            entity.Key,
            new HlcTimestamp(entity.HlcPhysicalTime, entity.HlcLogicalCounter, entity.HlcNodeId),
            entity.IsDeleted
        );
    }

    /// <summary>Maps a domain metadata model to a persisted entity (fresh Id each call).</summary>
    private static DocumentMetadataEntity ToEntity(DocumentMetadata metadata)
    {
        return new DocumentMetadataEntity
        {
            Id = Guid.NewGuid().ToString(),
            Collection = metadata.Collection,
            Key = metadata.Key,
            HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime,
            HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter,
            HlcNodeId = metadata.UpdatedAt.NodeId,
            IsDeleted = metadata.IsDeleted
        };
    }

    #endregion
}

View File

@@ -1,214 +0,0 @@
# BLiteDocumentStore - Usage Guide
## Overview
`BLiteDocumentStore<TDbContext>` is an abstract base class that simplifies creating document stores for CBDDC with BLite
persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods.
## Key Features
- ✅ **Automatic Oplog Creation** - Local changes automatically create Oplog entries
- ✅ **Remote Sync Handling** - AsyncLocal flag suppresses Oplog during sync (prevents duplicates)
- ✅ **No CDC Events Needed** - Direct Oplog management eliminates event loops
- ✅ **Simple API** - Only 4 abstract methods to implement
## Architecture
```
User Code → SampleDocumentStore (extends BLiteDocumentStore)
    ↓
BLiteDocumentStore
    ├── _context.Users / TodoLists (read/write entities)
    └── _context.OplogEntries (write oplog directly)
Remote Sync → OplogStore.ApplyBatchAsync()
    ↓
BLiteDocumentStore.PutDocumentAsync(fromSync=true)
    ├── _context.Users / TodoLists (write only)
    └── _context.OplogEntries (skip - already exists)
```
**Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries`
collection.
## Implementation Example
```csharp
public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
{
public SampleDocumentStore(
SampleDbContext context,
IPeerNodeConfigurationProvider configProvider,
ILogger<SampleDocumentStore>? logger = null)
: base(context, configProvider, new LastWriteWinsConflictResolver(), logger)
{
}
public override IEnumerable<string> InterestedCollection => new[] { "Users", "TodoLists" };
protected override async Task ApplyContentToEntityAsync(
string collection, string key, JsonElement content, CancellationToken ct)
{
switch (collection)
{
case "Users":
var user = content.Deserialize<User>()!;
user.Id = key;
var existingUser = _context.Users.FindById(key);
if (existingUser != null)
await _context.Users.UpdateAsync(user);
else
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync(ct);
break;
case "TodoLists":
var todoList = content.Deserialize<TodoList>()!;
todoList.Id = key;
var existingTodoList = _context.TodoLists.FindById(key);
if (existingTodoList != null)
await _context.TodoLists.UpdateAsync(todoList);
else
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync(ct);
break;
default:
throw new NotSupportedException($"Collection '{collection}' is not supported");
}
}
protected override Task<JsonElement?> GetEntityAsJsonAsync(
string collection, string key, CancellationToken ct)
{
return Task.FromResult<JsonElement?>(collection switch
{
"Users" => SerializeEntity(_context.Users.FindById(key)),
"TodoLists" => SerializeEntity(_context.TodoLists.FindById(key)),
_ => null
});
}
protected override async Task RemoveEntityAsync(
string collection, string key, CancellationToken ct)
{
switch (collection)
{
case "Users":
await _context.Users.DeleteAsync(key);
await _context.SaveChangesAsync(ct);
break;
case "TodoLists":
await _context.TodoLists.DeleteAsync(key);
await _context.SaveChangesAsync(ct);
break;
}
}
protected override async Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
string collection, CancellationToken ct)
{
return await Task.Run(() => collection switch
{
"Users" => _context.Users.FindAll()
.Select(u => (u.Id, SerializeEntity(u)!.Value)),
"TodoLists" => _context.TodoLists.FindAll()
.Select(t => (t.Id, SerializeEntity(t)!.Value)),
_ => Enumerable.Empty<(string, JsonElement)>()
}, ct);
}
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
if (entity == null) return null;
return JsonSerializer.SerializeToElement(entity);
}
}
```
## Usage in Application
### Setup (DI Container)
```csharp
services.AddSingleton<SampleDbContext>(sp =>
new SampleDbContext("data/sample.blite"));
// No OplogStore dependency needed!
services.AddSingleton<IDocumentStore, SampleDocumentStore>();
services.AddSingleton<IOplogStore, BLiteOplogStore<SampleDbContext>>();
```
### Local Changes (User operations)
```csharp
// User inserts a new user
var user = new User { Id = "user-1", Name = "Alice" };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// The application then needs to notify the DocumentStore:
var document = new Document(
"Users",
"user-1",
JsonSerializer.SerializeToElement(user),
new HlcTimestamp(0, 0, ""),
false);
await documentStore.PutDocumentAsync(document);
// ✅ This creates an OplogEntry automatically
```
### Remote Sync (Automatic)
```csharp
// When OplogStore.ApplyBatchAsync receives remote changes:
await oplogStore.ApplyBatchAsync(remoteEntries, cancellationToken);
// Internally, this calls:
using (documentStore.BeginRemoteSync()) // ✅ Suppresses Oplog creation
{
foreach (var entry in remoteEntries)
{
await documentStore.PutDocumentAsync(entryAsDocument);
// ✅ Writes to DB only, no Oplog duplication
}
}
```
## Migration from Old CDC-based Approach
### Before (with CDC Events)
```csharp
// SampleDocumentStore subscribes to BLite CDC
// CDC emits events ? OplogCoordinator creates Oplog
// Problem: Remote sync also triggers CDC ? duplicate Oplog entries
```
### After (with BLiteDocumentStore)
```csharp
// Direct Oplog management in DocumentStore
// AsyncLocal flag prevents duplicates during sync
// No CDC events needed
```
## Benefits
1. **No Event Loops** - Direct control over Oplog creation
2. **Thread-Safe** - AsyncLocal handles concurrent operations
3. **Simpler** - Only 4 methods to implement vs full CDC subscription
4. **Transparent** - Oplog management is hidden from user code
## Next Steps
After implementing your DocumentStore:
1. Remove CDC subscriptions from your code
2. Remove `OplogCoordinator` from DI (no longer needed)
3. Test local operations create Oplog entries
4. Test remote sync doesn't create duplicate entries

View File

@@ -1,783 +0,0 @@
using System.Collections.Concurrent;
using System.Text.Json;
using BLite.Core.CDC;
using BLite.Core.Collections;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using BLiteOperationType = BLite.Core.Transactions.OperationType;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Abstract base class for BLite-based document stores.
/// Handles Oplog creation internally - subclasses only implement entity mapping.
/// </summary>
/// <typeparam name="TDbContext">The BLite DbContext type.</typeparam>
public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposable
where TDbContext : CBDDCDocumentDbContext
{
// CDC subscriptions created by WatchCollection; disposed in Dispose().
private readonly List<IDisposable> _cdcWatchers = new();
// Lock object for clock state — presumably guards _lastPhysicalTime/_logicalCounter
// in HLC generation code outside this view; confirm against the full class.
private readonly object _clockLock = new();
protected readonly IPeerNodeConfigurationProvider _configProvider;
protected readonly IConflictResolver _conflictResolver;
protected readonly TDbContext _context;
protected readonly ILogger<BLiteDocumentStore<TDbContext>> _logger;
// Logical collection names registered via WatchCollection.
private readonly HashSet<string> _registeredCollections = new();
/// <summary>
/// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync.
/// CurrentCount == 0 => sync in progress, CDC must skip.
/// CurrentCount == 1 => no sync, CDC creates OplogEntry.
/// </summary>
private readonly SemaphoreSlim _remoteSyncGuard = new(1, 1);
// Pending self-inflicted CDC events keyed by collection|key|operation, with a
// pending count; see RegisterSuppressedCdcEvent / TryConsumeSuppressedCdcEvent.
private readonly ConcurrentDictionary<string, int> _suppressedCdcEvents = new(StringComparer.Ordinal);
protected readonly IVectorClockService _vectorClock;
// HLC state for generating timestamps for local changes
private long _lastPhysicalTime;
private int _logicalCounter;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteDocumentStore{TDbContext}" /> class.
/// </summary>
/// <param name="context">The BLite database context.</param>
/// <param name="configProvider">The peer node configuration provider.</param>
/// <param name="vectorClockService">The vector clock service.</param>
/// <param name="conflictResolver">The conflict resolver to use for merges; defaults to last-write-wins.</param>
/// <param name="logger">The logger instance; adapted to the typed logger this store requires.</param>
/// <exception cref="ArgumentNullException">Thrown when a required dependency is null.</exception>
protected BLiteDocumentStore(
    TDbContext context,
    IPeerNodeConfigurationProvider configProvider,
    IVectorClockService vectorClockService,
    IConflictResolver? conflictResolver = null,
    ILogger? logger = null)
{
    _context = context ?? throw new ArgumentNullException(nameof(context));
    _configProvider = configProvider ?? throw new ArgumentNullException(nameof(configProvider));
    _vectorClock = vectorClockService ?? throw new ArgumentNullException(nameof(vectorClockService));
    _conflictResolver = conflictResolver ?? new LastWriteWinsConflictResolver();
    _logger = CreateTypedLogger(logger);
    // Seed the HLC with the current wall clock; logical counter starts at zero.
    _lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
    _logicalCounter = 0;
}
/// <summary>
/// Releases managed resources used by this document store.
/// </summary>
public virtual void Dispose()
{
    // Best-effort teardown: a faulty watcher must not prevent the rest from disposing.
    foreach (var subscription in _cdcWatchers)
    {
        try
        {
            subscription.Dispose();
        }
        catch
        {
            // Intentionally swallowed: disposal failures are non-actionable here.
        }
    }
    _cdcWatchers.Clear();
    _remoteSyncGuard.Dispose();
}
/// <summary>
/// Adapts an arbitrary logger to the typed logger this store requires,
/// falling back to a null logger when none is supplied.
/// </summary>
private static ILogger<BLiteDocumentStore<TDbContext>> CreateTypedLogger(ILogger? logger) => logger switch
{
    null => NullLogger<BLiteDocumentStore<TDbContext>>.Instance,
    ILogger<BLiteDocumentStore<TDbContext>> typedLogger => typedLogger,
    _ => new ForwardingLogger(logger),
};
/// <summary>
/// Wraps an untyped <see cref="ILogger"/> so it satisfies the typed logger
/// contract; every call is delegated verbatim to the inner logger.
/// </summary>
private sealed class ForwardingLogger : ILogger<BLiteDocumentStore<TDbContext>>
{
    private readonly ILogger _inner;

    /// <summary>
    /// Initializes a new instance of the <see cref="ForwardingLogger" /> class.
    /// </summary>
    /// <param name="inner">The underlying logger instance.</param>
    public ForwardingLogger(ILogger inner) => _inner = inner;

    /// <inheritdoc />
    public IDisposable? BeginScope<TState>(TState state) where TState : notnull => _inner.BeginScope(state);

    /// <inheritdoc />
    public bool IsEnabled(LogLevel logLevel) => _inner.IsEnabled(logLevel);

    /// <inheritdoc />
    public void Log<TState>(
        LogLevel logLevel,
        EventId eventId,
        TState state,
        Exception? exception,
        Func<TState, Exception?, string> formatter)
        => _inner.Log(logLevel, eventId, state, exception, formatter);
}
#region CDC Registration
private static string BuildSuppressionKey(string collection, string key, OperationType operationType)
{
return $"{collection}|{key}|{(int)operationType}";
}
private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType)
{
string suppressionKey = BuildSuppressionKey(collection, key, operationType);
_suppressedCdcEvents.AddOrUpdate(suppressionKey, 1, (_, current) => current + 1);
}
private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType)
{
string suppressionKey = BuildSuppressionKey(collection, key, operationType);
while (true)
{
if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out int current)) return false;
if (current <= 1) return _suppressedCdcEvents.TryRemove(suppressionKey, out _);
if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) return true;
}
}
/// <summary>
/// Registers a BLite collection for CDC tracking.
/// Call in subclass constructor for each collection to sync.
/// </summary>
/// <typeparam name="TEntity">The entity type.</typeparam>
/// <param name="collectionName">The logical collection name used in Oplog.</param>
/// <param name="collection">The BLite DocumentCollection.</param>
/// <param name="keySelector">Function to extract the entity key.</param>
protected void WatchCollection<TEntity>(
string collectionName,
DocumentCollection<string, TEntity> collection,
Func<TEntity, string> keySelector)
where TEntity : class
{
_registeredCollections.Add(collectionName);
var watcher = collection.Watch(true)
.Subscribe(new CdcObserver<TEntity>(collectionName, keySelector, this));
_cdcWatchers.Add(watcher);
}
/// <summary>
/// Generic CDC observer. Forwards BLite change events to OnLocalChangeDetectedAsync.
/// Automatically skips events when remote sync is in progress.
/// </summary>
private class CdcObserver<TEntity> : IObserver<ChangeStreamEvent<string, TEntity>>
where TEntity : class
{
private readonly string _collectionName;
private readonly Func<TEntity, string> _keySelector;
private readonly BLiteDocumentStore<TDbContext> _store;
/// <summary>
/// Initializes a new instance of the <see cref="CdcObserver{TEntity}" /> class.
/// </summary>
/// <param name="collectionName">The logical collection name.</param>
/// <param name="keySelector">The key selector for observed entities.</param>
/// <param name="store">The owning document store instance.</param>
public CdcObserver(
string collectionName,
Func<TEntity, string> keySelector,
BLiteDocumentStore<TDbContext> store)
{
_collectionName = collectionName;
_keySelector = keySelector;
_store = store;
}
/// <summary>
/// Handles a change stream event from BLite CDC.
/// </summary>
/// <param name="changeEvent">The change event payload.</param>
public void OnNext(ChangeStreamEvent<string, TEntity> changeEvent)
{
var operationType = changeEvent.Type == BLiteOperationType.Delete
? OperationType.Delete
: OperationType.Put;
string entityId = changeEvent.DocumentId ?? "";
if (operationType == OperationType.Put && changeEvent.Entity != null)
entityId = _keySelector(changeEvent.Entity);
if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) return;
if (_store._remoteSyncGuard.CurrentCount == 0) return;
if (changeEvent.Type == BLiteOperationType.Delete)
{
_store.OnLocalChangeDetectedAsync(_collectionName, entityId, OperationType.Delete, null)
.GetAwaiter().GetResult();
}
else if (changeEvent.Entity != null)
{
var content = JsonSerializer.SerializeToElement(changeEvent.Entity);
string key = _keySelector(changeEvent.Entity);
_store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content)
.GetAwaiter().GetResult();
}
}
/// <summary>
/// Handles CDC observer errors.
/// </summary>
/// <param name="error">The observed exception.</param>
public void OnError(Exception error)
{
}
/// <summary>
/// Handles completion of the CDC stream.
/// </summary>
public void OnCompleted()
{
}
}
#endregion
#region Abstract Methods - Implemented by subclass
    /// <summary>
    /// Applies JSON content to a single entity (insert or update) and commits changes.
    /// Called for single-document operations.
    /// </summary>
    /// <param name="collection">The logical collection name.</param>
    /// <param name="key">The document key.</param>
    /// <param name="content">The document content to apply.</param>
    /// <param name="cancellationToken">The cancellation token.</param>
    protected abstract Task ApplyContentToEntityAsync(
        string collection, string key, JsonElement content, CancellationToken cancellationToken);
    /// <summary>
    /// Applies JSON content to multiple entities (insert or update) with a single commit.
    /// Called for batch operations. Must commit all changes in a single SaveChanges.
    /// </summary>
    /// <param name="documents">The documents to apply in one batch.</param>
    /// <param name="cancellationToken">The cancellation token.</param>
    protected abstract Task ApplyContentToEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
        CancellationToken cancellationToken);
    /// <summary>
    /// Reads an entity from the DbContext and returns it as JsonElement.
    /// </summary>
    /// <param name="collection">The logical collection name.</param>
    /// <param name="key">The document key.</param>
    /// <param name="cancellationToken">The cancellation token.</param>
    /// <returns>The entity serialized as JSON, or <see langword="null" /> when no entity matches.</returns>
    protected abstract Task<JsonElement?> GetEntityAsJsonAsync(
        string collection, string key, CancellationToken cancellationToken);
    /// <summary>
    /// Removes a single entity from the DbContext and commits changes.
    /// </summary>
    /// <param name="collection">The logical collection name.</param>
    /// <param name="key">The document key.</param>
    /// <param name="cancellationToken">The cancellation token.</param>
    protected abstract Task RemoveEntityAsync(
        string collection, string key, CancellationToken cancellationToken);
    /// <summary>
    /// Removes multiple entities from the DbContext with a single commit.
    /// </summary>
    /// <param name="documents">The documents to remove in one batch.</param>
    /// <param name="cancellationToken">The cancellation token.</param>
    protected abstract Task RemoveEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken);
    /// <summary>
    /// Reads all entities from a collection as JsonElements.
    /// </summary>
    /// <param name="collection">The logical collection name.</param>
    /// <param name="cancellationToken">The cancellation token.</param>
    /// <returns>The key/content pairs for every entity in the collection.</returns>
    protected abstract Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
        string collection, CancellationToken cancellationToken);
#endregion
#region IDocumentStore Implementation
    /// <summary>
    /// Returns the collections registered via WatchCollection.
    /// The enumerable is a live view of the registration set, not a snapshot.
    /// </summary>
    public IEnumerable<string> InterestedCollection => _registeredCollections;
/// <summary>
/// Gets a document by collection and key.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The matching document, or <see langword="null" /> when not found.</returns>
public async Task<Document?> GetDocumentAsync(string collection, string key,
CancellationToken cancellationToken = default)
{
var content = await GetEntityAsJsonAsync(collection, key, cancellationToken);
if (content == null) return null;
var timestamp = new HlcTimestamp(0, 0, ""); // Will be populated from metadata if needed
return new Document(collection, key, content.Value, timestamp, false);
}
/// <summary>
/// Gets all documents for a collection.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents in the specified collection.</returns>
public async Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection,
CancellationToken cancellationToken = default)
{
var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
var timestamp = new HlcTimestamp(0, 0, "");
return entities.Select(e => new Document(collection, e.Key, e.Content, timestamp, false));
}
/// <summary>
/// Gets documents for the specified collection and key pairs.
/// </summary>
/// <param name="documentKeys">The collection and key pairs to resolve.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents that were found.</returns>
public async Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys,
CancellationToken cancellationToken)
{
var documents = new List<Document>();
foreach ((string collection, string key) in documentKeys)
{
var doc = await GetDocumentAsync(collection, key, cancellationToken);
if (doc != null) documents.Add(doc);
}
return documents;
}
/// <summary>
/// Inserts or updates a single document.
/// </summary>
/// <param name="document">The document to persist.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default)
{
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
await PutDocumentInternalAsync(document, cancellationToken);
}
finally
{
_remoteSyncGuard.Release();
}
return true;
}
    // Applies a single put while pre-registering the CDC echo it will produce as
    // suppressed, so the local write does not generate a duplicate oplog entry.
    private async Task PutDocumentInternalAsync(Document document, CancellationToken cancellationToken)
    {
        RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
        await ApplyContentToEntityAsync(document.Collection, document.Key, document.Content, cancellationToken);
    }
/// <summary>
/// Updates a batch of documents.
/// </summary>
/// <param name="documents">The documents to update.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default)
{
var documentList = documents.ToList();
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var document in documentList)
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
await ApplyContentToEntitiesBatchAsync(
documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
}
finally
{
_remoteSyncGuard.Release();
}
return true;
}
/// <summary>
/// Inserts a batch of documents.
/// </summary>
/// <param name="documents">The documents to insert.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents,
CancellationToken cancellationToken = default)
{
var documentList = documents.ToList();
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var document in documentList)
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
await ApplyContentToEntitiesBatchAsync(
documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
}
finally
{
_remoteSyncGuard.Release();
}
return true;
}
/// <summary>
/// Deletes a single document.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteDocumentAsync(string collection, string key,
CancellationToken cancellationToken = default)
{
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
await DeleteDocumentInternalAsync(collection, key, cancellationToken);
}
finally
{
_remoteSyncGuard.Release();
}
return true;
}
    // Applies a single delete while pre-registering the CDC echo it will produce as
    // suppressed, so the local removal does not generate a duplicate oplog entry.
    private async Task DeleteDocumentInternalAsync(string collection, string key, CancellationToken cancellationToken)
    {
        RegisterSuppressedCdcEvent(collection, key, OperationType.Delete);
        await RemoveEntityAsync(collection, key, cancellationToken);
    }
/// <summary>
/// Deletes a batch of documents by composite keys.
/// </summary>
/// <param name="documentKeys">The document keys in collection/key format.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys,
CancellationToken cancellationToken = default)
{
var parsedKeys = new List<(string Collection, string Key)>();
foreach (string key in documentKeys)
{
string[] parts = key.Split('/');
if (parts.Length == 2)
parsedKeys.Add((parts[0], parts[1]));
else
_logger.LogWarning("Invalid document key format: {Key}", key);
}
if (parsedKeys.Count == 0) return true;
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach ((string collection, string key) in parsedKeys)
RegisterSuppressedCdcEvent(collection, key, OperationType.Delete);
await RemoveEntitiesBatchAsync(parsedKeys, cancellationToken);
}
finally
{
_remoteSyncGuard.Release();
}
return true;
}
/// <summary>
/// Merges an incoming document with the current stored document.
/// </summary>
/// <param name="incoming">The incoming document.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The stored document after merge resolution.</returns>
public async Task<Document> MergeAsync(Document incoming, CancellationToken cancellationToken = default)
{
var existing = await GetDocumentAsync(incoming.Collection, incoming.Key, cancellationToken);
if (existing == null)
{
// Use internal method - guard not acquired yet in single-document merge
await PutDocumentInternalAsync(incoming, cancellationToken);
return incoming;
}
// Use conflict resolver to merge
var resolution = _conflictResolver.Resolve(existing, new OplogEntry(
incoming.Collection,
incoming.Key,
OperationType.Put,
incoming.Content,
incoming.UpdatedAt,
""));
if (resolution.ShouldApply && resolution.MergedDocument != null)
{
await PutDocumentInternalAsync(resolution.MergedDocument, cancellationToken);
return resolution.MergedDocument;
}
return existing;
}
#endregion
#region ISnapshotable Implementation
/// <summary>
/// Removes all tracked documents from registered collections.
/// </summary>
/// <param name="cancellationToken">The cancellation token.</param>
public async Task DropAsync(CancellationToken cancellationToken = default)
{
foreach (string collection in InterestedCollection)
{
var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
foreach ((string key, var _) in entities) await RemoveEntityAsync(collection, key, cancellationToken);
}
}
/// <summary>
/// Exports all tracked documents from registered collections.
/// </summary>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The exported documents.</returns>
public async Task<IEnumerable<Document>> ExportAsync(CancellationToken cancellationToken = default)
{
var documents = new List<Document>();
foreach (string collection in InterestedCollection)
{
var collectionDocs = await GetDocumentsByCollectionAsync(collection, cancellationToken);
documents.AddRange(collectionDocs);
}
return documents;
}
/// <summary>
/// Imports a batch of documents.
/// </summary>
/// <param name="items">The documents to import.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public async Task ImportAsync(IEnumerable<Document> items, CancellationToken cancellationToken = default)
{
var documents = items.ToList();
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var document in documents)
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
await ApplyContentToEntitiesBatchAsync(
documents.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
}
finally
{
_remoteSyncGuard.Release();
}
}
/// <summary>
/// Merges a batch of incoming documents.
/// </summary>
/// <param name="items">The incoming documents.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public async Task MergeAsync(IEnumerable<Document> items, CancellationToken cancellationToken = default)
{
// Acquire guard to prevent Oplog creation during merge
await _remoteSyncGuard.WaitAsync(cancellationToken);
try
{
foreach (var document in items) await MergeAsync(document, cancellationToken);
}
finally
{
_remoteSyncGuard.Release();
}
}
#endregion
#region Oplog Management
    /// <summary>
    /// Returns true if a remote sync operation is in progress (guard acquired).
    /// CDC listeners should check this before creating OplogEntry.
    /// A semaphore count of zero means some caller currently holds the guard.
    /// </summary>
    protected bool IsRemoteSyncInProgress => _remoteSyncGuard.CurrentCount == 0;
/// <summary>
/// Called by subclass CDC listeners when a local change is detected.
/// Creates OplogEntry + DocumentMetadata only if no remote sync is in progress.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="operationType">The detected operation type.</param>
/// <param name="content">The document content when available.</param>
/// <param name="cancellationToken">The cancellation token.</param>
protected async Task OnLocalChangeDetectedAsync(
string collection,
string key,
OperationType operationType,
JsonElement? content,
CancellationToken cancellationToken = default)
{
if (IsRemoteSyncInProgress) return;
await CreateOplogEntryAsync(collection, key, operationType, content, cancellationToken);
}
private HlcTimestamp GenerateTimestamp(string nodeId)
{
lock (_clockLock)
{
long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (now > _lastPhysicalTime)
{
_lastPhysicalTime = now;
_logicalCounter = 0;
}
else
{
_logicalCounter++;
}
return new HlcTimestamp(_lastPhysicalTime, _logicalCounter, nodeId);
}
}
    // Persists one oplog entry for a locally detected change and upserts the matching
    // DocumentMetadata row, then commits both and feeds the new entry to the vector clock.
    // Write order matters: oplog entry first, then metadata, then a single SaveChangesAsync.
    private async Task CreateOplogEntryAsync(
        string collection,
        string key,
        OperationType operationType,
        JsonElement? content,
        CancellationToken cancellationToken)
    {
        var config = await _configProvider.GetConfiguration();
        string nodeId = config.NodeId;
        // Get last hash from OplogEntries collection directly; the newest entry for
        // this node (by HLC order) supplies the previous hash for chain linking.
        var lastEntry = _context.OplogEntries
            .Find(e => e.TimestampNodeId == nodeId)
            .OrderByDescending(e => e.TimestampPhysicalTime)
            .ThenByDescending(e => e.TimestampLogicalCounter)
            .FirstOrDefault();
        string previousHash = lastEntry?.Hash ?? string.Empty;
        var timestamp = GenerateTimestamp(nodeId);
        var oplogEntry = new OplogEntry(
            collection,
            key,
            operationType,
            content,
            timestamp,
            previousHash);
        // Write directly to OplogEntries collection
        await _context.OplogEntries.InsertAsync(oplogEntry.ToEntity());
        // Write DocumentMetadata for sync tracking
        var docMetadata = EntityMappers.CreateDocumentMetadata(
            collection,
            key,
            timestamp,
            operationType == OperationType.Delete);
        var existingMetadata = _context.DocumentMetadatas
            .Find(m => m.Collection == collection && m.Key == key)
            .FirstOrDefault();
        if (existingMetadata != null)
        {
            // Update existing metadata in place rather than inserting a duplicate row.
            existingMetadata.HlcPhysicalTime = timestamp.PhysicalTime;
            existingMetadata.HlcLogicalCounter = timestamp.LogicalCounter;
            existingMetadata.HlcNodeId = timestamp.NodeId;
            existingMetadata.IsDeleted = operationType == OperationType.Delete;
            await _context.DocumentMetadatas.UpdateAsync(existingMetadata);
        }
        else
        {
            await _context.DocumentMetadatas.InsertAsync(docMetadata);
        }
        // Single commit covers both the oplog entry and the metadata upsert.
        await _context.SaveChangesAsync(cancellationToken);
        // Notify VectorClockService so sync sees local changes
        _vectorClock.Update(oplogEntry);
        _logger.LogDebug(
            "Created Oplog entry: {Operation} {Collection}/{Key} at {Timestamp} (hash: {Hash})",
            operationType, collection, key, timestamp, oplogEntry.Hash);
    }
/// <summary>
/// Marks the start of remote sync operations (suppresses CDC-triggered Oplog creation).
/// Use in using statement: using (store.BeginRemoteSync()) { ... }
/// </summary>
public IDisposable BeginRemoteSync()
{
_remoteSyncGuard.Wait();
return new RemoteSyncScope(_remoteSyncGuard);
}
private class RemoteSyncScope : IDisposable
{
private readonly SemaphoreSlim _guard;
/// <summary>
/// Initializes a new instance of the <see cref="RemoteSyncScope" /> class.
/// </summary>
/// <param name="guard">The semaphore guarding remote sync operations.</param>
public RemoteSyncScope(SemaphoreSlim guard)
{
_guard = guard;
}
/// <summary>
/// Releases the remote sync guard.
/// </summary>
public void Dispose()
{
_guard.Release();
}
}
#endregion
}

View File

@@ -1,253 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// BLite-backed implementation of <see cref="OplogStore" />. Persists oplog entries in the
/// OplogEntries collection of the supplied context and keeps the vector clock consistent
/// with the stored hash chain.
/// </summary>
/// <typeparam name="TDbContext">The BLite document database context type.</typeparam>
public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDocumentDbContext
{
    /// <summary>
    /// The BLite database context used for all oplog persistence operations.
    /// </summary>
    protected readonly TDbContext _context;
    /// <summary>
    /// The logger used for diagnostic output.
    /// </summary>
    protected readonly ILogger<BLiteOplogStore<TDbContext>> _logger;
    /// <summary>
    /// Initializes a new instance of the <see cref="BLiteOplogStore{TDbContext}" /> class.
    /// </summary>
    /// <param name="dbContext">The BLite database context.</param>
    /// <param name="documentStore">The document store used by the oplog store.</param>
    /// <param name="conflictResolver">The conflict resolver used during merges.</param>
    /// <param name="vectorClockService">The vector clock service used for timestamp coordination.</param>
    /// <param name="snapshotMetadataStore">Optional snapshot metadata store used for initialization.</param>
    /// <param name="logger">Optional logger instance.</param>
    public BLiteOplogStore(
        TDbContext dbContext,
        IDocumentStore documentStore,
        IConflictResolver conflictResolver,
        IVectorClockService vectorClockService,
        ISnapshotMetadataStore? snapshotMetadataStore = null,
        ILogger<BLiteOplogStore<TDbContext>>? logger = null) : base(documentStore, conflictResolver, vectorClockService,
        snapshotMetadataStore)
    {
        _context = dbContext ?? throw new ArgumentNullException(nameof(dbContext));
        _logger = logger ?? NullLogger<BLiteOplogStore<TDbContext>>.Instance;
    }
    /// <inheritdoc />
    public override async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries,
        CancellationToken cancellationToken = default)
    {
        // BLite transactions are committed by each SaveChangesAsync internally.
        // Wrapping in an explicit transaction causes "Cannot rollback committed transaction"
        // because PutDocumentAsync → SaveChangesAsync already commits.
        await base.ApplyBatchAsync(oplogEntries, cancellationToken);
    }
    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        // Use Id (technical key) for deletion, not Hash (business key)
        await _context.OplogEntries.DeleteBulkAsync(_context.OplogEntries.FindAll().Select(e => e.Id));
        await _context.SaveChangesAsync(cancellationToken);
        _vectorClock.Invalidate();
    }
    /// <inheritdoc />
    public override Task<IEnumerable<OplogEntry>> ExportAsync(CancellationToken cancellationToken = default)
    {
        // No awaitable work here (CS1998): materialize synchronously and wrap.
        return Task.FromResult<IEnumerable<OplogEntry>>(_context.OplogEntries.FindAll().ToDomain());
    }
    /// <inheritdoc />
    public override Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
        CancellationToken cancellationToken = default)
    {
        // 1. Resolve both chain endpoints; an unknown hash yields an empty range.
        var startRow = _context.OplogEntries.Find(o => o.Hash == startHash).FirstOrDefault();
        var endRow = _context.OplogEntries.Find(o => o.Hash == endHash).FirstOrDefault();
        if (startRow == null || endRow == null) return Task.FromResult<IEnumerable<OplogEntry>>([]);
        string nodeId = startRow.TimestampNodeId;
        // 2. Fetch range (Start < Entry <= End) for the endpoint's node, in HLC order.
        var entities = _context.OplogEntries
            .Find(o => o.TimestampNodeId == nodeId &&
                       (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime ||
                        (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime &&
                         o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
                       (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime ||
                        (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime &&
                         o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .ToList();
        return Task.FromResult<IEnumerable<OplogEntry>>(entities.ToDomain());
    }
    /// <inheritdoc />
    public override Task<OplogEntry?> GetEntryByHashAsync(string hash,
        CancellationToken cancellationToken = default)
    {
        // Hash is a regular indexed property, not the technical key.
        return Task.FromResult(_context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault()?.ToDomain());
    }
    /// <inheritdoc />
    public override Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var query = _context.OplogEntries
            .Find(o => o.TimestampPhysicalTime > timestamp.PhysicalTime ||
                       (o.TimestampPhysicalTime == timestamp.PhysicalTime &&
                        o.TimestampLogicalCounter > timestamp.LogicalCounter));
        if (collections != null)
        {
            // Restrict to the requested collections when a filter is supplied.
            var collectionSet = new HashSet<string>(collections);
            query = query.Where(o => collectionSet.Contains(o.Collection));
        }
        IEnumerable<OplogEntry> ordered = query
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .ToDomain()
            .ToList();
        return Task.FromResult(ordered);
    }
    /// <inheritdoc />
    public override Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var query = _context.OplogEntries.AsQueryable()
            .Where(o => o.TimestampNodeId == nodeId &&
                        (o.TimestampPhysicalTime > since.PhysicalTime ||
                         (o.TimestampPhysicalTime == since.PhysicalTime &&
                          o.TimestampLogicalCounter > since.LogicalCounter)));
        if (collections != null)
        {
            // Restrict to the requested collections when a filter is supplied.
            var collectionSet = new HashSet<string>(collections);
            query = query.Where(o => collectionSet.Contains(o.Collection));
        }
        IEnumerable<OplogEntry> ordered = query
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .ToDomain()
            .ToList();
        return Task.FromResult(ordered);
    }
    /// <inheritdoc />
    public override async Task ImportAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items) await _context.OplogEntries.InsertAsync(item.ToEntity());
        await _context.SaveChangesAsync(cancellationToken);
    }
    /// <inheritdoc />
    public override async Task MergeAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            // Insert only entries whose hash is not yet present (idempotent merge).
            var existing = _context.OplogEntries.Find(o => o.Hash == item.Hash).FirstOrDefault();
            if (existing == null) await _context.OplogEntries.InsertAsync(item.ToEntity());
        }
        await _context.SaveChangesAsync(cancellationToken);
    }
    /// <inheritdoc />
    public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default)
    {
        // Use Id (technical key) for deletion, consistent with DropAsync — Hash is
        // the business key and DeleteBulkAsync expects technical keys.
        var idsToDelete = _context.OplogEntries.AsQueryable()
            .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime ||
                        (o.TimestampPhysicalTime == cutoff.PhysicalTime &&
                         o.TimestampLogicalCounter <= cutoff.LogicalCounter))
            .Select(o => o.Id)
            .ToList();
        await _context.OplogEntries.DeleteBulkAsync(idsToDelete);
        // Commit the bulk delete; without SaveChangesAsync the prune is never persisted.
        await _context.SaveChangesAsync(cancellationToken);
    }
    /// <inheritdoc />
    protected override void InitializeVectorClock()
    {
        if (_vectorClock.IsInitialized) return;
        // Early check: if context or OplogEntries is null, skip initialization
        if (_context?.OplogEntries == null)
        {
            _vectorClock.IsInitialized = true;
            return;
        }
        // Step 1: Load from SnapshotMetadata FIRST (base state after prune)
        if (_snapshotMetadataStore != null)
            try
            {
                var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult();
                foreach (var snapshot in snapshots)
                    _vectorClock.UpdateNode(
                        snapshot.NodeId,
                        new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter,
                            snapshot.NodeId),
                        snapshot.Hash ?? "");
            }
            catch
            {
                // Ignore errors during initialization - oplog data will be used as fallback
            }
        // Step 2: Load from Oplog (Latest State - Overrides Snapshot if newer)
        var latestPerNode = _context.OplogEntries.AsQueryable()
            .GroupBy(o => o.TimestampNodeId)
            .Select(g => new
            {
                NodeId = g.Key,
                MaxEntry = g.OrderByDescending(o => o.TimestampPhysicalTime)
                    .ThenByDescending(o => o.TimestampLogicalCounter)
                    .FirstOrDefault()
            })
            .ToList()
            .Where(x => x.MaxEntry != null)
            .ToList();
        foreach (var node in latestPerNode)
            if (node.MaxEntry != null)
                _vectorClock.UpdateNode(
                    node.NodeId,
                    new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter,
                        node.MaxEntry.TimestampNodeId),
                    node.MaxEntry.Hash ?? "");
        _vectorClock.IsInitialized = true;
    }
    /// <inheritdoc />
    protected override async Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default)
    {
        await _context.OplogEntries.InsertAsync(entry.ToEntity());
    }
    /// <inheritdoc />
    protected override Task<string?> QueryLastHashForNodeAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        // Newest entry for the node in HLC order supplies the chain head hash.
        var lastEntry = _context.OplogEntries.AsQueryable()
            .Where(o => o.TimestampNodeId == nodeId)
            .OrderByDescending(o => o.TimestampPhysicalTime)
            .ThenByDescending(o => o.TimestampLogicalCounter)
            .FirstOrDefault();
        return Task.FromResult(lastEntry?.Hash);
    }
    /// <inheritdoc />
    protected override Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
        CancellationToken cancellationToken = default)
    {
        // Hash is a regular indexed property, not the technical key.
        var entry = _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault();
        return Task.FromResult<(long Wall, int Logic)?>(
            entry == null ? null : (entry.TimestampPhysicalTime, entry.TimestampLogicalCounter));
    }
}

View File

@@ -1,131 +0,0 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations.
/// </summary>
/// <remarks>
/// This class enables storage, retrieval, and management of remote peer configurations using the provided
/// database context. It is typically used in scenarios where peer configurations need to be persisted in a document
/// database.
/// </remarks>
/// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing peer configurations. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore where TDbContext : CBDDCDocumentDbContext
{
    /// <summary>
    /// The BLite context through which all peer configuration persistence runs.
    /// </summary>
    protected readonly TDbContext _context;
    /// <summary>
    /// Logger for diagnostic output; a no-op instance when none is supplied.
    /// </summary>
    protected readonly ILogger<BLitePeerConfigurationStore<TDbContext>> _logger;
    /// <summary>
    /// Initializes a new instance of the <see cref="BLitePeerConfigurationStore{TDbContext}" /> class.
    /// </summary>
    /// <param name="context">The database context used to access and manage peer configuration data. Cannot be null.</param>
    /// <param name="logger">An optional logger for diagnostic messages. When null, a no-op logger is used.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="context" /> is null.</exception>
    public BLitePeerConfigurationStore(TDbContext context,
        ILogger<BLitePeerConfigurationStore<TDbContext>>? logger = null)
    {
        _context = context ?? throw new ArgumentNullException(nameof(context));
        _logger = logger ?? NullLogger<BLitePeerConfigurationStore<TDbContext>>.Instance;
    }
    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        _logger.LogWarning(
            "Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
        // Deletion goes through Id (technical key), not NodeId (business key).
        var ids = await Task.Run(
            () => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(),
            cancellationToken);
        await _context.RemotePeerConfigurations.DeleteBulkAsync(ids);
        await _context.SaveChangesAsync(cancellationToken);
        _logger.LogInformation("Peer configuration store dropped successfully.");
    }
    /// <inheritdoc />
    public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
        CancellationToken cancellationToken = default)
    {
        return await Task.Run(
            () => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(),
            cancellationToken);
    }
    /// <inheritdoc />
    public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
        CancellationToken cancellationToken)
    {
        // NodeId is an indexed business property, not the technical key.
        var entity = await Task.Run(
            () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(),
            cancellationToken);
        return entity?.ToDomain();
    }
    /// <inheritdoc />
    public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
        CancellationToken cancellationToken = default)
    {
        return await Task.Run(
            () => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(),
            cancellationToken);
    }
    /// <inheritdoc />
    public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
    {
        // Look the peer up by its NodeId business key; delete via the technical Id.
        var entity = await Task.Run(
            () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(),
            cancellationToken);
        if (entity == null)
        {
            _logger.LogWarning("Attempted to remove non-existent remote peer: {NodeId}", nodeId);
            return;
        }
        await _context.RemotePeerConfigurations.DeleteAsync(entity.Id);
        await _context.SaveChangesAsync(cancellationToken);
        _logger.LogInformation("Removed remote peer configuration: {NodeId}", nodeId);
    }
    /// <inheritdoc />
    public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
        CancellationToken cancellationToken = default)
    {
        // Upsert keyed on the NodeId business key.
        var current = await Task.Run(
            () => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(),
            cancellationToken);
        if (current == null)
        {
            await _context.RemotePeerConfigurations.InsertAsync(peer.ToEntity());
        }
        else
        {
            current.NodeId = peer.NodeId;
            current.Address = peer.Address;
            current.Type = (int)peer.Type;
            current.IsEnabled = peer.IsEnabled;
            current.InterestsJson = peer.InterestingCollections.Count == 0
                ? ""
                : JsonSerializer.Serialize(peer.InterestingCollections);
            await _context.RemotePeerConfigurations.UpdateAsync(current);
        }
        await _context.SaveChangesAsync(cancellationToken);
        _logger.LogInformation("Saved remote peer configuration: {NodeId} ({Type})", peer.NodeId, peer.Type);
    }
}

View File

@@ -1,300 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// BLite-backed peer oplog confirmation store.
/// Tracks, per (peer, source-node) pair, the newest oplog position a peer has confirmed,
/// which the pruning logic can use to decide how far back entries may be discarded.
/// </summary>
/// <typeparam name="TDbContext">The BLite context type.</typeparam>
public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmationStore
where TDbContext : CBDDCDocumentDbContext
{
// Sentinel SourceNodeId: a row with this value marks the peer as "registered" without
// carrying a real confirmation. Read queries below filter these rows out.
internal const string RegistrationSourceNodeId = "__peer_registration__";
private readonly TDbContext _context;
private readonly ILogger<BLitePeerOplogConfirmationStore<TDbContext>> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="BLitePeerOplogConfirmationStore{TDbContext}" /> class.
/// </summary>
/// <param name="context">The BLite context.</param>
/// <param name="logger">An optional logger.</param>
public BLitePeerOplogConfirmationStore(
TDbContext context,
ILogger<BLitePeerOplogConfirmationStore<TDbContext>>? logger = null)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLitePeerOplogConfirmationStore<TDbContext>>.Instance;
}
/// <inheritdoc />
// NOTE(review): unlike the other BLite stores in this package, queries here run
// synchronously on the caller's thread (no Task.Run wrapper) — confirm this is intentional.
public override async Task EnsurePeerRegisteredAsync(
string peerNodeId,
string address,
PeerType type,
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == RegistrationSourceNodeId)
.FirstOrDefault();
if (existing == null)
{
// First registration: insert the sentinel row with a zeroed confirmation point.
// The address and type are only recorded in the debug log; the entity does not persist them.
await _context.PeerOplogConfirmations.InsertAsync(new PeerOplogConfirmationEntity
{
Id = Guid.NewGuid().ToString(),
PeerNodeId = peerNodeId,
SourceNodeId = RegistrationSourceNodeId,
ConfirmedWall = 0,
ConfirmedLogic = 0,
ConfirmedHash = "",
LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(),
IsActive = true
});
await _context.SaveChangesAsync(cancellationToken);
_logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId,
address, type);
return;
}
// Re-registration of a previously deactivated peer: reactivate the sentinel row.
if (!existing.IsActive)
{
existing.IsActive = true;
existing.LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
await _context.PeerOplogConfirmations.UpdateAsync(existing);
await _context.SaveChangesAsync(cancellationToken);
}
}
/// <inheritdoc />
public override async Task UpdateConfirmationAsync(
string peerNodeId,
string sourceNodeId,
HlcTimestamp timestamp,
string hash,
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
if (string.IsNullOrWhiteSpace(sourceNodeId))
throw new ArgumentException("Source node id is required.", nameof(sourceNodeId));
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == sourceNodeId)
.FirstOrDefault();
long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (existing == null)
{
// First confirmation for this (peer, source) pair.
await _context.PeerOplogConfirmations.InsertAsync(new PeerOplogConfirmationEntity
{
Id = Guid.NewGuid().ToString(),
PeerNodeId = peerNodeId,
SourceNodeId = sourceNodeId,
ConfirmedWall = timestamp.PhysicalTime,
ConfirmedLogic = timestamp.LogicalCounter,
ConfirmedHash = hash ?? "",
LastConfirmedUtcMs = nowMs,
IsActive = true
});
await _context.SaveChangesAsync(cancellationToken);
return;
}
// Only persist when something actually advances:
//  - the incoming (wall, logic) point is strictly newer, or
//  - the same point is re-confirmed with a different hash, or
//  - the row was inactive and must be reactivated.
bool isNewer = IsIncomingTimestampNewer(timestamp, existing);
bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
timestamp.LogicalCounter == existing.ConfirmedLogic &&
!string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal);
if (!isNewer && !samePointHashChanged && existing.IsActive) return;
// NOTE(review): an OLDER incoming timestamp with a hash change at that older point still
// falls through here and overwrites the stored (wall, logic) backwards — confirm intended.
existing.ConfirmedWall = timestamp.PhysicalTime;
existing.ConfirmedLogic = timestamp.LogicalCounter;
existing.ConfirmedHash = hash ?? "";
existing.LastConfirmedUtcMs = nowMs;
existing.IsActive = true;
await _context.PeerOplogConfirmations.UpdateAsync(existing);
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
// Returns real confirmations only; registration sentinel rows are excluded.
public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(
CancellationToken cancellationToken = default)
{
var confirmations = _context.PeerOplogConfirmations
.Find(c => c.SourceNodeId != RegistrationSourceNodeId)
.ToDomain()
.ToList();
return Task.FromResult<IEnumerable<PeerOplogConfirmation>>(confirmations);
}
/// <inheritdoc />
// Per-peer variant of GetConfirmationsAsync; sentinel rows are likewise excluded.
public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsForPeerAsync(
string peerNodeId,
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
var confirmations = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId != RegistrationSourceNodeId)
.ToDomain()
.ToList();
return Task.FromResult<IEnumerable<PeerOplogConfirmation>>(confirmations);
}
/// <inheritdoc />
// Soft-delete: rows are deactivated (IsActive = false), not removed, so the
// confirmation history survives and the peer can be re-registered later.
public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
var matches = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId)
.ToList();
if (matches.Count == 0) return;
long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
foreach (var match in matches)
{
if (!match.IsActive) continue;
match.IsActive = false;
match.LastConfirmedUtcMs = nowMs;
await _context.PeerOplogConfirmations.UpdateAsync(match);
}
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
// Note: active registration sentinel rows count here, so a peer is "tracked"
// as soon as it is registered, even before any real confirmation arrives.
public override Task<IEnumerable<string>> GetActiveTrackedPeersAsync(CancellationToken cancellationToken = default)
{
var peers = _context.PeerOplogConfirmations
.Find(c => c.IsActive)
.Select(c => c.PeerNodeId)
.Distinct(StringComparer.Ordinal)
.ToList();
return Task.FromResult<IEnumerable<string>>(peers);
}
/// <inheritdoc />
// Hard-deletes every row, including registration sentinels.
public override async Task DropAsync(CancellationToken cancellationToken = default)
{
var allIds = _context.PeerOplogConfirmations.FindAll().Select(c => c.Id).ToList();
await _context.PeerOplogConfirmations.DeleteBulkAsync(allIds);
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
// Unlike the read queries, export includes registration sentinel rows (FindAll).
public override Task<IEnumerable<PeerOplogConfirmation>> ExportAsync(CancellationToken cancellationToken = default)
{
var exported = _context.PeerOplogConfirmations
.FindAll()
.ToDomain()
.ToList();
return Task.FromResult<IEnumerable<PeerOplogConfirmation>>(exported);
}
/// <inheritdoc />
// Import is a last-writer-wins upsert: existing rows are overwritten unconditionally
// with the incoming values (contrast with MergeAsync, which keeps the newer state).
public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == item.PeerNodeId && c.SourceNodeId == item.SourceNodeId)
.FirstOrDefault();
if (existing == null)
{
await _context.PeerOplogConfirmations.InsertAsync(item.ToEntity());
continue;
}
existing.ConfirmedWall = item.ConfirmedWall;
existing.ConfirmedLogic = item.ConfirmedLogic;
existing.ConfirmedHash = item.ConfirmedHash;
existing.LastConfirmedUtcMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
existing.IsActive = item.IsActive;
await _context.PeerOplogConfirmations.UpdateAsync(existing);
}
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
// Merge keeps whichever side is newer, field group by field group:
// the confirmation point advances only if the incoming HLC timestamp is greater,
// LastConfirmedUtc only if later, and IsActive is copied when it differs.
public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == item.PeerNodeId && c.SourceNodeId == item.SourceNodeId)
.FirstOrDefault();
if (existing == null)
{
await _context.PeerOplogConfirmations.InsertAsync(item.ToEntity());
continue;
}
var changed = false;
// Comparison delegates to HlcTimestamp's ">" operator (unlike IsIncomingTimestampNewer,
// which compares wall/logic directly) — presumably equivalent ordering; verify.
var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId);
var existingTimestamp =
new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId);
if (incomingTimestamp > existingTimestamp)
{
existing.ConfirmedWall = item.ConfirmedWall;
existing.ConfirmedLogic = item.ConfirmedLogic;
existing.ConfirmedHash = item.ConfirmedHash;
changed = true;
}
long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs)
{
existing.LastConfirmedUtcMs = incomingLastConfirmedMs;
changed = true;
}
if (existing.IsActive != item.IsActive)
{
existing.IsActive = item.IsActive;
changed = true;
}
if (changed) await _context.PeerOplogConfirmations.UpdateAsync(existing);
}
await _context.SaveChangesAsync(cancellationToken);
}
// Strict (wall, logic) lexicographic "newer" check; the timestamp's node id is ignored.
private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, PeerOplogConfirmationEntity existing)
{
if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true;
if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall &&
incomingTimestamp.LogicalCounter > existing.ConfirmedLogic)
return true;
return false;
}
}

View File

@@ -1,167 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations.
/// </summary>
/// <remarks>
/// This class enables storage, retrieval, and management of snapshot metadata using the provided
/// database context. It is typically used in scenarios where snapshot metadata needs to be persisted in a document
/// database. The class supports bulk operations and incremental updates, and can be extended for custom database
/// contexts. Thread safety depends on the underlying context implementation.
/// </remarks>
/// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing snapshot metadata. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore where TDbContext : CBDDCDocumentDbContext
{
/// <summary>
/// Represents the database context used for data access operations within the derived class.
/// </summary>
/// <remarks>
/// Intended for use by derived classes to interact with the underlying database. The context
/// should be properly disposed of according to the application's lifetime management strategy.
/// </remarks>
protected readonly TDbContext _context;
/// <summary>
/// Provides logging capabilities for the BLiteSnapshotMetadataStore operations.
/// </summary>
/// <remarks>
/// Intended for use by derived classes to record diagnostic and operational information. The
/// logger instance is specific to the BLiteSnapshotMetadataStore&lt;TDbContext&gt; type.
/// </remarks>
protected readonly ILogger<BLiteSnapshotMetadataStore<TDbContext>> _logger;
/// <summary>
/// Initializes a new instance of the BLiteSnapshotMetadataStore class using the specified database context and
/// optional logger.
/// </summary>
/// <param name="context">The database context to be used for accessing snapshot metadata. Cannot be null.</param>
/// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param>
/// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception>
public BLiteSnapshotMetadataStore(TDbContext context,
ILogger<BLiteSnapshotMetadataStore<TDbContext>>? logger = null)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLiteSnapshotMetadataStore<TDbContext>>.Instance;
}
/// <inheritdoc />
// Deletes every snapshot metadata row; synchronous BLite reads are pushed off the
// caller's thread via Task.Run, as in all query methods of this store.
public override async Task DropAsync(CancellationToken cancellationToken = default)
{
// Use Id (technical key) for deletion, not NodeId (business key)
var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(),
cancellationToken);
await _context.SnapshotMetadatas.DeleteBulkAsync(allIds);
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override async Task<IEnumerable<SnapshotMetadata>> ExportAsync(CancellationToken cancellationToken = default)
{
return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken);
}
/// <inheritdoc />
// Returns null when no snapshot metadata exists for the node.
public override async Task<string?> GetSnapshotHashAsync(string nodeId,
CancellationToken cancellationToken = default)
{
// NodeId is now a regular indexed property, not the Key
var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(),
cancellationToken);
return snapshot?.Hash;
}
/// <inheritdoc />
// NOTE(review): inserts unconditionally without checking for an existing NodeId.
// Given the unique NodeId index on SnapshotMetadatas, importing into a non-empty
// store would presumably violate that index — confirm import is only used on empty stores.
public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items,
CancellationToken cancellationToken = default)
{
foreach (var metadata in items) await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata,
CancellationToken cancellationToken = default)
{
await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
// Merge is newer-wins per NodeId: an existing row is only overwritten when the
// incoming (PhysicalTime, LogicalCounter) pair is strictly greater.
public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items,
CancellationToken cancellationToken = default)
{
foreach (var metadata in items)
{
// NodeId is now a regular indexed property, not the Key
var existing =
await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(),
cancellationToken);
if (existing == null)
{
await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
}
else
{
// Update only if incoming is newer
if (metadata.TimestampPhysicalTime > existing.TimestampPhysicalTime ||
(metadata.TimestampPhysicalTime == existing.TimestampPhysicalTime &&
metadata.TimestampLogicalCounter > existing.TimestampLogicalCounter))
{
existing.NodeId = metadata.NodeId;
existing.TimestampPhysicalTime = metadata.TimestampPhysicalTime;
existing.TimestampLogicalCounter = metadata.TimestampLogicalCounter;
existing.Hash = metadata.Hash;
await _context.SnapshotMetadatas.UpdateAsync(existing);
}
}
}
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
// Silently does nothing when no row exists for the given NodeId.
public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta,
CancellationToken cancellationToken)
{
// NodeId is now a regular indexed property, not the Key - find existing by NodeId
var existing =
await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(),
cancellationToken);
if (existing != null)
{
existing.NodeId = existingMeta.NodeId;
existing.TimestampPhysicalTime = existingMeta.TimestampPhysicalTime;
existing.TimestampLogicalCounter = existingMeta.TimestampLogicalCounter;
existing.Hash = existingMeta.Hash;
await _context.SnapshotMetadatas.UpdateAsync(existing);
await _context.SaveChangesAsync(cancellationToken);
}
}
/// <inheritdoc />
public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId,
CancellationToken cancellationToken = default)
{
// NodeId is now a regular indexed property, not the Key
return await Task.Run(
() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(),
cancellationToken);
}
/// <inheritdoc />
public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(
CancellationToken cancellationToken = default)
{
return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken);
}
}

View File

@@ -1,102 +0,0 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Extension methods for configuring BLite persistence for ZB.MOM.WW.CBDDC.
/// </summary>
public static class CBDDCBLiteExtensions
{
    /// <summary>
    /// Adds BLite persistence to CBDDC using a custom DbContext and DocumentStore implementation.
    /// </summary>
    /// <typeparam name="TDbContext">The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext.</typeparam>
    /// <typeparam name="TDocumentStore">The type of the document store implementation. Must implement IDocumentStore.</typeparam>
    /// <param name="services">The service collection to add the services to.</param>
    /// <param name="contextFactory">A factory function that creates the DbContext instance.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddCBDDCBLite<TDbContext, TDocumentStore>(
        this IServiceCollection services,
        Func<IServiceProvider, TDbContext> contextFactory)
        where TDbContext : CBDDCDocumentDbContext
        where TDocumentStore : class, IDocumentStore
    {
        // Delegate the shared registrations (context, resolver, stores, snapshot service)
        // to the single-generic overload, which also performs argument validation.
        services.AddCBDDCBLite(contextFactory);

        // Vector clock service, shared between the DocumentStore and the OplogStore.
        services.TryAddSingleton<IVectorClockService, VectorClockService>();

        // The concrete document store implementation supplied by the caller.
        services.TryAddSingleton<IDocumentStore, TDocumentStore>();
        return services;
    }

    /// <summary>
    /// Adds BLite persistence to CBDDC using a custom DbContext (without explicit DocumentStore type).
    /// </summary>
    /// <typeparam name="TDbContext">The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext.</typeparam>
    /// <param name="services">The service collection to add the services to.</param>
    /// <param name="contextFactory">A factory function that creates the DbContext instance.</param>
    /// <returns>The service collection for chaining.</returns>
    /// <remarks>You must manually register IDocumentStore after calling this method.</remarks>
    public static IServiceCollection AddCBDDCBLite<TDbContext>(
        this IServiceCollection services,
        Func<IServiceProvider, TDbContext> contextFactory)
        where TDbContext : CBDDCDocumentDbContext
    {
        if (services is null) throw new ArgumentNullException(nameof(services));
        if (contextFactory is null) throw new ArgumentNullException(nameof(contextFactory));

        // The context is a singleton so that every store shares one BLite instance;
        // the base-type registration lets consumers resolve it as CBDDCDocumentDbContext too.
        services.TryAddSingleton<TDbContext>(contextFactory);
        services.TryAddSingleton<CBDDCDocumentDbContext>(sp => sp.GetRequiredService<TDbContext>());

        // Default conflict resolver (last write wins) unless the host registered its own.
        services.TryAddSingleton<IConflictResolver, LastWriteWinsConflictResolver>();

        // BLite-backed stores — all singletons, matching the context lifetime.
        services.TryAddSingleton<IOplogStore, BLiteOplogStore<TDbContext>>();
        services.TryAddSingleton<IPeerConfigurationStore, BLitePeerConfigurationStore<TDbContext>>();
        services.TryAddSingleton<IPeerOplogConfirmationStore, BLitePeerOplogConfirmationStore<TDbContext>>();
        services.TryAddSingleton<ISnapshotMetadataStore, BLiteSnapshotMetadataStore<TDbContext>>();
        services.TryAddSingleton<IDocumentMetadataStore, BLiteDocumentMetadataStore<TDbContext>>();

        // Snapshot service from the generic ZB.MOM.WW.CBDDC.Persistence package.
        services.TryAddSingleton<ISnapshotService, SnapshotStore>();
        return services;
    }
}
/// <summary>
/// Options for configuring BLite persistence.
/// </summary>
public class BLiteOptions
{
    /// <summary>
    /// Gets or sets the file path to the BLite database file. Defaults to an empty string.
    /// </summary>
    public string DatabasePath { get; set; } = string.Empty;
}

View File

@@ -1,117 +0,0 @@
using BLite.Core;
using BLite.Core.Collections;
using BLite.Core.Metadata;
using BLite.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// BLite document database context hosting the CBDDC sync bookkeeping collections
/// (oplog entries, snapshot metadata, remote peer configurations, document metadata,
/// and peer oplog confirmations). Declared partial so applications can extend it with
/// their own collections.
/// </summary>
public partial class CBDDCDocumentDbContext : DocumentDbContext
{
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path.
/// </summary>
/// <param name="databasePath">
/// The file system path to the database file to be used by the context. Cannot be null or
/// empty.
/// </param>
public CBDDCDocumentDbContext(string databasePath) : base(databasePath)
{
}
/// <summary>
/// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file
/// configuration.
/// </summary>
/// <param name="databasePath">The file system path to the database file. This value cannot be null or empty.</param>
/// <param name="config">
/// The configuration settings for the page file. Specifies options that control how the database
/// pages are managed.
/// </param>
public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config)
{
}
/// <summary>
/// Gets the collection of operation log entries associated with this instance.
/// </summary>
/// <remarks>
/// The collection provides access to all recorded operation log (oplog) entries, which can be
/// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed
/// directly through this property.
/// </remarks>
public DocumentCollection<string, OplogEntity> OplogEntries { get; private set; } = null!;
/// <summary>
/// Gets the collection of snapshot metadata associated with the document.
/// </summary>
public DocumentCollection<string, SnapshotMetadataEntity> SnapshotMetadatas { get; private set; } = null!;
/// <summary>
/// Gets the collection of remote peer configurations associated with this instance.
/// </summary>
/// <remarks>
/// Use this collection to access or enumerate the configuration settings for each remote peer.
/// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the
/// containing class.
/// </remarks>
public DocumentCollection<string, RemotePeerEntity> RemotePeerConfigurations { get; private set; } = null!;
/// <summary>
/// Gets the collection of document metadata for sync tracking.
/// </summary>
/// <remarks>
/// Stores HLC timestamps and deleted state for each document without modifying application entities.
/// Used to track document versions for incremental sync instead of full snapshots.
/// </remarks>
public DocumentCollection<string, DocumentMetadataEntity> DocumentMetadatas { get; private set; } = null!;
/// <summary>
/// Gets the collection of peer oplog confirmation records for pruning safety tracking.
/// </summary>
public DocumentCollection<string, PeerOplogConfirmationEntity> PeerOplogConfirmations { get; private set; } = null!;
/// <inheritdoc />
// Every collection follows the same pattern: a generated Id is the technical key,
// a unique index enforces the business key, plus secondary indexes for common queries.
// Collection properties above are presumably bound to these names by the base
// DocumentDbContext — verify against the BLite documentation.
protected override void OnModelCreating(ModelBuilder modelBuilder)
{
base.OnModelCreating(modelBuilder);
// OplogEntries: Use Id as technical key, Hash as unique business key
modelBuilder.Entity<OplogEntity>()
.ToCollection("OplogEntries")
.HasKey(e => e.Id)
.HasIndex(e => e.Hash, unique: true) // Hash is unique business key
.HasIndex(e => new { e.TimestampPhysicalTime, e.TimestampLogicalCounter, e.TimestampNodeId })
.HasIndex(e => e.Collection);
// SnapshotMetadatas: Use Id as technical key, NodeId as unique business key
modelBuilder.Entity<SnapshotMetadataEntity>()
.ToCollection("SnapshotMetadatas")
.HasKey(e => e.Id)
.HasIndex(e => e.NodeId, unique: true) // NodeId is unique business key
.HasIndex(e => new { e.TimestampPhysicalTime, e.TimestampLogicalCounter });
// RemotePeerConfigurations: Use Id as technical key, NodeId as unique business key
modelBuilder.Entity<RemotePeerEntity>()
.ToCollection("RemotePeerConfigurations")
.HasKey(e => e.Id)
.HasIndex(e => e.NodeId, unique: true) // NodeId is unique business key
.HasIndex(e => e.IsEnabled);
// DocumentMetadatas: Use Id as technical key, Collection+Key as unique composite business key
modelBuilder.Entity<DocumentMetadataEntity>()
.ToCollection("DocumentMetadatas")
.HasKey(e => e.Id)
.HasIndex(e => new { e.Collection, e.Key }, unique: true) // Composite business key
.HasIndex(e => new { e.HlcPhysicalTime, e.HlcLogicalCounter, e.HlcNodeId })
.HasIndex(e => e.Collection);
// PeerOplogConfirmations: Use Id as technical key, PeerNodeId+SourceNodeId as unique business key
modelBuilder.Entity<PeerOplogConfirmationEntity>()
.ToCollection("PeerOplogConfirmations")
.HasKey(e => e.Id)
.HasIndex(e => new { e.PeerNodeId, e.SourceNodeId }, unique: true)
.HasIndex(e => e.IsActive)
.HasIndex(e => new { e.SourceNodeId, e.ConfirmedWall, e.ConfirmedLogic });
}
}

View File

@@ -1,47 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity representing document metadata for sync tracking.
/// Stores HLC timestamp and deleted state for each document without modifying application entities.
/// One row exists per (Collection, Key) pair; the HLC fields record the last known modification.
/// </summary>
public class DocumentMetadataEntity
{
/// <summary>
/// Gets or sets the unique identifier for this entity (technical key).
/// Auto-generated GUID string.
/// </summary>
[Key]
public string Id { get; set; } = "";
/// <summary>
/// Gets or sets the collection name (business key part 1).
/// </summary>
public string Collection { get; set; } = "";
/// <summary>
/// Gets or sets the document key within the collection (business key part 2).
/// </summary>
public string Key { get; set; } = "";
/// <summary>
/// Gets or sets the physical time component of the HLC timestamp.
/// </summary>
public long HlcPhysicalTime { get; set; }
/// <summary>
/// Gets or sets the logical counter component of the HLC timestamp.
/// </summary>
public int HlcLogicalCounter { get; set; }
/// <summary>
/// Gets or sets the node ID that last modified this document.
/// </summary>
public string HlcNodeId { get; set; } = "";
/// <summary>
/// Gets or sets whether this document is marked as deleted (tombstone).
/// Tombstones keep the row so deletions can propagate during sync.
/// </summary>
public bool IsDeleted { get; set; }
}

View File

@@ -1,240 +0,0 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// Provides extension methods for mapping between BLite entities and domain models.
/// </summary>
public static class EntityMappers
{
#region DocumentMetadataEntity Helpers
/// <summary>
/// Builds a new <see cref="DocumentMetadataEntity" /> describing the sync state of one document,
/// assigning a fresh GUID technical key.
/// </summary>
/// <param name="collection">The collection name that owns the document.</param>
/// <param name="key">The document key within the collection.</param>
/// <param name="timestamp">The hybrid logical clock timestamp for the document state.</param>
/// <param name="isDeleted">True when the document is a tombstone; defaults to false.</param>
public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp,
    bool isDeleted = false)
{
    var metadata = new DocumentMetadataEntity
    {
        Id = Guid.NewGuid().ToString(),
        Collection = collection,
        Key = key,
        IsDeleted = isDeleted
    };
    // Flatten the HLC timestamp into its three stored components.
    metadata.HlcPhysicalTime = timestamp.PhysicalTime;
    metadata.HlcLogicalCounter = timestamp.LogicalCounter;
    metadata.HlcNodeId = timestamp.NodeId;
    return metadata;
}
#endregion
#region OplogEntity Mappers
/// <summary>
/// Converts an OplogEntry domain model to an OplogEntity for persistence.
/// A fresh GUID is generated as the technical key; Hash remains the business key.
/// </summary>
/// <param name="entry">The oplog entry to convert.</param>
public static OplogEntity ToEntity(this OplogEntry entry)
{
    var entity = new OplogEntity
    {
        Id = Guid.NewGuid().ToString(),
        Collection = entry.Collection,
        Key = entry.Key,
        Operation = (int)entry.Operation,
        TimestampPhysicalTime = entry.Timestamp.PhysicalTime,
        TimestampLogicalCounter = entry.Timestamp.LogicalCounter,
        TimestampNodeId = entry.Timestamp.NodeId,
        Hash = entry.Hash,
        PreviousHash = entry.PreviousHash
    };
    // Use empty string instead of null to avoid BLite BSON serialization issues.
    entity.PayloadJson = entry.Payload?.GetRawText() ?? "";
    return entity;
}
/// <summary>
/// Converts an OplogEntity back into an OplogEntry domain model,
/// reassembling the HLC timestamp from its stored components.
/// </summary>
/// <param name="entity">The persisted oplog entity to convert.</param>
public static OplogEntry ToDomain(this OplogEntity entity)
{
    // An empty payload string marks a null payload (Delete operations).
    JsonElement? payload = string.IsNullOrEmpty(entity.PayloadJson)
        ? null
        : JsonSerializer.Deserialize<JsonElement>(entity.PayloadJson);

    var timestamp = new HlcTimestamp(
        entity.TimestampPhysicalTime, entity.TimestampLogicalCounter, entity.TimestampNodeId);

    return new OplogEntry(
        entity.Collection,
        entity.Key,
        (OperationType)entity.Operation,
        payload,
        timestamp,
        entity.PreviousHash,
        entity.Hash);
}
/// <summary>
/// Converts a collection of OplogEntity to OplogEntry domain models.
/// </summary>
/// <param name="entities">The oplog entities to convert.</param>
public static IEnumerable<OplogEntry> ToDomain(this IEnumerable<OplogEntity> entities)
{
return entities.Select(e => e.ToDomain());
}
#endregion
#region SnapshotMetadataEntity Mappers
/// <summary>
/// Converts a SnapshotMetadata domain model to a SnapshotMetadataEntity for persistence.
/// </summary>
/// <param name="metadata">The snapshot metadata to convert.</param>
public static SnapshotMetadataEntity ToEntity(this SnapshotMetadata metadata)
{
return new SnapshotMetadataEntity
{
Id = Guid.NewGuid().ToString(), // Auto-generate technical key
NodeId = metadata.NodeId,
TimestampPhysicalTime = metadata.TimestampPhysicalTime,
TimestampLogicalCounter = metadata.TimestampLogicalCounter,
Hash = metadata.Hash
};
}
/// <summary>
/// Converts a SnapshotMetadataEntity to a SnapshotMetadata domain model.
/// </summary>
/// <param name="entity">The persisted snapshot metadata entity to convert.</param>
public static SnapshotMetadata ToDomain(this SnapshotMetadataEntity entity)
{
return new SnapshotMetadata
{
NodeId = entity.NodeId,
TimestampPhysicalTime = entity.TimestampPhysicalTime,
TimestampLogicalCounter = entity.TimestampLogicalCounter,
Hash = entity.Hash
};
}
/// <summary>
/// Converts a collection of SnapshotMetadataEntity to SnapshotMetadata domain models.
/// </summary>
/// <param name="entities">The snapshot metadata entities to convert.</param>
public static IEnumerable<SnapshotMetadata> ToDomain(this IEnumerable<SnapshotMetadataEntity> entities)
{
return entities.Select(e => e.ToDomain());
}
#endregion
#region RemotePeerEntity Mappers
/// <summary>
/// Converts a RemotePeerConfiguration domain model to a RemotePeerEntity for persistence.
/// </summary>
/// <param name="config">The remote peer configuration to convert.</param>
public static RemotePeerEntity ToEntity(this RemotePeerConfiguration config)
{
return new RemotePeerEntity
{
Id = Guid.NewGuid().ToString(), // Auto-generate technical key
NodeId = config.NodeId,
Address = config.Address,
Type = (int)config.Type,
IsEnabled = config.IsEnabled,
InterestsJson = config.InterestingCollections.Count > 0
? JsonSerializer.Serialize(config.InterestingCollections)
: ""
};
}
/// <summary>
/// Converts a RemotePeerEntity to a RemotePeerConfiguration domain model.
/// </summary>
/// <param name="entity">The persisted remote peer entity to convert.</param>
public static RemotePeerConfiguration ToDomain(this RemotePeerEntity entity)
{
var config = new RemotePeerConfiguration
{
NodeId = entity.NodeId,
Address = entity.Address,
Type = (PeerType)entity.Type,
IsEnabled = entity.IsEnabled
};
if (!string.IsNullOrEmpty(entity.InterestsJson))
config.InterestingCollections = JsonSerializer.Deserialize<List<string>>(entity.InterestsJson) ?? [];
return config;
}
/// <summary>
/// Converts a collection of RemotePeerEntity to RemotePeerConfiguration domain models.
/// </summary>
/// <param name="entities">The remote peer entities to convert.</param>
public static IEnumerable<RemotePeerConfiguration> ToDomain(this IEnumerable<RemotePeerEntity> entities)
{
return entities.Select(e => e.ToDomain());
}
#endregion
#region PeerOplogConfirmationEntity Mappers
/// <summary>
/// Converts a peer oplog confirmation domain model to a BLite entity.
/// </summary>
/// <param name="confirmation">The confirmation to convert.</param>
public static PeerOplogConfirmationEntity ToEntity(this PeerOplogConfirmation confirmation)
{
return new PeerOplogConfirmationEntity
{
Id = Guid.NewGuid().ToString(),
PeerNodeId = confirmation.PeerNodeId,
SourceNodeId = confirmation.SourceNodeId,
ConfirmedWall = confirmation.ConfirmedWall,
ConfirmedLogic = confirmation.ConfirmedLogic,
ConfirmedHash = confirmation.ConfirmedHash,
LastConfirmedUtcMs = confirmation.LastConfirmedUtc.ToUnixTimeMilliseconds(),
IsActive = confirmation.IsActive
};
}
/// <summary>
/// Converts a peer oplog confirmation entity to a domain model.
/// </summary>
/// <param name="entity">The entity to convert.</param>
public static PeerOplogConfirmation ToDomain(this PeerOplogConfirmationEntity entity)
{
return new PeerOplogConfirmation
{
PeerNodeId = entity.PeerNodeId,
SourceNodeId = entity.SourceNodeId,
ConfirmedWall = entity.ConfirmedWall,
ConfirmedLogic = entity.ConfirmedLogic,
ConfirmedHash = entity.ConfirmedHash,
LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(entity.LastConfirmedUtcMs),
IsActive = entity.IsActive
};
}
/// <summary>
/// Converts a collection of peer oplog confirmation entities to domain models.
/// </summary>
/// <param name="entities">The entities to convert.</param>
public static IEnumerable<PeerOplogConfirmation> ToDomain(this IEnumerable<PeerOplogConfirmationEntity> entities)
{
return entities.Select(e => e.ToDomain());
}
#endregion
}

View File

@@ -1,61 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity representing an operation log entry.
/// </summary>
public class OplogEntity
{
    /// <summary>
    /// Gets or sets the unique identifier for this entity (technical key).
    /// Auto-generated GUID string; EntityMappers.ToEntity assigns a fresh GUID on every conversion.
    /// </summary>
    [Key]
    public string Id { get; set; } = "";
    /// <summary>
    /// Gets or sets the collection name.
    /// </summary>
    public string Collection { get; set; } = "";
    /// <summary>
    /// Gets or sets the document key.
    /// </summary>
    public string Key { get; set; } = "";
    /// <summary>
    /// Gets or sets the operation type (0 = Put, 1 = Delete).
    /// Values mirror the OperationType enum via an int cast in the mappers.
    /// </summary>
    public int Operation { get; set; }
    /// <summary>
    /// Gets or sets the payload JSON (empty string for Delete operations).
    /// Empty string is used instead of null so the storage serializer never sees null.
    /// </summary>
    public string PayloadJson { get; set; } = "";
    /// <summary>
    /// Gets or sets the physical time component of the HLC timestamp.
    /// </summary>
    public long TimestampPhysicalTime { get; set; }
    /// <summary>
    /// Gets or sets the logical counter component of the HLC timestamp.
    /// </summary>
    public int TimestampLogicalCounter { get; set; }
    /// <summary>
    /// Gets or sets the node ID component of the HLC timestamp.
    /// </summary>
    public string TimestampNodeId { get; set; } = "";
    /// <summary>
    /// Gets or sets the cryptographic hash of this entry (business key).
    /// </summary>
    public string Hash { get; set; } = "";
    /// <summary>
    /// Gets or sets the hash of the previous entry in the chain.
    /// Together with Hash this links entries into a chain; chain-start convention
    /// is not visible here — presumably "" for the first entry (confirm against the oplog store).
    /// </summary>
    public string PreviousHash { get; set; } = "";
}

View File

@@ -1,50 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity representing a peer oplog confirmation watermark.
/// </summary>
public class PeerOplogConfirmationEntity
{
    /// <summary>
    /// Gets or sets the unique technical identifier for this entity.
    /// EntityMappers.ToEntity assigns a fresh GUID string on every conversion.
    /// </summary>
    [Key]
    public string Id { get; set; } = "";
    /// <summary>
    /// Gets or sets the tracked peer node identifier.
    /// </summary>
    public string PeerNodeId { get; set; } = "";
    /// <summary>
    /// Gets or sets the source node identifier for this confirmation.
    /// (PeerNodeId, SourceNodeId) together appear to form the logical key — confirm against the store.
    /// </summary>
    public string SourceNodeId { get; set; } = "";
    /// <summary>
    /// Gets or sets the physical wall-clock component of the confirmed HLC timestamp.
    /// </summary>
    public long ConfirmedWall { get; set; }
    /// <summary>
    /// Gets or sets the logical component of the confirmed HLC timestamp.
    /// </summary>
    public int ConfirmedLogic { get; set; }
    /// <summary>
    /// Gets or sets the confirmed hash value.
    /// </summary>
    public string ConfirmedHash { get; set; } = "";
    /// <summary>
    /// Gets or sets the UTC instant of the last update as unix milliseconds.
    /// Converted to/from DateTimeOffset by the EntityMappers extensions.
    /// </summary>
    public long LastConfirmedUtcMs { get; set; }
    /// <summary>
    /// Gets or sets whether the tracked peer remains active.
    /// </summary>
    public bool IsActive { get; set; } = true;
}

View File

@@ -1,42 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity representing a remote peer configuration.
/// </summary>
public class RemotePeerEntity
{
    /// <summary>
    /// Gets or sets the unique identifier for this entity (technical key).
    /// Auto-generated GUID string; EntityMappers.ToEntity assigns a fresh GUID on every conversion.
    /// </summary>
    [Key]
    public string Id { get; set; } = "";
    /// <summary>
    /// Gets or sets the unique identifier for the remote peer node (business key).
    /// </summary>
    public string NodeId { get; set; } = "";
    /// <summary>
    /// Gets or sets the network address of the remote peer (hostname:port).
    /// </summary>
    public string Address { get; set; } = "";
    /// <summary>
    /// Gets or sets the type of the peer (0=LanDiscovered, 1=StaticRemote, 2=CloudRemote).
    /// Values mirror the PeerType enum via an int cast in the mappers.
    /// </summary>
    public int Type { get; set; }
    /// <summary>
    /// Gets or sets whether this peer is enabled for synchronization.
    /// </summary>
    public bool IsEnabled { get; set; } = true;
    /// <summary>
    /// Gets or sets the collection interests as a JSON string.
    /// Serialized from RemotePeerConfiguration.InterestingCollections by EntityMappers.ToEntity.
    /// Use empty string instead of null for BLite compatibility.
    /// </summary>
    public string InterestsJson { get; set; } = "";
}

View File

@@ -1,36 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity representing snapshot metadata (oplog pruning checkpoint).
/// </summary>
public class SnapshotMetadataEntity
{
    /// <summary>
    /// Gets or sets the unique identifier for this entity (technical key).
    /// Auto-generated GUID string; EntityMappers.ToEntity assigns a fresh GUID on every conversion.
    /// </summary>
    [Key]
    public string Id { get; set; } = "";
    /// <summary>
    /// Gets or sets the node identifier (business key).
    /// </summary>
    public string NodeId { get; set; } = "";
    /// <summary>
    /// Gets or sets the physical time component of the timestamp.
    /// </summary>
    public long TimestampPhysicalTime { get; set; }
    /// <summary>
    /// Gets or sets the logical counter component of the timestamp.
    /// </summary>
    public int TimestampLogicalCounter { get; set; }
    /// <summary>
    /// Gets or sets the hash of the snapshot.
    /// </summary>
    public string Hash { get; set; } = "";
}

View File

@@ -1,10 +1,10 @@
# ZB.MOM.WW.CBDDC.Persistence
BLite persistence provider and foundational persistence implementations for **CBDDC**.
SurrealDB (embedded RocksDB) persistence provider and foundational persistence implementations for **CBDDC**.
## What's Included
This package provides both BLite provider types and core persistence services:
This package provides Surreal provider types and core persistence services:
- **OplogStore**: Base implementation for append-only operation log storage
- **VectorClockService**: Thread-safe in-memory vector clock management
@@ -14,7 +14,7 @@ This package provides both BLite provider types and core persistence services:
## When To Use This Package
- **As a Library User**: Install this package to use CBDDC with BLite persistence.
- **As a Library User**: Install this package to use CBDDC with Surreal embedded persistence.
- **As a Provider Developer**: Reference this package to build custom persistence providers by extending the base
classes

View File

@@ -0,0 +1,142 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Embedded.Options;
using SurrealDb.Embedded.RocksDb;
using SurrealDb.Net;
using SurrealDb.Net.Models.Response;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Embedded RocksDB-backed Surreal client wrapper used by CBDDC persistence components.
/// </summary>
public sealed class CBDDCSurrealEmbeddedClient : ICBDDCSurrealEmbeddedClient
{
    private static readonly IReadOnlyDictionary<string, object?> EmptyParameters = new Dictionary<string, object?>();

    private readonly SemaphoreSlim _initializeGate = new(1, 1);
    private readonly ILogger<CBDDCSurrealEmbeddedClient> _logger;
    private readonly CBDDCSurrealEmbeddedOptions _options;

    // Both flags are read without holding the gate (double-checked initialization and
    // ThrowIfDisposed), so they must be volatile to guarantee that a write made by one
    // thread is visible to subsequent reads on other threads.
    private volatile bool _disposed;
    private volatile bool _initialized;

    /// <summary>
    /// Initializes a new instance of the <see cref="CBDDCSurrealEmbeddedClient" /> class.
    /// Validates options and constructs the underlying RocksDB-backed client; no I/O occurs
    /// until <see cref="InitializeAsync" /> is called.
    /// </summary>
    /// <param name="options">Embedded Surreal options.</param>
    /// <param name="logger">Optional logger.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="options" /> is null.</exception>
    /// <exception cref="ArgumentException">Thrown when required option values are missing or invalid.</exception>
    public CBDDCSurrealEmbeddedClient(
        CBDDCSurrealEmbeddedOptions options,
        ILogger<CBDDCSurrealEmbeddedClient>? logger = null)
    {
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _logger = logger ?? NullLogger<CBDDCSurrealEmbeddedClient>.Instance;
        // An empty endpoint is tolerated; any non-empty endpoint must be a rocksdb:// URI.
        if (!string.IsNullOrWhiteSpace(_options.Endpoint) &&
            !_options.Endpoint.StartsWith("rocksdb://", StringComparison.OrdinalIgnoreCase))
            throw new ArgumentException(
                "Embedded Surreal endpoint must use the rocksdb:// scheme.",
                nameof(options));
        if (string.IsNullOrWhiteSpace(_options.Namespace))
            throw new ArgumentException("Namespace is required.", nameof(options));
        if (string.IsNullOrWhiteSpace(_options.Database))
            throw new ArgumentException("Database is required.", nameof(options));
        if (string.IsNullOrWhiteSpace(_options.NamingPolicy))
            throw new ArgumentException("Naming policy is required.", nameof(options));
        string dbPath = ResolveDatabasePath(_options.DatabasePath);
        var embeddedOptionsBuilder = SurrealDbEmbeddedOptions.Create();
        if (_options.StrictMode.HasValue)
            embeddedOptionsBuilder.WithStrictMode(_options.StrictMode.Value);
        Client = new SurrealDbRocksDbClient(dbPath, embeddedOptionsBuilder.Build(), _options.NamingPolicy);
    }

    /// <inheritdoc />
    public ISurrealDbClient Client { get; }

    /// <inheritdoc />
    public async Task InitializeAsync(CancellationToken cancellationToken = default)
    {
        ThrowIfDisposed();
        if (_initialized) return; // fast path once connected (volatile read)
        await _initializeGate.WaitAsync(cancellationToken);
        try
        {
            if (_initialized) return; // re-check under the gate (double-checked init)
            await Client.Connect(cancellationToken);
            await Client.Use(_options.Namespace, _options.Database, cancellationToken);
            _initialized = true;
            _logger.LogInformation("Surreal embedded client initialized for namespace '{Namespace}' and database '{Database}'.",
                _options.Namespace, _options.Database);
        }
        finally
        {
            _initializeGate.Release();
        }
    }

    /// <inheritdoc />
    public async Task<SurrealDbResponse> RawQueryAsync(
        string query,
        IReadOnlyDictionary<string, object?>? parameters = null,
        CancellationToken cancellationToken = default)
    {
        ThrowIfDisposed();
        if (string.IsNullOrWhiteSpace(query))
            throw new ArgumentException("Query is required.", nameof(query));
        // Lazy-initialize so callers never need to call InitializeAsync explicitly.
        await InitializeAsync(cancellationToken);
        return await Client.RawQuery(query, parameters ?? EmptyParameters, cancellationToken);
    }

    /// <inheritdoc />
    public async Task<bool> HealthAsync(CancellationToken cancellationToken = default)
    {
        ThrowIfDisposed();
        await InitializeAsync(cancellationToken);
        return await Client.Health(cancellationToken);
    }

    /// <inheritdoc />
    public void Dispose()
    {
        if (_disposed) return;
        // Set the flag first so concurrent callers fail fast via ThrowIfDisposed.
        _disposed = true;
        Client.Dispose();
        _initializeGate.Dispose();
    }

    /// <inheritdoc />
    public async ValueTask DisposeAsync()
    {
        if (_disposed) return;
        _disposed = true;
        await Client.DisposeAsync();
        _initializeGate.Dispose();
    }

    /// <summary>
    /// Throws <see cref="ObjectDisposedException" /> when this instance has been disposed.
    /// </summary>
    private void ThrowIfDisposed()
    {
        ObjectDisposedException.ThrowIf(_disposed, this);
    }

    /// <summary>
    /// Normalizes the configured database path to an absolute path and ensures its
    /// parent directory exists so RocksDB can create the store.
    /// </summary>
    /// <param name="databasePath">Configured (possibly relative) database path.</param>
    /// <exception cref="ArgumentException">Thrown when the path is null or whitespace.</exception>
    private static string ResolveDatabasePath(string databasePath)
    {
        if (string.IsNullOrWhiteSpace(databasePath))
            throw new ArgumentException("DatabasePath is required.", nameof(databasePath));
        string fullPath = Path.GetFullPath(databasePath);
        string? directory = Path.GetDirectoryName(fullPath);
        if (!string.IsNullOrWhiteSpace(directory)) Directory.CreateDirectory(directory);
        return fullPath;
    }
}

View File

@@ -0,0 +1,75 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using ZB.MOM.WW.CBDDC.Core.Network;
using SurrealDb.Net;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Extension methods for configuring embedded Surreal persistence for CBDDC.
/// </summary>
public static class CBDDCSurrealEmbeddedExtensions
{
    /// <summary>
    /// Adds embedded Surreal infrastructure to CBDDC and registers a document store implementation.
    /// </summary>
    /// <typeparam name="TDocumentStore">The concrete document store implementation.</typeparam>
    /// <param name="services">The service collection to add services to.</param>
    /// <param name="optionsFactory">Factory used to build embedded Surreal options.</param>
    /// <returns>The service collection for chaining.</returns>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="services" /> or <paramref name="optionsFactory" /> is null.
    /// </exception>
    public static IServiceCollection AddCBDDCSurrealEmbedded<TDocumentStore>(
        this IServiceCollection services,
        Func<IServiceProvider, CBDDCSurrealEmbeddedOptions> optionsFactory)
        where TDocumentStore : class, IDocumentStore
    {
        RegisterCoreServices(services, optionsFactory);
        services.TryAddSingleton<IDocumentStore, TDocumentStore>();
        return services;
    }

    /// <summary>
    /// Adds embedded Surreal infrastructure to CBDDC without registering store implementations.
    /// </summary>
    /// <param name="services">The service collection to add services to.</param>
    /// <param name="optionsFactory">Factory used to build embedded Surreal options.</param>
    /// <returns>The service collection for chaining.</returns>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="services" /> or <paramref name="optionsFactory" /> is null.
    /// </exception>
    /// <remarks>
    /// Register store implementations separately when they become available.
    /// </remarks>
    public static IServiceCollection AddCBDDCSurrealEmbedded(
        this IServiceCollection services,
        Func<IServiceProvider, CBDDCSurrealEmbeddedOptions> optionsFactory)
    {
        RegisterCoreServices(services, optionsFactory);
        return services;
    }

    /// <summary>
    /// Registers the shared embedded-Surreal services (client, schema, readiness, stores).
    /// All registrations use TryAdd* so user registrations made earlier take precedence.
    /// </summary>
    /// <param name="services">The service collection to add services to.</param>
    /// <param name="optionsFactory">Factory used to build embedded Surreal options.</param>
    private static void RegisterCoreServices(
        IServiceCollection services,
        Func<IServiceProvider, CBDDCSurrealEmbeddedOptions> optionsFactory)
    {
        // Throw helpers keep validation consistent with the provider's other .NET 7+ idioms
        // (e.g. ObjectDisposedException.ThrowIf in the embedded client).
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(optionsFactory);
        services.TryAddSingleton(optionsFactory);
        services.TryAddSingleton<ICBDDCSurrealEmbeddedClient, CBDDCSurrealEmbeddedClient>();
        services.TryAddSingleton<ISurrealDbClient>(sp => sp.GetRequiredService<ICBDDCSurrealEmbeddedClient>().Client);
        services.TryAddSingleton<ICBDDCSurrealSchemaInitializer, CBDDCSurrealSchemaInitializer>();
        services.TryAddSingleton<ICBDDCSurrealReadinessProbe, CBDDCSurrealReadinessProbe>();
        services.TryAddSingleton<ISurrealCdcCheckpointPersistence, SurrealCdcCheckpointPersistence>();
        services.TryAddSingleton<IConflictResolver, LastWriteWinsConflictResolver>();
        services.TryAddSingleton<IVectorClockService, VectorClockService>();
        services.TryAddSingleton<IPeerConfigurationStore, SurrealPeerConfigurationStore>();
        services.TryAddSingleton<IPeerOplogConfirmationStore, SurrealPeerOplogConfirmationStore>();
        services.TryAddSingleton<ISnapshotMetadataStore, SurrealSnapshotMetadataStore>();
        services.TryAddSingleton<IDocumentMetadataStore, SurrealDocumentMetadataStore>();
        services.TryAddSingleton<IOplogStore, SurrealOplogStore>();
        // SnapshotStore registration matches the other provider extension patterns.
        services.TryAddSingleton<ISnapshotService, SnapshotStore>();
    }
}

View File

@@ -0,0 +1,88 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Configuration for the embedded SurrealDB RocksDB provider.
/// </summary>
public sealed class CBDDCSurrealEmbeddedOptions
{
    /// <summary>
    /// Logical endpoint for this provider. For embedded RocksDB this should use the <c>rocksdb://</c> scheme.
    /// CBDDCSurrealEmbeddedClient rejects any non-empty value that does not start with <c>rocksdb://</c>.
    /// </summary>
    public string Endpoint { get; set; } = "rocksdb://local";
    /// <summary>
    /// File path used by the embedded RocksDB engine.
    /// Resolved to a full path by the client, which also creates the parent directory if missing.
    /// </summary>
    public string DatabasePath { get; set; } = "data/cbddc-surreal.db";
    /// <summary>
    /// Surreal namespace.
    /// Required; the client throws when this is null or whitespace.
    /// </summary>
    public string Namespace { get; set; } = "cbddc";
    /// <summary>
    /// Surreal database name inside the namespace.
    /// Required; the client throws when this is null or whitespace.
    /// </summary>
    public string Database { get; set; } = "main";
    /// <summary>
    /// Naming policy used by the Surreal .NET client serializer.
    /// Passed straight through to the SurrealDbRocksDbClient constructor.
    /// </summary>
    public string NamingPolicy { get; set; } = "camelCase";
    /// <summary>
    /// Optional strict mode flag for embedded Surreal.
    /// When null, the flag is not applied and the engine default is used.
    /// </summary>
    public bool? StrictMode { get; set; }
    /// <summary>
    /// CDC-related options used by persistence stores.
    /// </summary>
    public CBDDCSurrealCdcOptions Cdc { get; set; } = new();
}
/// <summary>
/// CDC/checkpoint configuration for the embedded Surreal provider.
/// </summary>
public sealed class CBDDCSurrealCdcOptions
{
    /// <summary>
    /// Enables CDC-oriented checkpoint bookkeeping.
    /// </summary>
    public bool Enabled { get; set; } = true;
    /// <summary>
    /// Checkpoint table name used for CDC progress tracking.
    /// Must be a plain identifier (letters, digits, underscores); validated by the schema initializer.
    /// </summary>
    public string CheckpointTable { get; set; } = "cbddc_cdc_checkpoint";
    /// <summary>
    /// Enables LIVE SELECT subscriptions as a low-latency wake-up signal for polling.
    /// </summary>
    public bool EnableLiveSelectAccelerator { get; set; } = true;
    /// <summary>
    /// Logical consumer identifier used by checkpoint records.
    /// </summary>
    public string ConsumerId { get; set; } = "default";
    /// <summary>
    /// Polling interval for CDC readers that use pull-based processing.
    /// </summary>
    public TimeSpan PollingInterval { get; set; } = TimeSpan.FromSeconds(1);
    /// <summary>
    /// Maximum number of changefeed entries fetched per poll cycle.
    /// </summary>
    public int BatchSize { get; set; } = 500;
    /// <summary>
    /// Delay before re-subscribing LIVE SELECT after failures or closure.
    /// </summary>
    public TimeSpan LiveSelectReconnectDelay { get; set; } = TimeSpan.FromSeconds(2);
    /// <summary>
    /// Retention window used when defining Surreal changefeed history.
    /// Must be positive; converted to a Surreal duration literal (d/h/m/s/ms) by the schema initializer.
    /// </summary>
    public TimeSpan RetentionDuration { get; set; } = TimeSpan.FromDays(7);
}

View File

@@ -0,0 +1,45 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Health/readiness helper for the embedded Surreal provider.
/// </summary>
public sealed class CBDDCSurrealReadinessProbe : ICBDDCSurrealReadinessProbe
{
    private readonly ICBDDCSurrealEmbeddedClient _surrealClient;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ILogger<CBDDCSurrealReadinessProbe> _logger;

    /// <summary>
    /// Initializes a new instance of the <see cref="CBDDCSurrealReadinessProbe" /> class.
    /// </summary>
    /// <param name="surrealClient">Surreal client abstraction.</param>
    /// <param name="schemaInitializer">Schema initializer.</param>
    /// <param name="logger">Optional logger.</param>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="surrealClient" /> or <paramref name="schemaInitializer" /> is null.
    /// </exception>
    public CBDDCSurrealReadinessProbe(
        ICBDDCSurrealEmbeddedClient surrealClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<CBDDCSurrealReadinessProbe>? logger = null)
    {
        _surrealClient = surrealClient ?? throw new ArgumentNullException(nameof(surrealClient));
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<CBDDCSurrealReadinessProbe>.Instance;
    }

    /// <inheritdoc />
    /// <remarks>
    /// Returns false on infrastructure failures; cancellation is propagated to the caller
    /// rather than being reported as "not ready".
    /// </remarks>
    public async Task<bool> IsReadyAsync(CancellationToken cancellationToken = default)
    {
        try
        {
            await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
            return await _surrealClient.HealthAsync(cancellationToken);
        }
        catch (OperationCanceledException)
        {
            // Fix: previously swallowed by the generic handler, making a cancelled probe
            // indistinguishable from an unhealthy database. Let cancellation flow.
            throw;
        }
        catch (Exception ex)
        {
            _logger.LogWarning(ex, "Surreal embedded readiness probe failed.");
            return false;
        }
    }
}

View File

@@ -0,0 +1,131 @@
using System.Text.RegularExpressions;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Initializes Surreal schema objects required by CBDDC persistence stores.
/// </summary>
public sealed class CBDDCSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer
{
    private static readonly Regex IdentifierRegex = new("^[A-Za-z_][A-Za-z0-9_]*$", RegexOptions.Compiled);

    private readonly SemaphoreSlim _initializeGate = new(1, 1);
    private readonly ICBDDCSurrealEmbeddedClient _surrealClient;
    private readonly ILogger<CBDDCSurrealSchemaInitializer> _logger;
    private readonly string _checkpointTable;
    private readonly string _changefeedRetentionLiteral;

    // Read without the gate on the fast path of EnsureInitializedAsync, so it must be
    // volatile for correct double-checked initialization across threads.
    private volatile bool _initialized;

    /// <summary>
    /// Initializes a new instance of the <see cref="CBDDCSurrealSchemaInitializer" /> class.
    /// Validates the checkpoint table identifier and precomputes the changefeed retention literal.
    /// </summary>
    /// <param name="surrealClient">Surreal client abstraction.</param>
    /// <param name="options">Embedded options.</param>
    /// <param name="logger">Optional logger.</param>
    /// <exception cref="ArgumentNullException">Thrown when required dependencies are null.</exception>
    /// <exception cref="ArgumentException">Thrown when CDC options are missing or invalid.</exception>
    public CBDDCSurrealSchemaInitializer(
        ICBDDCSurrealEmbeddedClient surrealClient,
        CBDDCSurrealEmbeddedOptions options,
        ILogger<CBDDCSurrealSchemaInitializer>? logger = null)
    {
        _surrealClient = surrealClient ?? throw new ArgumentNullException(nameof(surrealClient));
        _logger = logger ?? NullLogger<CBDDCSurrealSchemaInitializer>.Instance;
        if (options == null) throw new ArgumentNullException(nameof(options));
        if (options.Cdc == null) throw new ArgumentException("CDC options are required.", nameof(options));
        _checkpointTable = EnsureValidIdentifier(options.Cdc.CheckpointTable, nameof(options.Cdc.CheckpointTable));
        _changefeedRetentionLiteral = ToSurrealDurationLiteral(
            options.Cdc.RetentionDuration,
            nameof(options.Cdc.RetentionDuration));
    }

    /// <inheritdoc />
    public async Task EnsureInitializedAsync(CancellationToken cancellationToken = default)
    {
        if (_initialized) return; // fast path (volatile read)
        await _initializeGate.WaitAsync(cancellationToken);
        try
        {
            if (_initialized) return; // re-check under the gate (double-checked init)
            string schemaSql = BuildSchemaSql();
            await _surrealClient.RawQueryAsync(schemaSql, cancellationToken: cancellationToken);
            _initialized = true;
            _logger.LogInformation(
                "Surreal schema initialized with checkpoint table '{CheckpointTable}'.",
                _checkpointTable);
        }
        finally
        {
            _initializeGate.Release();
        }
    }

    /// <summary>
    /// Builds the idempotent DDL script for all CBDDC tables and indexes.
    /// Interpolated identifiers are either compile-time constants or validated by
    /// <see cref="EnsureValidIdentifier" />, so no SurrealQL injection is possible here.
    /// </summary>
    private string BuildSchemaSql()
    {
        return $"""
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.OplogEntriesTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogHashIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS hash UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS timestampPhysicalTime, timestampLogicalCounter, timestampNodeId;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogCollectionIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS collection;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.SnapshotNodeIdIndex} ON TABLE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} COLUMNS nodeId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.SnapshotHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} COLUMNS timestampPhysicalTime, timestampLogicalCounter;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerNodeIdIndex} ON TABLE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} COLUMNS nodeId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerEnabledIndex} ON TABLE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} COLUMNS isEnabled;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataCollectionKeyIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS collection, key UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS hlcPhysicalTime, hlcLogicalCounter, hlcNodeId;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataCollectionIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS collection;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationPairIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS peerNodeId, sourceNodeId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationActiveIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS isActive;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationSourceHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS sourceNodeId, confirmedWall, confirmedLogic;
            DEFINE TABLE OVERWRITE {_checkpointTable} SCHEMAFULL;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.CdcCheckpointConsumerIndex} ON TABLE {_checkpointTable} COLUMNS consumerId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.CdcCheckpointVersionstampIndex} ON TABLE {_checkpointTable} COLUMNS versionstampCursor;
            """;
    }

    /// <summary>
    /// Validates that a configured name is a safe SurrealQL identifier.
    /// </summary>
    /// <param name="identifier">The identifier to validate.</param>
    /// <param name="argumentName">Name used when reporting validation failures.</param>
    /// <exception cref="ArgumentException">Thrown when the identifier is missing or invalid.</exception>
    private static string EnsureValidIdentifier(string? identifier, string argumentName)
    {
        if (string.IsNullOrWhiteSpace(identifier))
            throw new ArgumentException("Surreal identifier is required.", argumentName);
        if (!IdentifierRegex.IsMatch(identifier))
            throw new ArgumentException(
                $"Invalid Surreal identifier '{identifier}'. Use letters, numbers, and underscores only.",
                argumentName);
        return identifier;
    }

    /// <summary>
    /// Converts a positive <see cref="TimeSpan" /> to a Surreal duration literal,
    /// preferring the coarsest unit (d, h, m, s) that divides evenly, else milliseconds.
    /// </summary>
    /// <param name="duration">Retention duration to convert.</param>
    /// <param name="argumentName">Name used when reporting validation failures.</param>
    /// <exception cref="ArgumentOutOfRangeException">Thrown when the duration is not positive.</exception>
    private static string ToSurrealDurationLiteral(TimeSpan duration, string argumentName)
    {
        if (duration <= TimeSpan.Zero)
            throw new ArgumentOutOfRangeException(argumentName, "Surreal changefeed retention duration must be positive.");
        if (duration.TotalDays >= 1 && duration.TotalDays == Math.Truncate(duration.TotalDays))
            return $"{(long)duration.TotalDays}d";
        if (duration.TotalHours >= 1 && duration.TotalHours == Math.Truncate(duration.TotalHours))
            return $"{(long)duration.TotalHours}h";
        if (duration.TotalMinutes >= 1 && duration.TotalMinutes == Math.Truncate(duration.TotalMinutes))
            return $"{(long)duration.TotalMinutes}m";
        if (duration.TotalSeconds >= 1 && duration.TotalSeconds == Math.Truncate(duration.TotalSeconds))
            return $"{(long)duration.TotalSeconds}s";
        // Sub-second or non-integral durations fall through to milliseconds (rounded up).
        long totalMs = checked((long)Math.Ceiling(duration.TotalMilliseconds));
        return $"{totalMs}ms";
    }
}

View File

@@ -0,0 +1,29 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal table and index names shared by the embedded CBDDC provider.
/// </summary>
public static class CBDDCSurrealSchemaNames
{
    /// <summary>Table storing append-only oplog entries.</summary>
    public const string OplogEntriesTable = "cbddc_oplog_entries";
    /// <summary>Table storing snapshot metadata (oplog pruning checkpoints).</summary>
    public const string SnapshotMetadataTable = "cbddc_snapshot_metadatas";
    /// <summary>Table storing remote peer configurations.</summary>
    public const string RemotePeerConfigurationsTable = "cbddc_remote_peer_configurations";
    /// <summary>Table storing per-document sync metadata.</summary>
    public const string DocumentMetadataTable = "cbddc_document_metadatas";
    /// <summary>Table storing peer oplog confirmation watermarks.</summary>
    public const string PeerOplogConfirmationsTable = "cbddc_peer_oplog_confirmations";
    /// <summary>Unique index on oplog entry hash.</summary>
    public const string OplogHashIndex = "idx_cbddc_oplog_hash";
    /// <summary>Composite index on the oplog HLC timestamp components.</summary>
    public const string OplogHlcIndex = "idx_cbddc_oplog_hlc";
    /// <summary>Index on the oplog collection column.</summary>
    public const string OplogCollectionIndex = "idx_cbddc_oplog_collection";
    /// <summary>Unique index on snapshot node id.</summary>
    public const string SnapshotNodeIdIndex = "idx_cbddc_snapshot_node";
    /// <summary>Composite index on the snapshot HLC timestamp components.</summary>
    public const string SnapshotHlcIndex = "idx_cbddc_snapshot_hlc";
    /// <summary>Unique index on peer node id.</summary>
    public const string PeerNodeIdIndex = "idx_cbddc_peer_node";
    /// <summary>Index on the peer isEnabled flag.</summary>
    public const string PeerEnabledIndex = "idx_cbddc_peer_enabled";
    /// <summary>Unique composite index on document metadata (collection, key).</summary>
    public const string DocumentMetadataCollectionKeyIndex = "idx_cbddc_docmeta_collection_key";
    /// <summary>Composite index on the document metadata HLC components.</summary>
    public const string DocumentMetadataHlcIndex = "idx_cbddc_docmeta_hlc";
    /// <summary>Index on the document metadata collection column.</summary>
    public const string DocumentMetadataCollectionIndex = "idx_cbddc_docmeta_collection";
    /// <summary>Unique composite index on (peerNodeId, sourceNodeId).</summary>
    public const string PeerConfirmationPairIndex = "idx_cbddc_peer_confirm_pair";
    /// <summary>Index on the confirmation isActive flag.</summary>
    public const string PeerConfirmationActiveIndex = "idx_cbddc_peer_confirm_active";
    /// <summary>Composite index on (sourceNodeId, confirmedWall, confirmedLogic).</summary>
    public const string PeerConfirmationSourceHlcIndex = "idx_cbddc_peer_confirm_source_hlc";
    /// <summary>Unique index on the CDC checkpoint consumerId column.</summary>
    public const string CdcCheckpointConsumerIndex = "idx_cbddc_cdc_checkpoint_consumer";
    /// <summary>Index on the CDC checkpoint versionstampCursor column.</summary>
    public const string CdcCheckpointVersionstampIndex = "idx_cbddc_cdc_checkpoint_versionstamp";
}

View File

@@ -0,0 +1,32 @@
using SurrealDb.Net;
using SurrealDb.Net.Models.Response;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Abstraction over the embedded Surreal client used by CBDDC persistence stores.
/// </summary>
/// <remarks>
/// Owns the lifetime of the underlying client: disposing this instance disposes
/// the client it wraps (it implements both <see cref="IDisposable" /> and
/// <see cref="IAsyncDisposable" />).
/// </remarks>
public interface ICBDDCSurrealEmbeddedClient : IAsyncDisposable, IDisposable
{
    /// <summary>
    /// Gets the underlying Surreal client.
    /// </summary>
    ISurrealDbClient Client { get; }

    /// <summary>
    /// Connects and selects namespace/database exactly once.
    /// </summary>
    /// <param name="cancellationToken">A token used to cancel the initialization.</param>
    /// <returns>A task that completes once the client is connected and scoped.</returns>
    Task InitializeAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Executes a raw SurrealQL statement.
    /// </summary>
    /// <param name="query">The SurrealQL statement text.</param>
    /// <param name="parameters">Optional named parameters bound into the statement.</param>
    /// <param name="cancellationToken">A token used to cancel the query.</param>
    /// <returns>The raw multi-statement response from Surreal.</returns>
    Task<SurrealDbResponse> RawQueryAsync(string query,
        IReadOnlyDictionary<string, object?>? parameters = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Checks whether the embedded client responds to health probes.
    /// </summary>
    /// <param name="cancellationToken">A token used to cancel the probe.</param>
    /// <returns><see langword="true" /> when the client is healthy; otherwise <see langword="false" />.</returns>
    Task<bool> HealthAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,12 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Simple readiness probe for embedded Surreal infrastructure.
/// </summary>
public interface ICBDDCSurrealReadinessProbe
{
    /// <summary>
    /// Returns true when client initialization, schema initialization, and health checks pass.
    /// </summary>
    /// <param name="cancellationToken">A token used to cancel the probe.</param>
    /// <returns><see langword="true" /> when all readiness checks pass; otherwise <see langword="false" />.</returns>
    Task<bool> IsReadyAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,12 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Ensures required Surreal schema objects exist.
/// </summary>
public interface ICBDDCSurrealSchemaInitializer
{
    /// <summary>
    /// Creates required tables/indexes/checkpoint schema for CBDDC stores.
    /// </summary>
    /// <remarks>
    /// Stores call this before every database operation, so implementations should
    /// make repeated calls cheap (e.g. run the DDL only once). — NOTE(review):
    /// idempotency is implied by usage, confirm against the implementation.
    /// </remarks>
    /// <param name="cancellationToken">A token used to cancel initialization.</param>
    /// <returns>A task that completes once the schema objects exist.</returns>
    Task EnsureInitializedAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,76 @@
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Represents durable CDC progress for a logical consumer.
/// </summary>
public sealed class SurrealCdcCheckpoint
{
    /// <summary>
    /// Gets or sets the logical consumer identifier.
    /// </summary>
    public string ConsumerId { get; set; } = "";

    /// <summary>
    /// Gets or sets the last processed hybrid logical timestamp.
    /// </summary>
    public HlcTimestamp Timestamp { get; set; }

    /// <summary>
    /// Gets or sets the last processed hash in the local chain.
    /// </summary>
    public string LastHash { get; set; } = "";

    /// <summary>
    /// Gets or sets the UTC instant when the checkpoint was updated.
    /// </summary>
    public DateTimeOffset UpdatedUtc { get; set; }

    /// <summary>
    /// Gets or sets the optional changefeed versionstamp cursor associated with this checkpoint.
    /// <see langword="null" /> when no changefeed cursor has been recorded yet.
    /// </summary>
    public long? VersionstampCursor { get; set; }
}
/// <summary>
/// Defines persistence operations for local CDC checkpoint progress.
/// </summary>
public interface ISurrealCdcCheckpointPersistence
{
    /// <summary>
    /// Reads the checkpoint for a consumer.
    /// </summary>
    /// <param name="consumerId">Optional consumer id. Defaults to configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <returns>The checkpoint if found; otherwise <see langword="null" />.</returns>
    Task<SurrealCdcCheckpoint?> GetCheckpointAsync(
        string? consumerId = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Upserts checkpoint progress for a consumer.
    /// </summary>
    /// <param name="timestamp">The last processed timestamp.</param>
    /// <param name="lastHash">The last processed hash.</param>
    /// <param name="consumerId">Optional consumer id. Defaults to configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <param name="versionstampCursor">
    /// Optional changefeed versionstamp cursor. When omitted, implementations should
    /// preserve any previously stored cursor rather than clearing it.
    /// </param>
    /// <returns>A task that completes when the checkpoint has been persisted.</returns>
    Task UpsertCheckpointAsync(
        HlcTimestamp timestamp,
        string lastHash,
        string? consumerId = null,
        CancellationToken cancellationToken = default,
        long? versionstampCursor = null);

    /// <summary>
    /// Advances checkpoint progress from an oplog entry.
    /// </summary>
    /// <param name="entry">The oplog entry that was processed.</param>
    /// <param name="consumerId">Optional consumer id. Defaults to configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <returns>A task that completes when the checkpoint has been advanced.</returns>
    Task AdvanceCheckpointAsync(
        OplogEntry entry,
        string? consumerId = null,
        CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,27 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Defines lifecycle controls for the durable Surreal CDC polling worker.
/// </summary>
public interface ISurrealCdcWorkerLifecycle
{
    /// <summary>
    /// Gets a value indicating whether the CDC worker is currently running.
    /// </summary>
    bool IsCdcWorkerRunning { get; }

    /// <summary>
    /// Starts the CDC worker.
    /// </summary>
    /// <param name="cancellationToken">A token used to cancel the start operation.</param>
    /// <returns>A task that completes when the worker has started.</returns>
    Task StartCdcWorkerAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Executes one CDC polling pass across all watched collections.
    /// </summary>
    /// <param name="cancellationToken">A token used to cancel the polling pass.</param>
    /// <returns>A task that completes when the single pass has finished.</returns>
    Task PollCdcOnceAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Stops the CDC worker.
    /// </summary>
    /// <param name="cancellationToken">A token used to cancel the stop operation.</param>
    /// <returns>A task that completes when the worker has stopped.</returns>
    Task StopCdcWorkerAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,191 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json.Serialization;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal-backed persistence for CDC checkpoint progress.
/// </summary>
/// <remarks>
/// Checkpoints are stored one row per consumer, keyed by a deterministic record id
/// derived from the SHA-256 of the consumer id. All operations are no-ops when CDC
/// is disabled in the options.
/// </remarks>
public sealed class SurrealCdcCheckpointPersistence : ISurrealCdcCheckpointPersistence
{
    private readonly bool _enabled;
    private readonly string _checkpointTable;
    private readonly string _defaultConsumerId;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    /// <summary>
    /// Initializes a new instance of the <see cref="SurrealCdcCheckpointPersistence" /> class.
    /// </summary>
    /// <param name="surrealEmbeddedClient">The embedded Surreal client abstraction.</param>
    /// <param name="schemaInitializer">The Surreal schema initializer.</param>
    /// <param name="options">Embedded Surreal options.</param>
    /// <exception cref="ArgumentNullException">Thrown when any argument is <see langword="null" />.</exception>
    /// <exception cref="ArgumentException">Thrown when the checkpoint table or consumer id is missing.</exception>
    public SurrealCdcCheckpointPersistence(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        CBDDCSurrealEmbeddedOptions options)
    {
        // Use throw helpers consistently (AdvanceCheckpointAsync already uses ThrowIfNull).
        ArgumentNullException.ThrowIfNull(surrealEmbeddedClient);
        ArgumentNullException.ThrowIfNull(schemaInitializer);
        ArgumentNullException.ThrowIfNull(options);

        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer;
        _enabled = options.Cdc.Enabled;
        _checkpointTable = options.Cdc.CheckpointTable;
        _defaultConsumerId = options.Cdc.ConsumerId;

        if (string.IsNullOrWhiteSpace(_checkpointTable))
            throw new ArgumentException("CDC checkpoint table is required.", nameof(options));
        if (string.IsNullOrWhiteSpace(_defaultConsumerId))
            throw new ArgumentException("CDC consumer id is required.", nameof(options));
    }

    /// <inheritdoc />
    public async Task<SurrealCdcCheckpoint?> GetCheckpointAsync(
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        if (!_enabled) return null;
        string resolvedConsumerId = ResolveConsumerId(consumerId);
        var existing = await FindByConsumerIdAsync(resolvedConsumerId, cancellationToken);
        return existing?.ToDomain();
    }

    /// <inheritdoc />
    public async Task UpsertCheckpointAsync(
        HlcTimestamp timestamp,
        string lastHash,
        string? consumerId = null,
        CancellationToken cancellationToken = default,
        long? versionstampCursor = null)
    {
        if (!_enabled) return;
        string resolvedConsumerId = ResolveConsumerId(consumerId);
        await EnsureReadyAsync(cancellationToken);

        // When no explicit cursor is supplied, preserve the previously stored one
        // instead of clearing it on every oplog-driven advance.
        long? effectiveVersionstampCursor = versionstampCursor;
        if (!effectiveVersionstampCursor.HasValue)
        {
            var existing = await FindByConsumerIdAsync(
                resolvedConsumerId,
                cancellationToken,
                ensureInitialized: false); // schema was already ensured above
            effectiveVersionstampCursor = existing?.VersionstampCursor;
        }

        RecordId recordId = RecordId.From(_checkpointTable, ComputeConsumerKey(resolvedConsumerId));
        var record = new SurrealCdcCheckpointRecord
        {
            ConsumerId = resolvedConsumerId,
            TimestampPhysicalTime = timestamp.PhysicalTime,
            TimestampLogicalCounter = timestamp.LogicalCounter,
            TimestampNodeId = timestamp.NodeId,
            LastHash = lastHash ?? string.Empty,
            UpdatedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(),
            VersionstampCursor = effectiveVersionstampCursor
        };
        await _surrealClient.Upsert<SurrealCdcCheckpointRecord, SurrealCdcCheckpointRecord>(
            recordId,
            record,
            cancellationToken);
    }

    /// <inheritdoc />
    public Task AdvanceCheckpointAsync(
        OplogEntry entry,
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(entry);
        // No versionstamp cursor is passed, so the upsert keeps the stored cursor.
        return UpsertCheckpointAsync(entry.Timestamp, entry.Hash, consumerId, cancellationToken);
    }

    /// <summary>
    /// Resolves the effective consumer id, falling back to the configured default.
    /// </summary>
    /// <exception cref="ArgumentException">Thrown when no usable consumer id is available.</exception>
    private string ResolveConsumerId(string? consumerId)
    {
        string resolved = string.IsNullOrWhiteSpace(consumerId) ? _defaultConsumerId : consumerId;
        if (string.IsNullOrWhiteSpace(resolved))
            throw new ArgumentException("CDC consumer id is required.", nameof(consumerId));
        return resolved;
    }

    /// <summary>
    /// Ensures the Surreal schema exists before touching the checkpoint table.
    /// </summary>
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    /// <summary>
    /// Looks up the checkpoint row for a consumer: first by its deterministic record id,
    /// then by scanning the table as a fallback (covers rows written with other ids).
    /// </summary>
    private async Task<SurrealCdcCheckpointRecord?> FindByConsumerIdAsync(
        string consumerId,
        CancellationToken cancellationToken,
        bool ensureInitialized = true)
    {
        if (ensureInitialized) await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = RecordId.From(_checkpointTable, ComputeConsumerKey(consumerId));
        var deterministic = await _surrealClient.Select<SurrealCdcCheckpointRecord>(deterministicId, cancellationToken);
        if (deterministic != null &&
            string.Equals(deterministic.ConsumerId, consumerId, StringComparison.Ordinal))
            return deterministic;
        var all = await _surrealClient.Select<SurrealCdcCheckpointRecord>(_checkpointTable, cancellationToken);
        return all?.FirstOrDefault(c =>
            string.Equals(c.ConsumerId, consumerId, StringComparison.Ordinal));
    }

    /// <summary>
    /// Derives a stable record key from a consumer id (lowercase hex SHA-256).
    /// </summary>
    private static string ComputeConsumerKey(string consumerId)
    {
        byte[] input = Encoding.UTF8.GetBytes(consumerId);
        return Convert.ToHexString(SHA256.HashData(input)).ToLowerInvariant();
    }
}
/// <summary>
/// Surreal row shape for a persisted CDC checkpoint. The HLC timestamp is
/// flattened into three columns so it can be stored and ordered natively.
/// </summary>
internal sealed class SurrealCdcCheckpointRecord : Record
{
    /// <summary>The logical consumer identifier this checkpoint belongs to.</summary>
    [JsonPropertyName("consumerId")]
    public string ConsumerId { get; set; } = "";

    /// <summary>Physical-time component of the last processed HLC timestamp.</summary>
    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }

    /// <summary>Logical-counter component of the last processed HLC timestamp.</summary>
    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }

    /// <summary>Node-id component of the last processed HLC timestamp.</summary>
    [JsonPropertyName("timestampNodeId")]
    public string TimestampNodeId { get; set; } = "";

    /// <summary>The last processed hash in the local chain.</summary>
    [JsonPropertyName("lastHash")]
    public string LastHash { get; set; } = "";

    /// <summary>Unix epoch milliseconds when the checkpoint was last updated.</summary>
    [JsonPropertyName("updatedUtcMs")]
    public long UpdatedUtcMs { get; set; }

    /// <summary>Optional changefeed versionstamp cursor; null when never recorded.</summary>
    [JsonPropertyName("versionstampCursor")]
    public long? VersionstampCursor { get; set; }
}
/// <summary>
/// Mapping helpers between persisted checkpoint rows and domain checkpoints.
/// </summary>
internal static class SurrealCdcCheckpointRecordMappers
{
    /// <summary>
    /// Converts a persisted checkpoint row into its domain representation,
    /// reassembling the flattened HLC timestamp columns.
    /// </summary>
    /// <param name="record">The persisted row to convert.</param>
    /// <returns>The equivalent domain checkpoint.</returns>
    public static SurrealCdcCheckpoint ToDomain(this SurrealCdcCheckpointRecord record)
    {
        // Rebuild the hybrid logical clock value from its three stored components.
        var lastProcessed = new HlcTimestamp(
            record.TimestampPhysicalTime,
            record.TimestampLogicalCounter,
            record.TimestampNodeId);

        var checkpoint = new SurrealCdcCheckpoint
        {
            ConsumerId = record.ConsumerId,
            Timestamp = lastProcessed,
            LastHash = record.LastHash,
            UpdatedUtc = DateTimeOffset.FromUnixTimeMilliseconds(record.UpdatedUtcMs),
            VersionstampCursor = record.VersionstampCursor
        };

        return checkpoint;
    }
}

View File

@@ -0,0 +1,32 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Configuration for the Surreal SHOW CHANGES polling worker.
/// </summary>
public sealed class SurrealCdcPollingOptions
{
    /// <summary>
    /// Gets or sets a value indicating whether polling is enabled.
    /// Defaults to <see langword="true" />.
    /// </summary>
    public bool Enabled { get; set; } = true;

    /// <summary>
    /// Gets or sets the polling interval. Defaults to 250 milliseconds.
    /// </summary>
    public TimeSpan PollInterval { get; set; } = TimeSpan.FromMilliseconds(250);

    /// <summary>
    /// Gets or sets the maximum number of changefeed rows fetched per poll.
    /// Defaults to 100.
    /// </summary>
    public int BatchSize { get; set; } = 100;

    /// <summary>
    /// Gets or sets a value indicating whether LIVE SELECT wake-ups are enabled.
    /// Defaults to <see langword="true" />.
    /// </summary>
    public bool EnableLiveSelectAccelerator { get; set; } = true;

    /// <summary>
    /// Gets or sets the delay used before re-subscribing a failed LIVE SELECT stream.
    /// Defaults to 2 seconds.
    /// </summary>
    public TimeSpan LiveSelectReconnectDelay { get; set; } = TimeSpan.FromSeconds(2);
}

View File

@@ -0,0 +1,164 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal-backed implementation of <see cref="DocumentMetadataStore" />. Rows are
/// keyed deterministically per (collection, key); queries that filter or order by
/// HLC timestamp load the full table and filter in memory.
/// </summary>
public class SurrealDocumentMetadataStore : DocumentMetadataStore
{
    private readonly ILogger<SurrealDocumentMetadataStore> _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    /// <summary>
    /// Initializes a new instance of the <see cref="SurrealDocumentMetadataStore" /> class.
    /// </summary>
    /// <param name="surrealEmbeddedClient">The embedded Surreal client abstraction.</param>
    /// <param name="schemaInitializer">The Surreal schema initializer.</param>
    /// <param name="logger">Optional logger; a null logger is used when omitted.</param>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="surrealEmbeddedClient" /> or <paramref name="schemaInitializer" /> is null.
    /// </exception>
    public SurrealDocumentMetadataStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<SurrealDocumentMetadataStore>? logger = null)
    {
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealDocumentMetadataStore>.Instance;
    }

    /// <summary>
    /// Gets metadata for a single (collection, key) pair, or null when absent.
    /// </summary>
    public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByCollectionKeyAsync(collection, key, cancellationToken);
        return existing?.ToDomain();
    }

    /// <summary>
    /// Gets all metadata rows in a collection (full table scan, filtered in memory).
    /// </summary>
    public override async Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
        CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all
            .Where(m => string.Equals(m.Collection, collection, StringComparison.Ordinal))
            .Select(m => m.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Inserts or replaces the metadata row for the item's (collection, key) pair.
    /// Reuses an existing row's record id when one is found.
    /// </summary>
    public override async Task UpsertMetadataAsync(DocumentMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        var existing = await FindByCollectionKeyAsync(metadata.Collection, metadata.Key, cancellationToken);
        RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.DocumentMetadata(metadata.Collection, metadata.Key);
        await _surrealClient.Upsert<SurrealDocumentMetadataRecord, SurrealDocumentMetadataRecord>(
            recordId,
            metadata.ToSurrealRecord(),
            cancellationToken);
    }

    /// <summary>
    /// Upserts each item sequentially (no transactional batching).
    /// </summary>
    public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
        CancellationToken cancellationToken = default)
    {
        foreach (var metadata in metadatas)
            await UpsertMetadataAsync(metadata, cancellationToken);
    }

    /// <summary>
    /// Records a tombstone for the (collection, key) pair at the given timestamp.
    /// </summary>
    public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
        CancellationToken cancellationToken = default)
    {
        // The final 'true' flags the metadata as deleted.
        var metadata = new DocumentMetadata(collection, key, timestamp, true);
        await UpsertMetadataAsync(metadata, cancellationToken);
    }

    /// <summary>
    /// Returns metadata strictly newer than <paramref name="since" /> (compared by
    /// physical time, then logical counter; node id is not used as a tiebreaker),
    /// optionally restricted to the given collections, ordered oldest-first.
    /// </summary>
    public override async Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        HashSet<string>? collectionSet = collections != null ? new HashSet<string>(collections) : null;
        return all
            .Where(m =>
                (m.HlcPhysicalTime > since.PhysicalTime ||
                 (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter)) &&
                (collectionSet == null || collectionSet.Contains(m.Collection)))
            .OrderBy(m => m.HlcPhysicalTime)
            .ThenBy(m => m.HlcLogicalCounter)
            .Select(m => m.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Removes every row in the metadata table.
    /// </summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.DocumentMetadataTable, cancellationToken);
    }

    /// <summary>
    /// Exports all metadata rows as domain objects.
    /// </summary>
    public override async Task<IEnumerable<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all.Select(m => m.ToDomain()).ToList();
    }

    /// <summary>
    /// Imports items by unconditional upsert (existing rows are overwritten).
    /// </summary>
    public override async Task ImportAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items) await UpsertMetadataAsync(item, cancellationToken);
    }

    /// <summary>
    /// Merges items using last-writer-wins on the HLC timestamp: an incoming item
    /// only replaces an existing row when its UpdatedAt is strictly newer.
    /// </summary>
    public override async Task MergeAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByCollectionKeyAsync(item.Collection, item.Key, cancellationToken);
            if (existing == null)
            {
                await UpsertMetadataAsync(item, cancellationToken);
                continue;
            }
            var existingTimestamp =
                new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter, existing.HlcNodeId);
            // Keep the stored row when the incoming item is not strictly newer.
            if (item.UpdatedAt.CompareTo(existingTimestamp) <= 0) continue;
            RecordId recordId = existing.Id ?? SurrealStoreRecordIds.DocumentMetadata(item.Collection, item.Key);
            await EnsureReadyAsync(cancellationToken);
            await _surrealClient.Upsert<SurrealDocumentMetadataRecord, SurrealDocumentMetadataRecord>(
                recordId,
                item.ToSurrealRecord(),
                cancellationToken);
        }
    }

    // Schema must exist before any read or write; the initializer is invoked on
    // every operation (expected to be cheap after first run).
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    // Loads every row in the metadata table; callers filter/order in memory.
    private async Task<List<SurrealDocumentMetadataRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient.Select<SurrealDocumentMetadataRecord>(
            CBDDCSurrealSchemaNames.DocumentMetadataTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    // Point lookup by deterministic record id first; falls back to a full table
    // scan so rows written under non-deterministic ids are still found.
    private async Task<SurrealDocumentMetadataRecord?> FindByCollectionKeyAsync(string collection, string key,
        CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = SurrealStoreRecordIds.DocumentMetadata(collection, key);
        var deterministic = await _surrealClient.Select<SurrealDocumentMetadataRecord>(deterministicId, cancellationToken);
        if (deterministic != null &&
            string.Equals(deterministic.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(deterministic.Key, key, StringComparison.Ordinal))
            return deterministic;
        var all = await SelectAllAsync(cancellationToken);
        return all.FirstOrDefault(m =>
            string.Equals(m.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(m.Key, key, StringComparison.Ordinal));
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,144 @@
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Represents a single change notification emitted by a watchable collection.
/// </summary>
/// <typeparam name="TEntity">The entity type being observed.</typeparam>
/// <param name="OperationType">The kind of change that occurred.</param>
/// <param name="DocumentId">The identifier of the affected document; may be null when a put is published without an explicit id.</param>
/// <param name="Entity">The entity payload; null for deletes.</param>
public readonly record struct SurrealCollectionChange<TEntity>(
    OperationType OperationType,
    string? DocumentId,
    TEntity? Entity)
    where TEntity : class;
/// <summary>
/// Abstraction for a collection that can publish change notifications to document-store watchers.
/// </summary>
/// <typeparam name="TEntity">The entity type being observed.</typeparam>
public interface ISurrealWatchableCollection<TEntity> where TEntity : class
{
    /// <summary>
    /// Subscribes to collection change notifications.
    /// </summary>
    /// <param name="observer">The observer receiving collection changes.</param>
    /// <returns>A disposable subscription; disposing it stops the observer's notifications.</returns>
    IDisposable Subscribe(IObserver<SurrealCollectionChange<TEntity>> observer);
}
/// <summary>
/// In-memory watchable collection feed used to publish local change events.
/// </summary>
/// <remarks>
/// Thread-safe: the subscriber list is guarded by a private lock, while
/// <c>OnNext</c>/<c>OnCompleted</c> callbacks are invoked outside the lock
/// against a snapshot of the subscribers.
/// </remarks>
/// <typeparam name="TEntity">The entity type being observed.</typeparam>
public sealed class SurrealCollectionChangeFeed<TEntity> : ISurrealWatchableCollection<TEntity>, IDisposable
    where TEntity : class
{
    // Guards _subscribers and _disposed.
    private readonly object _sync = new();
    private readonly List<IObserver<SurrealCollectionChange<TEntity>>> _subscribers = new();
    private bool _disposed;

    /// <inheritdoc />
    public IDisposable Subscribe(IObserver<SurrealCollectionChange<TEntity>> observer)
    {
        ArgumentNullException.ThrowIfNull(observer);

        lock (_sync)
        {
            ObjectDisposedException.ThrowIf(_disposed, this);
            _subscribers.Add(observer);
        }

        return new ChangeSubscription(this, observer);
    }

    /// <summary>
    /// Publishes a put notification for an entity.
    /// </summary>
    /// <param name="entity">The changed entity.</param>
    /// <param name="documentId">Optional explicit document identifier.</param>
    public void PublishPut(TEntity entity, string? documentId = null)
    {
        ArgumentNullException.ThrowIfNull(entity);

        Publish(new SurrealCollectionChange<TEntity>(OperationType.Put, documentId, entity));
    }

    /// <summary>
    /// Publishes a delete notification for an entity key.
    /// </summary>
    /// <param name="documentId">The document identifier that was removed.</param>
    public void PublishDelete(string documentId)
    {
        if (string.IsNullOrWhiteSpace(documentId))
            throw new ArgumentException("Document id is required.", nameof(documentId));

        Publish(new SurrealCollectionChange<TEntity>(OperationType.Delete, documentId, null));
    }

    /// <summary>
    /// Publishes a raw collection change notification. Silently ignored after disposal.
    /// </summary>
    /// <param name="change">The change payload.</param>
    public void Publish(SurrealCollectionChange<TEntity> change)
    {
        IObserver<SurrealCollectionChange<TEntity>>[] targets;
        lock (_sync)
        {
            if (_disposed) return;
            // Snapshot so callbacks run outside the lock and subscribers may
            // (un)subscribe from within OnNext without deadlocking.
            targets = _subscribers.ToArray();
        }

        foreach (var target in targets)
            target.OnNext(change);
    }

    /// <inheritdoc />
    public void Dispose()
    {
        IObserver<SurrealCollectionChange<TEntity>>[] targets;
        lock (_sync)
        {
            if (_disposed) return;
            _disposed = true;
            targets = _subscribers.ToArray();
            _subscribers.Clear();
        }

        // Complete every remaining observer exactly once, outside the lock.
        foreach (var target in targets)
            target.OnCompleted();
    }

    // Removes an observer when its subscription is disposed; a no-op once the
    // feed itself has been disposed (the list was already cleared).
    private void Detach(IObserver<SurrealCollectionChange<TEntity>> observer)
    {
        lock (_sync)
        {
            if (!_disposed)
                _subscribers.Remove(observer);
        }
    }

    /// <summary>
    /// Subscription handle returned by <see cref="Subscribe" />; idempotent on Dispose.
    /// </summary>
    private sealed class ChangeSubscription : IDisposable
    {
        private readonly SurrealCollectionChangeFeed<TEntity> _feed;
        private readonly IObserver<SurrealCollectionChange<TEntity>> _observer;
        private int _released;

        public ChangeSubscription(
            SurrealCollectionChangeFeed<TEntity> feed,
            IObserver<SurrealCollectionChange<TEntity>> observer)
        {
            _feed = feed;
            _observer = observer;
        }

        public void Dispose()
        {
            // First disposal wins; subsequent calls are no-ops.
            if (Interlocked.CompareExchange(ref _released, 1, 0) != 0) return;
            _feed.Detach(_observer);
        }
    }
}

View File

@@ -0,0 +1,272 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal-backed implementation of <see cref="OplogStore" />. Entries are keyed
/// deterministically by hash; range/after queries load the full table and filter
/// in memory, ordered by (physical time, logical counter).
/// </summary>
public class SurrealOplogStore : OplogStore
{
    private readonly ILogger<SurrealOplogStore> _logger;
    // NOTE(review): these are declared nullable and null-checked in
    // InitializeVectorClock even though the constructor always assigns them —
    // presumably because the base-class constructor can invoke the virtual
    // InitializeVectorClock before these fields are set. TODO confirm against
    // the OplogStore base class.
    private readonly ICBDDCSurrealSchemaInitializer? _schemaInitializer;
    private readonly ISurrealDbClient? _surrealClient;

    /// <summary>
    /// Initializes a new instance of the <see cref="SurrealOplogStore" /> class
    /// and bootstraps the vector clock from persisted snapshot/oplog state.
    /// </summary>
    /// <param name="surrealEmbeddedClient">The embedded Surreal client abstraction.</param>
    /// <param name="schemaInitializer">The Surreal schema initializer.</param>
    /// <param name="documentStore">The document store passed through to the base class.</param>
    /// <param name="conflictResolver">The conflict resolver passed through to the base class.</param>
    /// <param name="vectorClockService">The vector clock service passed through to the base class.</param>
    /// <param name="snapshotMetadataStore">Optional snapshot metadata store used to seed the vector clock.</param>
    /// <param name="logger">Optional logger; a null logger is used when omitted.</param>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="surrealEmbeddedClient" /> or <paramref name="schemaInitializer" /> is null.
    /// </exception>
    public SurrealOplogStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        IDocumentStore documentStore,
        IConflictResolver conflictResolver,
        IVectorClockService vectorClockService,
        ISnapshotMetadataStore? snapshotMetadataStore = null,
        ILogger<SurrealOplogStore>? logger = null) : base(
        documentStore,
        conflictResolver,
        vectorClockService,
        snapshotMetadataStore)
    {
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealOplogStore>.Instance;
        // Invalidate first so the explicit re-initialization below runs even if a
        // base-constructor call already marked the clock as initialized.
        _vectorClock.Invalidate();
        InitializeVectorClock();
    }

    /// <summary>
    /// Returns the entries between two hashes on the same node's chain:
    /// exclusive of <paramref name="startHash" />, inclusive of
    /// <paramref name="endHash" /> (by HLC comparison). Returns an empty
    /// sequence when either hash is unknown.
    /// </summary>
    public override async Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
        CancellationToken cancellationToken = default)
    {
        var startRow = await FindByHashAsync(startHash, cancellationToken);
        var endRow = await FindByHashAsync(endHash, cancellationToken);
        if (startRow == null || endRow == null) return [];
        // The chain is walked per node; the node id comes from the start entry.
        string nodeId = startRow.TimestampNodeId;
        var all = await SelectAllAsync(cancellationToken);
        return all
            .Where(o => string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal) &&
                        (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime ||
                         (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime &&
                          o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
                        (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime ||
                         (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime &&
                          o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .Select(o => o.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Gets a single entry by hash, or null when not found.
    /// </summary>
    public override async Task<OplogEntry?> GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default)
    {
        var existing = await FindByHashAsync(hash, cancellationToken);
        return existing?.ToDomain();
    }

    /// <summary>
    /// Returns entries strictly newer than <paramref name="timestamp" />
    /// (compared by physical time, then logical counter), optionally restricted
    /// to the given collections, ordered oldest-first.
    /// </summary>
    public override async Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        HashSet<string>? collectionSet = collections != null ? new HashSet<string>(collections) : null;
        return all
            .Where(o =>
                (o.TimestampPhysicalTime > timestamp.PhysicalTime ||
                 (o.TimestampPhysicalTime == timestamp.PhysicalTime &&
                  o.TimestampLogicalCounter > timestamp.LogicalCounter)) &&
                (collectionSet == null || collectionSet.Contains(o.Collection)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .Select(o => o.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Same as <see cref="GetOplogAfterAsync" /> but additionally restricted to
    /// entries originating from a single node.
    /// </summary>
    public override async Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        HashSet<string>? collectionSet = collections != null ? new HashSet<string>(collections) : null;
        return all
            .Where(o =>
                string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal) &&
                (o.TimestampPhysicalTime > since.PhysicalTime ||
                 (o.TimestampPhysicalTime == since.PhysicalTime &&
                  o.TimestampLogicalCounter > since.LogicalCounter)) &&
                (collectionSet == null || collectionSet.Contains(o.Collection)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .Select(o => o.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Deletes every entry at or before <paramref name="cutoff" /> (inclusive),
    /// one record at a time.
    /// </summary>
    public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        var toDelete = all
            .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime ||
                        (o.TimestampPhysicalTime == cutoff.PhysicalTime &&
                         o.TimestampLogicalCounter <= cutoff.LogicalCounter))
            .ToList();
        foreach (var row in toDelete)
        {
            RecordId recordId = row.Id ?? SurrealStoreRecordIds.Oplog(row.Hash);
            await EnsureReadyAsync(cancellationToken);
            await _surrealClient!.Delete(recordId, cancellationToken);
        }
    }

    /// <summary>
    /// Removes every entry and invalidates the in-memory vector clock.
    /// </summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient!.Delete(CBDDCSurrealSchemaNames.OplogEntriesTable, cancellationToken);
        _vectorClock.Invalidate();
    }

    /// <summary>
    /// Exports all stored entries as domain objects.
    /// </summary>
    public override async Task<IEnumerable<OplogEntry>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all.Select(o => o.ToDomain()).ToList();
    }

    /// <summary>
    /// Imports items by upsert: existing rows (matched by hash) are overwritten.
    /// </summary>
    public override async Task ImportAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByHashAsync(item.Hash, cancellationToken);
            RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.Oplog(item.Hash);
            await UpsertAsync(item, recordId, cancellationToken);
        }
    }

    /// <summary>
    /// Merges items: entries whose hash already exists are skipped (oplog entries
    /// are immutable once written).
    /// </summary>
    public override async Task MergeAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByHashAsync(item.Hash, cancellationToken);
            if (existing != null) continue;
            await UpsertAsync(item, SurrealStoreRecordIds.Oplog(item.Hash), cancellationToken);
        }
    }

    /// <summary>
    /// Seeds the vector clock from snapshot metadata (when available) and then
    /// from the latest persisted oplog entry per node.
    /// </summary>
    /// <remarks>
    /// Runs synchronously over async calls (GetAwaiter().GetResult()) because it
    /// is invoked from the constructor path. The null guard below exists for the
    /// case where this virtual is reached before the derived constructor has
    /// assigned the fields — presumably via the base constructor; TODO confirm.
    /// </remarks>
    protected override void InitializeVectorClock()
    {
        if (_vectorClock.IsInitialized) return;
        if (_surrealClient == null || _schemaInitializer == null)
        {
            _vectorClock.IsInitialized = true;
            return;
        }
        if (_snapshotMetadataStore != null)
            try
            {
                var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult();
                foreach (var snapshot in snapshots)
                    _vectorClock.UpdateNode(
                        snapshot.NodeId,
                        new HlcTimestamp(
                            snapshot.TimestampPhysicalTime,
                            snapshot.TimestampLogicalCounter,
                            snapshot.NodeId),
                        snapshot.Hash ?? "");
            }
            catch
            {
                // Ignore snapshot bootstrap failures to keep oplog fallback behavior aligned.
            }
        EnsureReadyAsync(CancellationToken.None).GetAwaiter().GetResult();
        var all = _surrealClient.Select<SurrealOplogRecord>(CBDDCSurrealSchemaNames.OplogEntriesTable, CancellationToken.None)
            .GetAwaiter().GetResult()
            ?? [];
        // Fold in the newest entry per node so the clock reflects persisted state.
        var latestPerNode = all
            .Where(x => !string.IsNullOrWhiteSpace(x.TimestampNodeId))
            .GroupBy(x => x.TimestampNodeId)
            .Select(g => g
                .OrderByDescending(x => x.TimestampPhysicalTime)
                .ThenByDescending(x => x.TimestampLogicalCounter)
                .First())
            .ToList();
        foreach (var latest in latestPerNode)
            _vectorClock.UpdateNode(
                latest.TimestampNodeId,
                new HlcTimestamp(latest.TimestampPhysicalTime, latest.TimestampLogicalCounter, latest.TimestampNodeId),
                latest.Hash ?? "");
        _vectorClock.IsInitialized = true;
    }

    /// <summary>
    /// Inserts an entry unless one with the same hash already exists (idempotent).
    /// </summary>
    protected override async Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default)
    {
        var existing = await FindByHashAsync(entry.Hash, cancellationToken);
        if (existing != null) return;
        await UpsertAsync(entry, SurrealStoreRecordIds.Oplog(entry.Hash), cancellationToken);
    }

    /// <summary>
    /// Returns the hash of the HLC-latest entry for a node, or null when the
    /// node has no entries.
    /// </summary>
    protected override async Task<string?> QueryLastHashForNodeAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        var lastEntry = all
            .Where(o => string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal))
            .OrderByDescending(o => o.TimestampPhysicalTime)
            .ThenByDescending(o => o.TimestampLogicalCounter)
            .FirstOrDefault();
        return lastEntry?.Hash;
    }

    /// <summary>
    /// Returns the (physical time, logical counter) pair of the entry with the
    /// given hash, or null when not found.
    /// </summary>
    protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByHashAsync(hash, cancellationToken);
        if (existing == null) return null;
        return (existing.TimestampPhysicalTime, existing.TimestampLogicalCounter);
    }

    // Writes an entry at the given record id after ensuring the schema exists.
    private async Task UpsertAsync(OplogEntry entry, RecordId recordId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient!.Upsert<SurrealOplogRecord, SurrealOplogRecord>(
            recordId,
            entry.ToSurrealRecord(),
            cancellationToken);
    }

    // Schema must exist before any read or write.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer!.EnsureInitializedAsync(cancellationToken);
    }

    // Loads every row in the oplog table; callers filter/order in memory.
    private async Task<List<SurrealOplogRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient!.Select<SurrealOplogRecord>(
            CBDDCSurrealSchemaNames.OplogEntriesTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    // Point lookup by deterministic record id first; falls back to a full table
    // scan so rows written under non-deterministic ids are still found.
    private async Task<SurrealOplogRecord?> FindByHashAsync(string hash, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = SurrealStoreRecordIds.Oplog(hash);
        var deterministic = await _surrealClient!.Select<SurrealOplogRecord>(deterministicId, cancellationToken);
        if (deterministic != null && string.Equals(deterministic.Hash, hash, StringComparison.Ordinal))
            return deterministic;
        var all = await SelectAllAsync(cancellationToken);
        return all.FirstOrDefault(o => string.Equals(o.Hash, hash, StringComparison.Ordinal));
    }
}

View File

@@ -0,0 +1,111 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Persists remote peer configurations in the embedded SurrealDB remote-peer table,
/// one record per peer keyed by a deterministic node-id record id.
/// </summary>
public class SurrealPeerConfigurationStore : PeerConfigurationStore
{
    private readonly ILogger<SurrealPeerConfigurationStore> _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    public SurrealPeerConfigurationStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<SurrealPeerConfigurationStore>? logger = null)
    {
        ArgumentNullException.ThrowIfNull(surrealEmbeddedClient);
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealPeerConfigurationStore>.Instance;
    }

    /// <summary>Returns every stored remote peer configuration.</summary>
    public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
        CancellationToken cancellationToken = default)
    {
        var records = await SelectAllAsync(cancellationToken);
        var peers = new List<RemotePeerConfiguration>(records.Count);
        foreach (var record in records)
            peers.Add(record.ToDomain());
        return peers;
    }

    /// <summary>Returns the configuration for <paramref name="nodeId"/>, or null when unknown.</summary>
    public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
        CancellationToken cancellationToken)
    {
        var record = await FindByNodeIdAsync(nodeId, cancellationToken);
        return record == null ? null : record.ToDomain();
    }

    /// <summary>Deletes the configuration for <paramref name="nodeId"/>; logs and returns when absent.</summary>
    public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        var record = await FindByNodeIdAsync(nodeId, cancellationToken);
        if (record == null)
        {
            _logger.LogWarning("Attempted to remove non-existent remote peer: {NodeId}", nodeId);
            return;
        }
        // Prefer the id the row was actually stored under; fall back to the deterministic id.
        RecordId target = record.Id ?? SurrealStoreRecordIds.RemotePeer(nodeId);
        await _surrealClient.Delete(target, cancellationToken);
        _logger.LogInformation("Removed remote peer configuration: {NodeId}", nodeId);
    }

    /// <summary>Creates or overwrites the stored configuration for <paramref name="peer"/>.</summary>
    public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
        CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        var record = await FindByNodeIdAsync(peer.NodeId, cancellationToken);
        RecordId target = record?.Id ?? SurrealStoreRecordIds.RemotePeer(peer.NodeId);
        await _surrealClient.Upsert<SurrealRemotePeerRecord, SurrealRemotePeerRecord>(
            target,
            peer.ToSurrealRecord(),
            cancellationToken);
        _logger.LogInformation("Saved remote peer configuration: {NodeId} ({Type})", peer.NodeId, peer.Type);
    }

    /// <summary>Deletes every row in the remote-peer table.</summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        _logger.LogWarning(
            "Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable, cancellationToken);
        _logger.LogInformation("Peer configuration store dropped successfully.");
    }

    /// <summary>Exports all configurations; identical to <see cref="GetRemotePeersAsync"/>.</summary>
    public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
        CancellationToken cancellationToken = default) =>
        await GetRemotePeersAsync(cancellationToken);

    // Idempotent schema creation; every public operation passes through here first.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken) =>
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);

    private async Task<List<SurrealRemotePeerRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var records = await _surrealClient.Select<SurrealRemotePeerRecord>(
            CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable,
            cancellationToken);
        return records == null ? [] : records.ToList();
    }

    // Fast path: deterministic record id. Fallback: table scan for rows that may have been
    // written under a different id.
    private async Task<SurrealRemotePeerRecord?> FindByNodeIdAsync(string nodeId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var byId = await _surrealClient.Select<SurrealRemotePeerRecord>(
            SurrealStoreRecordIds.RemotePeer(nodeId), cancellationToken);
        if (byId != null && string.Equals(byId.NodeId, nodeId, StringComparison.Ordinal))
            return byId;
        foreach (var candidate in await SelectAllAsync(cancellationToken))
            if (string.Equals(candidate.NodeId, nodeId, StringComparison.Ordinal))
                return candidate;
        return null;
    }
}

View File

@@ -0,0 +1,311 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal-backed <see cref="PeerOplogConfirmationStore"/>. Tracks, per (peer, source-node)
/// pair, the newest oplog position a peer has confirmed. A synthetic row whose source node is
/// <see cref="RegistrationSourceNodeId"/> marks a peer as registered/active before any real
/// confirmation arrives; read APIs filter that sentinel out (except <see cref="ExportAsync"/>,
/// which returns every row so exports round-trip through <see cref="ImportAsync"/>).
/// </summary>
public class SurrealPeerOplogConfirmationStore : PeerOplogConfirmationStore
{
    /// <summary>Sentinel source-node id used for a peer's registration-only row.</summary>
    internal const string RegistrationSourceNodeId = "__peer_registration__";
    private readonly ILogger<SurrealPeerOplogConfirmationStore> _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    public SurrealPeerOplogConfirmationStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<SurrealPeerOplogConfirmationStore>? logger = null)
    {
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealPeerOplogConfirmationStore>.Instance;
    }

    /// <summary>
    /// Ensures an active registration sentinel row exists for <paramref name="peerNodeId"/>.
    /// <paramref name="address"/> and <paramref name="type"/> are used only for logging here.
    /// </summary>
    /// <exception cref="ArgumentException">When <paramref name="peerNodeId"/> is blank.</exception>
    public override async Task EnsurePeerRegisteredAsync(
        string peerNodeId,
        string address,
        PeerType type,
        CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(peerNodeId))
            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
        var existing =
            await FindByPairAsync(peerNodeId, RegistrationSourceNodeId, cancellationToken);
        if (existing == null)
        {
            // First sighting of this peer: create the sentinel row at HLC origin (0, 0).
            var created = new PeerOplogConfirmation
            {
                PeerNodeId = peerNodeId,
                SourceNodeId = RegistrationSourceNodeId,
                ConfirmedWall = 0,
                ConfirmedLogic = 0,
                ConfirmedHash = "",
                LastConfirmedUtc = DateTimeOffset.UtcNow,
                IsActive = true
            };
            await UpsertAsync(created,
                SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, RegistrationSourceNodeId),
                cancellationToken);
            _logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId,
                address, type);
            return;
        }
        if (existing.IsActive) return;
        // Peer was previously deactivated via RemovePeerTrackingAsync; reactivate in place.
        existing.IsActive = true;
        existing.LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
        RecordId recordId =
            existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, RegistrationSourceNodeId);
        await UpsertAsync(existing, recordId, cancellationToken);
    }

    /// <summary>
    /// Records that <paramref name="peerNodeId"/> confirmed <paramref name="sourceNodeId"/>'s
    /// oplog up to <paramref name="timestamp"/>/<paramref name="hash"/>. The write is skipped
    /// when the incoming position is not newer, the hash at the same HLC point is unchanged,
    /// and the row is already active.
    /// </summary>
    /// <exception cref="ArgumentException">When either node id is blank.</exception>
    public override async Task UpdateConfirmationAsync(
        string peerNodeId,
        string sourceNodeId,
        HlcTimestamp timestamp,
        string hash,
        CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(peerNodeId))
            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
        if (string.IsNullOrWhiteSpace(sourceNodeId))
            throw new ArgumentException("Source node id is required.", nameof(sourceNodeId));
        // Normalize once: we always persist "" for a missing hash, so a null argument must
        // compare equal to the stored "" — otherwise every call at the same HLC point would
        // look like a hash change and trigger a redundant upsert.
        string normalizedHash = hash ?? "";
        var existing = await FindByPairAsync(peerNodeId, sourceNodeId, cancellationToken);
        long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
        if (existing == null)
        {
            var created = new PeerOplogConfirmation
            {
                PeerNodeId = peerNodeId,
                SourceNodeId = sourceNodeId,
                ConfirmedWall = timestamp.PhysicalTime,
                ConfirmedLogic = timestamp.LogicalCounter,
                ConfirmedHash = normalizedHash,
                LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(nowMs),
                IsActive = true
            };
            await UpsertAsync(created, SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId),
                cancellationToken);
            return;
        }
        bool isNewer = IsIncomingTimestampNewer(timestamp, existing);
        bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
                                    timestamp.LogicalCounter == existing.ConfirmedLogic &&
                                    !string.Equals(existing.ConfirmedHash, normalizedHash, StringComparison.Ordinal);
        if (!isNewer && !samePointHashChanged && existing.IsActive) return;
        // NOTE(review): when the row is inactive, even an OLDER confirmation overwrites the
        // stored position (reactivation takes priority) — confirm this is intentional.
        existing.ConfirmedWall = timestamp.PhysicalTime;
        existing.ConfirmedLogic = timestamp.LogicalCounter;
        existing.ConfirmedHash = normalizedHash;
        existing.LastConfirmedUtcMs = nowMs;
        existing.IsActive = true;
        RecordId recordId = existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId);
        await UpsertAsync(existing, recordId, cancellationToken);
    }

    /// <summary>Returns all real confirmations (registration sentinel rows are excluded).</summary>
    public override async Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(
        CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all
            .Where(c => !string.Equals(c.SourceNodeId, RegistrationSourceNodeId, StringComparison.Ordinal))
            .Select(c => c.ToDomain())
            .ToList();
    }

    /// <summary>Returns the real confirmations for a single peer (sentinel rows excluded).</summary>
    /// <exception cref="ArgumentException">When <paramref name="peerNodeId"/> is blank.</exception>
    public override async Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsForPeerAsync(
        string peerNodeId,
        CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(peerNodeId))
            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
        var all = await SelectAllAsync(cancellationToken);
        return all
            .Where(c => string.Equals(c.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
                        !string.Equals(c.SourceNodeId, RegistrationSourceNodeId, StringComparison.Ordinal))
            .Select(c => c.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Soft-deactivates every row for <paramref name="peerNodeId"/> (including the sentinel);
    /// rows are kept so history survives and the peer can later be reactivated.
    /// </summary>
    /// <exception cref="ArgumentException">When <paramref name="peerNodeId"/> is blank.</exception>
    public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default)
    {
        if (string.IsNullOrWhiteSpace(peerNodeId))
            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
        var matches = (await SelectAllAsync(cancellationToken))
            .Where(c => string.Equals(c.PeerNodeId, peerNodeId, StringComparison.Ordinal))
            .ToList();
        if (matches.Count == 0) return;
        long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
        foreach (var match in matches)
        {
            if (!match.IsActive) continue;
            match.IsActive = false;
            match.LastConfirmedUtcMs = nowMs;
            RecordId recordId =
                match.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(match.PeerNodeId, match.SourceNodeId);
            await UpsertAsync(match, recordId, cancellationToken);
        }
    }

    /// <summary>Distinct peer ids that have at least one active row (sentinel rows count).</summary>
    public override async Task<IEnumerable<string>> GetActiveTrackedPeersAsync(
        CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all
            .Where(c => c.IsActive)
            .Select(c => c.PeerNodeId)
            .Distinct(StringComparer.Ordinal)
            .ToList();
    }

    /// <summary>Hard-deletes every row in the confirmations table.</summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable, cancellationToken);
    }

    /// <summary>Returns every row, sentinel rows included, so exports round-trip via import.</summary>
    public override async Task<IEnumerable<PeerOplogConfirmation>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all.Select(c => c.ToDomain()).ToList();
    }

    /// <summary>Unconditionally writes each item, overwriting any existing row for its pair.</summary>
    public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByPairAsync(item.PeerNodeId, item.SourceNodeId, cancellationToken);
            RecordId recordId =
                existing?.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(item.PeerNodeId, item.SourceNodeId);
            await UpsertAsync(item, recordId, cancellationToken);
        }
    }

    /// <summary>
    /// Merges incoming confirmations into existing rows: the confirmed position advances only
    /// when the incoming HLC timestamp is strictly newer, the last-confirmed time takes the
    /// maximum, and the active flag adopts the incoming value when different.
    /// </summary>
    public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByPairAsync(item.PeerNodeId, item.SourceNodeId, cancellationToken);
            if (existing == null)
            {
                await UpsertAsync(item, SurrealStoreRecordIds.PeerOplogConfirmation(item.PeerNodeId, item.SourceNodeId),
                    cancellationToken);
                continue;
            }
            bool changed = false;
            var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId);
            var existingTimestamp = new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId);
            if (incomingTimestamp > existingTimestamp)
            {
                existing.ConfirmedWall = item.ConfirmedWall;
                existing.ConfirmedLogic = item.ConfirmedLogic;
                existing.ConfirmedHash = item.ConfirmedHash;
                changed = true;
            }
            long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
            if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs)
            {
                existing.LastConfirmedUtcMs = incomingLastConfirmedMs;
                changed = true;
            }
            if (existing.IsActive != item.IsActive)
            {
                existing.IsActive = item.IsActive;
                changed = true;
            }
            if (!changed) continue;
            RecordId recordId =
                existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(existing.PeerNodeId, existing.SourceNodeId);
            await UpsertAsync(existing, recordId, cancellationToken);
        }
    }

    // Domain-object overload: maps to the Surreal row shape before writing.
    private async Task UpsertAsync(PeerOplogConfirmation confirmation, RecordId recordId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Upsert<SurrealPeerOplogConfirmationRecord, SurrealPeerOplogConfirmationRecord>(
            recordId,
            confirmation.ToSurrealRecord(),
            cancellationToken);
    }

    // Row-object overload: writes an already-materialized Surreal row back.
    private async Task UpsertAsync(SurrealPeerOplogConfirmationRecord confirmation, RecordId recordId,
        CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Upsert<SurrealPeerOplogConfirmationRecord, SurrealPeerOplogConfirmationRecord>(
            recordId,
            confirmation,
            cancellationToken);
    }

    // Idempotent schema creation; every operation passes through here before touching tables.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    private async Task<List<SurrealPeerOplogConfirmationRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient.Select<SurrealPeerOplogConfirmationRecord>(
            CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    // Fast path: deterministic record id. Fallback: table scan for rows that may have been
    // written under a different id.
    private async Task<SurrealPeerOplogConfirmationRecord?> FindByPairAsync(string peerNodeId, string sourceNodeId,
        CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId);
        var deterministic = await _surrealClient.Select<SurrealPeerOplogConfirmationRecord>(deterministicId, cancellationToken);
        if (deterministic != null &&
            string.Equals(deterministic.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
            string.Equals(deterministic.SourceNodeId, sourceNodeId, StringComparison.Ordinal))
            return deterministic;
        var all = await SelectAllAsync(cancellationToken);
        return all.FirstOrDefault(c =>
            string.Equals(c.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
            string.Equals(c.SourceNodeId, sourceNodeId, StringComparison.Ordinal));
    }

    // Strict HLC "newer" check: larger wall time wins; ties broken by the logical counter.
    private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, SurrealPeerOplogConfirmationRecord existing)
    {
        if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true;
        if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall &&
            incomingTimestamp.LogicalCounter > existing.ConfirmedLogic)
            return true;
        return false;
    }
}

View File

@@ -0,0 +1,296 @@
using System.Text.Json;
using Dahomey.Cbor.ObjectModel;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// One row returned by SurrealDB's change feed polling: the row's versionstamp plus the
/// decoded changes it carried.
/// </summary>
internal readonly record struct SurrealPolledChangeRow(
    ulong Versionstamp,
    IReadOnlyList<SurrealPolledChange> Changes);
/// <summary>
/// A single decoded change: operation kind, the record key within the table, and — for puts —
/// the normalized JSON payload (null for deletes).
/// </summary>
internal readonly record struct SurrealPolledChange(
    OperationType OperationType,
    string Key,
    JsonElement? Content);
/// <summary>
/// Decodes the CBOR rows returned by SurrealDB's SHOW CHANGES into
/// <see cref="SurrealPolledChangeRow"/> values, tolerating several id encodings
/// (array, string, nested object) and several put-change kind names.
/// </summary>
internal static class SurrealShowChangesCborDecoder
{
    // Change-kind property names that are all treated as a Put; probed in this order.
    private static readonly string[] PutChangeKinds = ["create", "update", "upsert", "insert", "set", "replace"];

    /// <summary>
    /// Converts raw CBOR rows to decoded rows. Rows without a readable "versionstamp" are
    /// skipped entirely; rows with an unreadable/missing "changes" array are kept with an
    /// empty change list.
    /// </summary>
    public static IReadOnlyList<SurrealPolledChangeRow> DecodeRows(
        IEnumerable<CborObject> rows,
        string expectedTableName)
    {
        var result = new List<SurrealPolledChangeRow>();
        foreach (var row in rows)
        {
            if (!TryGetProperty(row, "versionstamp", out CborValue versionstampValue)) continue;
            if (!TryReadUInt64(versionstampValue, out ulong versionstamp)) continue;
            var changes = new List<SurrealPolledChange>();
            if (TryGetProperty(row, "changes", out CborValue rawChanges) &&
                rawChanges is CborArray changeArray)
                foreach (CborValue changeValue in changeArray)
                {
                    // Non-object entries and undecodable changes are silently dropped.
                    if (changeValue is not CborObject changeObject) continue;
                    if (TryExtractChange(changeObject, expectedTableName, out SurrealPolledChange change))
                        changes.Add(change);
                }
            result.Add(new SurrealPolledChangeRow(versionstamp, changes));
        }
        return result;
    }

    // Delete is probed before any put kind; the first kind whose record key resolves wins.
    private static bool TryExtractChange(
        CborObject changeObject,
        string expectedTableName,
        out SurrealPolledChange change)
    {
        if (TryGetProperty(changeObject, "delete", out CborValue deletePayload))
            if (TryExtractRecordKey(deletePayload, expectedTableName, out string deleteKey))
            {
                // Deletes carry no content payload.
                change = new SurrealPolledChange(OperationType.Delete, deleteKey, null);
                return true;
            }
        foreach (string putKind in PutChangeKinds)
            if (TryGetProperty(changeObject, putKind, out CborValue putPayload))
                if (TryExtractRecordKey(putPayload, expectedTableName, out string putKey))
                {
                    JsonElement? content = BuildNormalizedJsonPayload(putPayload, putKey);
                    change = new SurrealPolledChange(OperationType.Put, putKey, content);
                    return true;
                }
        change = default;
        return false;
    }

    // Reads the "id" property of a change payload object and delegates the id decoding.
    private static bool TryExtractRecordKey(
        CborValue payload,
        string expectedTableName,
        out string key)
    {
        key = "";
        if (payload is not CborObject payloadObject) return false;
        if (!TryGetProperty(payloadObject, "id", out CborValue idValue)) return false;
        if (TryExtractRecordKeyFromIdValue(idValue, expectedTableName, out string extracted))
        {
            if (string.IsNullOrWhiteSpace(extracted)) return false;
            key = extracted;
            return true;
        }
        return false;
    }

    // Handles the three id encodings seen in the feed:
    //   1. [table, key] array — table name must match expectedTableName (when one is given);
    //   2. "table:key" string — table is NOT validated on this path;
    //   3. { tb|table, id } object — table validated only when present.
    private static bool TryExtractRecordKeyFromIdValue(
        CborValue idValue,
        string expectedTableName,
        out string key)
    {
        key = "";
        if (idValue is CborArray arrayId)
        {
            if (arrayId.Count < 2) return false;
            if (!TryReadString(arrayId[0], out string tableName)) return false;
            if (!string.IsNullOrWhiteSpace(expectedTableName) &&
                !string.Equals(tableName, expectedTableName, StringComparison.Ordinal))
                return false;
            if (!TryReadString(arrayId[1], out string recordKey)) return false;
            key = recordKey;
            return true;
        }
        if (idValue is CborString)
        {
            if (!TryReadString(idValue, out string recordId)) return false;
            key = ExtractKeyFromRecordId(recordId) ?? "";
            return !string.IsNullOrWhiteSpace(key);
        }
        if (idValue is CborObject idObject)
        {
            string? tableName = null;
            if (TryGetProperty(idObject, "tb", out CborValue tbValue) && TryReadString(tbValue, out string tb))
                tableName = tb;
            else if (TryGetProperty(idObject, "table", out CborValue tableValue) &&
                     TryReadString(tableValue, out string table))
                tableName = table;
            if (!string.IsNullOrWhiteSpace(expectedTableName) &&
                !string.IsNullOrWhiteSpace(tableName) &&
                !string.Equals(tableName, expectedTableName, StringComparison.Ordinal))
                return false;
            if (TryGetProperty(idObject, "id", out CborValue nestedId))
            {
                if (TryReadString(nestedId, out string nestedIdValue))
                {
                    key = nestedIdValue;
                    return true;
                }
                // Non-string nested id: fall back to its textual form, stripping quote marks.
                key = nestedId.ToString()?.Trim('"') ?? "";
                return !string.IsNullOrWhiteSpace(key);
            }
        }
        return false;
    }

    // Converts the CBOR payload to CLR objects, forces the "id" field to the bare record key,
    // and serializes the result to a JsonElement.
    private static JsonElement? BuildNormalizedJsonPayload(CborValue payload, string key)
    {
        object? clrValue = ConvertCborToClr(payload);
        if (clrValue == null) return null;
        if (clrValue is Dictionary<string, object?> payloadMap)
            payloadMap["id"] = key;
        return JsonSerializer.SerializeToElement(clrValue);
    }

    // Recursive CBOR -> CLR conversion. Scalars are probed in order
    // string, bool, int64, uint64, double; anything unreadable falls back to ToString().
    private static object? ConvertCborToClr(CborValue value)
    {
        switch (value)
        {
            case CborNull:
                return null;
            case CborObject cborObject:
                var map = new Dictionary<string, object?>(StringComparer.Ordinal);
                foreach ((CborValue rawKey, CborValue rawValue) in cborObject)
                {
                    if (!TryReadString(rawKey, out string key) || string.IsNullOrWhiteSpace(key))
                        key = rawKey.ToString()?.Trim('"') ?? "";
                    // Entries whose key cannot be rendered as text are dropped.
                    if (string.IsNullOrWhiteSpace(key)) continue;
                    map[key] = ConvertCborToClr(rawValue);
                }
                return map;
            case CborArray cborArray:
                return cborArray.Select(ConvertCborToClr).ToList();
            default:
                if (TryReadString(value, out string stringValue)) return stringValue;
                if (TryReadBoolean(value, out bool boolValue)) return boolValue;
                if (TryReadInt64(value, out long intValue)) return intValue;
                if (TryReadUInt64(value, out ulong uintValue)) return uintValue;
                if (TryReadDouble(value, out double doubleValue)) return doubleValue;
                return value.ToString();
        }
    }

    // Property lookup by string key; yields CborValue.Null (with false) when absent.
    private static bool TryGetProperty(CborObject source, string name, out CborValue value)
    {
        if (source.TryGetValue((CborValue)name, out CborValue? found))
        {
            value = found;
            return true;
        }
        value = CborValue.Null;
        return false;
    }

    // The TryRead* helpers below probe a CBOR scalar's type via Value<T>(), which throws on
    // mismatch; the catch turns that into a false result.
    private static bool TryReadString(CborValue value, out string result)
    {
        try
        {
            string? parsed = value.Value<string>();
            if (parsed == null)
            {
                result = "";
                return false;
            }
            result = parsed;
            return true;
        }
        catch
        {
            result = "";
            return false;
        }
    }

    private static bool TryReadBoolean(CborValue value, out bool result)
    {
        try
        {
            result = value.Value<bool>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    private static bool TryReadInt64(CborValue value, out long result)
    {
        try
        {
            result = value.Value<long>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    private static bool TryReadUInt64(CborValue value, out ulong result)
    {
        try
        {
            result = value.Value<ulong>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    private static bool TryReadDouble(CborValue value, out double result)
    {
        try
        {
            result = value.Value<double>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    // Splits "table:key" on the first ':' and strips one layer of surrounding double quotes
    // or backticks from the key. A string with no ':' is returned whole.
    private static string? ExtractKeyFromRecordId(string recordId)
    {
        if (string.IsNullOrWhiteSpace(recordId)) return null;
        int separator = recordId.IndexOf(':');
        if (separator < 0) return recordId;
        string key = recordId[(separator + 1)..].Trim();
        if (key.StartsWith('"') && key.EndsWith('"') && key.Length >= 2)
            key = key[1..^1];
        if (key.StartsWith('`') && key.EndsWith('`') && key.Length >= 2)
            key = key[1..^1];
        return string.IsNullOrWhiteSpace(key) ? null : key;
    }
}

View File

@@ -0,0 +1,142 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal-backed <see cref="SnapshotMetadataStore"/>: one row per node id describing the
/// snapshot recorded for that node (HLC position + hash).
/// </summary>
public class SurrealSnapshotMetadataStore : SnapshotMetadataStore
{
    // NOTE(review): _logger is assigned but never used anywhere in this class.
    private readonly ILogger<SurrealSnapshotMetadataStore> _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    public SurrealSnapshotMetadataStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<SurrealSnapshotMetadataStore>? logger = null)
    {
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealSnapshotMetadataStore>.Instance;
    }

    /// <summary>Hard-deletes every row in the snapshot-metadata table.</summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.SnapshotMetadataTable, cancellationToken);
    }

    /// <summary>Returns all stored snapshot metadata as domain objects.</summary>
    public override async Task<IEnumerable<SnapshotMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all.Select(m => m.ToDomain()).ToList();
    }

    /// <summary>Returns the metadata for <paramref name="nodeId"/>, or null when absent.</summary>
    public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByNodeIdAsync(nodeId, cancellationToken);
        return existing?.ToDomain();
    }

    /// <summary>Returns only the stored snapshot hash for <paramref name="nodeId"/>, or null.</summary>
    public override async Task<string?> GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default)
    {
        var existing = await FindByNodeIdAsync(nodeId, cancellationToken);
        return existing?.Hash;
    }

    /// <summary>Unconditionally writes each item, overwriting any existing row for its node.</summary>
    public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByNodeIdAsync(item.NodeId, cancellationToken);
            RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.SnapshotMetadata(item.NodeId);
            await UpsertAsync(item, recordId, cancellationToken);
        }
    }

    /// <summary>Inserts (or overwrites) the row for <paramref name="metadata"/>'s node id.</summary>
    public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByNodeIdAsync(metadata.NodeId, cancellationToken);
        RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId);
        await UpsertAsync(metadata, recordId, cancellationToken);
    }

    /// <summary>
    /// Last-writer-wins merge by HLC position: an incoming row replaces the stored one only
    /// when its (wall, logic) pair is strictly greater; older-or-equal entries are skipped.
    /// Missing rows are inserted as-is.
    /// </summary>
    public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items, CancellationToken cancellationToken = default)
    {
        foreach (var metadata in items)
        {
            var existing = await FindByNodeIdAsync(metadata.NodeId, cancellationToken);
            if (existing == null)
            {
                await UpsertAsync(metadata, SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId), cancellationToken);
                continue;
            }
            // Skip when incoming is older, or at the same wall time with a <= logical counter.
            if (metadata.TimestampPhysicalTime < existing.TimestampPhysicalTime ||
                (metadata.TimestampPhysicalTime == existing.TimestampPhysicalTime &&
                 metadata.TimestampLogicalCounter <= existing.TimestampLogicalCounter))
                continue;
            RecordId recordId = existing.Id ?? SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId);
            await UpsertAsync(metadata, recordId, cancellationToken);
        }
    }

    /// <summary>Overwrites the row for the node — but only when a row already exists.</summary>
    public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta,
        CancellationToken cancellationToken)
    {
        var existing = await FindByNodeIdAsync(existingMeta.NodeId, cancellationToken);
        if (existing == null) return;
        RecordId recordId = existing.Id ?? SurrealStoreRecordIds.SnapshotMetadata(existingMeta.NodeId);
        await UpsertAsync(existingMeta, recordId, cancellationToken);
    }

    /// <summary>Alias for <see cref="ExportAsync"/>.</summary>
    public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(
        CancellationToken cancellationToken = default)
    {
        return await ExportAsync(cancellationToken);
    }

    private async Task UpsertAsync(SnapshotMetadata metadata, RecordId recordId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Upsert<SurrealSnapshotMetadataRecord, SurrealSnapshotMetadataRecord>(
            recordId,
            metadata.ToSurrealRecord(),
            cancellationToken);
    }

    // Idempotent schema creation; every operation passes through here first.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    private async Task<List<SurrealSnapshotMetadataRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient.Select<SurrealSnapshotMetadataRecord>(
            CBDDCSurrealSchemaNames.SnapshotMetadataTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    // Fast path: deterministic record id; fallback: table scan for rows that may have been
    // written under a different id.
    private async Task<SurrealSnapshotMetadataRecord?> FindByNodeIdAsync(string nodeId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = SurrealStoreRecordIds.SnapshotMetadata(nodeId);
        var deterministic = await _surrealClient.Select<SurrealSnapshotMetadataRecord>(deterministicId, cancellationToken);
        if (deterministic != null &&
            string.Equals(deterministic.NodeId, nodeId, StringComparison.Ordinal))
            return deterministic;
        var all = await SelectAllAsync(cancellationToken);
        return all.FirstOrDefault(m => string.Equals(m.NodeId, nodeId, StringComparison.Ordinal));
    }
}

View File

@@ -0,0 +1,294 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Builds deterministic Surreal record ids for the CBDDC store tables so the same logical
/// entity always maps to the same record.
/// </summary>
internal static class SurrealStoreRecordIds
{
    /// <summary>Oplog entries are keyed directly by their content hash.</summary>
    public static RecordId Oplog(string hash) =>
        RecordId.From(CBDDCSurrealSchemaNames.OplogEntriesTable, hash);

    /// <summary>Document metadata is keyed by a hashed (collection, key) pair.</summary>
    public static RecordId DocumentMetadata(string collection, string key) =>
        RecordId.From(
            CBDDCSurrealSchemaNames.DocumentMetadataTable,
            CompositeKey("docmeta", collection, key));

    /// <summary>Snapshot metadata is keyed by the owning node id.</summary>
    public static RecordId SnapshotMetadata(string nodeId) =>
        RecordId.From(CBDDCSurrealSchemaNames.SnapshotMetadataTable, nodeId);

    /// <summary>Remote peer configurations are keyed by the peer's node id.</summary>
    public static RecordId RemotePeer(string nodeId) =>
        RecordId.From(CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable, nodeId);

    /// <summary>Peer oplog confirmations are keyed by a hashed (peer, source) pair.</summary>
    public static RecordId PeerOplogConfirmation(string peerNodeId, string sourceNodeId) =>
        RecordId.From(
            CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable,
            CompositeKey("peerconfirm", peerNodeId, sourceNodeId));

    // Lower-case hex SHA-256 over "prefix\nfirst\nsecond": stable across runs and free of
    // characters SurrealDB would need escaped in a record id.
    private static string CompositeKey(string prefix, string first, string second)
    {
        byte[] material = Encoding.UTF8.GetBytes($"{prefix}\n{first}\n{second}");
        byte[] digest = SHA256.HashData(material);
        return Convert.ToHexString(digest).ToLowerInvariant();
    }
}
/// <summary>Surreal row shape for one oplog entry; payload is stored as raw JSON text.</summary>
internal sealed class SurrealOplogRecord : Record
{
    [JsonPropertyName("collection")]
    public string Collection { get; set; } = "";
    [JsonPropertyName("key")]
    public string Key { get; set; } = "";
    // Stored as the integer value of the OperationType enum.
    [JsonPropertyName("operation")]
    public int Operation { get; set; }
    [JsonPropertyName("payloadJson")]
    public string PayloadJson { get; set; } = "";
    // HLC timestamp flattened into three scalar columns.
    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }
    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }
    [JsonPropertyName("timestampNodeId")]
    public string TimestampNodeId { get; set; } = "";
    [JsonPropertyName("hash")]
    public string Hash { get; set; } = "";
    [JsonPropertyName("previousHash")]
    public string PreviousHash { get; set; } = "";
}
/// <summary>Surreal row shape for per-document metadata (HLC of last update + tombstone flag).</summary>
internal sealed class SurrealDocumentMetadataRecord : Record
{
    [JsonPropertyName("collection")]
    public string Collection { get; set; } = "";
    [JsonPropertyName("key")]
    public string Key { get; set; } = "";
    // HLC timestamp of the last update, flattened into three scalar columns.
    [JsonPropertyName("hlcPhysicalTime")]
    public long HlcPhysicalTime { get; set; }
    [JsonPropertyName("hlcLogicalCounter")]
    public int HlcLogicalCounter { get; set; }
    [JsonPropertyName("hlcNodeId")]
    public string HlcNodeId { get; set; } = "";
    [JsonPropertyName("isDeleted")]
    public bool IsDeleted { get; set; }
}
/// <summary>Surreal row shape for a remote peer configuration.</summary>
internal sealed class SurrealRemotePeerRecord : Record
{
    [JsonPropertyName("nodeId")]
    public string NodeId { get; set; } = "";
    [JsonPropertyName("address")]
    public string Address { get; set; } = "";
    // Stored as the integer value of the PeerType enum.
    [JsonPropertyName("type")]
    public int Type { get; set; }
    [JsonPropertyName("isEnabled")]
    public bool IsEnabled { get; set; }
    // JSON-serialized list of collection names; "" when the peer declares no interests.
    [JsonPropertyName("interestsJson")]
    public string InterestsJson { get; set; } = "";
}
/// <summary>
/// Surreal row shape for one (peer, source-node) oplog confirmation; rows are soft-deleted
/// via <see cref="IsActive"/> rather than removed.
/// </summary>
internal sealed class SurrealPeerOplogConfirmationRecord : Record
{
    [JsonPropertyName("peerNodeId")]
    public string PeerNodeId { get; set; } = "";
    [JsonPropertyName("sourceNodeId")]
    public string SourceNodeId { get; set; } = "";
    // Confirmed HLC position: wall-clock part and logical counter.
    [JsonPropertyName("confirmedWall")]
    public long ConfirmedWall { get; set; }
    [JsonPropertyName("confirmedLogic")]
    public int ConfirmedLogic { get; set; }
    [JsonPropertyName("confirmedHash")]
    public string ConfirmedHash { get; set; } = "";
    // Unix epoch milliseconds of the last confirmation write.
    [JsonPropertyName("lastConfirmedUtcMs")]
    public long LastConfirmedUtcMs { get; set; }
    [JsonPropertyName("isActive")]
    public bool IsActive { get; set; }
}
/// <summary>Surreal row shape for per-node snapshot metadata (HLC position + hash).</summary>
internal sealed class SurrealSnapshotMetadataRecord : Record
{
    [JsonPropertyName("nodeId")]
    public string NodeId { get; set; } = "";
    // HLC position of the snapshot, flattened into two scalar columns.
    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }
    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }
    [JsonPropertyName("hash")]
    public string Hash { get; set; } = "";
}
/// <summary>
/// Bidirectional mappers between domain sync models and their Surreal persistence records.
/// Every method is a pure projection; no mapper touches storage.
/// </summary>
internal static class SurrealStoreRecordMappers
{
    /// <summary>Projects a domain <see cref="OplogEntry"/> onto its Surreal persistence record.</summary>
    public static SurrealOplogRecord ToSurrealRecord(this OplogEntry entry) => new()
    {
        Collection = entry.Collection,
        Key = entry.Key,
        Operation = (int)entry.Operation,
        // Payload-less operations (e.g. deletes) persist an empty string, not null.
        PayloadJson = entry.Payload?.GetRawText() ?? "",
        TimestampPhysicalTime = entry.Timestamp.PhysicalTime,
        TimestampLogicalCounter = entry.Timestamp.LogicalCounter,
        TimestampNodeId = entry.Timestamp.NodeId,
        Hash = entry.Hash,
        PreviousHash = entry.PreviousHash
    };

    /// <summary>Rehydrates a domain <see cref="OplogEntry"/> from a Surreal record.</summary>
    public static OplogEntry ToDomain(this SurrealOplogRecord record)
    {
        // An empty payload column round-trips back to "no payload".
        JsonElement? payload = string.IsNullOrEmpty(record.PayloadJson)
            ? default(JsonElement?)
            : JsonSerializer.Deserialize<JsonElement>(record.PayloadJson);

        var timestamp = new HlcTimestamp(
            record.TimestampPhysicalTime,
            record.TimestampLogicalCounter,
            record.TimestampNodeId);

        return new OplogEntry(
            record.Collection,
            record.Key,
            (OperationType)record.Operation,
            payload,
            timestamp,
            record.PreviousHash,
            record.Hash);
    }

    /// <summary>Projects document metadata onto its Surreal persistence record.</summary>
    public static SurrealDocumentMetadataRecord ToSurrealRecord(this DocumentMetadata metadata) => new()
    {
        Collection = metadata.Collection,
        Key = metadata.Key,
        HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime,
        HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter,
        HlcNodeId = metadata.UpdatedAt.NodeId,
        IsDeleted = metadata.IsDeleted
    };

    /// <summary>Rehydrates domain document metadata from a Surreal record.</summary>
    public static DocumentMetadata ToDomain(this SurrealDocumentMetadataRecord record) => new(
        record.Collection,
        record.Key,
        new HlcTimestamp(record.HlcPhysicalTime, record.HlcLogicalCounter, record.HlcNodeId),
        record.IsDeleted);

    /// <summary>Projects a remote-peer configuration onto its Surreal persistence record.</summary>
    public static SurrealRemotePeerRecord ToSurrealRecord(this RemotePeerConfiguration peer) => new()
    {
        NodeId = peer.NodeId,
        Address = peer.Address,
        Type = (int)peer.Type,
        IsEnabled = peer.IsEnabled,
        // No interests serializes to "" rather than "[]" — ToDomain relies on this.
        InterestsJson = peer.InterestingCollections.Count > 0
            ? JsonSerializer.Serialize(peer.InterestingCollections)
            : ""
    };

    /// <summary>Rehydrates a remote-peer configuration from a Surreal record.</summary>
    public static RemotePeerConfiguration ToDomain(this SurrealRemotePeerRecord record)
    {
        var peer = new RemotePeerConfiguration
        {
            NodeId = record.NodeId,
            Address = record.Address,
            Type = (PeerType)record.Type,
            IsEnabled = record.IsEnabled
        };

        // Only replace the default interests list when something was actually persisted.
        if (!string.IsNullOrEmpty(record.InterestsJson))
            peer.InterestingCollections =
                JsonSerializer.Deserialize<List<string>>(record.InterestsJson) ?? [];

        return peer;
    }

    /// <summary>Projects a peer oplog confirmation onto its Surreal persistence record.</summary>
    public static SurrealPeerOplogConfirmationRecord ToSurrealRecord(this PeerOplogConfirmation confirmation) => new()
    {
        PeerNodeId = confirmation.PeerNodeId,
        SourceNodeId = confirmation.SourceNodeId,
        ConfirmedWall = confirmation.ConfirmedWall,
        ConfirmedLogic = confirmation.ConfirmedLogic,
        ConfirmedHash = confirmation.ConfirmedHash,
        // Stored as Unix-epoch milliseconds; reversed in ToDomain.
        LastConfirmedUtcMs = confirmation.LastConfirmedUtc.ToUnixTimeMilliseconds(),
        IsActive = confirmation.IsActive
    };

    /// <summary>Rehydrates a peer oplog confirmation from a Surreal record.</summary>
    public static PeerOplogConfirmation ToDomain(this SurrealPeerOplogConfirmationRecord record) => new()
    {
        PeerNodeId = record.PeerNodeId,
        SourceNodeId = record.SourceNodeId,
        ConfirmedWall = record.ConfirmedWall,
        ConfirmedLogic = record.ConfirmedLogic,
        ConfirmedHash = record.ConfirmedHash,
        LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(record.LastConfirmedUtcMs),
        IsActive = record.IsActive
    };

    /// <summary>Projects snapshot metadata onto its Surreal persistence record.</summary>
    public static SurrealSnapshotMetadataRecord ToSurrealRecord(this SnapshotMetadata metadata) => new()
    {
        NodeId = metadata.NodeId,
        TimestampPhysicalTime = metadata.TimestampPhysicalTime,
        TimestampLogicalCounter = metadata.TimestampLogicalCounter,
        Hash = metadata.Hash
    };

    /// <summary>Rehydrates snapshot metadata from a Surreal record.</summary>
    public static SnapshotMetadata ToDomain(this SurrealSnapshotMetadataRecord record) => new()
    {
        NodeId = record.NodeId,
        TimestampPhysicalTime = record.TimestampPhysicalTime,
        TimestampLogicalCounter = record.TimestampLogicalCounter,
        Hash = record.Hash
    };
}

View File

@@ -12,7 +12,7 @@
<Authors>MrDevRobot</Authors>
<Description>Persistence provider for CBDDC.</Description>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
<PackageTags>p2p;database;sqlite;persistence;storage;wal</PackageTags>
<PackageTags>p2p;database;surrealdb;rocksdb;persistence;storage;wal</PackageTags>
<PackageProjectUrl>https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net</PackageProjectUrl>
<RepositoryUrl>https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net</RepositoryUrl>
<RepositoryType>git</RepositoryType>
@@ -20,13 +20,10 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="BLite" Version="1.3.1"/>
<PackageReference Include="BLite.SourceGenerators" Version="1.3.1">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
</PackageReference>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="9.0.4" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="9.0.4" />
<PackageReference Include="SurrealDb.Embedded.RocksDb" Version="0.9.0" />
<PackageReference Include="SurrealDb.Net" Version="0.9.0" />
</ItemGroup>
<ItemGroup>

View File

@@ -7,9 +7,10 @@ using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.E2E.Tests;
@@ -26,7 +27,7 @@ public class ClusterCrudSyncE2ETests
int nodeBPort = GetAvailableTcpPort();
while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort();
await using var nodeA = TestPeerNode.Create(
var nodeA = TestPeerNode.Create(
"node-a",
nodeAPort,
clusterToken,
@@ -160,6 +161,287 @@ public class ClusterCrudSyncE2ETests
() => BuildDiagnostics(nodeA, nodeB));
}
/// <summary>
/// Verifies a reconnecting peer catches up mutations that happened while it was offline.
/// </summary>
[Fact]
public async Task PeerReconnect_ShouldCatchUpMissedChanges()
{
    var clusterToken = Guid.NewGuid().ToString("N");
    int nodeAPort = GetAvailableTcpPort();
    int nodeBPort = GetAvailableTcpPort();
    while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort();

    // NOTE(review): nodeA is not declared `await using` and is never disposed in this test,
    // unlike nodeB below — confirm this is intentional and not a resource leak.
    var nodeA = TestPeerNode.Create(
        "node-a",
        nodeAPort,
        clusterToken,
        [
            new KnownPeerConfiguration
            {
                NodeId = "node-b",
                Host = "127.0.0.1",
                Port = nodeBPort
            }
        ]);
    await using var nodeB = TestPeerNode.Create(
        "node-b",
        nodeBPort,
        clusterToken,
        [
            new KnownPeerConfiguration
            {
                NodeId = "node-a",
                Host = "127.0.0.1",
                Port = nodeAPort
            }
        ]);

    await nodeA.StartAsync();
    await nodeB.StartAsync();
    // Take node B offline so it misses the mutations below.
    await nodeB.StopAsync();

    // Three successive upserts of the same key while B is down; only the last state must win.
    const string userId = "reconnect-user";
    await nodeA.UpsertUserAsync(new User
    {
        Id = userId,
        Name = "Offline Create",
        Age = 20,
        Address = new Address { City = "Rome" }
    });
    await nodeA.UpsertUserAsync(new User
    {
        Id = userId,
        Name = "Offline Update",
        Age = 21,
        Address = new Address { City = "Milan" }
    });
    await nodeA.UpsertUserAsync(new User
    {
        Id = userId,
        Name = "Offline Final",
        Age = 22,
        Address = new Address { City = "Turin" }
    });

    // Reconnect node B and wait for it to converge on the final state.
    await nodeB.StartAsync();
    await AssertEventuallyAsync(() =>
    {
        var replicated = nodeB.ReadUser(userId);
        return replicated is not null &&
               replicated.Name == "Offline Final" &&
               replicated.Age == 22 &&
               replicated.Address?.City == "Turin";
    }, 60, "Node B did not catch up missed reconnect mutations.", () => BuildDiagnostics(nodeA, nodeB));
}
/// <summary>
/// Verifies a burst of rapid multi-node mutations converges to a deterministic final state.
/// </summary>
[Fact]
public async Task MultiChangeBurst_ShouldConvergeDeterministically()
{
    var clusterToken = Guid.NewGuid().ToString("N");
    int nodeAPort = GetAvailableTcpPort();
    int nodeBPort = GetAvailableTcpPort();
    while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort();

    await using var nodeA = TestPeerNode.Create(
        "node-a",
        nodeAPort,
        clusterToken,
        [
            new KnownPeerConfiguration
            {
                NodeId = "node-b",
                Host = "127.0.0.1",
                Port = nodeBPort
            }
        ]);
    await using var nodeB = TestPeerNode.Create(
        "node-b",
        nodeBPort,
        clusterToken,
        [
            new KnownPeerConfiguration
            {
                NodeId = "node-a",
                Host = "127.0.0.1",
                Port = nodeAPort
            }
        ]);

    await nodeA.StartAsync();
    await nodeB.StartAsync();

    // Interleave writes on both nodes (distinct keys) to create concurrent traffic.
    const int burstCount = 8;
    for (var i = 0; i < burstCount; i++)
    {
        string aId = $"burst-a-{i:D2}";
        string bId = $"burst-b-{i:D2}";
        await nodeA.UpsertUserAsync(new User
        {
            Id = aId,
            Name = $"A-{i}",
            Age = 30 + i,
            Address = new Address { City = $"CityA-{i}" }
        });
        await nodeB.UpsertUserAsync(new User
        {
            Id = bId,
            Name = $"B-{i}",
            Age = 40 + i,
            Address = new Address { City = $"CityB-{i}" }
        });
    }

    // First wait for both nodes to hold all documents, then verify content cross-node.
    await AssertEventuallyAsync(
        () => nodeA.Context.Users.FindAll().Count() == burstCount * 2 &&
              nodeB.Context.Users.FindAll().Count() == burstCount * 2,
        60,
        "Burst convergence did not reach expected document counts.",
        () => BuildDiagnostics(nodeA, nodeB));
    await AssertEventuallyAsync(() =>
    {
        for (var i = 0; i < burstCount; i++)
        {
            // Each node must see the other node's writes with the exact names written.
            var aOnB = nodeB.ReadUser($"burst-a-{i:D2}");
            var bOnA = nodeA.ReadUser($"burst-b-{i:D2}");
            if (aOnB is null || bOnA is null) return false;
            if (aOnB.Name != $"A-{i}" || bOnA.Name != $"B-{i}") return false;
        }

        return true;
    }, 60, "Burst convergence content mismatch.", () => BuildDiagnostics(nodeA, nodeB));
}
/// <summary>
/// Verifies recovery safety when a process crashes after oplog commit but before checkpoint advance.
/// </summary>
[Fact]
public async Task CrashBetweenOplogAndCheckpoint_ShouldReplaySafelyOnRestart()
{
    var clusterToken = Guid.NewGuid().ToString("N");
    int nodeAPort = GetAvailableTcpPort();
    int nodeBPort = GetAvailableTcpPort();
    while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort();

    // Shared work dir survives node A's disposal so the recovered node reopens the same data.
    string sharedWorkDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-crash-{Guid.NewGuid():N}");
    Directory.CreateDirectory(sharedWorkDir);

    await using var nodeA = TestPeerNode.Create(
        "node-a",
        nodeAPort,
        clusterToken,
        [
            new KnownPeerConfiguration
            {
                NodeId = "node-b",
                Host = "127.0.0.1",
                Port = nodeBPort
            }
        ],
        workDirOverride: sharedWorkDir,
        preserveWorkDirOnDispose: true,
        useFaultInjectedCheckpointStore: true);
    bool nodeADisposed = false;
    try
    {
        await using var nodeB = TestPeerNode.Create(
            "node-b",
            nodeBPort,
            clusterToken,
            [
                new KnownPeerConfiguration
                {
                    NodeId = "node-a",
                    Host = "127.0.0.1",
                    Port = nodeAPort
                }
            ]);

        await nodeA.StartAsync();
        await nodeB.StartAsync();

        const string userId = "crash-window-user";
        var payload = new User
        {
            Id = userId,
            Name = "Crash Recovered",
            Age = 45,
            Address = new Address { City = "Naples" }
        };

        // The fault-injected checkpoint store throws after the oplog commit succeeds,
        // simulating a crash inside the commit/checkpoint window.
        await Should.ThrowAsync<InvalidOperationException>(() => nodeA.UpsertUserAsync(payload));
        // Despite the "crash", the document and exactly one local oplog entry must be persisted.
        nodeA.ReadUser(userId).ShouldNotBeNull();
        nodeA.GetLocalOplogCountForKey("Users", userId).ShouldBe(1);

        await nodeA.StopAsync();
        await nodeA.DisposeAsync();
        nodeADisposed = true;

        // RocksDB may briefly hold the file lock after disposal; retry node re-creation.
        TestPeerNode? recoveredNodeA = null;
        for (var attempt = 0; attempt < 10; attempt++)
            try
            {
                recoveredNodeA = TestPeerNode.Create(
                    "node-a",
                    nodeAPort,
                    clusterToken,
                    [
                        new KnownPeerConfiguration
                        {
                            NodeId = "node-b",
                            Host = "127.0.0.1",
                            Port = nodeBPort
                        }
                    ],
                    workDirOverride: sharedWorkDir);
                break;
            }
            catch (Exception ex) when (IsRocksDbLockContention(ex) && attempt < 9)
            {
                await Task.Delay(100);
            }

        recoveredNodeA.ShouldNotBeNull();
        await using (recoveredNodeA)
        {
            await recoveredNodeA.StartAsync();

            await AssertEventuallyAsync(() =>
            {
                var replicated = nodeB.ReadUser(userId);
                return replicated is not null
                       && replicated.Name == payload.Name
                       && replicated.Age == payload.Age
                       && replicated.Address?.City == payload.Address?.City;
            }, 60, "Node B did not converge after crash-window recovery.", () => BuildDiagnostics(recoveredNodeA, nodeB));
            // Replay after the crash must be idempotent: still exactly one oplog entry on each node.
            await AssertEventuallyAsync(
                () => recoveredNodeA.GetOplogCountForKey("Users", userId) == 1 &&
                      nodeB.GetOplogCountForKey("Users", userId) == 1,
                60,
                "Crash-window recovery created duplicate oplog entries.",
                () => BuildDiagnostics(recoveredNodeA, nodeB));
        }
    }
    finally
    {
        // Safety net for early failures; relies on DisposeAsync tolerating a second call
        // since nodeA is also `await using`.
        if (!nodeADisposed) await nodeA.DisposeAsync();
    }
}
private static async Task AssertEventuallyAsync(
Func<bool> predicate,
int timeoutSeconds,
@@ -222,6 +504,11 @@ public class ClusterCrudSyncE2ETests
return ((IPEndPoint)listener.LocalEndpoint).Port;
}
/// <summary>
/// Heuristically detects RocksDB file-lock contention by scanning the full exception text
/// (message, type, and inner exceptions via <see cref="Exception.ToString"/>) for the
/// "No locks available" marker, case-insensitively.
/// </summary>
private static bool IsRocksDbLockContention(Exception exception)
{
    string details = exception.ToString();
    return details.IndexOf("No locks available", StringComparison.OrdinalIgnoreCase) >= 0;
}
private sealed class TestPeerNode : IAsyncDisposable
{
private readonly InMemoryLogSink _logSink;
@@ -230,6 +517,7 @@ public class ClusterCrudSyncE2ETests
private readonly IOplogStore _oplogStore;
private readonly ServiceProvider _services;
private readonly string _workDir;
private readonly bool _preserveWorkDirOnDispose;
private long _lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
private int _logicalCounter;
private bool _started;
@@ -241,7 +529,8 @@ public class ClusterCrudSyncE2ETests
SampleDbContext context,
InMemoryLogSink logSink,
string workDir,
string nodeId)
string nodeId,
bool preserveWorkDirOnDispose)
{
_services = services;
_node = node;
@@ -250,10 +539,11 @@ public class ClusterCrudSyncE2ETests
_logSink = logSink;
_workDir = workDir;
_nodeId = nodeId;
_preserveWorkDirOnDispose = preserveWorkDirOnDispose;
}
/// <summary>
/// Gets the LiteDB-backed context used by this test peer.
/// Gets the Surreal-backed context used by this test peer.
/// </summary>
public SampleDbContext Context { get; }
@@ -269,7 +559,7 @@ public class ClusterCrudSyncE2ETests
}
_services.Dispose();
TryDeleteDirectory(_workDir);
if (!_preserveWorkDirOnDispose) TryDeleteDirectory(_workDir);
}
/// <summary>
@@ -284,11 +574,15 @@ public class ClusterCrudSyncE2ETests
string nodeId,
int tcpPort,
string authToken,
IReadOnlyList<KnownPeerConfiguration> knownPeers)
IReadOnlyList<KnownPeerConfiguration> knownPeers,
string? workDirOverride = null,
bool preserveWorkDirOnDispose = false,
bool useFaultInjectedCheckpointStore = false)
{
string workDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}");
string workDir = workDirOverride ?? Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}");
Directory.CreateDirectory(workDir);
string dbPath = Path.Combine(workDir, "node.blite");
string dbPath = Path.Combine(workDir, "node.rocksdb");
string surrealDatabase = nodeId.Replace("-", "_", StringComparison.Ordinal);
var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration
{
@@ -304,9 +598,33 @@ public class ClusterCrudSyncE2ETests
services.AddLogging(builder => builder.SetMinimumLevel(LogLevel.Debug));
services.AddSingleton(configProvider);
services.AddSingleton<IPeerNodeConfigurationProvider>(configProvider);
services.AddCBDDCCore()
.AddCBDDCBLite<SampleDbContext, SampleDocumentStore>(_ => new SampleDbContext(dbPath))
services.AddSingleton<ICBDDCSurrealSchemaInitializer, SampleSurrealSchemaInitializer>();
services.AddSingleton<SampleDbContext>();
var surrealOptionsFactory = new Func<IServiceProvider, CBDDCSurrealEmbeddedOptions>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = dbPath,
Namespace = "cbddc_e2e",
Database = surrealDatabase,
Cdc = new CBDDCSurrealCdcOptions
{
Enabled = true,
ConsumerId = $"{nodeId}-main"
}
});
var coreBuilder = services.AddCBDDCCore();
if (useFaultInjectedCheckpointStore)
{
services.AddSingleton<ISurrealCdcCheckpointPersistence, CrashAfterFirstAdvanceCheckpointPersistence>();
coreBuilder.AddCBDDCSurrealEmbedded<FaultInjectedSampleDocumentStore>(surrealOptionsFactory)
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
}
else
{
coreBuilder.AddCBDDCSurrealEmbedded<SampleDocumentStore>(surrealOptionsFactory)
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
}
// Deterministic tests: sync uses explicit known peers, so disable UDP discovery.
services.AddSingleton<IDiscoveryService, PassiveDiscoveryService>();
@@ -317,7 +635,15 @@ public class ClusterCrudSyncE2ETests
var oplogStore = provider.GetRequiredService<IOplogStore>();
var context = provider.GetRequiredService<SampleDbContext>();
var logSink = provider.GetRequiredService<InMemoryLogSink>();
return new TestPeerNode(provider, node, oplogStore, context, logSink, workDir, nodeId);
return new TestPeerNode(
provider,
node,
oplogStore,
context,
logSink,
workDir,
nodeId,
preserveWorkDirOnDispose);
}
/// <summary>
@@ -340,7 +666,17 @@ public class ClusterCrudSyncE2ETests
{
if (!_started) return;
try
{
await _node.Stop();
}
catch (ObjectDisposedException)
{
}
catch (AggregateException ex) when (ex.InnerExceptions.All(e => e is ObjectDisposedException))
{
}
_started = false;
}
@@ -354,6 +690,23 @@ public class ClusterCrudSyncE2ETests
return Context.Users.Find(u => u.Id == userId).FirstOrDefault();
}
/// <summary>
/// Counts oplog entries for the given document that originated on this node,
/// i.e. whose <c>TimestampNodeId</c> equals this peer's node id.
/// </summary>
public int GetLocalOplogCountForKey(string collection, string key)
{
    return Context.OplogEntries.FindAll()
        .Count(e =>
            string.Equals(e.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(e.Key, key, StringComparison.Ordinal) &&
            string.Equals(e.TimestampNodeId, _nodeId, StringComparison.Ordinal));
}
/// <summary>
/// Counts all oplog entries for the given document regardless of which node produced them.
/// </summary>
public int GetOplogCountForKey(string collection, string key)
{
    return Context.OplogEntries.FindAll()
        .Count(e =>
            string.Equals(e.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(e.Key, key, StringComparison.Ordinal));
}
/// <summary>
/// Inserts or updates a user and persists the matching oplog entry.
/// </summary>
@@ -466,6 +819,183 @@ public class ClusterCrudSyncE2ETests
}
}
/// <summary>
/// Document store used by the crash-window E2E test. It maps the "Users" and "TodoLists"
/// collections onto the sample context, but disables CDC polling and the live-select
/// accelerator and accepts a caller-supplied checkpoint persistence so the test can inject
/// a failure between oplog commit and checkpoint advance.
/// </summary>
private sealed class FaultInjectedSampleDocumentStore : SurrealDocumentStore<SampleDbContext>
{
    private const string UsersCollection = "Users";
    private const string TodoListsCollection = "TodoLists";

    public FaultInjectedSampleDocumentStore(
        SampleDbContext context,
        IPeerNodeConfigurationProvider configProvider,
        IVectorClockService vectorClockService,
        ISurrealCdcCheckpointPersistence checkpointPersistence,
        ILogger<FaultInjectedSampleDocumentStore>? logger = null)
        : base(
            context,
            context.SurrealEmbeddedClient,
            context.SchemaInitializer,
            configProvider,
            vectorClockService,
            new LastWriteWinsConflictResolver(),
            checkpointPersistence,
            // Polling off: the test drives checkpoint advancement explicitly.
            new SurrealCdcPollingOptions
            {
                Enabled = false,
                EnableLiveSelectAccelerator = false
            },
            logger)
    {
        WatchCollection(UsersCollection, context.Users, u => u.Id);
        WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id);
    }

    /// <inheritdoc />
    protected override async Task ApplyContentToEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
    {
        await UpsertEntityAsync(collection, key, content, cancellationToken);
    }

    /// <inheritdoc />
    protected override async Task ApplyContentToEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
        CancellationToken cancellationToken)
    {
        // Sequential per-document upserts; batching is not optimized in this test double.
        foreach ((string collection, string key, var content) in documents)
            await UpsertEntityAsync(collection, key, content, cancellationToken);
    }

    /// <inheritdoc />
    protected override async Task<JsonElement?> GetEntityAsJsonAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
    {
        return collection switch
        {
            UsersCollection => SerializeEntity(await _context.Users.FindByIdAsync(key, cancellationToken)),
            TodoListsCollection => SerializeEntity(await _context.TodoLists.FindByIdAsync(key, cancellationToken)),
            _ => null
        };
    }

    /// <inheritdoc />
    protected override async Task RemoveEntityAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
    {
        await DeleteEntityAsync(collection, key, cancellationToken);
    }

    /// <inheritdoc />
    protected override async Task RemoveEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key)> documents,
        CancellationToken cancellationToken)
    {
        foreach ((string collection, string key) in documents)
            await DeleteEntityAsync(collection, key, cancellationToken);
    }

    /// <inheritdoc />
    protected override async Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
        string collection,
        CancellationToken cancellationToken)
    {
        return collection switch
        {
            UsersCollection => (await _context.Users.FindAllAsync(cancellationToken))
                .Select(u => (u.Id, SerializeEntity(u)!.Value))
                .ToList(),
            TodoListsCollection => (await _context.TodoLists.FindAllAsync(cancellationToken))
                .Select(t => (t.Id, SerializeEntity(t)!.Value))
                .ToList(),
            _ => []
        };
    }

    // Inserts the entity when missing, otherwise updates it; the sync key always
    // overwrites the id carried inside the payload. Unknown collections throw.
    private async Task UpsertEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
    {
        switch (collection)
        {
            case UsersCollection:
                var user = content.Deserialize<User>() ??
                           throw new InvalidOperationException("Failed to deserialize user.");
                user.Id = key;
                if (await _context.Users.FindByIdAsync(key, cancellationToken) == null)
                    await _context.Users.InsertAsync(user, cancellationToken);
                else
                    await _context.Users.UpdateAsync(user, cancellationToken);
                break;
            case TodoListsCollection:
                var todo = content.Deserialize<TodoList>() ??
                           throw new InvalidOperationException("Failed to deserialize todo list.");
                todo.Id = key;
                if (await _context.TodoLists.FindByIdAsync(key, cancellationToken) == null)
                    await _context.TodoLists.InsertAsync(todo, cancellationToken);
                else
                    await _context.TodoLists.UpdateAsync(todo, cancellationToken);
                break;
            default:
                throw new NotSupportedException($"Collection '{collection}' is not supported for sync.");
        }
    }

    // Deletes are best-effort: unknown collections are silently ignored (unlike upserts).
    private async Task DeleteEntityAsync(string collection, string key, CancellationToken cancellationToken)
    {
        switch (collection)
        {
            case UsersCollection:
                await _context.Users.DeleteAsync(key, cancellationToken);
                break;
            case TodoListsCollection:
                await _context.TodoLists.DeleteAsync(key, cancellationToken);
                break;
        }
    }

    // Null entities map to null so callers can distinguish "missing" from a serialized value.
    private static JsonElement? SerializeEntity<T>(T? entity) where T : class
    {
        return entity == null ? null : JsonSerializer.SerializeToElement(entity);
    }
}
/// <summary>
/// Checkpoint persistence that throws on the first <see cref="AdvanceCheckpointAsync"/> call,
/// simulating a process crash after an oplog entry is committed but before the checkpoint
/// advances. Every subsequent call is a no-op, and no checkpoint is ever stored.
/// </summary>
private sealed class CrashAfterFirstAdvanceCheckpointPersistence : ISurrealCdcCheckpointPersistence
{
    // 1 = the next AdvanceCheckpointAsync must fail; atomically flipped to 0 on first use.
    private int _failOnNextAdvance = 1;

    public Task<SurrealCdcCheckpoint?> GetCheckpointAsync(
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        // Never returns a checkpoint, so recovery always replays from the beginning.
        return Task.FromResult<SurrealCdcCheckpoint?>(null);
    }

    public Task UpsertCheckpointAsync(
        HlcTimestamp timestamp,
        string lastHash,
        string? consumerId = null,
        CancellationToken cancellationToken = default,
        long? versionstampCursor = null)
    {
        // Intentionally discards the checkpoint.
        return Task.CompletedTask;
    }

    public Task AdvanceCheckpointAsync(
        OplogEntry entry,
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        // Interlocked.Exchange guarantees exactly one injected failure even under concurrency.
        if (Interlocked.Exchange(ref _failOnNextAdvance, 0) == 1)
            throw new InvalidOperationException("Injected crash between oplog commit and checkpoint advance.");
        return Task.CompletedTask;
    }
}
private sealed class PassiveDiscoveryService : IDiscoveryService
{
/// <inheritdoc />

View File

@@ -4,44 +4,51 @@ using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations.
/// Tests for Surreal persistence stores: Export, Import, Merge, Drop operations.
/// </summary>
public class BLiteStoreExportImportTests : IDisposable
public class SurrealStoreExportImportTests : IDisposable
{
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLiteSnapshotMetadataStore<SampleDbContext> _snapshotMetadataStore;
private readonly SurrealOplogStore _oplogStore;
private readonly SurrealPeerConfigurationStore _peerConfigStore;
private readonly SurrealSnapshotMetadataStore _snapshotMetadataStore;
private readonly string _testDbPath;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteStoreExportImportTests" /> class.
/// Initializes a new instance of the <see cref="SurrealStoreExportImportTests" /> class.
/// </summary>
public BLiteStoreExportImportTests()
public SurrealStoreExportImportTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.blite");
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
NullLogger<SampleDocumentStore>.Instance);
_snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
_oplogStore = new BLiteOplogStore<SampleDbContext>(
_context, _documentStore, new LastWriteWinsConflictResolver(),
logger: NullLogger<SampleDocumentStore>.Instance);
_snapshotMetadataStore = new SurrealSnapshotMetadataStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
_oplogStore = new SurrealOplogStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
_documentStore,
new LastWriteWinsConflictResolver(),
vectorClock,
_snapshotMetadataStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
NullLogger<SurrealOplogStore>.Instance);
_peerConfigStore = new SurrealPeerConfigurationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
}
/// <summary>
@@ -52,10 +59,10 @@ public class BLiteStoreExportImportTests : IDisposable
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
if (Directory.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
Directory.Delete(_testDbPath, true);
}
catch
{

View File

@@ -1,14 +1,14 @@
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class PeerOplogConfirmationStoreTests : IDisposable
{
private readonly SampleDbContext _context;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _store;
private readonly SurrealPeerOplogConfirmationStore _store;
private readonly string _testDbPath;
/// <summary>
@@ -16,21 +16,22 @@ public class PeerOplogConfirmationStoreTests : IDisposable
/// </summary>
public PeerOplogConfirmationStoreTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.blite");
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_testDbPath);
_store = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
_store = new SurrealPeerOplogConfirmationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
}
/// <inheritdoc />
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_testDbPath))
if (Directory.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
Directory.Delete(_testDbPath, true);
}
catch
{

View File

@@ -10,7 +10,7 @@ public class SampleDbContextTests : IDisposable
/// </summary>
public SampleDbContextTests()
{
_dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.db");
_dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_dbPath);
}
@@ -20,10 +20,10 @@ public class SampleDbContextTests : IDisposable
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_dbPath))
if (Directory.Exists(_dbPath))
try
{
File.Delete(_dbPath);
Directory.Delete(_dbPath, true);
}
catch
{
@@ -38,9 +38,9 @@ public class SampleDbContextTests : IDisposable
{
// Verifica che le collezioni siano state inizializzate
_context.ShouldNotBeNull();
_context.Users.ShouldNotBeNull("Users collection should be initialized by BLite");
_context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by BLite");
_context.OplogEntries.ShouldNotBeNull("OplogEntries collection should be initialized by BLite");
_context.Users.ShouldNotBeNull("Users collection should be initialized by Surreal context");
_context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by Surreal context");
_context.OplogEntries.ShouldNotBeNull("OplogEntries view should be initialized by Surreal context");
}
/// <summary>

View File

@@ -5,7 +5,7 @@ using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
@@ -14,9 +14,9 @@ public class SnapshotStoreTests : IDisposable
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _peerConfirmationStore;
private readonly SurrealOplogStore _oplogStore;
private readonly SurrealPeerConfigurationStore _peerConfigStore;
private readonly SurrealPeerOplogConfirmationStore _peerConfirmationStore;
private readonly SnapshotStore _snapshotStore;
private readonly string _testDbPath;
@@ -25,29 +25,33 @@ public class SnapshotStoreTests : IDisposable
/// </summary>
public SnapshotStoreTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.blite");
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
NullLogger<SampleDocumentStore>.Instance);
var snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context,
NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
_oplogStore = new BLiteOplogStore<SampleDbContext>(
_context,
logger: NullLogger<SampleDocumentStore>.Instance);
var snapshotMetadataStore = new SurrealSnapshotMetadataStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
_oplogStore = new SurrealOplogStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
_documentStore,
new LastWriteWinsConflictResolver(),
vectorClock,
snapshotMetadataStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
_peerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
NullLogger<SurrealOplogStore>.Instance);
_peerConfigStore = new SurrealPeerConfigurationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
_peerConfirmationStore = new SurrealPeerOplogConfirmationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
_snapshotStore = new SnapshotStore(
_documentStore,
@@ -66,10 +70,10 @@ public class SnapshotStoreTests : IDisposable
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
if (Directory.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
Directory.Delete(_testDbPath, true);
}
catch
{
@@ -170,26 +174,34 @@ public class SnapshotStoreTests : IDisposable
snapshotStream.Position = 0;
// Create a new context/stores (simulating a different node)
string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite");
string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.rocksdb");
try
{
using var newContext = new SampleDbContext(newDbPath);
var newConfigProvider = CreateConfigProvider("test-new-node");
var newVectorClock = new VectorClockService();
var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock,
NullLogger<SampleDocumentStore>.Instance);
var newSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
newContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var newOplogStore = new BLiteOplogStore<SampleDbContext>(
newContext, newDocStore, new LastWriteWinsConflictResolver(),
logger: NullLogger<SampleDocumentStore>.Instance);
var newSnapshotMetaStore = new SurrealSnapshotMetadataStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
var newOplogStore = new SurrealOplogStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
newDocStore,
new LastWriteWinsConflictResolver(),
newVectorClock,
newSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var newPeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
newContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
newContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
NullLogger<SurrealOplogStore>.Instance);
var newPeerStore = new SurrealPeerConfigurationStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
var newPeerConfirmationStore = new SurrealPeerOplogConfirmationStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
var newSnapshotStore = new SnapshotStore(
newDocStore,
@@ -218,10 +230,10 @@ public class SnapshotStoreTests : IDisposable
}
finally
{
if (File.Exists(newDbPath))
if (Directory.Exists(newDbPath))
try
{
File.Delete(newDbPath);
Directory.Delete(newDbPath, true);
}
catch
{
@@ -250,7 +262,7 @@ public class SnapshotStoreTests : IDisposable
await _context.SaveChangesAsync();
// Create snapshot with different data
string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite");
string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.rocksdb");
MemoryStream snapshotStream;
try
@@ -262,19 +274,27 @@ public class SnapshotStoreTests : IDisposable
var sourceConfigProvider = CreateConfigProvider("test-source-node");
var sourceVectorClock = new VectorClockService();
var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock,
NullLogger<SampleDocumentStore>.Instance);
var sourceSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
sourceContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var sourceOplogStore = new BLiteOplogStore<SampleDbContext>(
sourceContext, sourceDocStore, new LastWriteWinsConflictResolver(),
logger: NullLogger<SampleDocumentStore>.Instance);
var sourceSnapshotMetaStore = new SurrealSnapshotMetadataStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
var sourceOplogStore = new SurrealOplogStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
sourceDocStore,
new LastWriteWinsConflictResolver(),
sourceVectorClock,
sourceSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var sourcePeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
sourceContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
sourceContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
NullLogger<SurrealOplogStore>.Instance);
var sourcePeerStore = new SurrealPeerConfigurationStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
var sourcePeerConfirmationStore = new SurrealPeerOplogConfirmationStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
@@ -300,10 +320,10 @@ public class SnapshotStoreTests : IDisposable
}
finally
{
if (File.Exists(sourceDbPath))
if (Directory.Exists(sourceDbPath))
try
{
File.Delete(sourceDbPath);
Directory.Delete(sourceDbPath, true);
}
catch
{

View File

@@ -0,0 +1,580 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Durability tests for the Surreal embedded (RocksDB) CDC pipeline: checkpoint
/// persistence per consumer, catch-up resume after restart, idempotent remote
/// batch apply, and tombstone metadata on local delete. Runs in a
/// non-parallelized collection because each test opens an exclusive RocksDB
/// directory (see <see cref="CdcTestHarness.OpenWithRetriesAsync"/>).
/// </summary>
[Collection("SurrealCdcDurability")]
public class SurrealCdcDurabilityTests
{
    [Fact]
    public async Task CheckpointPersistence_ShouldTrackLatestLocalChange_AndPersistPerConsumer()
    {
        string dbPath = CreateTemporaryDatabasePath();
        const string nodeId = "node-checkpoint";
        const string defaultConsumer = "consumer-default";
        const string secondaryConsumer = "consumer-secondary";
        try
        {
            // Captured inside the first harness scope, asserted after restart.
            HlcTimestamp expectedTimestamp = default;
            string expectedHash = "";
            DateTimeOffset previousUpdatedUtc = DateTimeOffset.MinValue;
            await using (var harness = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, defaultConsumer))
            {
                var user = CreateUser("checkpoint-user", "Alice", 30, "Austin");
                await harness.Context.Users.InsertAsync(user);
                await harness.Context.SaveChangesAsync();
                await harness.PollAsync();
                user.Age = 31;
                user.Address = new Address { City = "Dallas" };
                await harness.Context.Users.UpdateAsync(user);
                await harness.Context.SaveChangesAsync();
                await harness.PollAsync();
                // CDC surfacing is asynchronous relative to SaveChanges; wait for
                // both writes to appear in the oplog before asserting counts.
                await WaitForConditionAsync(
                    async () => (await harness.GetEntriesByKeyAsync("Users", "checkpoint-user")).Count >= 2,
                    "Timed out waiting for checkpoint-user oplog entries.");
                var entries = await harness.GetEntriesByKeyAsync("Users", "checkpoint-user");
                entries.Count.ShouldBe(2);
                // The default consumer's checkpoint must track the newest entry.
                expectedTimestamp = entries[^1].Timestamp;
                expectedHash = entries[^1].Hash;
                var checkpoint = await harness.CheckpointPersistence.GetCheckpointAsync();
                checkpoint.ShouldNotBeNull();
                checkpoint!.Timestamp.ShouldBe(expectedTimestamp);
                checkpoint.LastHash.ShouldBe(expectedHash);
                previousUpdatedUtc = checkpoint.UpdatedUtc;
                // Pin a second consumer to the OLDER entry so the two checkpoints
                // are distinguishable after the restart below.
                await harness.CheckpointPersistence.UpsertCheckpointAsync(
                    entries[0].Timestamp,
                    entries[0].Hash,
                    secondaryConsumer);
                var secondary = await harness.CheckpointPersistence.GetCheckpointAsync(secondaryConsumer);
                secondary.ShouldNotBeNull();
                secondary!.Timestamp.ShouldBe(entries[0].Timestamp);
                secondary.LastHash.ShouldBe(entries[0].Hash);
            }
            // Reopen the same database: both per-consumer checkpoints must be
            // restored verbatim, including UpdatedUtc (no rewrite on load).
            await using (var restarted = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, defaultConsumer))
            {
                var restoredDefault = await restarted.CheckpointPersistence.GetCheckpointAsync();
                restoredDefault.ShouldNotBeNull();
                restoredDefault!.Timestamp.ShouldBe(expectedTimestamp);
                restoredDefault.LastHash.ShouldBe(expectedHash);
                restoredDefault.UpdatedUtc.ShouldBe(previousUpdatedUtc);
                var restoredSecondary = await restarted.CheckpointPersistence.GetCheckpointAsync(secondaryConsumer);
                restoredSecondary.ShouldNotBeNull();
                restoredSecondary!.LastHash.ShouldNotBe(restoredDefault.LastHash);
            }
        }
        finally
        {
            await DeleteDirectoryWithRetriesAsync(dbPath);
        }
    }

    [Fact]
    public async Task RestartRecovery_ShouldResumeCatchUpFromPersistedCheckpoint_InRocksDb()
    {
        string dbPath = CreateTemporaryDatabasePath();
        const string nodeId = "node-resume";
        const string consumerId = "consumer-resume";
        // Captured in phase 1, consumed by the two restart phases.
        HlcTimestamp resumeTimestamp = default;
        string resumeHash = "";
        string expectedFinalHash = "";
        try
        {
            // Phase 1: write two entries, then rewind the checkpoint to the first.
            await using (var initial = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId))
            {
                await initial.Context.Users.InsertAsync(CreateUser("resume-1", "User One", 18, "Rome"));
                await initial.Context.SaveChangesAsync();
                await initial.PollAsync();
                await initial.Context.Users.InsertAsync(CreateUser("resume-2", "User Two", 19, "Milan"));
                await initial.Context.SaveChangesAsync();
                await initial.PollAsync();
                await WaitForConditionAsync(
                    async () => (await initial.GetEntriesByCollectionAsync("Users")).Count >= 2,
                    "Timed out waiting for resume oplog entries.");
                var entries = await initial.GetEntriesByCollectionAsync("Users");
                entries.Count.ShouldBe(2);
                resumeTimestamp = entries[0].Timestamp;
                resumeHash = entries[0].Hash;
                expectedFinalHash = entries[1].Hash;
                await initial.CheckpointPersistence.UpsertCheckpointAsync(resumeTimestamp, resumeHash);
            }
            // Phase 2: restart and catch up — exactly the second entry remains.
            await using (var restarted = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId))
            {
                var checkpoint = await restarted.CheckpointPersistence.GetCheckpointAsync();
                checkpoint.ShouldNotBeNull();
                checkpoint!.Timestamp.ShouldBe(resumeTimestamp);
                checkpoint.LastHash.ShouldBe(resumeHash);
                var catchUp = (await restarted.OplogStore.GetOplogAfterAsync(checkpoint.Timestamp))
                    .OrderBy(e => e.Timestamp.PhysicalTime)
                    .ThenBy(e => e.Timestamp.LogicalCounter)
                    .ToList();
                catchUp.Count.ShouldBe(1);
                catchUp[0].Hash.ShouldBe(expectedFinalHash);
                await restarted.CheckpointPersistence.AdvanceCheckpointAsync(catchUp[0]);
            }
            // Phase 3: a further restart sees the advanced checkpoint and no backlog.
            await using (var recovered = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId))
            {
                var finalCheckpoint = await recovered.CheckpointPersistence.GetCheckpointAsync();
                finalCheckpoint.ShouldNotBeNull();
                finalCheckpoint!.LastHash.ShouldBe(expectedFinalHash);
                var remaining = await recovered.OplogStore.GetOplogAfterAsync(finalCheckpoint.Timestamp);
                remaining.ShouldBeEmpty();
            }
        }
        finally
        {
            await DeleteDirectoryWithRetriesAsync(dbPath);
        }
    }

    [Fact]
    public async Task RemoteApply_ShouldBeIdempotentAcrossDuplicateWindow_WithoutLoopbackEntries()
    {
        string dbPath = CreateTemporaryDatabasePath();
        const string localNodeId = "node-local";
        const string remoteNodeId = "node-remote";
        try
        {
            await using var harness = await CdcTestHarness.OpenWithRetriesAsync(
                dbPath,
                localNodeId,
                "consumer-loopback");
            await harness.Context.Users.InsertAsync(CreateUser("loopback-user", "Loopback", 40, "Boston"));
            await harness.Context.SaveChangesAsync();
            await harness.PollAsync();
            await WaitForConditionAsync(
                async () => (await harness.GetEntriesByKeyAsync("Users", "loopback-user")).Count >= 1,
                "Timed out waiting for loopback-user insert oplog entry.");
            var localEntries = await harness.GetEntriesByKeyAsync("Users", "loopback-user");
            localEntries.Count.ShouldBe(1);
            localEntries[0].Operation.ShouldBe(OperationType.Put);
            localEntries[0].Timestamp.NodeId.ShouldBe(localNodeId);
            // Simulate a remote delete that wins by HLC (later physical time) and
            // chains onto the local entry's hash.
            var remoteDelete = new OplogEntry(
                "Users",
                "loopback-user",
                OperationType.Delete,
                null,
                new HlcTimestamp(localEntries[0].Timestamp.PhysicalTime + 10, 0, remoteNodeId),
                localEntries[0].Hash);
            // Duplicate within one batch AND across batches: four total deliveries
            // of the same entry must apply exactly once.
            var duplicateWindow = new[] { remoteDelete, remoteDelete };
            await harness.OplogStore.ApplyBatchAsync(duplicateWindow);
            await harness.OplogStore.ApplyBatchAsync(duplicateWindow);
            harness.Context.Users.FindById("loopback-user").ShouldBeNull();
            var allEntries = await harness.GetEntriesByKeyAsync("Users", "loopback-user");
            // Exactly one stored copy of the remote delete, and no locally-stamped
            // delete — i.e. applying a remote entry must not loop back through CDC
            // as a fresh local change.
            allEntries.Count(e => e.Hash == remoteDelete.Hash).ShouldBe(1);
            allEntries.Count(e => e.Operation == OperationType.Delete && e.Timestamp.NodeId == localNodeId)
                .ShouldBe(0);
            allEntries.Count(e => e.Operation == OperationType.Delete && e.Timestamp.NodeId == remoteNodeId)
                .ShouldBe(1);
        }
        finally
        {
            await DeleteDirectoryWithRetriesAsync(dbPath);
        }
    }

    [Fact]
    public async Task LocalDelete_ShouldPersistTombstoneMetadata_AndAdvanceCheckpoint()
    {
        string dbPath = CreateTemporaryDatabasePath();
        const string nodeId = "node-tombstone";
        try
        {
            await using var harness = await CdcTestHarness.OpenWithRetriesAsync(
                dbPath,
                nodeId,
                "consumer-tombstone");
            await harness.Context.Users.InsertAsync(CreateUser("tombstone-user", "Before Delete", 28, "Turin"));
            await harness.Context.SaveChangesAsync();
            await harness.PollAsync();
            await harness.Context.Users.DeleteAsync("tombstone-user");
            await harness.Context.SaveChangesAsync();
            await harness.PollAsync();
            harness.Context.Users.FindById("tombstone-user").ShouldBeNull();
            await WaitForConditionAsync(
                async () => (await harness.GetEntriesByKeyAsync("Users", "tombstone-user")).Count >= 2,
                "Timed out waiting for tombstone-user oplog entries.");
            var entries = await harness.GetEntriesByKeyAsync("Users", "tombstone-user");
            entries.Count.ShouldBe(2);
            var deleteEntry = entries.Last(e => e.Operation == OperationType.Delete);
            // The metadata row survives the delete as a tombstone stamped with the
            // delete entry's HLC timestamp.
            var metadata = await harness.MetadataStore.GetMetadataAsync("Users", "tombstone-user");
            metadata.ShouldNotBeNull();
            metadata!.IsDeleted.ShouldBeTrue();
            metadata.UpdatedAt.ShouldBe(deleteEntry.Timestamp);
            // The checkpoint advances to the delete, not the earlier insert.
            var checkpoint = await harness.CheckpointPersistence.GetCheckpointAsync();
            checkpoint.ShouldNotBeNull();
            checkpoint!.LastHash.ShouldBe(deleteEntry.Hash);
            checkpoint.Timestamp.ShouldBe(deleteEntry.Timestamp);
        }
        finally
        {
            await DeleteDirectoryWithRetriesAsync(dbPath);
        }
    }

    // Builds a sample user entity with a nested address document.
    private static User CreateUser(string id, string name, int age, string city)
    {
        return new User
        {
            Id = id,
            Name = name,
            Age = age,
            Address = new Address { City = city }
        };
    }

    // Unique temp directory per test; ".rocksdb" paths are directories, not files.
    private static string CreateTemporaryDatabasePath()
    {
        return Path.Combine(Path.GetTempPath(), $"cbddc-cdc-{Guid.NewGuid():N}.rocksdb");
    }

    // RocksDB may hold its directory lock briefly after dispose; retry the
    // recursive delete a few times. The final attempt lets the exception escape.
    private static async Task DeleteDirectoryWithRetriesAsync(string path)
    {
        for (var attempt = 0; attempt < 5; attempt++)
            try
            {
                if (Directory.Exists(path)) Directory.Delete(path, true);
                return;
            }
            catch when (attempt < 4)
            {
                await Task.Delay(50);
            }
    }

    // Polls <paramref name="predicate"/> until it returns true or the timeout
    // elapses; throws TimeoutException with the supplied message on expiry.
    private static async Task WaitForConditionAsync(
        Func<Task<bool>> predicate,
        string failureMessage,
        int timeoutMs = 6000,
        int pollMs = 50)
    {
        DateTimeOffset deadline = DateTimeOffset.UtcNow.AddMilliseconds(timeoutMs);
        while (DateTimeOffset.UtcNow < deadline)
        {
            if (await predicate()) return;
            await Task.Delay(pollMs);
        }
        throw new TimeoutException(failureMessage);
    }
}
/// <summary>
/// xUnit collection definition that serializes every test in the
/// "SurrealCdcDurability" collection, so no two tests contend for an embedded
/// RocksDB directory at the same time.
/// </summary>
[CollectionDefinition("SurrealCdcDurability", DisableParallelization = true)]
public sealed class SurrealCdcDurabilityCollection
{
}
/// <summary>
/// Per-test wiring for the Surreal CDC stack: opens a <see cref="SampleDbContext"/>
/// over a RocksDB path and builds checkpoint persistence, a checkpointed document
/// store, an oplog store, and a metadata store that all share the context's
/// embedded client and schema initializer. Disposal is async so callers can
/// reopen the same database path afterwards.
/// </summary>
internal sealed class CdcTestHarness : IAsyncDisposable
{
    private readonly VectorClockService _vectorClock;
    private readonly CBDDCSurrealEmbeddedOptions _options;

    // Private: use OpenWithRetriesAsync, which absorbs transient RocksDB
    // lock contention when a previous harness just released the directory.
    private CdcTestHarness(string databasePath, string nodeId, string consumerId)
    {
        _options = new CBDDCSurrealEmbeddedOptions
        {
            Cdc = new CBDDCSurrealCdcOptions
            {
                Enabled = true,
                ConsumerId = consumerId,
                CheckpointTable = "cbddc_cdc_checkpoint"
            }
        };
        Context = new SampleDbContext(databasePath);
        _vectorClock = new VectorClockService();
        var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
        configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = nodeId,
            AuthToken = "test-token",
            TcpPort = 0
        });
        CheckpointPersistence = new SurrealCdcCheckpointPersistence(
            Context.SurrealEmbeddedClient,
            Context.SchemaInitializer,
            _options);
        DocumentStore = new CheckpointedSampleDocumentStore(
            Context,
            configProvider,
            _vectorClock,
            CheckpointPersistence,
            _options,
            NullLogger<CheckpointedSampleDocumentStore>.Instance);
        OplogStore = new SurrealOplogStore(
            Context.SurrealEmbeddedClient,
            Context.SchemaInitializer,
            DocumentStore,
            new LastWriteWinsConflictResolver(),
            _vectorClock,
            null, // no snapshot metadata store — not exercised by these tests
            NullLogger<SurrealOplogStore>.Instance);
        MetadataStore = new SurrealDocumentMetadataStore(
            Context.SurrealEmbeddedClient,
            Context.SchemaInitializer,
            NullLogger<SurrealDocumentMetadataStore>.Instance);
    }

    public SampleDbContext Context { get; }
    public CheckpointedSampleDocumentStore DocumentStore { get; }
    public SurrealOplogStore OplogStore { get; }
    public SurrealDocumentMetadataStore MetadataStore { get; }
    public ISurrealCdcCheckpointPersistence CheckpointPersistence { get; }

    /// <summary>Runs a single CDC poll cycle on the document store.</summary>
    public async Task PollAsync()
    {
        await DocumentStore.PollCdcOnceAsync();
    }

    /// <summary>
    /// Constructs a harness, retrying up to 8 times when RocksDB reports lock
    /// contention ("No locks available"), e.g. right after a prior harness on the
    /// same path was disposed. Non-contention failures propagate immediately.
    /// </summary>
    public static async Task<CdcTestHarness> OpenWithRetriesAsync(
        string databasePath,
        string nodeId,
        string consumerId)
    {
        for (var attempt = 0; attempt < 8; attempt++)
            try
            {
                return new CdcTestHarness(databasePath, nodeId, consumerId);
            }
            catch (Exception ex) when (IsLockContention(ex) && attempt < 7)
            {
                await Task.Delay(75);
            }
        // Unreachable in practice (the final attempt rethrows), but required
        // so every code path returns or throws.
        throw new InvalidOperationException("Unable to acquire RocksDB lock for test harness.");
    }

    // Full oplog export filtered to one collection, in HLC order.
    public async Task<List<OplogEntry>> GetEntriesByCollectionAsync(string collection)
    {
        return (await OplogStore.ExportAsync())
            .Where(e => string.Equals(e.Collection, collection, StringComparison.Ordinal))
            .OrderBy(e => e.Timestamp.PhysicalTime)
            .ThenBy(e => e.Timestamp.LogicalCounter)
            .ToList();
    }

    // Full oplog export filtered to one document key, in HLC order.
    public async Task<List<OplogEntry>> GetEntriesByKeyAsync(string collection, string key)
    {
        return (await OplogStore.ExportAsync())
            .Where(e => string.Equals(e.Collection, collection, StringComparison.Ordinal) &&
                        string.Equals(e.Key, key, StringComparison.Ordinal))
            .OrderBy(e => e.Timestamp.PhysicalTime)
            .ThenBy(e => e.Timestamp.LogicalCounter)
            .ToList();
    }

    public async ValueTask DisposeAsync()
    {
        DocumentStore.Dispose();
        Context.Dispose();
        // Brief grace period — presumably so RocksDB fully releases its directory
        // lock before a follow-up harness reopens the same path; pairs with the
        // retry in OpenWithRetriesAsync. TODO confirm this delay is still needed.
        await Task.Delay(75);
    }

    // Matches RocksDB's file-lock failure text anywhere in the exception chain.
    private static bool IsLockContention(Exception exception)
    {
        return exception.ToString().Contains("No locks available", StringComparison.OrdinalIgnoreCase);
    }
}
/// <summary>
/// Test document store bridging <see cref="SurrealDocumentStore{T}"/> CDC events
/// to the sample context's Users and TodoLists collections. Collections are
/// watched via CDC only (in-memory event subscription disabled), so changes are
/// observed through polling rather than direct callbacks.
/// </summary>
internal sealed class CheckpointedSampleDocumentStore : SurrealDocumentStore<SampleDbContext>
{
    private const string UsersCollection = "Users";
    private const string TodoListsCollection = "TodoLists";

    public CheckpointedSampleDocumentStore(
        SampleDbContext context,
        IPeerNodeConfigurationProvider configProvider,
        IVectorClockService vectorClockService,
        ISurrealCdcCheckpointPersistence checkpointPersistence,
        CBDDCSurrealEmbeddedOptions? surrealOptions = null,
        ILogger<CheckpointedSampleDocumentStore>? logger = null)
        : base(
            context,
            context.SurrealEmbeddedClient,
            context.SchemaInitializer,
            configProvider,
            vectorClockService,
            new LastWriteWinsConflictResolver(),
            checkpointPersistence,
            BuildPollingOptions(surrealOptions),
            logger)
    {
        // subscribeForInMemoryEvents: false — tests drive polling explicitly.
        WatchCollection(UsersCollection, context.Users, u => u.Id, subscribeForInMemoryEvents: false);
        WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id, subscribeForInMemoryEvents: false);
    }

    /// <summary>Applies one remote document payload to the matching entity.</summary>
    protected override async Task ApplyContentToEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
    {
        await UpsertEntityAsync(collection, key, content, cancellationToken);
    }

    /// <summary>Applies a batch of payloads sequentially, one upsert per document.</summary>
    protected override async Task ApplyContentToEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
        CancellationToken cancellationToken)
    {
        foreach ((string collection, string key, var content) in documents)
            await UpsertEntityAsync(collection, key, content, cancellationToken);
    }

    /// <summary>
    /// Serializes the current entity to JSON, or null when the key is missing
    /// or the collection is not one of the two watched ones.
    /// </summary>
    protected override async Task<JsonElement?> GetEntityAsJsonAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
    {
        return collection switch
        {
            UsersCollection => SerializeEntity(await _context.Users.FindByIdAsync(key, cancellationToken)),
            TodoListsCollection => SerializeEntity(await _context.TodoLists.FindByIdAsync(key, cancellationToken)),
            _ => null
        };
    }

    protected override async Task RemoveEntityAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
    {
        await DeleteEntityAsync(collection, key, cancellationToken);
    }

    protected override async Task RemoveEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key)> documents,
        CancellationToken cancellationToken)
    {
        foreach ((string collection, string key) in documents)
            await DeleteEntityAsync(collection, key, cancellationToken);
    }

    /// <summary>Snapshots an entire watched collection as (key, json) pairs.</summary>
    protected override async Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
        string collection,
        CancellationToken cancellationToken)
    {
        return collection switch
        {
            UsersCollection => (await _context.Users.FindAllAsync(cancellationToken))
                .Select(u => (u.Id, SerializeEntity(u)!.Value))
                .ToList(),
            TodoListsCollection => (await _context.TodoLists.FindAllAsync(cancellationToken))
                .Select(t => (t.Id, SerializeEntity(t)!.Value))
                .ToList(),
            _ => []
        };
    }

    // Insert-or-update: the document key is authoritative and overwrites any Id
    // value carried inside the payload before persisting.
    private async Task UpsertEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
    {
        switch (collection)
        {
            case UsersCollection:
                var user = content.Deserialize<User>() ??
                           throw new InvalidOperationException("Failed to deserialize user.");
                user.Id = key;
                if (await _context.Users.FindByIdAsync(key, cancellationToken) == null)
                    await _context.Users.InsertAsync(user, cancellationToken);
                else
                    await _context.Users.UpdateAsync(user, cancellationToken);
                break;
            case TodoListsCollection:
                var todo = content.Deserialize<TodoList>() ??
                           throw new InvalidOperationException("Failed to deserialize todo list.");
                todo.Id = key;
                if (await _context.TodoLists.FindByIdAsync(key, cancellationToken) == null)
                    await _context.TodoLists.InsertAsync(todo, cancellationToken);
                else
                    await _context.TodoLists.UpdateAsync(todo, cancellationToken);
                break;
            default:
                throw new NotSupportedException($"Collection '{collection}' is not supported for sync.");
        }
    }

    // Deletes by key; unknown collections fall through silently (unlike upsert,
    // which throws) — NOTE(review): asymmetry appears intentional, confirm.
    private async Task DeleteEntityAsync(string collection, string key, CancellationToken cancellationToken)
    {
        switch (collection)
        {
            case UsersCollection:
                await _context.Users.DeleteAsync(key, cancellationToken);
                break;
            case TodoListsCollection:
                await _context.TodoLists.DeleteAsync(key, cancellationToken);
                break;
        }
    }

    // Null-propagating JSON serialization helper for entity snapshots.
    private static JsonElement? SerializeEntity<T>(T? entity) where T : class
    {
        return entity == null ? null : JsonSerializer.SerializeToElement(entity);
    }

    // Maps the CBDDC-level CDC options onto the store's polling options;
    // null in → null out (store falls back to its own defaults).
    private static SurrealCdcPollingOptions? BuildPollingOptions(CBDDCSurrealEmbeddedOptions? options)
    {
        if (options == null) return null;
        return new SurrealCdcPollingOptions
        {
            Enabled = options.Cdc.Enabled,
            PollInterval = options.Cdc.PollingInterval,
            BatchSize = options.Cdc.BatchSize,
            EnableLiveSelectAccelerator = options.Cdc.EnableLiveSelectAccelerator,
            LiveSelectReconnectDelay = options.Cdc.LiveSelectReconnectDelay
        };
    }
}

View File

@@ -0,0 +1,219 @@
using System.Text.Json;
using System.Reflection;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models.Response;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Remaining cells of the Surreal CDC test matrix: the changefeed-retention
/// error classifier, single-write oplog emission, and checkpoint behavior when
/// the atomic write fails.
/// </summary>
public class SurrealCdcMatrixCompletionTests
{
    [Theory]
    [InlineData("versionstamp is outside the configured retention window", true)]
    [InlineData("change feed history since cursor is unavailable", true)]
    [InlineData("socket closed unexpectedly", false)]
    public void RetentionBoundaryClassifier_ShouldDetectExpectedPatterns(string message, bool expected)
    {
        // The generic argument is known at compile time, so close the type with
        // typeof directly instead of reflecting via MakeGenericType.
        var closedType = typeof(SurrealDocumentStore<object>);
        // Private static classifier — reachable only via reflection; the name is
        // a string, so renaming the method in the store will break this test.
        var classifier = closedType.GetMethod(
            "IsLikelyChangefeedRetentionBoundary",
            BindingFlags.NonPublic | BindingFlags.Static);
        classifier.ShouldNotBeNull();
        bool actual = (bool)classifier!.Invoke(null, [new InvalidOperationException(message)])!;
        actual.ShouldBe(expected);
    }

    [Fact]
    public async Task LocalWrite_ShouldEmitExactlyOneOplogEntry()
    {
        string dbPath = Path.Combine(Path.GetTempPath(), $"cbddc-cdc-matrix-{Guid.NewGuid():N}.rocksdb");
        try
        {
            await using var harness = await CdcTestHarness.OpenWithRetriesAsync(dbPath, "node-single-write", "consumer-single");
            await harness.Context.Users.InsertAsync(new User
            {
                Id = "single-write-user",
                Name = "Single Write",
                Age = 25,
                Address = new Address { City = "Bologna" }
            });
            await harness.Context.SaveChangesAsync();
            await harness.PollAsync();
            // Exactly one entry: no duplicate emission from poll + save.
            await WaitForConditionAsync(
                async () => (await harness.GetEntriesByKeyAsync("Users", "single-write-user")).Count == 1,
                "Timed out waiting for exactly one local oplog entry.");
            var entries = await harness.GetEntriesByKeyAsync("Users", "single-write-user");
            entries.Count.ShouldBe(1);
            entries[0].Operation.ShouldBe(OperationType.Put);
            entries[0].Timestamp.NodeId.ShouldBe("node-single-write");
        }
        finally
        {
            await DeleteDirectoryWithRetriesAsync(dbPath);
        }
    }

    [Fact]
    public async Task Checkpoint_ShouldNotAdvance_WhenAtomicWriteFails()
    {
        // Force every RawQuery against the embedded client to fail, so the
        // local-change path cannot complete its atomic write.
        var surrealClient = Substitute.For<ISurrealDbClient>();
        surrealClient.RawQuery(
                Arg.Any<string>(),
                Arg.Any<IReadOnlyDictionary<string, object?>>(),
                Arg.Any<CancellationToken>())
            .Returns(Task.FromException<SurrealDbResponse>(new InvalidOperationException("forced atomic write failure")));
        var embeddedClient = Substitute.For<ICBDDCSurrealEmbeddedClient>();
        embeddedClient.Client.Returns(surrealClient);
        var schemaInitializer = Substitute.For<ICBDDCSurrealSchemaInitializer>();
        schemaInitializer.EnsureInitializedAsync(Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
        var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
        configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = "node-failure",
            TcpPort = 0,
            AuthToken = "test-token"
        });
        var checkpointPersistence = Substitute.For<ISurrealCdcCheckpointPersistence>();
        var vectorClock = Substitute.For<IVectorClockService>();
        vectorClock.GetLastHash(Arg.Any<string>()).Returns("seed-hash");
        var store = new FailureInjectedDocumentStore(
            embeddedClient,
            schemaInitializer,
            configProvider,
            vectorClock,
            checkpointPersistence);
        var payload = JsonSerializer.SerializeToElement(new { Id = "failure-user", Value = "x" });

        await Should.ThrowAsync<InvalidOperationException>(
            () => store.TriggerLocalChangeAsync("Users", "failure-user", OperationType.Put, payload));

        // The failure must propagate WITHOUT touching checkpoint persistence at
        // all — a partial advance would lose the failed change on restart.
        checkpointPersistence.ReceivedCalls().ShouldBeEmpty();
    }

    // Polls until the predicate holds or throws TimeoutException on expiry.
    // (Duplicated from SurrealCdcDurabilityTests; the classes live in separate
    // files, so the helper cannot be shared without a new shared type.)
    private static async Task WaitForConditionAsync(
        Func<Task<bool>> predicate,
        string failureMessage,
        int timeoutMs = 6000,
        int pollMs = 50)
    {
        DateTimeOffset deadline = DateTimeOffset.UtcNow.AddMilliseconds(timeoutMs);
        while (DateTimeOffset.UtcNow < deadline)
        {
            if (await predicate()) return;
            await Task.Delay(pollMs);
        }
        throw new TimeoutException(failureMessage);
    }

    // Retries the recursive delete while RocksDB releases its directory lock;
    // the final attempt lets the exception escape.
    private static async Task DeleteDirectoryWithRetriesAsync(string path)
    {
        for (var attempt = 0; attempt < 5; attempt++)
            try
            {
                if (Directory.Exists(path)) Directory.Delete(path, true);
                return;
            }
            catch when (attempt < 4)
            {
                await Task.Delay(50);
            }
    }
}
/// <summary>
/// Minimal <see cref="SurrealDocumentStore{T}"/> subclass for failure-injection
/// tests: polling is disabled, every entity hook is a no-op, and
/// <see cref="TriggerLocalChangeAsync"/> exposes the protected local-change
/// pipeline so a test can drive it directly.
/// </summary>
internal sealed class FailureInjectedDocumentStore : SurrealDocumentStore<object>
{
    public FailureInjectedDocumentStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        IPeerNodeConfigurationProvider configProvider,
        IVectorClockService vectorClockService,
        ISurrealCdcCheckpointPersistence checkpointPersistence)
        : base(
            new object(), // placeholder context; the no-op hooks never use it
            surrealEmbeddedClient,
            schemaInitializer,
            configProvider,
            vectorClockService,
            new LastWriteWinsConflictResolver(),
            checkpointPersistence,
            new SurrealCdcPollingOptions { Enabled = false },
            NullLogger<FailureInjectedDocumentStore>.Instance)
    {
    }

    /// <summary>
    /// Forwards to the protected local-change handler with no pending cursor
    /// checkpoint, letting tests invoke the write path on demand.
    /// </summary>
    public Task TriggerLocalChangeAsync(
        string collection,
        string key,
        OperationType operationType,
        JsonElement? content,
        CancellationToken cancellationToken = default) =>
        OnLocalChangeDetectedAsync(
            collection,
            key,
            operationType,
            content,
            pendingCursorCheckpoint: null,
            cancellationToken);

    // Entity hooks below are deliberately inert: these tests exercise only the
    // store's write/checkpoint plumbing, never real entity materialization.

    protected override Task ApplyContentToEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken) => Task.CompletedTask;

    protected override Task ApplyContentToEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
        CancellationToken cancellationToken) => Task.CompletedTask;

    protected override Task<JsonElement?> GetEntityAsJsonAsync(
        string collection,
        string key,
        CancellationToken cancellationToken) => Task.FromResult<JsonElement?>(null);

    protected override Task RemoveEntityAsync(string collection, string key, CancellationToken cancellationToken) =>
        Task.CompletedTask;

    protected override Task RemoveEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key)> documents,
        CancellationToken cancellationToken) => Task.CompletedTask;

    protected override Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
        string collection,
        CancellationToken cancellationToken) =>
        Task.FromResult<IEnumerable<(string Key, JsonElement Content)>>([]);
}

View File

@@ -0,0 +1,434 @@
using System.Text.Json;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Contract test for <c>SurrealOplogStore</c>: append, hash-chain range query,
/// timestamp-ordered reads, idempotent merge, drop, and the difference between
/// the cached and the persisted last-entry hash.
/// </summary>
public class SurrealOplogStoreContractTests
{
    [Fact]
    public async Task OplogStore_AppendQueryMergeDrop_AndLastHash_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateOplogStore();
        // node-a entries form a hash chain: entry1 <- entry2 <- entry3.
        var entry1 = CreateOplogEntry("Users", "u1", "node-a", 100, 0, "");
        var entry2 = CreateOplogEntry("Users", "u2", "node-a", 110, 0, entry1.Hash);
        var entry3 = CreateOplogEntry("Users", "u3", "node-a", 120, 1, entry2.Hash);
        // Independent chain from a second node, interleaved by physical time.
        var otherNode = CreateOplogEntry("Users", "u4", "node-b", 115, 0, "");
        await store.AppendOplogEntryAsync(entry1);
        await store.AppendOplogEntryAsync(entry2);
        await store.AppendOplogEntryAsync(entry3);
        await store.AppendOplogEntryAsync(otherNode);
        // Chain range is exclusive of the "from" hash, inclusive of the "to" hash.
        var chainRange = (await store.GetChainRangeAsync(entry1.Hash, entry3.Hash)).ToList();
        chainRange.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, entry3.Hash });
        // GetOplogAfter orders across nodes by HLC (physical, then logical).
        var after = (await store.GetOplogAfterAsync(new HlcTimestamp(100, 0, "node-a"))).ToList();
        after.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, otherNode.Hash, entry3.Hash });
        // Merging an already-present entry must not duplicate it.
        var mergedEntry = CreateOplogEntry("Users", "u5", "node-a", 130, 0, entry3.Hash);
        await store.MergeAsync(new[] { entry2, mergedEntry });
        var exported = (await store.ExportAsync()).ToList();
        exported.Count.ShouldBe(5);
        exported.Count(x => x.Hash == entry2.Hash).ShouldBe(1);
        // The live store serves a cached last hash that predates the merge ...
        var cachedLastNodeAHash = await store.GetLastEntryHashAsync("node-a");
        cachedLastNodeAHash.ShouldBe(entry3.Hash);
        // ... while a freshly constructed store reads the persisted value,
        // which includes the merged entry.
        var rehydratedStore = harness.CreateOplogStore();
        var persistedLastNodeAHash = await rehydratedStore.GetLastEntryHashAsync("node-a");
        persistedLastNodeAHash.ShouldBe(mergedEntry.Hash);
        await store.DropAsync();
        (await store.ExportAsync()).ShouldBeEmpty();
    }

    // Builds a Put entry with a trivial JSON payload and the given HLC parts.
    private static OplogEntry CreateOplogEntry(
        string collection,
        string key,
        string nodeId,
        long wall,
        int logic,
        string previousHash)
    {
        return new OplogEntry(
            collection,
            key,
            OperationType.Put,
            JsonSerializer.SerializeToElement(new { key }),
            new HlcTimestamp(wall, logic, nodeId),
            previousHash);
    }
}
/// <summary>
/// Contract test for <c>SurrealDocumentMetadataStore</c>: upsert, tombstoning,
/// timestamp-filtered reads, newest-timestamp-wins merge, and export.
/// </summary>
public class SurrealDocumentMetadataStoreContractTests
{
    [Fact]
    public async Task DocumentMetadataStore_UpsertMarkDeletedGetAfterAndMergeNewer_Works()
    {
        await using var harness = new SurrealTestHarness();
        var metadataStore = harness.CreateDocumentMetadataStore();

        // Seed two documents, then tombstone the first at a later timestamp.
        await metadataStore.UpsertMetadataAsync(new DocumentMetadata("Users", "doc-1", new HlcTimestamp(100, 0, "node-a")));
        await metadataStore.UpsertMetadataAsync(new DocumentMetadata("Users", "doc-2", new HlcTimestamp(105, 0, "node-a")));
        await metadataStore.MarkDeletedAsync("Users", "doc-1", new HlcTimestamp(110, 1, "node-a"));

        var tombstoned = await metadataStore.GetMetadataAsync("Users", "doc-1");
        tombstoned.ShouldNotBeNull();
        tombstoned.IsDeleted.ShouldBeTrue();
        tombstoned.UpdatedAt.ShouldBe(new HlcTimestamp(110, 1, "node-a"));

        // Both rows are newer than the cutoff; doc-2 (105) precedes doc-1 (110).
        var cutoff = new HlcTimestamp(100, 0, "node-a");
        var newerRows = (await metadataStore.GetMetadataAfterAsync(cutoff, new[] { "Users" })).ToList();
        newerRows.Select(row => row.Key).ToList().ShouldBe(new[] { "doc-2", "doc-1" });

        // Merge: the stale doc-1 (109) loses, the newer doc-1 (120) wins and
        // clears the tombstone; doc-3 is brand new.
        var incoming = new[]
        {
            new DocumentMetadata("Users", "doc-1", new HlcTimestamp(109, 0, "node-a"), true),
            new DocumentMetadata("Users", "doc-1", new HlcTimestamp(120, 0, "node-a"), false),
            new DocumentMetadata("Users", "doc-3", new HlcTimestamp(130, 0, "node-b"), false)
        };
        await metadataStore.MergeAsync(incoming);

        var revived = await metadataStore.GetMetadataAsync("Users", "doc-1");
        revived.ShouldNotBeNull();
        revived.UpdatedAt.ShouldBe(new HlcTimestamp(120, 0, "node-a"));
        revived.IsDeleted.ShouldBeFalse();

        // doc-1, doc-2, doc-3 — merge deduplicated the two doc-1 candidates.
        var allRows = (await metadataStore.ExportAsync()).ToList();
        allRows.Count.ShouldBe(3);
    }
}
/// <summary>
/// Contract test for <c>SurrealPeerConfigurationStore</c>: save, read, remove,
/// and merge semantics (an existing local row wins over a merged candidate).
/// </summary>
public class SurrealPeerConfigurationStoreContractTests
{
    [Fact]
    public async Task PeerConfigurationStore_SaveGetRemoveAndMerge_Works()
    {
        await using var harness = new SurrealTestHarness();
        var peerStore = harness.CreatePeerConfigurationStore();

        await peerStore.SaveRemotePeerAsync(BuildPeer("peer-1", "10.0.0.1:5000", true));
        var initialPeer1 = await peerStore.GetRemotePeerAsync("peer-1", CancellationToken.None);
        initialPeer1.ShouldNotBeNull();
        initialPeer1.Address.ShouldBe("10.0.0.1:5000");

        // Locally overwrite peer-1, then merge a conflicting peer-1 plus a new peer-2.
        await peerStore.SaveRemotePeerAsync(BuildPeer("peer-1", "10.0.0.1:6000", false));
        var mergeCandidates = new[]
        {
            BuildPeer("peer-1", "10.0.0.1:7000", true),
            BuildPeer("peer-2", "10.0.0.2:5000", true)
        };
        await peerStore.MergeAsync(mergeCandidates);

        // The locally saved peer-1 survives the merge; peer-2 is adopted as-is.
        var mergedPeer1 = await peerStore.GetRemotePeerAsync("peer-1", CancellationToken.None);
        var mergedPeer2 = await peerStore.GetRemotePeerAsync("peer-2", CancellationToken.None);
        mergedPeer1.ShouldNotBeNull();
        mergedPeer1.Address.ShouldBe("10.0.0.1:6000");
        mergedPeer1.IsEnabled.ShouldBeFalse();
        mergedPeer2.ShouldNotBeNull();
        mergedPeer2.Address.ShouldBe("10.0.0.2:5000");

        await peerStore.RemoveRemotePeerAsync("peer-1");
        var afterRemoval = await peerStore.GetRemotePeerAsync("peer-1", CancellationToken.None);
        afterRemoval.ShouldBeNull();

        // Only peer-2 remains in the listing.
        var remainingPeers = (await peerStore.GetRemotePeersAsync()).ToList();
        remainingPeers.Count.ShouldBe(1);
        remainingPeers[0].NodeId.ShouldBe("peer-2");
    }

    // Static-remote peer record interested in the Users collection.
    private static RemotePeerConfiguration BuildPeer(string nodeId, string address, bool enabled)
    {
        return new RemotePeerConfiguration
        {
            NodeId = nodeId,
            Address = address,
            Type = PeerType.StaticRemote,
            IsEnabled = enabled,
            InterestingCollections = new List<string> { "Users" }
        };
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealPeerOplogConfirmationStore"/>: idempotent
/// peer registration, monotonic confirmation updates, soft-deactivation on peer
/// removal, and merge semantics driven by <c>LastConfirmedUtc</c> recency.
/// </summary>
public class SurrealPeerOplogConfirmationStoreContractTests
{
    [Fact]
    public async Task PeerOplogConfirmationStore_EnsureUpdateAndDeactivate_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerOplogConfirmationStore();

        // Registering the same peer twice must be idempotent.
        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);

        // Newer timestamp sticks, an older one is ignored, and a re-send with the
        // same timestamp may still refresh the hash.
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(90, 0, "source-1"), "hash-old");
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-2");

        var confirmations = (await store.GetConfirmationsForPeerAsync("peer-a")).ToList();
        confirmations.Count.ShouldBe(1);
        confirmations[0].ConfirmedWall.ShouldBe(100);
        confirmations[0].ConfirmedLogic.ShouldBe(1);
        confirmations[0].ConfirmedHash.ShouldBe("hash-2");

        // Exactly one synthetic registration row exists for the peer.
        var exportedForPeer = (await store.ExportAsync()).Where(x => x.PeerNodeId == "peer-a").ToList();
        exportedForPeer.Count(x => x.SourceNodeId == "__peer_registration__").ShouldBe(1);

        // Removing tracking deactivates rather than deletes.
        await store.RemovePeerTrackingAsync("peer-a");
        var stillTracked = (await store.GetActiveTrackedPeersAsync()).ToList();
        stillTracked.ShouldNotContain("peer-a");
        var deactivated = (await store.ExportAsync()).Where(x => x.PeerNodeId == "peer-a").ToList();
        deactivated.All(x => x.IsActive == false).ShouldBeTrue();
    }

    [Fact]
    public async Task PeerOplogConfirmationStore_Merge_UsesNewerAndActiveStateSemantics()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerOplogConfirmationStore();

        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");

        // Capture the stored row so merge candidates can be dated relative to it.
        var baseline = (await store.ExportAsync())
            .Single(x => x.PeerNodeId == "peer-a" && x.SourceNodeId == "source-1");

        // Merge: a stale candidate (older LastConfirmedUtc) must lose, a newer one
        // must win including its inactive flag, and an unseen source is inserted.
        await store.MergeAsync(new[]
        {
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-1",
                ConfirmedWall = 90,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-old",
                LastConfirmedUtc = baseline.LastConfirmedUtc.AddMinutes(-5),
                IsActive = true
            },
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-1",
                ConfirmedWall = 130,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-2",
                LastConfirmedUtc = baseline.LastConfirmedUtc.AddMinutes(5),
                IsActive = false
            },
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-2",
                ConfirmedWall = 50,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-3",
                LastConfirmedUtc = baseline.LastConfirmedUtc.AddMinutes(5),
                IsActive = true
            }
        });

        var merged = (await store.ExportAsync())
            .Where(x => x.PeerNodeId == "peer-a" && x.SourceNodeId != "__peer_registration__")
            .OrderBy(x => x.SourceNodeId)
            .ToList();
        merged.Count.ShouldBe(2);

        var mergedSource1 = merged.Single(x => x.SourceNodeId == "source-1");
        mergedSource1.ConfirmedWall.ShouldBe(130);
        mergedSource1.ConfirmedLogic.ShouldBe(0);
        mergedSource1.ConfirmedHash.ShouldBe("hash-2");
        mergedSource1.IsActive.ShouldBeFalse();

        var mergedSource2 = merged.Single(x => x.SourceNodeId == "source-2");
        mergedSource2.ConfirmedWall.ShouldBe(50);
        mergedSource2.ConfirmedHash.ShouldBe("hash-3");
        mergedSource2.IsActive.ShouldBeTrue();
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealSnapshotMetadataStore"/>: insert/update
/// round-trips, hash lookup by node id, and merge preferring the newer HLC
/// (physical time, then logical counter) per node.
/// </summary>
public class SurrealSnapshotMetadataStoreContractTests
{
    [Fact]
    public async Task SnapshotMetadataStore_InsertUpdateMergeAndHashLookup_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateSnapshotMetadataStore();

        // Insert, then look the hash up by node id.
        await store.InsertSnapshotMetadataAsync(new SnapshotMetadata
        {
            NodeId = "node-a",
            TimestampPhysicalTime = 100,
            TimestampLogicalCounter = 0,
            Hash = "hash-1"
        });
        (await store.GetSnapshotHashAsync("node-a")).ShouldBe("hash-1");

        // Explicit update replaces the stored row.
        await store.UpdateSnapshotMetadataAsync(new SnapshotMetadata
        {
            NodeId = "node-a",
            TimestampPhysicalTime = 120,
            TimestampLogicalCounter = 1,
            Hash = "hash-2"
        }, CancellationToken.None);
        (await store.GetSnapshotHashAsync("node-a")).ShouldBe("hash-2");

        // Merge: (119,9) is older than the stored (120,1) and must lose; (130,0)
        // is newer and must win; node-b is unseen and must be inserted.
        await store.MergeAsync(new[]
        {
            new SnapshotMetadata
            {
                NodeId = "node-a",
                TimestampPhysicalTime = 119,
                TimestampLogicalCounter = 9,
                Hash = "hash-old"
            },
            new SnapshotMetadata
            {
                NodeId = "node-a",
                TimestampPhysicalTime = 130,
                TimestampLogicalCounter = 0,
                Hash = "hash-3"
            },
            new SnapshotMetadata
            {
                NodeId = "node-b",
                TimestampPhysicalTime = 140,
                TimestampLogicalCounter = 0,
                Hash = "hash-b"
            }
        });

        var mergedNodeA = await store.GetSnapshotMetadataAsync("node-a");
        mergedNodeA.ShouldNotBeNull();
        mergedNodeA.Hash.ShouldBe("hash-3");
        mergedNodeA.TimestampPhysicalTime.ShouldBe(130);

        var everything = (await store.GetAllSnapshotMetadataAsync()).OrderBy(x => x.NodeId).ToList();
        everything.Count.ShouldBe(2);
        everything[0].NodeId.ShouldBe("node-a");
        everything[1].NodeId.ShouldBe("node-b");
    }
}
/// <summary>
/// Spins up an isolated embedded Surreal (RocksDB) instance under a unique temp
/// directory and exposes factory methods for each store under test. Disposal
/// tears down the client and best-effort deletes the temp directory.
/// </summary>
internal sealed class SurrealTestHarness : IAsyncDisposable
{
    private readonly CBDDCSurrealEmbeddedClient _client;
    private readonly string _rootPath;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;

    public SurrealTestHarness()
    {
        // Unique suffix per harness keeps parallel test runs from sharing a
        // database path or a Surreal namespace/database pair.
        string suffix = Guid.NewGuid().ToString("N");
        _rootPath = Path.Combine(Path.GetTempPath(), "cbddc-surreal-tests", suffix);
        string databasePath = Path.Combine(_rootPath, "rocksdb");
        var options = new CBDDCSurrealEmbeddedOptions
        {
            Endpoint = "rocksdb://local",
            DatabasePath = databasePath,
            Namespace = $"cbddc_tests_{suffix}",
            Database = $"main_{suffix}"
        };
        _client = new CBDDCSurrealEmbeddedClient(options, NullLogger<CBDDCSurrealEmbeddedClient>.Instance);
        _schemaInitializer = new TestSurrealSchemaInitializer(_client);
    }

    public SurrealDocumentMetadataStore CreateDocumentMetadataStore()
    {
        return new SurrealDocumentMetadataStore(
            _client,
            _schemaInitializer,
            NullLogger<SurrealDocumentMetadataStore>.Instance);
    }

    public SurrealOplogStore CreateOplogStore()
    {
        // The oplog store only needs a document store for apply paths the
        // contract tests don't exercise, so a substitute suffices here.
        return new SurrealOplogStore(
            _client,
            _schemaInitializer,
            Substitute.For<IDocumentStore>(),
            new LastWriteWinsConflictResolver(),
            new VectorClockService(),
            null,
            NullLogger<SurrealOplogStore>.Instance);
    }

    public SurrealPeerConfigurationStore CreatePeerConfigurationStore()
    {
        return new SurrealPeerConfigurationStore(
            _client,
            _schemaInitializer,
            NullLogger<SurrealPeerConfigurationStore>.Instance);
    }

    public SurrealPeerOplogConfirmationStore CreatePeerOplogConfirmationStore()
    {
        return new SurrealPeerOplogConfirmationStore(
            _client,
            _schemaInitializer,
            NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
    }

    public SurrealSnapshotMetadataStore CreateSnapshotMetadataStore()
    {
        return new SurrealSnapshotMetadataStore(
            _client,
            _schemaInitializer,
            NullLogger<SurrealSnapshotMetadataStore>.Instance);
    }

    public async ValueTask DisposeAsync()
    {
        await _client.DisposeAsync();
        await DeleteDirectoryWithRetriesAsync(_rootPath);
    }

    /// <summary>
    /// Deletes the harness temp directory, retrying briefly because RocksDB file
    /// handles can be released slightly after client disposal (notably on
    /// Windows). Cleanup is best-effort: previously the final failed attempt
    /// rethrew out of <see cref="DisposeAsync"/>, which could fail a test that
    /// otherwise passed; now a stubborn directory is simply left behind.
    /// </summary>
    private static async Task DeleteDirectoryWithRetriesAsync(string path)
    {
        for (var attempt = 0; attempt < 5; attempt++)
        {
            try
            {
                if (Directory.Exists(path)) Directory.Delete(path, true);
                return;
            }
            catch
            {
                // Give up silently after the last attempt; a leaked temp
                // directory is preferable to failing the test run on cleanup.
                if (attempt == 4) return;
                await Task.Delay(50);
            }
        }
    }
}
/// <summary>
/// Minimal schema initializer for tests: runs client initialization exactly
/// once and makes every caller await its completion.
/// </summary>
internal sealed class TestSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer
{
    private readonly ICBDDCSurrealEmbeddedClient _client;
    private readonly object _gate = new object();
    // Cached initialization task; created at most once under _gate.
    private Task? _initialization;

    public TestSurrealSchemaInitializer(ICBDDCSurrealEmbeddedClient client)
    {
        _client = client;
    }

    /// <summary>
    /// Starts <c>InitializeAsync</c> on first call and returns the same task to
    /// every caller. The previous flag-based gate (Interlocked.Exchange) let a
    /// concurrent second caller return immediately while the first
    /// initialization was still in flight; awaiting the shared task closes that
    /// race. The token of the first caller governs the shared initialization.
    /// </summary>
    public Task EnsureInitializedAsync(CancellationToken cancellationToken = default)
    {
        lock (_gate)
        {
            _initialization ??= _client.InitializeAsync(cancellationToken);
            return _initialization;
        }
    }
}