From 9c2a77dc3cff6a0e9dbe3dcebb01ea3f4dc1b3d0 Mon Sep 17 00:00:00 2001 From: Joseph Doherty Date: Sun, 22 Feb 2026 05:21:53 -0500 Subject: [PATCH] Replace BLite with Surreal embedded persistence --- README.md | 124 +- docs/persistence-providers.md | 85 +- .../ZB.MOM.WW.CBDDC.Sample.Console/Program.cs | 125 +- .../ZB.MOM.WW.CBDDC.Sample.Console/README.md | 12 +- .../SampleDbContext.cs | 321 +++- .../SampleDocumentStore.cs | 178 ++- .../ZB.MOM.WW.CBDDC.Sample.Console.csproj | 12 +- src/ZB.MOM.WW.CBDDC.Core/README.md | 44 +- src/ZB.MOM.WW.CBDDC.Hosting/README.md | 15 +- .../BLite/BLiteDocumentMetadataStore.cs | 238 --- .../BLite/BLiteDocumentStore.README.md | 214 --- .../BLite/BLiteDocumentStore.cs | 783 ---------- .../BLite/BLiteOplogStore.cs | 253 ---- .../BLite/BLitePeerConfigurationStore.cs | 131 -- .../BLite/BLitePeerOplogConfirmationStore.cs | 300 ---- .../BLite/BLiteSnapshotMetadataStore.cs | 167 --- .../BLite/CBDDCBLiteExtensions.cs | 102 -- .../BLite/CBDDCDocumentDbContext.cs | 117 -- .../BLite/Entities/DocumentMetadataEntity.cs | 47 - .../BLite/Entities/EntityMappers.cs | 240 --- .../BLite/Entities/OplogEntity.cs | 61 - .../Entities/PeerOplogConfirmationEntity.cs | 50 - .../BLite/Entities/RemotePeerEntity.cs | 42 - .../BLite/Entities/SnapshotMetadataEntity.cs | 36 - src/ZB.MOM.WW.CBDDC.Persistence/README.md | 6 +- .../Surreal/CBDDCSurrealEmbeddedClient.cs | 142 ++ .../Surreal/CBDDCSurrealEmbeddedExtensions.cs | 75 + .../Surreal/CBDDCSurrealEmbeddedOptions.cs | 88 ++ .../Surreal/CBDDCSurrealReadinessProbe.cs | 45 + .../Surreal/CBDDCSurrealSchemaInitializer.cs | 131 ++ .../Surreal/CBDDCSurrealSchemaNames.cs | 29 + .../Surreal/ICBDDCSurrealEmbeddedClient.cs | 32 + .../Surreal/ICBDDCSurrealReadinessProbe.cs | 12 + .../Surreal/ICBDDCSurrealSchemaInitializer.cs | 12 + .../ISurrealCdcCheckpointPersistence.cs | 76 + .../Surreal/ISurrealCdcWorkerLifecycle.cs | 27 + .../SurrealCdcCheckpointPersistence.cs | 191 +++ .../Surreal/SurrealCdcPollingOptions.cs | 32 + 
.../Surreal/SurrealDocumentMetadataStore.cs | 164 ++ .../Surreal/SurrealDocumentStore.cs | 1331 +++++++++++++++++ .../Surreal/SurrealDocumentStoreWatch.cs | 144 ++ .../Surreal/SurrealOplogStore.cs | 272 ++++ .../Surreal/SurrealPeerConfigurationStore.cs | 111 ++ .../SurrealPeerOplogConfirmationStore.cs | 311 ++++ .../Surreal/SurrealShowChangesCborDecoder.cs | 296 ++++ .../Surreal/SurrealSnapshotMetadataStore.cs | 142 ++ .../Surreal/SurrealStoreRecords.cs | 294 ++++ .../ZB.MOM.WW.CBDDC.Persistence.csproj | 17 +- .../ClusterCrudSyncE2ETests.cs | 558 ++++++- .../BLiteStoreExportImportTests.cs | 89 +- .../PeerOplogConfirmationStoreTests.cs | 19 +- .../SampleDbContextTests.cs | 14 +- .../SnapshotStoreTests.cs | 200 +-- .../SurrealCdcDurabilityTests.cs | 580 +++++++ .../SurrealCdcMatrixCompletionTests.cs | 219 +++ .../SurrealStoreContractTests.cs | 434 ++++++ 56 files changed, 6613 insertions(+), 3177 deletions(-) delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs delete mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs delete mode 100644 
src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs delete mode 100755 src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedClient.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedExtensions.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedOptions.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealReadinessProbe.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaInitializer.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaNames.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/ICBDDCSurrealEmbeddedClient.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/ICBDDCSurrealReadinessProbe.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/ICBDDCSurrealSchemaInitializer.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/ISurrealCdcCheckpointPersistence.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/ISurrealCdcWorkerLifecycle.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealCdcCheckpointPersistence.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealCdcPollingOptions.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealDocumentMetadataStore.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealDocumentStore.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealDocumentStoreWatch.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealOplogStore.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerConfigurationStore.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerOplogConfirmationStore.cs create mode 100644 
src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealShowChangesCborDecoder.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealSnapshotMetadataStore.cs create mode 100644 src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealStoreRecords.cs create mode 100644 tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SurrealCdcDurabilityTests.cs create mode 100644 tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SurrealCdcMatrixCompletionTests.cs create mode 100644 tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SurrealStoreContractTests.cs diff --git a/README.md b/README.md index 0c246f1..7fdfc6d 100755 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ Your application continues to read and write to its database as usual. CBDDC wor +---------------------------------------------------+ | uses your DbContext directly +---------------------------------------------------+ -| Your Database (BLite) | +| Your Database (Surreal embedded RocksDB) | | +---------------------------------------------+ | | | Users | Orders | Products | ... | | +---------------------------------------------+ | @@ -155,7 +155,7 @@ Nodes advertise which collections they sync. The orchestrator prioritizes peers ### [Cloud] Cloud Infrastructure - ASP.NET Core hosting (single-cluster mode) -- BLite embedded persistence +- Surreal embedded RocksDB persistence - shared-token authentication --- @@ -167,11 +167,11 @@ Nodes advertise which collections they sync. 
The orchestrator prioritizes peers | Package | Purpose | |---------|---------| | `ZB.MOM.WW.CBDDC.Core` | Interfaces, models, conflict resolution (.NET Standard 2.0+) | -| `ZB.MOM.WW.CBDDC.Persistence` | BLite persistence provider, OplogStore, VectorClockService (.NET 10+) | +| `ZB.MOM.WW.CBDDC.Persistence` | Surreal embedded RocksDB provider, OplogStore, VectorClockService (.NET 10+) | | `CBDDC.Network` | TCP sync, UDP discovery, Protobuf protocol (.NET Standard 2.0+) | ```bash -# BLite (embedded document DB) +# Surreal embedded (RocksDB) dotnet add package ZB.MOM.WW.CBDDC.Core dotnet add package ZB.MOM.WW.CBDDC.Persistence dotnet add package CBDDC.Network @@ -285,12 +285,25 @@ High-priority troubleshooting topics: ### 1. Define Your Database Context ```csharp -public class MyDbContext : CBDDCDocumentDbContext +public sealed class MyDbContext { - public DocumentCollection Customers { get; private set; } - public DocumentCollection Orders { get; private set; } + public MyDbContext( + ICBDDCSurrealEmbeddedClient embeddedClient, + ICBDDCSurrealSchemaInitializer schemaInitializer) + { + EmbeddedClient = embeddedClient; + SchemaInitializer = schemaInitializer; + Customers = new SampleSurrealCollection("customers", c => c.Id, embeddedClient, schemaInitializer); + Orders = new SampleSurrealCollection("orders", o => o.Id, embeddedClient, schemaInitializer); + } - public MyDbContext(string dbPath) : base(dbPath) { } + public ICBDDCSurrealEmbeddedClient EmbeddedClient { get; } + public ICBDDCSurrealSchemaInitializer SchemaInitializer { get; } + public SampleSurrealCollection Customers { get; } + public SampleSurrealCollection Orders { get; } + + public Task SaveChangesAsync(CancellationToken ct = default) + => SchemaInitializer.EnsureInitializedAsync(ct); } ``` @@ -299,14 +312,20 @@ public class MyDbContext : CBDDCDocumentDbContext This is where you tell CBDDC **which collections to sync** and **how to map** between your entities and the sync engine: ```csharp -public class 
MyDocumentStore : BLiteDocumentStore +public sealed class MyDocumentStore : SurrealDocumentStore { public MyDocumentStore( MyDbContext context, IPeerNodeConfigurationProvider configProvider, IVectorClockService vectorClockService, ILogger? logger = null) - : base(context, configProvider, vectorClockService, logger: logger) + : base( + context, + context.EmbeddedClient, + context.SchemaInitializer, + configProvider, + vectorClockService, + logger: logger) { // Register collections for CDC - only these will be synced WatchCollection("Customers", context.Customers, c => c.Id); @@ -322,34 +341,31 @@ public class MyDocumentStore : BLiteDocumentStore case "Customers": var customer = content.Deserialize()!; customer.Id = key; - var existing = _context.Customers - .Find(c => c.Id == key).FirstOrDefault(); - if (existing != null) _context.Customers.Update(customer); - else _context.Customers.Insert(customer); + var existing = await _context.Customers.FindByIdAsync(key, ct); + if (existing != null) await _context.Customers.UpdateAsync(customer, ct); + else await _context.Customers.InsertAsync(customer, ct); break; case "Orders": var order = content.Deserialize()!; order.Id = key; - var existingOrder = _context.Orders - .Find(o => o.Id == key).FirstOrDefault(); - if (existingOrder != null) _context.Orders.Update(order); - else _context.Orders.Insert(order); + var existingOrder = await _context.Orders.FindByIdAsync(key, ct); + if (existingOrder != null) await _context.Orders.UpdateAsync(order, ct); + else await _context.Orders.InsertAsync(order, ct); break; } await _context.SaveChangesAsync(ct); } - protected override Task GetEntityAsJsonAsync( + protected override async Task GetEntityAsJsonAsync( string collection, string key, CancellationToken ct) { object? 
entity = collection switch { - "Customers" => _context.Customers.Find(c => c.Id == key).FirstOrDefault(), - "Orders" => _context.Orders.Find(o => o.Id == key).FirstOrDefault(), + "Customers" => await _context.Customers.FindByIdAsync(key, ct), + "Orders" => await _context.Orders.FindByIdAsync(key, ct), _ => null }; - return Task.FromResult(entity != null - ? (JsonElement?)JsonSerializer.SerializeToElement(entity) : null); + return entity != null ? JsonSerializer.SerializeToElement(entity) : null; } protected override async Task RemoveEntityAsync( @@ -357,8 +373,8 @@ public class MyDocumentStore : BLiteDocumentStore { switch (collection) { - case "Customers": _context.Customers.Delete(key); break; - case "Orders": _context.Orders.Delete(key); break; + case "Customers": await _context.Customers.DeleteAsync(key, ct); break; + case "Orders": await _context.Orders.DeleteAsync(key, ct); break; } await _context.SaveChangesAsync(ct); } @@ -395,9 +411,15 @@ builder.Services.AddSingleton( // Register CBDDC services builder.Services + .AddSingleton() .AddCBDDCCore() - .AddCBDDCBLite( - sp => new MyDbContext("mydata.blite")) + .AddCBDDCSurrealEmbedded(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = "data/mydata.rocksdb", + Namespace = "myapp", + Database = "main" + }) .AddCBDDCNetwork(); await builder.Build().RunAsync(); @@ -442,31 +464,40 @@ If you have an **existing database** and want to add P2P sync: ### Step 1 - Wrap your context -Create a `DbContext` extending `CBDDCDocumentDbContext`. This can wrap your existing collections/tables. +Create a context that exposes your collections and holds the Surreal embedded services. 
```csharp -public class MyExistingDbContext : CBDDCDocumentDbContext +public sealed class MyExistingDbContext { - // Your existing collections - public DocumentCollection Products { get; private set; } - public DocumentCollection Inventory { get; private set; } - - public MyExistingDbContext(string dbPath) : base(dbPath) { } + public MyExistingDbContext( + ICBDDCSurrealEmbeddedClient embeddedClient, + ICBDDCSurrealSchemaInitializer schemaInitializer) + { + EmbeddedClient = embeddedClient; + SchemaInitializer = schemaInitializer; + Products = new SampleSurrealCollection("products", p => p.Id, embeddedClient, schemaInitializer); + Inventory = new SampleSurrealCollection("inventory", i => i.Id, embeddedClient, schemaInitializer); + } + + public ICBDDCSurrealEmbeddedClient EmbeddedClient { get; } + public ICBDDCSurrealSchemaInitializer SchemaInitializer { get; } + public SampleSurrealCollection Products { get; } + public SampleSurrealCollection Inventory { get; } } ``` ### Step 2 - Create a DocumentStore -Extend `BLiteDocumentStore`. This is the **bridge** between your data model and the sync engine. +Extend `SurrealDocumentStore`. This is the **bridge** between your data model and the sync engine. ```csharp -public class MyDocumentStore : BLiteDocumentStore +public class MyDocumentStore : SurrealDocumentStore { public MyDocumentStore(MyExistingDbContext ctx, IPeerNodeConfigurationProvider cfg, IVectorClockService vc, ILogger? log = null) - : base(ctx, cfg, vc, logger: log) + : base(ctx, ctx.EmbeddedClient, ctx.SchemaInitializer, cfg, vc, logger: log) { // Continue to next step... } @@ -481,7 +512,7 @@ Call `WatchCollection()` in the constructor for each collection you want to repl ```csharp public MyDocumentStore(...) 
- : base(ctx, cfg, vc, logger: log) + : base(ctx, ctx.EmbeddedClient, ctx.SchemaInitializer, cfg, vc, logger: log) { // Only these 2 collections will be synced across the mesh WatchCollection("Products", ctx.Products, p => p.Id); @@ -585,7 +616,7 @@ protected override async Task ApplyContentToEntitiesBatchAsync( Your Code: db.Users.InsertAsync(user) | v -BLite: SaveChangesAsync() +Surreal context write committed | | CDC fires (WatchCollection observer) DocumentStore: CreateOplogEntryAsync() @@ -610,15 +641,22 @@ DocumentStore: CreateOplogEntryAsync() ## Cloud Deployment -CBDDC supports ASP.NET Core hosting with BLite persistence for cloud deployments. +CBDDC supports ASP.NET Core hosting with Surreal embedded RocksDB persistence for cloud deployments. -### Example: ASP.NET Core with BLite +### Example: ASP.NET Core with Surreal embedded ```csharp var builder = WebApplication.CreateBuilder(args); +builder.Services.AddSingleton(); builder.Services.AddCBDDCCore() - .AddCBDDCBLite(sp => new MyDbContext("cbddc.blite")) + .AddCBDDCSurrealEmbedded(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = "data/cbddc.rocksdb", + Namespace = "cbddc", + Database = "main" + }) .AddCBDDCNetwork(); builder.Services.AddCBDDCHostingSingleCluster(options => @@ -737,7 +775,7 @@ Console.WriteLine($"Peers: {status.ConnectedPeers}"); ### API - **[API Reference](docs/api-reference.md)** - Complete API documentation -- **[Persistence Providers](docs/persistence-providers.md)** - BLite, custom +- **[Persistence Providers](docs/persistence-providers.md)** - Surreal embedded RocksDB, custom --- diff --git a/docs/persistence-providers.md b/docs/persistence-providers.md index fa81a3f..ac4dc9d 100755 --- a/docs/persistence-providers.md +++ b/docs/persistence-providers.md @@ -9,6 +9,7 @@ CBDDC supports multiple persistence backends to suit different deployment scenar | **SQLite (Direct)** | Embedded apps, single-node | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐⭐ | ✅ Yes | | **EF Core 
(Generic)** | Multi-DB support, migrations | ⭐⭐⭐ | ⭐⭐⭐ | ✅ Yes | | **PostgreSQL** | Production, high load, JSON queries | ⭐⭐⭐⭐⭐ | ⭐⭐⭐ | ✅ Yes | +| **Surreal Embedded (RocksDB)** | Embedded multi-peer sync with local CDC | ⭐⭐⭐⭐ | ⭐⭐⭐⭐ | ✅ Yes | ## SQLite (Direct) @@ -179,19 +180,68 @@ Host=prod-db.example.com;Database=CBDDC;Username=admin;Password=secret;SSL Mode= Host=localhost;Database=CBDDC;Username=admin;Password=secret;Pooling=true;Minimum Pool Size=5;Maximum Pool Size=100 ``` +## Surreal Embedded (RocksDB) + +**Package:** `ZB.MOM.WW.CBDDC.Persistence` + +### Characteristics + +- ✅ **Embedded + durable**: Uses local RocksDB storage via Surreal embedded endpoint +- ✅ **CDC-native workflow**: Collection watches emit oplog entries and metadata updates +- ✅ **Durable checkpointing**: CDC cursor state is persisted per consumer id +- ✅ **Restart recovery**: Oplog + checkpoint data survive process restart and resume catch-up +- ✅ **Loopback suppression**: Remote apply path suppresses local CDC re-emission +- ✅ **Idempotent merge window**: Duplicate remote entries are merged by deterministic hash + +### When to Use + +- Embedded deployments that still need multi-peer replication +- Edge nodes where local durability is required without an external DB server +- CDC-heavy sync topologies that need restart-safe cursor tracking +- Environments that benefit from document-style storage and local operation logs + +### Configuration + +```csharp +services.AddCBDDCCore() + .AddCBDDCSurrealEmbedded(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = "/var/lib/cbddc/node-a.rocksdb", + Namespace = "cbddc", + Database = "node_a", + Cdc = new CBDDCSurrealCdcOptions + { + Enabled = true, + ConsumerId = "sync-main", + CheckpointTable = "cbddc_cdc_checkpoint", + EnableLiveSelectAccelerator = true, + LiveSelectReconnectDelay = TimeSpan.FromSeconds(2) + } + }); +``` + +### CDC Durability Notes + +1. 
**Checkpoint semantics**: each consumer id has an independent durable cursor (`timestamp + hash`). +2. **Catch-up on restart**: read checkpoint, then request oplog entries strictly after the stored timestamp. +3. **Duplicate-window safety**: replayed windows are deduplicated by oplog hash merge semantics. +4. **Delete durability**: deletes persist as oplog delete operations plus tombstone metadata. +5. **Remote apply behavior**: remote sync applies documents without generating local loopback CDC entries. + ## Feature Comparison -| Feature | SQLite (Direct) | EF Core | PostgreSQL | -|---------|----------------|---------|------------| -| **Storage Format** | File-based | Varies | Server-based | -| **JSON Storage** | TEXT | NVARCHAR/TEXT | JSONB | -| **JSON Indexing** | Standard | Standard | GIN/GIST | -| **JSON Queries** | `json_extract()` | In-Memory | Native (future) | -| **Concurrent Writes** | Good (WAL) | Varies | Excellent | -| **Horizontal Scaling** | No | Limited | Yes (replication) | -| **Migrations** | Manual SQL | EF Migrations | EF Migrations | -| **Connection Pooling** | N/A | Built-in | Built-in | -| **Cloud Support** | N/A | Varies | Excellent | +| Feature | SQLite (Direct) | EF Core | PostgreSQL | Surreal Embedded | +|---------|----------------|---------|------------|------------------| +| **Storage Format** | File-based | Varies | Server-based | File-based (RocksDB) | +| **JSON Storage** | TEXT | NVARCHAR/TEXT | JSONB | Native document records | +| **JSON Indexing** | Standard | Standard | GIN/GIST | Table/index schema controls | +| **JSON Queries** | `json_extract()` | In-Memory | Native (future) | Native document querying | +| **Concurrent Writes** | Good (WAL) | Varies | Excellent | Good (embedded engine limits apply) | +| **Horizontal Scaling** | No | Limited | Yes (replication) | Peer replication via CBDDC sync | +| **Migrations** | Manual SQL | EF Migrations | EF Migrations | Schema initializer + scripts | +| **Connection Pooling** | N/A | 
Built-in | Built-in | N/A (embedded) | +| **Cloud Support** | N/A | Varies | Excellent | Excellent for edge/embedded nodes | ## Performance Benchmarks @@ -251,6 +301,10 @@ _*Benchmarks vary based on hardware, network, and configuration_ - **Use**: PostgreSQL - **Why**: Best performance, scalability, reliability +### Production (Edge / Embedded Mesh) +- **Use**: Surreal Embedded (RocksDB) +- **Why**: Durable local CDC, restart-safe checkpoint resume, no external DB dependency + ### Enterprise - **Use**: EF Core with SQL Server or PostgreSQL - **Why**: Enterprise support, compliance, familiarity @@ -272,6 +326,15 @@ _*Benchmarks vary based on hardware, network, and configuration_ - Check for connection leaks - Consider connection pooler (PgBouncer) +### Surreal Embedded: "CDC replay after restart" +- Ensure `Cdc.Enabled=true` and a stable `Cdc.ConsumerId` is configured +- Verify checkpoint table contains cursor state for the consumer +- Resume from checkpoint timestamp before requesting new oplog window + +### Surreal Embedded: "Unexpected loopback oplog on remote sync" +- Apply remote entries through CBDDC sync/orchestrator paths (not local collection writes) +- Keep remote sync guards enabled in document store implementations + ## Future Enhancements - **JSONB Query Translation**: Native PostgreSQL JSON queries from QueryNode diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs index 356742b..396b37c 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/Program.cs @@ -3,10 +3,13 @@ using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; using Serilog; +using System.Text.Json; using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Sync; using ZB.MOM.WW.CBDDC.Network; -using ZB.MOM.WW.CBDDC.Persistence.BLite; +using 
ZB.MOM.WW.CBDDC.Persistence.Snapshot; +using ZB.MOM.WW.CBDDC.Persistence.Surreal; namespace ZB.MOM.WW.CBDDC.Sample.Console; @@ -16,6 +19,8 @@ internal class Program { private static async Task Main(string[] args) { + if (await TryRunMigrationAsync(args)) return; + var builder = Host.CreateApplicationBuilder(args); // Configuration @@ -55,11 +60,20 @@ internal class Program // Database path string dataPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "data"); Directory.CreateDirectory(dataPath); - string databasePath = Path.Combine(dataPath, $"{nodeId}.blite"); + string databasePath = Path.Combine(dataPath, $"{nodeId}.rocksdb"); + string surrealDatabase = nodeId.Replace("-", "_", StringComparison.Ordinal); - // Register CBDDC Services using Fluent Extensions with BLite, SampleDbContext, and SampleDocumentStore + // Register CBDDC services with embedded Surreal (RocksDB). + builder.Services.AddSingleton(); + builder.Services.AddSingleton(); builder.Services.AddCBDDCCore() - .AddCBDDCBLite(sp => new SampleDbContext(databasePath)) + .AddCBDDCSurrealEmbedded(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = databasePath, + Namespace = "cbddc_sample", + Database = surrealDatabase + }) .AddCBDDCNetwork(); // useHostedService = true by default builder.Services.AddHostedService(); // Runs the Input Loop @@ -73,6 +87,107 @@ internal class Program await host.RunAsync(); } + private static async Task TryRunMigrationAsync(string[] args) + { + int migrateIndex = Array.IndexOf(args, "--migrate-snapshot"); + if (migrateIndex < 0) return false; + + string snapshotPath = GetRequiredArgumentValue(args, migrateIndex, "--migrate-snapshot"); + if (!File.Exists(snapshotPath)) + throw new FileNotFoundException("Snapshot file not found.", snapshotPath); + + string targetPath = GetOptionalArgumentValue(args, "--target-db") + ?? 
Path.Combine(Directory.GetCurrentDirectory(), "data", "migration.rocksdb"); + Directory.CreateDirectory(Path.GetDirectoryName(Path.GetFullPath(targetPath))!); + + string nodeId = "migration-node"; + var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration + { + NodeId = nodeId, + TcpPort = 0, + AuthToken = "migration" + }); + + string databaseName = $"migration_{DateTimeOffset.UtcNow.ToUnixTimeSeconds()}"; + var services = new ServiceCollection(); + services.AddLogging(); + services.AddSingleton(configProvider); + services.AddSingleton(); + services.AddSingleton(); + services.AddCBDDCCore() + .AddCBDDCSurrealEmbedded(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = targetPath, + Namespace = "cbddc_migration", + Database = databaseName + }); + + using var provider = services.BuildServiceProvider(); + var snapshotService = provider.GetRequiredService(); + await using (var snapshotStream = File.OpenRead(snapshotPath)) + { + await snapshotService.ReplaceDatabaseAsync(snapshotStream); + } + + await VerifyMigrationAsync(provider, snapshotPath); + System.Console.WriteLine($"Migration completed successfully to: {targetPath}"); + return true; + } + + private static async Task VerifyMigrationAsync(IServiceProvider provider, string snapshotPath) + { + await using var snapshotStream = File.OpenRead(snapshotPath); + var source = await JsonSerializer.DeserializeAsync(snapshotStream) + ?? 
throw new InvalidOperationException("Unable to deserialize source snapshot."); + + var documentStore = provider.GetRequiredService(); + var oplogStore = provider.GetRequiredService(); + var peerStore = provider.GetRequiredService(); + var confirmationStore = provider.GetService(); + + int destinationDocuments = (await documentStore.ExportAsync()).Count(); + int destinationOplog = (await oplogStore.ExportAsync()).Count(); + int destinationPeers = (await peerStore.ExportAsync()).Count(); + int destinationConfirmations = confirmationStore == null + ? 0 + : (await confirmationStore.ExportAsync()).Count(); + + if (destinationDocuments != source.Documents.Count || + destinationOplog != source.Oplog.Count || + destinationPeers != source.RemotePeers.Count || + destinationConfirmations != source.PeerConfirmations.Count) + throw new InvalidOperationException("Snapshot parity verification failed after migration."); + + if (source.Oplog.Count > 0) + { + string firstHash = source.Oplog[0].Hash; + string lastHash = source.Oplog[^1].Hash; + + var firstEntry = await oplogStore.GetEntryByHashAsync(firstHash); + var lastEntry = await oplogStore.GetEntryByHashAsync(lastHash); + if (firstEntry == null || lastEntry == null) + throw new InvalidOperationException("Oplog hash spot-check failed after migration."); + } + } + + private static string GetRequiredArgumentValue(string[] args, int optionIndex, string optionName) + { + if (optionIndex < 0 || optionIndex + 1 >= args.Length || args[optionIndex + 1].StartsWith("--")) + throw new ArgumentException($"Missing value for {optionName}."); + + return args[optionIndex + 1]; + } + + private static string? 
GetOptionalArgumentValue(string[] args, string optionName) + { + int index = Array.IndexOf(args, optionName); + if (index < 0) return null; + if (index + 1 >= args.Length || args[index + 1].StartsWith("--")) + throw new ArgumentException($"Missing value for {optionName}."); + return args[index + 1]; + } + private class StaticPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider { /// @@ -112,4 +227,4 @@ internal class Program ConfigurationChanged?.Invoke(this, newConfig); } } -} \ No newline at end of file +} diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md b/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md index 4c38012..8b625d6 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/README.md @@ -56,7 +56,15 @@ Terminal 3: dotnet run -- --node-id node3 --tcp-port 5003 --udp-port 6003 ``` -Changes made on any node will automatically sync to all peers! +Changes made on any node will automatically sync to all peers! + +### Import Snapshot Into Surreal (Migration Utility) + +```bash +dotnet run -- --migrate-snapshot /path/to/snapshot.json --target-db /path/to/data.rocksdb +``` + +This imports a CBDDC snapshot into embedded Surreal RocksDB and validates parity (counts plus oplog hash spot checks). 
## Available Commands @@ -149,7 +157,7 @@ var page = await users.Find(u => true, skip: 10, take: 5); ## Architecture -- **Storage**: SQLite with HLC timestamps +- **Storage**: Surreal embedded RocksDB with HLC timestamps - **Sync**: TCP for data transfer, UDP for discovery - **Conflict Resolution**: Last-Write-Wins based on Hybrid Logical Clocks - **Serialization**: System.Text.Json diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs index 7943a78..498663b 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDbContext.cs @@ -1,50 +1,299 @@ -using BLite.Core.Collections; -using BLite.Core.Metadata; -using BLite.Core.Storage; -using ZB.MOM.WW.CBDDC.Persistence.BLite; +using System.Text.Json.Serialization; +using System.Security.Cryptography; +using System.Text; +using SurrealDb.Net; +using SurrealDb.Net.Models; +using ZB.MOM.WW.CBDDC.Persistence.Surreal; namespace ZB.MOM.WW.CBDDC.Sample.Console; -public class SampleDbContext : CBDDCDocumentDbContext +public class SampleDbContext : IDisposable { - /// - /// Initializes a new instance of the SampleDbContext class using the specified database file path. - /// - /// The file system path to the database file. Cannot be null or empty. - public SampleDbContext(string databasePath) : base(databasePath) + private const string UsersTable = "sample_users"; + private const string TodoListsTable = "sample_todo_lists"; + + private readonly bool _ownsClient; + + public SampleDbContext( + ICBDDCSurrealEmbeddedClient surrealEmbeddedClient, + ICBDDCSurrealSchemaInitializer schemaInitializer) { + SurrealEmbeddedClient = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient)); + SchemaInitializer = schemaInitializer ?? 
throw new ArgumentNullException(nameof(schemaInitializer)); + + Users = new SampleSurrealCollection(UsersTable, u => u.Id, SurrealEmbeddedClient, SchemaInitializer); + TodoLists = new SampleSurrealCollection(TodoListsTable, t => t.Id, SurrealEmbeddedClient, SchemaInitializer); + OplogEntries = new SampleSurrealReadOnlyCollection( + CBDDCSurrealSchemaNames.OplogEntriesTable, + SurrealEmbeddedClient, + SchemaInitializer); } - /// - /// Initializes a new instance of the SampleDbContext class using the specified database file path and page file - /// configuration. - /// - /// The file system path to the database file. Cannot be null or empty. - /// The configuration settings for the page file. Cannot be null. - public SampleDbContext(string databasePath, PageFileConfig config) : base(databasePath, config) + public SampleDbContext(string databasePath) { + string normalizedPath = NormalizeDatabasePath(databasePath); + string suffix = ComputeDeterministicSuffix(normalizedPath); + var options = new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = normalizedPath, + Namespace = $"cbddc_sample_{suffix}", + Database = $"main_{suffix}" + }; + + SurrealEmbeddedClient = new CBDDCSurrealEmbeddedClient(options); + + _ownsClient = true; + SchemaInitializer = new SampleSurrealSchemaInitializer(SurrealEmbeddedClient); + + Users = new SampleSurrealCollection(UsersTable, u => u.Id, SurrealEmbeddedClient, SchemaInitializer); + TodoLists = new SampleSurrealCollection(TodoListsTable, t => t.Id, SurrealEmbeddedClient, SchemaInitializer); + OplogEntries = new SampleSurrealReadOnlyCollection( + CBDDCSurrealSchemaNames.OplogEntriesTable, + SurrealEmbeddedClient, + SchemaInitializer); } - /// - /// Gets the users collection. - /// - public DocumentCollection Users { get; private set; } = null!; + public ICBDDCSurrealEmbeddedClient SurrealEmbeddedClient { get; } - /// - /// Gets the todo lists collection. 
- /// - public DocumentCollection TodoLists { get; private set; } = null!; + public ICBDDCSurrealSchemaInitializer SchemaInitializer { get; private set; } - /// - protected override void OnModelCreating(ModelBuilder modelBuilder) + public SampleSurrealCollection Users { get; private set; } + + public SampleSurrealCollection TodoLists { get; private set; } + + public SampleSurrealReadOnlyCollection OplogEntries { get; private set; } + + public async Task SaveChangesAsync(CancellationToken cancellationToken = default) { - base.OnModelCreating(modelBuilder); - modelBuilder.Entity() - .ToCollection("Users") - .HasKey(u => u.Id); - - modelBuilder.Entity() - .ToCollection("TodoLists") - .HasKey(t => t.Id); + await SchemaInitializer.EnsureInitializedAsync(cancellationToken); } -} \ No newline at end of file + + public void Dispose() + { + Users.Dispose(); + TodoLists.Dispose(); + + if (_ownsClient) SurrealEmbeddedClient.Dispose(); + } + + private static string NormalizeDatabasePath(string databasePath) + { + if (string.IsNullOrWhiteSpace(databasePath)) + throw new ArgumentException("Database path is required.", nameof(databasePath)); + + return Path.GetFullPath(databasePath); + } + + private static string ComputeDeterministicSuffix(string value) + { + byte[] bytes = SHA256.HashData(Encoding.UTF8.GetBytes(value)); + return Convert.ToHexString(bytes).ToLowerInvariant()[..12]; + } +} + +public sealed class SampleSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer +{ + private const string SampleSchemaSql = """ + DEFINE TABLE OVERWRITE sample_users SCHEMALESS CHANGEFEED 7d; + DEFINE TABLE OVERWRITE sample_todo_lists SCHEMALESS CHANGEFEED 7d; + """; + private readonly ICBDDCSurrealEmbeddedClient _client; + private int _initialized; + + public SampleSurrealSchemaInitializer(ICBDDCSurrealEmbeddedClient client) + { + _client = client ?? 
throw new ArgumentNullException(nameof(client)); + } + + public async Task EnsureInitializedAsync(CancellationToken cancellationToken = default) + { + if (Volatile.Read(ref _initialized) == 1) return; + await _client.InitializeAsync(cancellationToken); + await _client.RawQueryAsync(SampleSchemaSql, cancellationToken: cancellationToken); + Volatile.Write(ref _initialized, 1); + } +} + +public sealed class SampleSurrealCollection : ISurrealWatchableCollection, IDisposable + where TEntity : class +{ + private readonly SurrealCollectionChangeFeed _changeFeed = new(); + private readonly ISurrealDbClient _client; + private readonly Func _keySelector; + private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer; + private readonly string _tableName; + + public SampleSurrealCollection( + string tableName, + Func keySelector, + ICBDDCSurrealEmbeddedClient surrealEmbeddedClient, + ICBDDCSurrealSchemaInitializer schemaInitializer) + { + if (string.IsNullOrWhiteSpace(tableName)) + throw new ArgumentException("Table name is required.", nameof(tableName)); + + _tableName = tableName; + _keySelector = keySelector ?? throw new ArgumentNullException(nameof(keySelector)); + _client = (surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient))).Client; + _schemaInitializer = schemaInitializer ?? 
throw new ArgumentNullException(nameof(schemaInitializer)); + } + + public IDisposable Subscribe(IObserver> observer) + { + return _changeFeed.Subscribe(observer); + } + + public async Task InsertAsync(TEntity entity, CancellationToken cancellationToken = default) + { + await UpsertAsync(entity, cancellationToken); + } + + public async Task UpdateAsync(TEntity entity, CancellationToken cancellationToken = default) + { + await UpsertAsync(entity, cancellationToken); + } + + public async Task DeleteAsync(string id, CancellationToken cancellationToken = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Document id is required.", nameof(id)); + + await EnsureReadyAsync(cancellationToken); + await _client.Delete(RecordId.From(_tableName, id), cancellationToken); + _changeFeed.PublishDelete(id); + } + + public TEntity? FindById(string id) + { + return FindByIdAsync(id).GetAwaiter().GetResult(); + } + + public async Task FindByIdAsync(string id, CancellationToken cancellationToken = default) + { + if (string.IsNullOrWhiteSpace(id)) + throw new ArgumentException("Document id is required.", nameof(id)); + + await EnsureReadyAsync(cancellationToken); + var record = await _client.Select>(RecordId.From(_tableName, id), cancellationToken); + return record?.Entity; + } + + public IEnumerable FindAll() + { + return FindAllAsync().GetAwaiter().GetResult(); + } + + public async Task> FindAllAsync(CancellationToken cancellationToken = default) + { + await EnsureReadyAsync(cancellationToken); + var rows = await _client.Select>(_tableName, cancellationToken); + return rows? + .Where(r => r.Entity != null) + .Select(r => r.Entity!) + .ToList() + ?? 
[]; + } + + public IEnumerable Find(Func predicate) + { + ArgumentNullException.ThrowIfNull(predicate); + return FindAll().Where(predicate); + } + + public void Dispose() + { + _changeFeed.Dispose(); + } + + private async Task UpsertAsync(TEntity entity, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(entity); + + string key = _keySelector(entity); + if (string.IsNullOrWhiteSpace(key)) + throw new InvalidOperationException("Entity key cannot be null or empty."); + + await EnsureReadyAsync(cancellationToken); + await _client.Upsert, SampleEntityRecord>( + RecordId.From(_tableName, key), + new SampleEntityRecord { Entity = entity }, + cancellationToken); + _changeFeed.PublishPut(entity, key); + } + + private async Task EnsureReadyAsync(CancellationToken cancellationToken) + { + await _schemaInitializer.EnsureInitializedAsync(cancellationToken); + } +} + +public sealed class SampleSurrealReadOnlyCollection + where TEntity : class +{ + private readonly ISurrealDbClient _client; + private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer; + private readonly string _tableName; + + public SampleSurrealReadOnlyCollection( + string tableName, + ICBDDCSurrealEmbeddedClient surrealEmbeddedClient, + ICBDDCSurrealSchemaInitializer schemaInitializer) + { + if (string.IsNullOrWhiteSpace(tableName)) + throw new ArgumentException("Table name is required.", nameof(tableName)); + + _tableName = tableName; + _client = (surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient))).Client; + _schemaInitializer = schemaInitializer ?? 
throw new ArgumentNullException(nameof(schemaInitializer)); + } + + public IEnumerable FindAll() + { + return FindAllAsync().GetAwaiter().GetResult(); + } + + public async Task> FindAllAsync(CancellationToken cancellationToken = default) + { + await _schemaInitializer.EnsureInitializedAsync(cancellationToken); + var rows = await _client.Select(_tableName, cancellationToken); + return rows?.ToList() ?? []; + } + + public IEnumerable Find(Func predicate) + { + ArgumentNullException.ThrowIfNull(predicate); + return FindAll().Where(predicate); + } +} + +public sealed class SampleEntityRecord : Record + where TEntity : class +{ + [JsonPropertyName("entity")] + public TEntity? Entity { get; set; } +} + +public sealed class SampleOplogEntry : Record +{ + [JsonPropertyName("collection")] + public string Collection { get; set; } = ""; + + [JsonPropertyName("key")] + public string Key { get; set; } = ""; + + [JsonPropertyName("operation")] + public int Operation { get; set; } + + [JsonPropertyName("timestampNodeId")] + public string TimestampNodeId { get; set; } = ""; + + [JsonPropertyName("timestampPhysicalTime")] + public long TimestampPhysicalTime { get; set; } + + [JsonPropertyName("timestampLogicalCounter")] + public int TimestampLogicalCounter { get; set; } + + [JsonPropertyName("hash")] + public string Hash { get; set; } = ""; +} diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs index f781176..527d2b3 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/SampleDocumentStore.cs @@ -3,90 +3,125 @@ using Microsoft.Extensions.Logging; using ZB.MOM.WW.CBDDC.Core.Network; using ZB.MOM.WW.CBDDC.Core.Storage; using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence.BLite; +using ZB.MOM.WW.CBDDC.Persistence.Surreal; namespace ZB.MOM.WW.CBDDC.Sample.Console; /// -/// Document store implementation for CBDDC Sample 
using BLite persistence. -/// Extends BLiteDocumentStore to automatically handle Oplog creation via CDC. +/// Surreal-backed document store for the sample app. /// -public class SampleDocumentStore : BLiteDocumentStore +public class SampleDocumentStore : SurrealDocumentStore { private const string UsersCollection = "Users"; private const string TodoListsCollection = "TodoLists"; - /// - /// Initializes a new instance of the class. - /// - /// The sample database context. - /// The peer node configuration provider. - /// The vector clock service. - /// The optional logger instance. public SampleDocumentStore( SampleDbContext context, IPeerNodeConfigurationProvider configProvider, IVectorClockService vectorClockService, ILogger? logger = null) - : base(context, configProvider, vectorClockService, new LastWriteWinsConflictResolver(), logger) + : base( + context, + context.SurrealEmbeddedClient, + context.SchemaInitializer, + configProvider, + vectorClockService, + new LastWriteWinsConflictResolver(), + null, + null, + logger) { - // Register CDC watchers for local change detection - // InterestedCollection is automatically populated WatchCollection(UsersCollection, context.Users, u => u.Id); WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id); } - #region Helper Methods - - private static JsonElement? SerializeEntity(T? 
entity) where T : class - { - if (entity == null) return null; - return JsonSerializer.SerializeToElement(entity); - } - - #endregion - - #region Abstract Method Implementations - - /// protected override async Task ApplyContentToEntityAsync( - string collection, string key, JsonElement content, CancellationToken cancellationToken) + string collection, + string key, + JsonElement content, + CancellationToken cancellationToken) { - UpsertEntity(collection, key, content); - await _context.SaveChangesAsync(cancellationToken); + await UpsertEntityAsync(collection, key, content, cancellationToken); } - /// protected override async Task ApplyContentToEntitiesBatchAsync( IEnumerable<(string Collection, string Key, JsonElement Content)> documents, CancellationToken cancellationToken) { - foreach ((string collection, string key, var content) in documents) UpsertEntity(collection, key, content); - await _context.SaveChangesAsync(cancellationToken); + foreach ((string collection, string key, var content) in documents) + await UpsertEntityAsync(collection, key, content, cancellationToken); } - private void UpsertEntity(string collection, string key, JsonElement content) + protected override async Task GetEntityAsJsonAsync( + string collection, + string key, + CancellationToken cancellationToken) + { + return collection switch + { + UsersCollection => SerializeEntity(await _context.Users.FindByIdAsync(key, cancellationToken)), + TodoListsCollection => SerializeEntity(await _context.TodoLists.FindByIdAsync(key, cancellationToken)), + _ => null + }; + } + + protected override async Task RemoveEntityAsync( + string collection, + string key, + CancellationToken cancellationToken) + { + await DeleteEntityAsync(collection, key, cancellationToken); + } + + protected override async Task RemoveEntitiesBatchAsync( + IEnumerable<(string Collection, string Key)> documents, + CancellationToken cancellationToken) + { + foreach ((string collection, string key) in documents) + await 
DeleteEntityAsync(collection, key, cancellationToken); + } + + protected override async Task> GetAllEntitiesAsJsonAsync( + string collection, + CancellationToken cancellationToken) + { + return collection switch + { + UsersCollection => (await _context.Users.FindAllAsync(cancellationToken)) + .Select(u => (u.Id, SerializeEntity(u)!.Value)) + .ToList(), + TodoListsCollection => (await _context.TodoLists.FindAllAsync(cancellationToken)) + .Select(t => (t.Id, SerializeEntity(t)!.Value)) + .ToList(), + _ => [] + }; + } + + private async Task UpsertEntityAsync( + string collection, + string key, + JsonElement content, + CancellationToken cancellationToken) { switch (collection) { case UsersCollection: - var user = content.Deserialize()!; + var user = content.Deserialize() ?? throw new InvalidOperationException("Failed to deserialize user."); user.Id = key; - var existingUser = _context.Users.Find(u => u.Id == key).FirstOrDefault(); - if (existingUser != null) - _context.Users.Update(user); + if (await _context.Users.FindByIdAsync(key, cancellationToken) == null) + await _context.Users.InsertAsync(user, cancellationToken); else - _context.Users.Insert(user); + await _context.Users.UpdateAsync(user, cancellationToken); break; case TodoListsCollection: - var todoList = content.Deserialize()!; - todoList.Id = key; - var existingTodoList = _context.TodoLists.Find(t => t.Id == key).FirstOrDefault(); - if (existingTodoList != null) - _context.TodoLists.Update(todoList); + var todo = content.Deserialize() ?? 
+ throw new InvalidOperationException("Failed to deserialize todo list."); + todo.Id = key; + if (await _context.TodoLists.FindByIdAsync(key, cancellationToken) == null) + await _context.TodoLists.InsertAsync(todo, cancellationToken); else - _context.TodoLists.Insert(todoList); + await _context.TodoLists.UpdateAsync(todo, cancellationToken); break; default: @@ -94,43 +129,15 @@ public class SampleDocumentStore : BLiteDocumentStore } } - /// - protected override Task GetEntityAsJsonAsync( - string collection, string key, CancellationToken cancellationToken) - { - return Task.FromResult(collection switch - { - UsersCollection => SerializeEntity(_context.Users.Find(u => u.Id == key).FirstOrDefault()), - TodoListsCollection => SerializeEntity(_context.TodoLists.Find(t => t.Id == key).FirstOrDefault()), - _ => null - }); - } - - /// - protected override async Task RemoveEntityAsync( - string collection, string key, CancellationToken cancellationToken) - { - DeleteEntity(collection, key); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - protected override async Task RemoveEntitiesBatchAsync( - IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken) - { - foreach ((string collection, string key) in documents) DeleteEntity(collection, key); - await _context.SaveChangesAsync(cancellationToken); - } - - private void DeleteEntity(string collection, string key) + private async Task DeleteEntityAsync(string collection, string key, CancellationToken cancellationToken) { switch (collection) { case UsersCollection: - _context.Users.Delete(key); + await _context.Users.DeleteAsync(key, cancellationToken); break; case TodoListsCollection: - _context.TodoLists.Delete(key); + await _context.TodoLists.DeleteAsync(key, cancellationToken); break; default: _logger.LogWarning("Attempted to remove entity from unsupported collection: {Collection}", collection); @@ -138,21 +145,8 @@ public class SampleDocumentStore : BLiteDocumentStore } 
} - /// - protected override async Task> GetAllEntitiesAsJsonAsync( - string collection, CancellationToken cancellationToken) + private static JsonElement? SerializeEntity(T? entity) where T : class { - return await Task.Run(() => collection switch - { - UsersCollection => _context.Users.FindAll() - .Select(u => (u.Id, SerializeEntity(u)!.Value)), - - TodoListsCollection => _context.TodoLists.FindAll() - .Select(t => (t.Id, SerializeEntity(t)!.Value)), - - _ => Enumerable.Empty<(string, JsonElement)>() - }, cancellationToken); + return entity == null ? null : JsonSerializer.SerializeToElement(entity); } - - #endregion -} \ No newline at end of file +} diff --git a/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj b/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj index 35d85c8..cef80f3 100755 --- a/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj +++ b/samples/ZB.MOM.WW.CBDDC.Sample.Console/ZB.MOM.WW.CBDDC.Sample.Console.csproj @@ -2,20 +2,16 @@ - - all - runtime; build; native; contentfiles; analyzers; buildtransitive - - - - - + + + + diff --git a/src/ZB.MOM.WW.CBDDC.Core/README.md b/src/ZB.MOM.WW.CBDDC.Core/README.md index 2c82dd4..c46fdd9 100755 --- a/src/ZB.MOM.WW.CBDDC.Core/README.md +++ b/src/ZB.MOM.WW.CBDDC.Core/README.md @@ -4,7 +4,7 @@ Core abstractions and logic for **CBDDC**, a peer-to-peer data synchronization m ## What Is CBDDC? -CBDDC is **not** a database � it's a sync layer that plugs into your existing data store (BLite) and enables automatic +CBDDC is **not** a database � it's a sync layer that plugs into your existing data store (for example SurrealDB) and enables automatic P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC handles synchronization in the background. @@ -28,19 +28,18 @@ dotnet add package ZB.MOM.WW.CBDDC.Network ## Quick Start ```csharp -// 1. 
Define your DbContext -public class MyDbContext : CBDDCDocumentDbContext -{ - public DocumentCollection Users { get; private set; } - public MyDbContext(string path) : base(path) { } -} - -// 2. Create your DocumentStore (the sync bridge) -public class MyDocumentStore : BLiteDocumentStore +// 1. Define your context exposing watchable collections +public class MyDbContext +{ + public MySurrealCollection Users { get; } +} + +// 2. Create your DocumentStore (the sync bridge) +public class MyDocumentStore : SurrealDocumentStore { public MyDocumentStore(MyDbContext ctx, IPeerNodeConfigurationProvider cfg, IVectorClockService vc, ILogger? log = null) - : base(ctx, cfg, vc, logger: log) + : base(ctx, ctx.SurrealEmbeddedClient, ctx.SchemaInitializer, cfg, vc, logger: log) { WatchCollection("Users", ctx.Users, u => u.Id); } @@ -50,19 +49,24 @@ public class MyDocumentStore : BLiteDocumentStore { var user = content.Deserialize()!; user.Id = key; - var existing = _context.Users.Find(u => u.Id == key).FirstOrDefault(); - if (existing != null) _context.Users.Update(user); - else _context.Users.Insert(user); - await _context.SaveChangesAsync(ct); + if (await _context.Users.FindByIdAsync(key, ct) is null) + await _context.Users.InsertAsync(user, ct); + else + await _context.Users.UpdateAsync(user, ct); } // ... implement other abstract methods } // 3. Register and use -builder.Services.AddCBDDCCore() - .AddCBDDCBLite( - sp => new MyDbContext("data.blite")) - .AddCBDDCNetwork(); +builder.Services.AddCBDDCCore() + .AddCBDDCSurrealEmbedded(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = "data/cbddc.rocksdb", + Namespace = "cbddc", + Database = "main" + }) + .AddCBDDCNetwork(); ``` ## Key Concepts @@ -93,7 +97,7 @@ Your App ? 
DbContext.SaveChangesAsync() ## Related Packages -- **ZB.MOM.WW.CBDDC.Persistence** � BLite embedded provider (.NET 10+) +- **ZB.MOM.WW.CBDDC.Persistence** � Surreal embedded RocksDB provider (.NET 10+) - **ZB.MOM.WW.CBDDC.Network** � P2P networking (UDP discovery, TCP sync, Gossip) ## Documentation diff --git a/src/ZB.MOM.WW.CBDDC.Hosting/README.md b/src/ZB.MOM.WW.CBDDC.Hosting/README.md index 330af2d..7817476 100755 --- a/src/ZB.MOM.WW.CBDDC.Hosting/README.md +++ b/src/ZB.MOM.WW.CBDDC.Hosting/README.md @@ -20,10 +20,15 @@ dotnet add package ZB.MOM.WW.CBDDC.Hosting ```csharp var builder = WebApplication.CreateBuilder(args); -// Add CBDDC core + BLite persistence (custom DbContext + DocumentStore required) +// Add CBDDC core + Surreal embedded persistence (custom DocumentStore required) builder.Services.AddCBDDCCore() - .AddCBDDCBLite( - sp => new MyDbContext("/var/lib/cbddc/data.blite")); + .AddCBDDCSurrealEmbedded(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = "/var/lib/cbddc/data.rocksdb", + Namespace = "cbddc", + Database = "main" + }); // Add ASP.NET integration (cluster mode) builder.Services.AddCBDDCHosting(options => @@ -80,10 +85,10 @@ CBDDC servers operate in respond-only mode: ## Production Checklist -- Store BLite database files on durable storage in production +- Store Surreal RocksDB data files on durable storage in production - Configure health checks for load balancer - Set up proper logging and monitoring -- Configure backup/restore for BLite database files +- Configure backup/restore for Surreal RocksDB data files - Configure proper firewall rules for TCP port - Set unique NodeId per instance - Test failover scenarios diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs deleted file mode 100755 index 901e179..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentMetadataStore.cs +++ /dev/null @@ 
-1,238 +0,0 @@ -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -/// -/// BLite implementation of document metadata storage for sync tracking. -/// -/// The type of CBDDCDocumentDbContext. -public class BLiteDocumentMetadataStore : DocumentMetadataStore where TDbContext : CBDDCDocumentDbContext -{ - private readonly TDbContext _context; - private readonly ILogger> _logger; - - /// - /// Initializes a new instance of the class. - /// - /// The BLite document database context. - /// The optional logger instance. - public BLiteDocumentMetadataStore(TDbContext context, - ILogger>? logger = null) - { - _context = context ?? throw new ArgumentNullException(nameof(context)); - _logger = logger ?? NullLogger>.Instance; - } - - /// - public override async Task GetMetadataAsync(string collection, string key, - CancellationToken cancellationToken = default) - { - var entity = _context.DocumentMetadatas - .Find(m => m.Collection == collection && m.Key == key) - .FirstOrDefault(); - - return entity != null ? 
ToDomain(entity) : null; - } - - /// - public override async Task> GetMetadataByCollectionAsync(string collection, - CancellationToken cancellationToken = default) - { - return _context.DocumentMetadatas - .Find(m => m.Collection == collection) - .Select(ToDomain) - .ToList(); - } - - /// - public override async Task UpsertMetadataAsync(DocumentMetadata metadata, - CancellationToken cancellationToken = default) - { - var existing = _context.DocumentMetadatas - .Find(m => m.Collection == metadata.Collection && m.Key == metadata.Key) - .FirstOrDefault(); - - if (existing == null) - { - await _context.DocumentMetadatas.InsertAsync(ToEntity(metadata)); - } - else - { - existing.HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime; - existing.HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter; - existing.HlcNodeId = metadata.UpdatedAt.NodeId; - existing.IsDeleted = metadata.IsDeleted; - await _context.DocumentMetadatas.UpdateAsync(existing); - } - - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task UpsertMetadataBatchAsync(IEnumerable metadatas, - CancellationToken cancellationToken = default) - { - foreach (var metadata in metadatas) - { - var existing = _context.DocumentMetadatas - .Find(m => m.Collection == metadata.Collection && m.Key == metadata.Key) - .FirstOrDefault(); - - if (existing == null) - { - await _context.DocumentMetadatas.InsertAsync(ToEntity(metadata)); - } - else - { - existing.HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime; - existing.HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter; - existing.HlcNodeId = metadata.UpdatedAt.NodeId; - existing.IsDeleted = metadata.IsDeleted; - await _context.DocumentMetadatas.UpdateAsync(existing); - } - } - - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp, - CancellationToken cancellationToken = default) - { - var existing = 
_context.DocumentMetadatas - .Find(m => m.Collection == collection && m.Key == key) - .FirstOrDefault(); - - if (existing == null) - { - await _context.DocumentMetadatas.InsertAsync(new DocumentMetadataEntity - { - Id = Guid.NewGuid().ToString(), - Collection = collection, - Key = key, - HlcPhysicalTime = timestamp.PhysicalTime, - HlcLogicalCounter = timestamp.LogicalCounter, - HlcNodeId = timestamp.NodeId, - IsDeleted = true - }); - } - else - { - existing.HlcPhysicalTime = timestamp.PhysicalTime; - existing.HlcLogicalCounter = timestamp.LogicalCounter; - existing.HlcNodeId = timestamp.NodeId; - existing.IsDeleted = true; - await _context.DocumentMetadatas.UpdateAsync(existing); - } - - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task> GetMetadataAfterAsync(HlcTimestamp since, - IEnumerable? collections = null, CancellationToken cancellationToken = default) - { - var query = _context.DocumentMetadatas.AsQueryable() - .Where(m => m.HlcPhysicalTime > since.PhysicalTime || - (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter)); - - if (collections != null) - { - var collectionSet = new HashSet(collections); - query = query.Where(m => collectionSet.Contains(m.Collection)); - } - - return query - .OrderBy(m => m.HlcPhysicalTime) - .ThenBy(m => m.HlcLogicalCounter) - .Select(ToDomain) - .ToList(); - } - - /// - public override async Task DropAsync(CancellationToken cancellationToken = default) - { - var allIds = _context.DocumentMetadatas.FindAll().Select(m => m.Id).ToList(); - await _context.DocumentMetadatas.DeleteBulkAsync(allIds); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task> ExportAsync(CancellationToken cancellationToken = default) - { - return _context.DocumentMetadatas.FindAll().Select(ToDomain).ToList(); - } - - /// - public override async Task ImportAsync(IEnumerable items, - CancellationToken cancellationToken = default) - { - 
foreach (var item in items) await _context.DocumentMetadatas.InsertAsync(ToEntity(item)); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task MergeAsync(IEnumerable items, - CancellationToken cancellationToken = default) - { - foreach (var item in items) - { - var existing = _context.DocumentMetadatas - .Find(m => m.Collection == item.Collection && m.Key == item.Key) - .FirstOrDefault(); - - if (existing == null) - { - await _context.DocumentMetadatas.InsertAsync(ToEntity(item)); - } - else - { - // Update only if incoming is newer - var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter, - existing.HlcNodeId); - if (item.UpdatedAt.CompareTo(existingTs) > 0) - { - existing.HlcPhysicalTime = item.UpdatedAt.PhysicalTime; - existing.HlcLogicalCounter = item.UpdatedAt.LogicalCounter; - existing.HlcNodeId = item.UpdatedAt.NodeId; - existing.IsDeleted = item.IsDeleted; - await _context.DocumentMetadatas.UpdateAsync(existing); - } - } - } - - await _context.SaveChangesAsync(cancellationToken); - } - - #region Mappers - - private static DocumentMetadata ToDomain(DocumentMetadataEntity entity) - { - return new DocumentMetadata( - entity.Collection, - entity.Key, - new HlcTimestamp(entity.HlcPhysicalTime, entity.HlcLogicalCounter, entity.HlcNodeId), - entity.IsDeleted - ); - } - - private static DocumentMetadataEntity ToEntity(DocumentMetadata metadata) - { - return new DocumentMetadataEntity - { - Id = Guid.NewGuid().ToString(), - Collection = metadata.Collection, - Key = metadata.Key, - HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime, - HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter, - HlcNodeId = metadata.UpdatedAt.NodeId, - IsDeleted = metadata.IsDeleted - }; - } - - #endregion -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md deleted file mode 100755 index 
b9fc335..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.README.md +++ /dev/null @@ -1,214 +0,0 @@ -# BLiteDocumentStore - Usage Guide - -## Overview - -`BLiteDocumentStore` is an abstract base class that simplifies creating document stores for CBDDC with BLite -persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods. - -## Key Features - -- ? **Automatic Oplog Creation** - Local changes automatically create Oplog entries -- ? **Remote Sync Handling** - AsyncLocal flag suppresses Oplog during sync (prevents duplicates) -- ? **No CDC Events Needed** - Direct Oplog management eliminates event loops -- ? **Simple API** - Only 4 abstract methods to implement - -## Architecture - -``` -User Code ? SampleDocumentStore (extends BLiteDocumentStore) - ? - BLiteDocumentStore - ??? _context.Users / TodoLists (read/write entities) - ??? _context.OplogEntries (write oplog directly) - -Remote Sync ? OplogStore.ApplyBatchAsync() - ? - BLiteDocumentStore.PutDocumentAsync(fromSync=true) - ??? _context.Users / TodoLists (write only) - ??? _context.OplogEntries (skip - already exists) -``` - -**Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries` -collection. - -## Implementation Example - -```csharp -public class SampleDocumentStore : BLiteDocumentStore -{ - public SampleDocumentStore( - SampleDbContext context, - IPeerNodeConfigurationProvider configProvider, - ILogger? 
logger = null) - : base(context, configProvider, new LastWriteWinsConflictResolver(), logger) - { - } - - public override IEnumerable InterestedCollection => new[] { "Users", "TodoLists" }; - - protected override async Task ApplyContentToEntityAsync( - string collection, string key, JsonElement content, CancellationToken ct) - { - switch (collection) - { - case "Users": - var user = content.Deserialize()!; - user.Id = key; - var existingUser = _context.Users.FindById(key); - if (existingUser != null) - await _context.Users.UpdateAsync(user); - else - await _context.Users.InsertAsync(user); - await _context.SaveChangesAsync(ct); - break; - - case "TodoLists": - var todoList = content.Deserialize()!; - todoList.Id = key; - var existingTodoList = _context.TodoLists.FindById(key); - if (existingTodoList != null) - await _context.TodoLists.UpdateAsync(todoList); - else - await _context.TodoLists.InsertAsync(todoList); - await _context.SaveChangesAsync(ct); - break; - - default: - throw new NotSupportedException($"Collection '{collection}' is not supported"); - } - } - - protected override Task GetEntityAsJsonAsync( - string collection, string key, CancellationToken ct) - { - return Task.FromResult(collection switch - { - "Users" => SerializeEntity(_context.Users.FindById(key)), - "TodoLists" => SerializeEntity(_context.TodoLists.FindById(key)), - _ => null - }); - } - - protected override async Task RemoveEntityAsync( - string collection, string key, CancellationToken ct) - { - switch (collection) - { - case "Users": - await _context.Users.DeleteAsync(key); - await _context.SaveChangesAsync(ct); - break; - - case "TodoLists": - await _context.TodoLists.DeleteAsync(key); - await _context.SaveChangesAsync(ct); - break; - } - } - - protected override async Task> GetAllEntitiesAsJsonAsync( - string collection, CancellationToken ct) - { - return await Task.Run(() => collection switch - { - "Users" => _context.Users.FindAll() - .Select(u => (u.Id, SerializeEntity(u)!.Value)), 
- - "TodoLists" => _context.TodoLists.FindAll() - .Select(t => (t.Id, SerializeEntity(t)!.Value)), - - _ => Enumerable.Empty<(string, JsonElement)>() - }, ct); - } - - private static JsonElement? SerializeEntity(T? entity) where T : class - { - if (entity == null) return null; - return JsonSerializer.SerializeToElement(entity); - } -} -``` - -## Usage in Application - -### Setup (DI Container) - -```csharp -services.AddSingleton(sp => - new SampleDbContext("data/sample.blite")); - -// No OplogStore dependency needed! -services.AddSingleton(); -services.AddSingleton>(); -``` - -### Local Changes (User operations) - -```csharp -// User inserts a new user -var user = new User { Id = "user-1", Name = "Alice" }; -await _context.Users.InsertAsync(user); -await _context.SaveChangesAsync(); - -// The application then needs to notify the DocumentStore: -var document = new Document( - "Users", - "user-1", - JsonSerializer.SerializeToElement(user), - new HlcTimestamp(0, 0, ""), - false); - -await documentStore.PutDocumentAsync(document); -// ? This creates an OplogEntry automatically -``` - -### Remote Sync (Automatic) - -```csharp -// When OplogStore.ApplyBatchAsync receives remote changes: -await oplogStore.ApplyBatchAsync(remoteEntries, cancellationToken); - -// Internally, this calls: -using (documentStore.BeginRemoteSync()) // ? Suppresses Oplog creation -{ - foreach (var entry in remoteEntries) - { - await documentStore.PutDocumentAsync(entryAsDocument); - // ? Writes to DB only, no Oplog duplication - } -} -``` - -## Migration from Old CDC-based Approach - -### Before (with CDC Events) - -```csharp -// SampleDocumentStore subscribes to BLite CDC -// CDC emits events ? OplogCoordinator creates Oplog -// Problem: Remote sync also triggers CDC ? duplicate Oplog entries -``` - -### After (with BLiteDocumentStore) - -```csharp -// Direct Oplog management in DocumentStore -// AsyncLocal flag prevents duplicates during sync -// No CDC events needed -``` - -## Benefits - -1. 
**No Event Loops** - Direct control over Oplog creation -2. **Thread-Safe** - AsyncLocal handles concurrent operations -3. **Simpler** - Only 4 methods to implement vs full CDC subscription -4. **Transparent** - Oplog management is hidden from user code - -## Next Steps - -After implementing your DocumentStore: - -1. Remove CDC subscriptions from your code -2. Remove `OplogCoordinator` from DI (no longer needed) -3. Test local operations create Oplog entries -4. Test remote sync doesn't create duplicate entries diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs deleted file mode 100755 index 8025d75..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteDocumentStore.cs +++ /dev/null @@ -1,783 +0,0 @@ -using System.Collections.Concurrent; -using System.Text.Json; -using BLite.Core.CDC; -using BLite.Core.Collections; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; -using BLiteOperationType = BLite.Core.Transactions.OperationType; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -/// -/// Abstract base class for BLite-based document stores. -/// Handles Oplog creation internally - subclasses only implement entity mapping. -/// -/// The BLite DbContext type. 
-public abstract class BLiteDocumentStore : IDocumentStore, IDisposable - where TDbContext : CBDDCDocumentDbContext -{ - private readonly List _cdcWatchers = new(); - private readonly object _clockLock = new(); - protected readonly IPeerNodeConfigurationProvider _configProvider; - protected readonly IConflictResolver _conflictResolver; - protected readonly TDbContext _context; - protected readonly ILogger> _logger; - private readonly HashSet _registeredCollections = new(); - - /// - /// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync. - /// CurrentCount == 0 ? sync in progress, CDC must skip. - /// CurrentCount == 1 ? no sync, CDC creates OplogEntry. - /// - private readonly SemaphoreSlim _remoteSyncGuard = new(1, 1); - - private readonly ConcurrentDictionary _suppressedCdcEvents = new(StringComparer.Ordinal); - protected readonly IVectorClockService _vectorClock; - - // HLC state for generating timestamps for local changes - private long _lastPhysicalTime; - private int _logicalCounter; - - /// - /// Initializes a new instance of the class. - /// - /// The BLite database context. - /// The peer node configuration provider. - /// The vector clock service. - /// The conflict resolver to use for merges. - /// The logger instance. - protected BLiteDocumentStore( - TDbContext context, - IPeerNodeConfigurationProvider configProvider, - IVectorClockService vectorClockService, - IConflictResolver? conflictResolver = null, - ILogger? logger = null) - { - _context = context ?? throw new ArgumentNullException(nameof(context)); - _configProvider = configProvider ?? throw new ArgumentNullException(nameof(configProvider)); - _vectorClock = vectorClockService ?? throw new ArgumentNullException(nameof(vectorClockService)); - _conflictResolver = conflictResolver ?? 
new LastWriteWinsConflictResolver(); - _logger = CreateTypedLogger(logger); - - _lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); - _logicalCounter = 0; - } - - /// - /// Releases managed resources used by this document store. - /// - public virtual void Dispose() - { - foreach (var watcher in _cdcWatchers) - try - { - watcher.Dispose(); - } - catch - { - } - - _cdcWatchers.Clear(); - _remoteSyncGuard.Dispose(); - } - - private static ILogger> CreateTypedLogger(ILogger? logger) - { - if (logger is null) return NullLogger>.Instance; - - if (logger is ILogger> typedLogger) return typedLogger; - - return new ForwardingLogger(logger); - } - - private sealed class ForwardingLogger : ILogger> - { - private readonly ILogger _inner; - - /// - /// Initializes a new instance of the class. - /// - /// The underlying logger instance. - public ForwardingLogger(ILogger inner) - { - _inner = inner; - } - - /// - public IDisposable? BeginScope(TState state) where TState : notnull - { - return _inner.BeginScope(state); - } - - /// - public bool IsEnabled(LogLevel logLevel) - { - return _inner.IsEnabled(logLevel); - } - - /// - public void Log( - LogLevel logLevel, - EventId eventId, - TState state, - Exception? 
exception, - Func formatter) - { - _inner.Log(logLevel, eventId, state, exception, formatter); - } - } - - #region CDC Registration - - private static string BuildSuppressionKey(string collection, string key, OperationType operationType) - { - return $"{collection}|{key}|{(int)operationType}"; - } - - private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType) - { - string suppressionKey = BuildSuppressionKey(collection, key, operationType); - _suppressedCdcEvents.AddOrUpdate(suppressionKey, 1, (_, current) => current + 1); - } - - private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType) - { - string suppressionKey = BuildSuppressionKey(collection, key, operationType); - while (true) - { - if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out int current)) return false; - - if (current <= 1) return _suppressedCdcEvents.TryRemove(suppressionKey, out _); - - if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) return true; - } - } - - /// - /// Registers a BLite collection for CDC tracking. - /// Call in subclass constructor for each collection to sync. - /// - /// The entity type. - /// The logical collection name used in Oplog. - /// The BLite DocumentCollection. - /// Function to extract the entity key. - protected void WatchCollection( - string collectionName, - DocumentCollection collection, - Func keySelector) - where TEntity : class - { - _registeredCollections.Add(collectionName); - - var watcher = collection.Watch(true) - .Subscribe(new CdcObserver(collectionName, keySelector, this)); - _cdcWatchers.Add(watcher); - } - - /// - /// Generic CDC observer. Forwards BLite change events to OnLocalChangeDetectedAsync. - /// Automatically skips events when remote sync is in progress. 
- /// - private class CdcObserver : IObserver> - where TEntity : class - { - private readonly string _collectionName; - private readonly Func _keySelector; - private readonly BLiteDocumentStore _store; - - /// - /// Initializes a new instance of the class. - /// - /// The logical collection name. - /// The key selector for observed entities. - /// The owning document store instance. - public CdcObserver( - string collectionName, - Func keySelector, - BLiteDocumentStore store) - { - _collectionName = collectionName; - _keySelector = keySelector; - _store = store; - } - - /// - /// Handles a change stream event from BLite CDC. - /// - /// The change event payload. - public void OnNext(ChangeStreamEvent changeEvent) - { - var operationType = changeEvent.Type == BLiteOperationType.Delete - ? OperationType.Delete - : OperationType.Put; - - string entityId = changeEvent.DocumentId ?? ""; - if (operationType == OperationType.Put && changeEvent.Entity != null) - entityId = _keySelector(changeEvent.Entity); - - if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) return; - - if (_store._remoteSyncGuard.CurrentCount == 0) return; - - if (changeEvent.Type == BLiteOperationType.Delete) - { - _store.OnLocalChangeDetectedAsync(_collectionName, entityId, OperationType.Delete, null) - .GetAwaiter().GetResult(); - } - else if (changeEvent.Entity != null) - { - var content = JsonSerializer.SerializeToElement(changeEvent.Entity); - string key = _keySelector(changeEvent.Entity); - _store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content) - .GetAwaiter().GetResult(); - } - } - - /// - /// Handles CDC observer errors. - /// - /// The observed exception. - public void OnError(Exception error) - { - } - - /// - /// Handles completion of the CDC stream. 
- /// - public void OnCompleted() - { - } - } - - #endregion - - #region Abstract Methods - Implemented by subclass - - /// - /// Applies JSON content to a single entity (insert or update) and commits changes. - /// Called for single-document operations. - /// - /// The logical collection name. - /// The document key. - /// The document content to apply. - /// The cancellation token. - protected abstract Task ApplyContentToEntityAsync( - string collection, string key, JsonElement content, CancellationToken cancellationToken); - - /// - /// Applies JSON content to multiple entities (insert or update) with a single commit. - /// Called for batch operations. Must commit all changes in a single SaveChanges. - /// - /// The documents to apply in one batch. - /// The cancellation token. - protected abstract Task ApplyContentToEntitiesBatchAsync( - IEnumerable<(string Collection, string Key, JsonElement Content)> documents, - CancellationToken cancellationToken); - - /// - /// Reads an entity from the DbContext and returns it as JsonElement. - /// - /// The logical collection name. - /// The document key. - /// The cancellation token. - protected abstract Task GetEntityAsJsonAsync( - string collection, string key, CancellationToken cancellationToken); - - /// - /// Removes a single entity from the DbContext and commits changes. - /// - /// The logical collection name. - /// The document key. - /// The cancellation token. - protected abstract Task RemoveEntityAsync( - string collection, string key, CancellationToken cancellationToken); - - /// - /// Removes multiple entities from the DbContext with a single commit. - /// - /// The documents to remove in one batch. - /// The cancellation token. - protected abstract Task RemoveEntitiesBatchAsync( - IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken); - - /// - /// Reads all entities from a collection as JsonElements. - /// - /// The logical collection name. 
- /// The cancellation token. - protected abstract Task> GetAllEntitiesAsJsonAsync( - string collection, CancellationToken cancellationToken); - - #endregion - - #region IDocumentStore Implementation - - /// - /// Returns the collections registered via WatchCollection. - /// - public IEnumerable InterestedCollection => _registeredCollections; - - /// - /// Gets a document by collection and key. - /// - /// The logical collection name. - /// The document key. - /// The cancellation token. - /// The matching document, or when not found. - public async Task GetDocumentAsync(string collection, string key, - CancellationToken cancellationToken = default) - { - var content = await GetEntityAsJsonAsync(collection, key, cancellationToken); - if (content == null) return null; - - var timestamp = new HlcTimestamp(0, 0, ""); // Will be populated from metadata if needed - return new Document(collection, key, content.Value, timestamp, false); - } - - /// - /// Gets all documents for a collection. - /// - /// The logical collection name. - /// The cancellation token. - /// The documents in the specified collection. - public async Task> GetDocumentsByCollectionAsync(string collection, - CancellationToken cancellationToken = default) - { - var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); - var timestamp = new HlcTimestamp(0, 0, ""); - return entities.Select(e => new Document(collection, e.Key, e.Content, timestamp, false)); - } - - /// - /// Gets documents for the specified collection and key pairs. - /// - /// The collection and key pairs to resolve. - /// The cancellation token. - /// The documents that were found. 
- public async Task> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys, - CancellationToken cancellationToken) - { - var documents = new List(); - foreach ((string collection, string key) in documentKeys) - { - var doc = await GetDocumentAsync(collection, key, cancellationToken); - if (doc != null) documents.Add(doc); - } - - return documents; - } - - /// - /// Inserts or updates a single document. - /// - /// The document to persist. - /// The cancellation token. - /// when the operation succeeds. - public async Task PutDocumentAsync(Document document, CancellationToken cancellationToken = default) - { - await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - await PutDocumentInternalAsync(document, cancellationToken); - } - finally - { - _remoteSyncGuard.Release(); - } - - return true; - } - - private async Task PutDocumentInternalAsync(Document document, CancellationToken cancellationToken) - { - RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); - await ApplyContentToEntityAsync(document.Collection, document.Key, document.Content, cancellationToken); - } - - /// - /// Updates a batch of documents. - /// - /// The documents to update. - /// The cancellation token. - /// when the operation succeeds. - public async Task UpdateBatchDocumentsAsync(IEnumerable documents, - CancellationToken cancellationToken = default) - { - var documentList = documents.ToList(); - await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - foreach (var document in documentList) - RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); - - await ApplyContentToEntitiesBatchAsync( - documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); - } - finally - { - _remoteSyncGuard.Release(); - } - - return true; - } - - /// - /// Inserts a batch of documents. - /// - /// The documents to insert. - /// The cancellation token. - /// when the operation succeeds. 
- public async Task InsertBatchDocumentsAsync(IEnumerable documents, - CancellationToken cancellationToken = default) - { - var documentList = documents.ToList(); - await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - foreach (var document in documentList) - RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); - - await ApplyContentToEntitiesBatchAsync( - documentList.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); - } - finally - { - _remoteSyncGuard.Release(); - } - - return true; - } - - /// - /// Deletes a single document. - /// - /// The logical collection name. - /// The document key. - /// The cancellation token. - /// when the operation succeeds. - public async Task DeleteDocumentAsync(string collection, string key, - CancellationToken cancellationToken = default) - { - await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - await DeleteDocumentInternalAsync(collection, key, cancellationToken); - } - finally - { - _remoteSyncGuard.Release(); - } - - return true; - } - - private async Task DeleteDocumentInternalAsync(string collection, string key, CancellationToken cancellationToken) - { - RegisterSuppressedCdcEvent(collection, key, OperationType.Delete); - await RemoveEntityAsync(collection, key, cancellationToken); - } - - /// - /// Deletes a batch of documents by composite keys. - /// - /// The document keys in collection/key format. - /// The cancellation token. - /// when the operation succeeds. 
- public async Task DeleteBatchDocumentsAsync(IEnumerable documentKeys, - CancellationToken cancellationToken = default) - { - var parsedKeys = new List<(string Collection, string Key)>(); - foreach (string key in documentKeys) - { - string[] parts = key.Split('/'); - if (parts.Length == 2) - parsedKeys.Add((parts[0], parts[1])); - else - _logger.LogWarning("Invalid document key format: {Key}", key); - } - - if (parsedKeys.Count == 0) return true; - - await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - foreach ((string collection, string key) in parsedKeys) - RegisterSuppressedCdcEvent(collection, key, OperationType.Delete); - - await RemoveEntitiesBatchAsync(parsedKeys, cancellationToken); - } - finally - { - _remoteSyncGuard.Release(); - } - - return true; - } - - /// - /// Merges an incoming document with the current stored document. - /// - /// The incoming document. - /// The cancellation token. - /// The stored document after merge resolution. - public async Task MergeAsync(Document incoming, CancellationToken cancellationToken = default) - { - var existing = await GetDocumentAsync(incoming.Collection, incoming.Key, cancellationToken); - - if (existing == null) - { - // Use internal method - guard not acquired yet in single-document merge - await PutDocumentInternalAsync(incoming, cancellationToken); - return incoming; - } - - // Use conflict resolver to merge - var resolution = _conflictResolver.Resolve(existing, new OplogEntry( - incoming.Collection, - incoming.Key, - OperationType.Put, - incoming.Content, - incoming.UpdatedAt, - "")); - - if (resolution.ShouldApply && resolution.MergedDocument != null) - { - await PutDocumentInternalAsync(resolution.MergedDocument, cancellationToken); - return resolution.MergedDocument; - } - - return existing; - } - - #endregion - - #region ISnapshotable Implementation - - /// - /// Removes all tracked documents from registered collections. - /// - /// The cancellation token. 
- public async Task DropAsync(CancellationToken cancellationToken = default) - { - foreach (string collection in InterestedCollection) - { - var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); - foreach ((string key, var _) in entities) await RemoveEntityAsync(collection, key, cancellationToken); - } - } - - /// - /// Exports all tracked documents from registered collections. - /// - /// The cancellation token. - /// The exported documents. - public async Task> ExportAsync(CancellationToken cancellationToken = default) - { - var documents = new List(); - foreach (string collection in InterestedCollection) - { - var collectionDocs = await GetDocumentsByCollectionAsync(collection, cancellationToken); - documents.AddRange(collectionDocs); - } - - return documents; - } - - /// - /// Imports a batch of documents. - /// - /// The documents to import. - /// The cancellation token. - public async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) - { - var documents = items.ToList(); - await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - foreach (var document in documents) - RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); - - await ApplyContentToEntitiesBatchAsync( - documents.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken); - } - finally - { - _remoteSyncGuard.Release(); - } - } - - /// - /// Merges a batch of incoming documents. - /// - /// The incoming documents. - /// The cancellation token. 
- public async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) - { - // Acquire guard to prevent Oplog creation during merge - await _remoteSyncGuard.WaitAsync(cancellationToken); - try - { - foreach (var document in items) await MergeAsync(document, cancellationToken); - } - finally - { - _remoteSyncGuard.Release(); - } - } - - #endregion - - #region Oplog Management - - /// - /// Returns true if a remote sync operation is in progress (guard acquired). - /// CDC listeners should check this before creating OplogEntry. - /// - protected bool IsRemoteSyncInProgress => _remoteSyncGuard.CurrentCount == 0; - - /// - /// Called by subclass CDC listeners when a local change is detected. - /// Creates OplogEntry + DocumentMetadata only if no remote sync is in progress. - /// - /// The logical collection name. - /// The document key. - /// The detected operation type. - /// The document content when available. - /// The cancellation token. - protected async Task OnLocalChangeDetectedAsync( - string collection, - string key, - OperationType operationType, - JsonElement? content, - CancellationToken cancellationToken = default) - { - if (IsRemoteSyncInProgress) return; - - await CreateOplogEntryAsync(collection, key, operationType, content, cancellationToken); - } - - private HlcTimestamp GenerateTimestamp(string nodeId) - { - lock (_clockLock) - { - long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); - - if (now > _lastPhysicalTime) - { - _lastPhysicalTime = now; - _logicalCounter = 0; - } - else - { - _logicalCounter++; - } - - return new HlcTimestamp(_lastPhysicalTime, _logicalCounter, nodeId); - } - } - - private async Task CreateOplogEntryAsync( - string collection, - string key, - OperationType operationType, - JsonElement? 
content, - CancellationToken cancellationToken) - { - var config = await _configProvider.GetConfiguration(); - string nodeId = config.NodeId; - - // Get last hash from OplogEntries collection directly - var lastEntry = _context.OplogEntries - .Find(e => e.TimestampNodeId == nodeId) - .OrderByDescending(e => e.TimestampPhysicalTime) - .ThenByDescending(e => e.TimestampLogicalCounter) - .FirstOrDefault(); - - string previousHash = lastEntry?.Hash ?? string.Empty; - var timestamp = GenerateTimestamp(nodeId); - - var oplogEntry = new OplogEntry( - collection, - key, - operationType, - content, - timestamp, - previousHash); - - // Write directly to OplogEntries collection - await _context.OplogEntries.InsertAsync(oplogEntry.ToEntity()); - - // Write DocumentMetadata for sync tracking - var docMetadata = EntityMappers.CreateDocumentMetadata( - collection, - key, - timestamp, - operationType == OperationType.Delete); - - var existingMetadata = _context.DocumentMetadatas - .Find(m => m.Collection == collection && m.Key == key) - .FirstOrDefault(); - - if (existingMetadata != null) - { - // Update existing metadata - existingMetadata.HlcPhysicalTime = timestamp.PhysicalTime; - existingMetadata.HlcLogicalCounter = timestamp.LogicalCounter; - existingMetadata.HlcNodeId = timestamp.NodeId; - existingMetadata.IsDeleted = operationType == OperationType.Delete; - await _context.DocumentMetadatas.UpdateAsync(existingMetadata); - } - else - { - await _context.DocumentMetadatas.InsertAsync(docMetadata); - } - - await _context.SaveChangesAsync(cancellationToken); - - // Notify VectorClockService so sync sees local changes - _vectorClock.Update(oplogEntry); - - _logger.LogDebug( - "Created Oplog entry: {Operation} {Collection}/{Key} at {Timestamp} (hash: {Hash})", - operationType, collection, key, timestamp, oplogEntry.Hash); - } - - /// - /// Marks the start of remote sync operations (suppresses CDC-triggered Oplog creation). 
- /// Use in using statement: using (store.BeginRemoteSync()) { ... } - /// - public IDisposable BeginRemoteSync() - { - _remoteSyncGuard.Wait(); - return new RemoteSyncScope(_remoteSyncGuard); - } - - private class RemoteSyncScope : IDisposable - { - private readonly SemaphoreSlim _guard; - - /// - /// Initializes a new instance of the class. - /// - /// The semaphore guarding remote sync operations. - public RemoteSyncScope(SemaphoreSlim guard) - { - _guard = guard; - } - - /// - /// Releases the remote sync guard. - /// - public void Dispose() - { - _guard.Release(); - } - } - - #endregion -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs deleted file mode 100755 index e4e2012..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteOplogStore.cs +++ /dev/null @@ -1,253 +0,0 @@ -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -public class BLiteOplogStore : OplogStore where TDbContext : CBDDCDocumentDbContext -{ - protected readonly TDbContext _context; - protected readonly ILogger> _logger; - - /// - /// Initializes a new instance of the class. - /// - /// The BLite database context. - /// The document store used by the oplog store. - /// The conflict resolver used during merges. - /// The vector clock service used for timestamp coordination. - /// Optional snapshot metadata store used for initialization. - /// Optional logger instance. - public BLiteOplogStore( - TDbContext dbContext, - IDocumentStore documentStore, - IConflictResolver conflictResolver, - IVectorClockService vectorClockService, - ISnapshotMetadataStore? snapshotMetadataStore = null, - ILogger>? 
logger = null) : base(documentStore, conflictResolver, vectorClockService, - snapshotMetadataStore) - { - _context = dbContext ?? throw new ArgumentNullException(nameof(dbContext)); - _logger = logger ?? NullLogger>.Instance; - } - - /// - public override async Task ApplyBatchAsync(IEnumerable oplogEntries, - CancellationToken cancellationToken = default) - { - // BLite transactions are committed by each SaveChangesAsync internally. - // Wrapping in an explicit transaction causes "Cannot rollback committed transaction" - // because PutDocumentAsync → SaveChangesAsync already commits. - await base.ApplyBatchAsync(oplogEntries, cancellationToken); - } - - /// - public override async Task DropAsync(CancellationToken cancellationToken = default) - { - // Use Id (technical key) for deletion, not Hash (business key) - await _context.OplogEntries.DeleteBulkAsync(_context.OplogEntries.FindAll().Select(e => e.Id)); - await _context.SaveChangesAsync(cancellationToken); - _vectorClock.Invalidate(); - } - - /// - public override async Task> ExportAsync(CancellationToken cancellationToken = default) - { - return _context.OplogEntries.FindAll().ToDomain(); - } - - /// - public override async Task> GetChainRangeAsync(string startHash, string endHash, - CancellationToken cancellationToken = default) - { - var startRow = _context.OplogEntries.Find(o => o.Hash == startHash).FirstOrDefault(); - var endRow = _context.OplogEntries.Find(o => o.Hash == endHash).FirstOrDefault(); - - if (startRow == null || endRow == null) return []; - - string nodeId = startRow.TimestampNodeId; - - // 2. 
Fetch range (Start < Entry <= End) - var entities = _context.OplogEntries - .Find(o => o.TimestampNodeId == nodeId && - (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime || - (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime && - o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) && - (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime || - (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime && - o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter))) - .OrderBy(o => o.TimestampPhysicalTime) - .ThenBy(o => o.TimestampLogicalCounter) - .ToList(); - - return entities.ToDomain(); - } - - /// - public override async Task GetEntryByHashAsync(string hash, - CancellationToken cancellationToken = default) - { - // Hash is now a regular indexed property, not the Key - return _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault()?.ToDomain(); - } - - /// - public override async Task> GetOplogAfterAsync(HlcTimestamp timestamp, - IEnumerable? collections = null, CancellationToken cancellationToken = default) - { - var query = _context.OplogEntries - .Find(o => o.TimestampPhysicalTime > timestamp.PhysicalTime || - (o.TimestampPhysicalTime == timestamp.PhysicalTime && - o.TimestampLogicalCounter > timestamp.LogicalCounter)); - if (collections != null) - { - var collectionSet = new HashSet(collections); - query = query.Where(o => collectionSet.Contains(o.Collection)); - } - - return query - .OrderBy(o => o.TimestampPhysicalTime) - .ThenBy(o => o.TimestampLogicalCounter) - .ToDomain() - .ToList(); - } - - /// - public override async Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since, - IEnumerable? 
collections = null, CancellationToken cancellationToken = default) - { - var query = _context.OplogEntries.AsQueryable() - .Where(o => o.TimestampNodeId == nodeId && - (o.TimestampPhysicalTime > since.PhysicalTime || - (o.TimestampPhysicalTime == since.PhysicalTime && - o.TimestampLogicalCounter > since.LogicalCounter))); - if (collections != null) - { - var collectionSet = new HashSet(collections); - query = query.Where(o => collectionSet.Contains(o.Collection)); - } - - return query - .OrderBy(o => o.TimestampPhysicalTime) - .ThenBy(o => o.TimestampLogicalCounter) - .ToDomain() - .ToList(); - } - - /// - public override async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) - { - foreach (var item in items) await _context.OplogEntries.InsertAsync(item.ToEntity()); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) - { - foreach (var item in items) - { - // Hash is now a regular indexed property, not the Key - var existing = _context.OplogEntries.Find(o => o.Hash == item.Hash).FirstOrDefault(); - if (existing == null) await _context.OplogEntries.InsertAsync(item.ToEntity()); - } - - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default) - { - var toDelete = _context.OplogEntries.AsQueryable() - .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime || - (o.TimestampPhysicalTime == cutoff.PhysicalTime && - o.TimestampLogicalCounter <= cutoff.LogicalCounter)) - .Select(o => o.Hash) - .ToList(); - await _context.OplogEntries.DeleteBulkAsync(toDelete); - } - - /// - protected override void InitializeVectorClock() - { - if (_vectorClock.IsInitialized) return; - - // Early check: if context or OplogEntries is null, skip initialization - if (_context?.OplogEntries == null) - { - 
_vectorClock.IsInitialized = true; - return; - } - - // Step 1: Load from SnapshotMetadata FIRST (base state after prune) - if (_snapshotMetadataStore != null) - try - { - var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult(); - foreach (var snapshot in snapshots) - _vectorClock.UpdateNode( - snapshot.NodeId, - new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter, - snapshot.NodeId), - snapshot.Hash ?? ""); - } - catch - { - // Ignore errors during initialization - oplog data will be used as fallback - } - - // Step 2: Load from Oplog (Latest State - Overrides Snapshot if newer) - var latestPerNode = _context.OplogEntries.AsQueryable() - .GroupBy(o => o.TimestampNodeId) - .Select(g => new - { - NodeId = g.Key, - MaxEntry = g.OrderByDescending(o => o.TimestampPhysicalTime) - .ThenByDescending(o => o.TimestampLogicalCounter) - .FirstOrDefault() - }) - .ToList() - .Where(x => x.MaxEntry != null) - .ToList(); - - foreach (var node in latestPerNode) - if (node.MaxEntry != null) - _vectorClock.UpdateNode( - node.NodeId, - new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter, - node.MaxEntry.TimestampNodeId), - node.MaxEntry.Hash ?? 
""); - - _vectorClock.IsInitialized = true; - } - - /// - protected override async Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default) - { - await _context.OplogEntries.InsertAsync(entry.ToEntity()); - } - - /// - protected override async Task QueryLastHashForNodeAsync(string nodeId, - CancellationToken cancellationToken = default) - { - var lastEntry = _context.OplogEntries.AsQueryable() - .Where(o => o.TimestampNodeId == nodeId) - .OrderByDescending(o => o.TimestampPhysicalTime) - .ThenByDescending(o => o.TimestampLogicalCounter) - .FirstOrDefault(); - return lastEntry?.Hash; - } - - /// - protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash, - CancellationToken cancellationToken = default) - { - // Hash is now a regular indexed property, not the Key - var entry = _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault(); - if (entry == null) return null; - return (entry.TimestampPhysicalTime, entry.TimestampLogicalCounter); - } -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs deleted file mode 100755 index 7850c94..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerConfigurationStore.cs +++ /dev/null @@ -1,131 +0,0 @@ -using System.Text.Json; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -/// -/// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence -/// operations. -/// -/// -/// This class enables storage, retrieval, and management of remote peer configurations using the provided -/// database context. 
It is typically used in scenarios where peer configurations need to be persisted in a document -/// database. -/// -/// -/// The type of the document database context used for accessing and managing peer configurations. Must inherit from -/// CBDDCDocumentDbContext. -/// -public class BLitePeerConfigurationStore : PeerConfigurationStore where TDbContext : CBDDCDocumentDbContext -{ - /// - /// Represents the database context used for data access operations within the derived class. - /// - protected readonly TDbContext _context; - - /// - /// Provides logging capabilities for the BLitePeerConfigurationStore operations. - /// - protected readonly ILogger> _logger; - - /// - /// Initializes a new instance of the BLitePeerConfigurationStore class using the specified database context and - /// optional logger. - /// - /// The database context used to access and manage peer configuration data. Cannot be null. - /// An optional logger for logging diagnostic messages. If null, a no-op logger is used. - /// Thrown if the context parameter is null. - public BLitePeerConfigurationStore(TDbContext context, - ILogger>? logger = null) - { - _context = context ?? throw new ArgumentNullException(nameof(context)); - _logger = logger ?? 
NullLogger>.Instance; - } - - /// - public override async Task DropAsync(CancellationToken cancellationToken = default) - { - _logger.LogWarning( - "Dropping peer configuration store - all remote peer configurations will be permanently deleted!"); - // Use Id (technical key) for deletion, not NodeId (business key) - var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(), - cancellationToken); - await _context.RemotePeerConfigurations.DeleteBulkAsync(allIds); - await _context.SaveChangesAsync(cancellationToken); - _logger.LogInformation("Peer configuration store dropped successfully."); - } - - /// - public override async Task> ExportAsync( - CancellationToken cancellationToken = default) - { - return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken); - } - - /// - public override async Task GetRemotePeerAsync(string nodeId, - CancellationToken cancellationToken) - { - // NodeId is now a regular indexed property, not the Key - return await Task.Run( - () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(), - cancellationToken); - } - - /// - public override async Task> GetRemotePeersAsync( - CancellationToken cancellationToken = default) - { - return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken); - } - - /// - public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default) - { - // NodeId is now a regular indexed property, not the Key - var peer = await Task.Run( - () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken); - if (peer != null) - { - await _context.RemotePeerConfigurations.DeleteAsync(peer.Id); - await _context.SaveChangesAsync(cancellationToken); - _logger.LogInformation("Removed remote peer configuration: {NodeId}", nodeId); - } - else - { - 
_logger.LogWarning("Attempted to remove non-existent remote peer: {NodeId}", nodeId); - } - } - - /// - public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer, - CancellationToken cancellationToken = default) - { - // NodeId is now a regular indexed property, not the Key - var existing = - await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(), - cancellationToken); - - if (existing == null) - { - await _context.RemotePeerConfigurations.InsertAsync(peer.ToEntity()); - } - else - { - existing.NodeId = peer.NodeId; - existing.Address = peer.Address; - existing.Type = (int)peer.Type; - existing.IsEnabled = peer.IsEnabled; - existing.InterestsJson = peer.InterestingCollections.Count > 0 - ? JsonSerializer.Serialize(peer.InterestingCollections) - : ""; - await _context.RemotePeerConfigurations.UpdateAsync(existing); - } - - await _context.SaveChangesAsync(cancellationToken); - _logger.LogInformation("Saved remote peer configuration: {NodeId} ({Type})", peer.NodeId, peer.Type); - } -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs deleted file mode 100644 index 74183c1..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLitePeerOplogConfirmationStore.cs +++ /dev/null @@ -1,300 +0,0 @@ -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -/// -/// BLite-backed peer oplog confirmation store. -/// -/// The BLite context type. 
-public class BLitePeerOplogConfirmationStore : PeerOplogConfirmationStore - where TDbContext : CBDDCDocumentDbContext -{ - internal const string RegistrationSourceNodeId = "__peer_registration__"; - - private readonly TDbContext _context; - private readonly ILogger> _logger; - - /// - /// Initializes a new instance of the class. - /// - /// The BLite context. - /// An optional logger. - public BLitePeerOplogConfirmationStore( - TDbContext context, - ILogger>? logger = null) - { - _context = context ?? throw new ArgumentNullException(nameof(context)); - _logger = logger ?? NullLogger>.Instance; - } - - /// - public override async Task EnsurePeerRegisteredAsync( - string peerNodeId, - string address, - PeerType type, - CancellationToken cancellationToken = default) - { - if (string.IsNullOrWhiteSpace(peerNodeId)) - throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - - var existing = _context.PeerOplogConfirmations - .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == RegistrationSourceNodeId) - .FirstOrDefault(); - - if (existing == null) - { - await _context.PeerOplogConfirmations.InsertAsync(new PeerOplogConfirmationEntity - { - Id = Guid.NewGuid().ToString(), - PeerNodeId = peerNodeId, - SourceNodeId = RegistrationSourceNodeId, - ConfirmedWall = 0, - ConfirmedLogic = 0, - ConfirmedHash = "", - LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(), - IsActive = true - }); - - await _context.SaveChangesAsync(cancellationToken); - _logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId, - address, type); - return; - } - - if (!existing.IsActive) - { - existing.IsActive = true; - existing.LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); - await _context.PeerOplogConfirmations.UpdateAsync(existing); - await _context.SaveChangesAsync(cancellationToken); - } - } - - /// - public override async Task UpdateConfirmationAsync( - string peerNodeId, - string 
sourceNodeId, - HlcTimestamp timestamp, - string hash, - CancellationToken cancellationToken = default) - { - if (string.IsNullOrWhiteSpace(peerNodeId)) - throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - - if (string.IsNullOrWhiteSpace(sourceNodeId)) - throw new ArgumentException("Source node id is required.", nameof(sourceNodeId)); - - var existing = _context.PeerOplogConfirmations - .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == sourceNodeId) - .FirstOrDefault(); - - long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); - - if (existing == null) - { - await _context.PeerOplogConfirmations.InsertAsync(new PeerOplogConfirmationEntity - { - Id = Guid.NewGuid().ToString(), - PeerNodeId = peerNodeId, - SourceNodeId = sourceNodeId, - ConfirmedWall = timestamp.PhysicalTime, - ConfirmedLogic = timestamp.LogicalCounter, - ConfirmedHash = hash ?? "", - LastConfirmedUtcMs = nowMs, - IsActive = true - }); - await _context.SaveChangesAsync(cancellationToken); - return; - } - - bool isNewer = IsIncomingTimestampNewer(timestamp, existing); - bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall && - timestamp.LogicalCounter == existing.ConfirmedLogic && - !string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal); - - if (!isNewer && !samePointHashChanged && existing.IsActive) return; - - existing.ConfirmedWall = timestamp.PhysicalTime; - existing.ConfirmedLogic = timestamp.LogicalCounter; - existing.ConfirmedHash = hash ?? 
""; - existing.LastConfirmedUtcMs = nowMs; - existing.IsActive = true; - - await _context.PeerOplogConfirmations.UpdateAsync(existing); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override Task> GetConfirmationsAsync( - CancellationToken cancellationToken = default) - { - var confirmations = _context.PeerOplogConfirmations - .Find(c => c.SourceNodeId != RegistrationSourceNodeId) - .ToDomain() - .ToList(); - - return Task.FromResult>(confirmations); - } - - /// - public override Task> GetConfirmationsForPeerAsync( - string peerNodeId, - CancellationToken cancellationToken = default) - { - if (string.IsNullOrWhiteSpace(peerNodeId)) - throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - - var confirmations = _context.PeerOplogConfirmations - .Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId != RegistrationSourceNodeId) - .ToDomain() - .ToList(); - - return Task.FromResult>(confirmations); - } - - /// - public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default) - { - if (string.IsNullOrWhiteSpace(peerNodeId)) - throw new ArgumentException("Peer node id is required.", nameof(peerNodeId)); - - var matches = _context.PeerOplogConfirmations - .Find(c => c.PeerNodeId == peerNodeId) - .ToList(); - - if (matches.Count == 0) return; - - long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); - foreach (var match in matches) - { - if (!match.IsActive) continue; - - match.IsActive = false; - match.LastConfirmedUtcMs = nowMs; - await _context.PeerOplogConfirmations.UpdateAsync(match); - } - - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override Task> GetActiveTrackedPeersAsync(CancellationToken cancellationToken = default) - { - var peers = _context.PeerOplogConfirmations - .Find(c => c.IsActive) - .Select(c => c.PeerNodeId) - .Distinct(StringComparer.Ordinal) - .ToList(); - - return Task.FromResult>(peers); - } - - /// - 
public override async Task DropAsync(CancellationToken cancellationToken = default) - { - var allIds = _context.PeerOplogConfirmations.FindAll().Select(c => c.Id).ToList(); - await _context.PeerOplogConfirmations.DeleteBulkAsync(allIds); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override Task> ExportAsync(CancellationToken cancellationToken = default) - { - var exported = _context.PeerOplogConfirmations - .FindAll() - .ToDomain() - .ToList(); - - return Task.FromResult>(exported); - } - - /// - public override async Task ImportAsync(IEnumerable items, - CancellationToken cancellationToken = default) - { - foreach (var item in items) - { - var existing = _context.PeerOplogConfirmations - .Find(c => c.PeerNodeId == item.PeerNodeId && c.SourceNodeId == item.SourceNodeId) - .FirstOrDefault(); - - if (existing == null) - { - await _context.PeerOplogConfirmations.InsertAsync(item.ToEntity()); - continue; - } - - existing.ConfirmedWall = item.ConfirmedWall; - existing.ConfirmedLogic = item.ConfirmedLogic; - existing.ConfirmedHash = item.ConfirmedHash; - existing.LastConfirmedUtcMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds(); - existing.IsActive = item.IsActive; - await _context.PeerOplogConfirmations.UpdateAsync(existing); - } - - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task MergeAsync(IEnumerable items, - CancellationToken cancellationToken = default) - { - foreach (var item in items) - { - var existing = _context.PeerOplogConfirmations - .Find(c => c.PeerNodeId == item.PeerNodeId && c.SourceNodeId == item.SourceNodeId) - .FirstOrDefault(); - - if (existing == null) - { - await _context.PeerOplogConfirmations.InsertAsync(item.ToEntity()); - continue; - } - - var changed = false; - var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId); - var existingTimestamp = - new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, 
existing.SourceNodeId); - - if (incomingTimestamp > existingTimestamp) - { - existing.ConfirmedWall = item.ConfirmedWall; - existing.ConfirmedLogic = item.ConfirmedLogic; - existing.ConfirmedHash = item.ConfirmedHash; - changed = true; - } - - long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds(); - if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs) - { - existing.LastConfirmedUtcMs = incomingLastConfirmedMs; - changed = true; - } - - if (existing.IsActive != item.IsActive) - { - existing.IsActive = item.IsActive; - changed = true; - } - - if (changed) await _context.PeerOplogConfirmations.UpdateAsync(existing); - } - - await _context.SaveChangesAsync(cancellationToken); - } - - private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, PeerOplogConfirmationEntity existing) - { - if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true; - - if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall && - incomingTimestamp.LogicalCounter > existing.ConfirmedLogic) - return true; - - return false; - } -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs deleted file mode 100755 index d7649e0..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/BLiteSnapshotMetadataStore.cs +++ /dev/null @@ -1,167 +0,0 @@ -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Logging.Abstractions; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -/// -/// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence -/// operations. -/// -/// -/// This class enables storage, retrieval, and management of snapshot metadata using the provided -/// database context. 
It is typically used in scenarios where snapshot metadata needs to be persisted in a document -/// database. The class supports bulk operations and incremental updates, and can be extended for custom database -/// contexts. Thread safety depends on the underlying context implementation. -/// -/// -/// The type of the document database context used for accessing and managing snapshot metadata. Must inherit from -/// CBDDCDocumentDbContext. -/// -public class BLiteSnapshotMetadataStore : SnapshotMetadataStore where TDbContext : CBDDCDocumentDbContext -{ - /// - /// Represents the database context used for data access operations within the derived class. - /// - /// - /// Intended for use by derived classes to interact with the underlying database. The context - /// should be properly disposed of according to the application's lifetime management strategy. - /// - protected readonly TDbContext _context; - - /// - /// Provides logging capabilities for the BLiteSnapshotMetadataStore operations. - /// - /// - /// Intended for use by derived classes to record diagnostic and operational information. The - /// logger instance is specific to the BLiteSnapshotMetadataStore type. - /// - protected readonly ILogger> _logger; - - /// - /// Initializes a new instance of the BLiteSnapshotMetadataStore class using the specified database context and - /// optional logger. - /// - /// The database context to be used for accessing snapshot metadata. Cannot be null. - /// An optional logger for logging diagnostic messages. If null, a no-op logger is used. - /// Thrown if the context parameter is null. - public BLiteSnapshotMetadataStore(TDbContext context, - ILogger>? logger = null) - { - _context = context ?? throw new ArgumentNullException(nameof(context)); - _logger = logger ?? 
NullLogger>.Instance; - } - - /// - public override async Task DropAsync(CancellationToken cancellationToken = default) - { - // Use Id (technical key) for deletion, not NodeId (business key) - var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(), - cancellationToken); - await _context.SnapshotMetadatas.DeleteBulkAsync(allIds); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task> ExportAsync(CancellationToken cancellationToken = default) - { - return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken); - } - - /// - public override async Task GetSnapshotHashAsync(string nodeId, - CancellationToken cancellationToken = default) - { - // NodeId is now a regular indexed property, not the Key - var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(), - cancellationToken); - return snapshot?.Hash; - } - - /// - public override async Task ImportAsync(IEnumerable items, - CancellationToken cancellationToken = default) - { - foreach (var metadata in items) await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity()); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, - CancellationToken cancellationToken = default) - { - await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity()); - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task MergeAsync(IEnumerable items, - CancellationToken cancellationToken = default) - { - foreach (var metadata in items) - { - // NodeId is now a regular indexed property, not the Key - var existing = - await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(), - cancellationToken); - - if (existing == null) - { - await 
_context.SnapshotMetadatas.InsertAsync(metadata.ToEntity()); - } - else - { - // Update only if incoming is newer - if (metadata.TimestampPhysicalTime > existing.TimestampPhysicalTime || - (metadata.TimestampPhysicalTime == existing.TimestampPhysicalTime && - metadata.TimestampLogicalCounter > existing.TimestampLogicalCounter)) - { - existing.NodeId = metadata.NodeId; - existing.TimestampPhysicalTime = metadata.TimestampPhysicalTime; - existing.TimestampLogicalCounter = metadata.TimestampLogicalCounter; - existing.Hash = metadata.Hash; - await _context.SnapshotMetadatas.UpdateAsync(existing); - } - } - } - - await _context.SaveChangesAsync(cancellationToken); - } - - /// - public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, - CancellationToken cancellationToken) - { - // NodeId is now a regular indexed property, not the Key - find existing by NodeId - var existing = - await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(), - cancellationToken); - if (existing != null) - { - existing.NodeId = existingMeta.NodeId; - existing.TimestampPhysicalTime = existingMeta.TimestampPhysicalTime; - existing.TimestampLogicalCounter = existingMeta.TimestampLogicalCounter; - existing.Hash = existingMeta.Hash; - await _context.SnapshotMetadatas.UpdateAsync(existing); - await _context.SaveChangesAsync(cancellationToken); - } - } - - /// - public override async Task GetSnapshotMetadataAsync(string nodeId, - CancellationToken cancellationToken = default) - { - // NodeId is now a regular indexed property, not the Key - return await Task.Run( - () => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(), - cancellationToken); - } - - /// - public override async Task> GetAllSnapshotMetadataAsync( - CancellationToken cancellationToken = default) - { - return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken); - } -} \ No 
newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs deleted file mode 100755 index 8b77ac7..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCBLiteExtensions.cs +++ /dev/null @@ -1,102 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using ZB.MOM.WW.CBDDC.Core.Storage; -using ZB.MOM.WW.CBDDC.Core.Sync; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -/// -/// Extension methods for configuring BLite persistence for ZB.MOM.WW.CBDDC. -/// -public static class CBDDCBLiteExtensions -{ - /// - /// Adds BLite persistence to CBDDC using a custom DbContext and DocumentStore implementation. - /// - /// The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext. - /// The type of the document store implementation. Must implement IDocumentStore. - /// The service collection to add the services to. - /// A factory function that creates the DbContext instance. - /// The service collection for chaining. 
- public static IServiceCollection AddCBDDCBLite( - this IServiceCollection services, - Func contextFactory) - where TDbContext : CBDDCDocumentDbContext - where TDocumentStore : class, IDocumentStore - { - if (services == null) throw new ArgumentNullException(nameof(services)); - if (contextFactory == null) throw new ArgumentNullException(nameof(contextFactory)); - - // Register the DbContext as singleton (must match store lifetime) - services.TryAddSingleton(contextFactory); - services.TryAddSingleton(sp => sp.GetRequiredService()); - - // Default Conflict Resolver (Last Write Wins) if none is provided - services.TryAddSingleton(); - - // Vector Clock Service (shared between DocumentStore and OplogStore) - services.TryAddSingleton(); - - // Register BLite Stores (all Singleton) - services.TryAddSingleton>(); - services.TryAddSingleton>(); - services.TryAddSingleton>(); - services.TryAddSingleton>(); - services.TryAddSingleton>(); - - // Register the DocumentStore implementation - services.TryAddSingleton(); - - // Register the SnapshotService (uses the generic SnapshotStore from ZB.MOM.WW.CBDDC.Persistence) - services.TryAddSingleton(); - - return services; - } - - /// - /// Adds BLite persistence to CBDDC using a custom DbContext (without explicit DocumentStore type). - /// - /// The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext. - /// The service collection to add the services to. - /// A factory function that creates the DbContext instance. - /// The service collection for chaining. - /// You must manually register IDocumentStore after calling this method. 
- public static IServiceCollection AddCBDDCBLite( - this IServiceCollection services, - Func contextFactory) - where TDbContext : CBDDCDocumentDbContext - { - if (services == null) throw new ArgumentNullException(nameof(services)); - if (contextFactory == null) throw new ArgumentNullException(nameof(contextFactory)); - - // Register the DbContext as singleton - services.TryAddSingleton(contextFactory); - services.TryAddSingleton(sp => sp.GetRequiredService()); - - // Default Conflict Resolver (Last Write Wins) if none is provided - services.TryAddSingleton(); - - // Register BLite Stores (all Singleton) - services.TryAddSingleton>(); - services.TryAddSingleton>(); - services.TryAddSingleton>(); - services.TryAddSingleton>(); - services.TryAddSingleton>(); - - // Register the SnapshotService (uses the generic SnapshotStore from ZB.MOM.WW.CBDDC.Persistence) - services.TryAddSingleton(); - - return services; - } -} - -/// -/// Options for configuring BLite persistence. -/// -public class BLiteOptions -{ - /// - /// Gets or sets the file path to the BLite database file. - /// - public string DatabasePath { get; set; } = ""; -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs deleted file mode 100755 index bf4b31e..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/CBDDCDocumentDbContext.cs +++ /dev/null @@ -1,117 +0,0 @@ -using BLite.Core; -using BLite.Core.Collections; -using BLite.Core.Metadata; -using BLite.Core.Storage; -using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite; - -public partial class CBDDCDocumentDbContext : DocumentDbContext -{ - /// - /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path. - /// - /// - /// The file system path to the database file to be used by the context. Cannot be null or - /// empty. 
- /// - public CBDDCDocumentDbContext(string databasePath) : base(databasePath) - { - } - - /// - /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file - /// configuration. - /// - /// The file system path to the database file. This value cannot be null or empty. - /// - /// The configuration settings for the page file. Specifies options that control how the database - /// pages are managed. - /// - public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config) - { - } - - /// - /// Gets the collection of operation log entries associated with this instance. - /// - /// - /// The collection provides access to all recorded operation log (oplog) entries, which can be - /// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed - /// directly through this property. - /// - public DocumentCollection OplogEntries { get; private set; } = null!; - - /// - /// Gets the collection of snapshot metadata associated with the document. - /// - public DocumentCollection SnapshotMetadatas { get; private set; } = null!; - - /// - /// Gets the collection of remote peer configurations associated with this instance. - /// - /// - /// Use this collection to access or enumerate the configuration settings for each remote peer. - /// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the - /// containing class. - /// - public DocumentCollection RemotePeerConfigurations { get; private set; } = null!; - - /// - /// Gets the collection of document metadata for sync tracking. - /// - /// - /// Stores HLC timestamps and deleted state for each document without modifying application entities. - /// Used to track document versions for incremental sync instead of full snapshots. 
- /// - public DocumentCollection DocumentMetadatas { get; private set; } = null!; - - /// - /// Gets the collection of peer oplog confirmation records for pruning safety tracking. - /// - public DocumentCollection PeerOplogConfirmations { get; private set; } = null!; - - /// - protected override void OnModelCreating(ModelBuilder modelBuilder) - { - base.OnModelCreating(modelBuilder); - - // OplogEntries: Use Id as technical key, Hash as unique business key - modelBuilder.Entity() - .ToCollection("OplogEntries") - .HasKey(e => e.Id) - .HasIndex(e => e.Hash, unique: true) // Hash is unique business key - .HasIndex(e => new { e.TimestampPhysicalTime, e.TimestampLogicalCounter, e.TimestampNodeId }) - .HasIndex(e => e.Collection); - - // SnapshotMetadatas: Use Id as technical key, NodeId as unique business key - modelBuilder.Entity() - .ToCollection("SnapshotMetadatas") - .HasKey(e => e.Id) - .HasIndex(e => e.NodeId, unique: true) // NodeId is unique business key - .HasIndex(e => new { e.TimestampPhysicalTime, e.TimestampLogicalCounter }); - - // RemotePeerConfigurations: Use Id as technical key, NodeId as unique business key - modelBuilder.Entity() - .ToCollection("RemotePeerConfigurations") - .HasKey(e => e.Id) - .HasIndex(e => e.NodeId, unique: true) // NodeId is unique business key - .HasIndex(e => e.IsEnabled); - - // DocumentMetadatas: Use Id as technical key, Collection+Key as unique composite business key - modelBuilder.Entity() - .ToCollection("DocumentMetadatas") - .HasKey(e => e.Id) - .HasIndex(e => new { e.Collection, e.Key }, unique: true) // Composite business key - .HasIndex(e => new { e.HlcPhysicalTime, e.HlcLogicalCounter, e.HlcNodeId }) - .HasIndex(e => e.Collection); - - // PeerOplogConfirmations: Use Id as technical key, PeerNodeId+SourceNodeId as unique business key - modelBuilder.Entity() - .ToCollection("PeerOplogConfirmations") - .HasKey(e => e.Id) - .HasIndex(e => new { e.PeerNodeId, e.SourceNodeId }, unique: true) - .HasIndex(e => e.IsActive) 
- .HasIndex(e => new { e.SourceNodeId, e.ConfirmedWall, e.ConfirmedLogic }); - } -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs deleted file mode 100755 index 5444394..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/DocumentMetadataEntity.cs +++ /dev/null @@ -1,47 +0,0 @@ -using System.ComponentModel.DataAnnotations; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -/// -/// BLite entity representing document metadata for sync tracking. -/// Stores HLC timestamp and deleted state for each document without modifying application entities. -/// -public class DocumentMetadataEntity -{ - /// - /// Gets or sets the unique identifier for this entity (technical key). - /// Auto-generated GUID string. - /// - [Key] - public string Id { get; set; } = ""; - - /// - /// Gets or sets the collection name (business key part 1). - /// - public string Collection { get; set; } = ""; - - /// - /// Gets or sets the document key within the collection (business key part 2). - /// - public string Key { get; set; } = ""; - - /// - /// Gets or sets the physical time component of the HLC timestamp. - /// - public long HlcPhysicalTime { get; set; } - - /// - /// Gets or sets the logical counter component of the HLC timestamp. - /// - public int HlcLogicalCounter { get; set; } - - /// - /// Gets or sets the node ID that last modified this document. - /// - public string HlcNodeId { get; set; } = ""; - - /// - /// Gets or sets whether this document is marked as deleted (tombstone). 
- /// - public bool IsDeleted { get; set; } -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs deleted file mode 100755 index 19340d2..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/EntityMappers.cs +++ /dev/null @@ -1,240 +0,0 @@ -using System.Text.Json; -using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -/// -/// Provides extension methods for mapping between BLite entities and domain models. -/// -public static class EntityMappers -{ - #region DocumentMetadataEntity Helpers - - /// - /// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state. - /// Used for tracking document sync state. - /// - /// The collection name that owns the document. - /// The document key within the collection. - /// The hybrid logical clock timestamp for the document state. - /// Indicates whether the document is marked as deleted. - public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp, - bool isDeleted = false) - { - return new DocumentMetadataEntity - { - Id = Guid.NewGuid().ToString(), - Collection = collection, - Key = key, - HlcPhysicalTime = timestamp.PhysicalTime, - HlcLogicalCounter = timestamp.LogicalCounter, - HlcNodeId = timestamp.NodeId, - IsDeleted = isDeleted - }; - } - - #endregion - - #region OplogEntity Mappers - - /// - /// Converts an OplogEntry domain model to an OplogEntity for persistence. - /// - /// The oplog entry to convert. 
- public static OplogEntity ToEntity(this OplogEntry entry) - { - return new OplogEntity - { - Id = Guid.NewGuid().ToString(), // Auto-generate technical key - Collection = entry.Collection, - Key = entry.Key, - Operation = (int)entry.Operation, - // Use empty string instead of null to avoid BLite BSON serialization issues - PayloadJson = entry.Payload?.GetRawText() ?? "", - TimestampPhysicalTime = entry.Timestamp.PhysicalTime, - TimestampLogicalCounter = entry.Timestamp.LogicalCounter, - TimestampNodeId = entry.Timestamp.NodeId, - Hash = entry.Hash, - PreviousHash = entry.PreviousHash - }; - } - - /// - /// Converts an OplogEntity to an OplogEntry domain model. - /// - /// The persisted oplog entity to convert. - public static OplogEntry ToDomain(this OplogEntity entity) - { - JsonElement? payload = null; - // Treat empty string as null payload (Delete operations) - if (!string.IsNullOrEmpty(entity.PayloadJson)) - payload = JsonSerializer.Deserialize(entity.PayloadJson); - - return new OplogEntry( - entity.Collection, - entity.Key, - (OperationType)entity.Operation, - payload, - new HlcTimestamp(entity.TimestampPhysicalTime, entity.TimestampLogicalCounter, entity.TimestampNodeId), - entity.PreviousHash, - entity.Hash); - } - - /// - /// Converts a collection of OplogEntity to OplogEntry domain models. - /// - /// The oplog entities to convert. - public static IEnumerable ToDomain(this IEnumerable entities) - { - return entities.Select(e => e.ToDomain()); - } - - #endregion - - #region SnapshotMetadataEntity Mappers - - /// - /// Converts a SnapshotMetadata domain model to a SnapshotMetadataEntity for persistence. - /// - /// The snapshot metadata to convert. 
- public static SnapshotMetadataEntity ToEntity(this SnapshotMetadata metadata) - { - return new SnapshotMetadataEntity - { - Id = Guid.NewGuid().ToString(), // Auto-generate technical key - NodeId = metadata.NodeId, - TimestampPhysicalTime = metadata.TimestampPhysicalTime, - TimestampLogicalCounter = metadata.TimestampLogicalCounter, - Hash = metadata.Hash - }; - } - - /// - /// Converts a SnapshotMetadataEntity to a SnapshotMetadata domain model. - /// - /// The persisted snapshot metadata entity to convert. - public static SnapshotMetadata ToDomain(this SnapshotMetadataEntity entity) - { - return new SnapshotMetadata - { - NodeId = entity.NodeId, - TimestampPhysicalTime = entity.TimestampPhysicalTime, - TimestampLogicalCounter = entity.TimestampLogicalCounter, - Hash = entity.Hash - }; - } - - /// - /// Converts a collection of SnapshotMetadataEntity to SnapshotMetadata domain models. - /// - /// The snapshot metadata entities to convert. - public static IEnumerable ToDomain(this IEnumerable entities) - { - return entities.Select(e => e.ToDomain()); - } - - #endregion - - #region RemotePeerEntity Mappers - - /// - /// Converts a RemotePeerConfiguration domain model to a RemotePeerEntity for persistence. - /// - /// The remote peer configuration to convert. - public static RemotePeerEntity ToEntity(this RemotePeerConfiguration config) - { - return new RemotePeerEntity - { - Id = Guid.NewGuid().ToString(), // Auto-generate technical key - NodeId = config.NodeId, - Address = config.Address, - Type = (int)config.Type, - IsEnabled = config.IsEnabled, - InterestsJson = config.InterestingCollections.Count > 0 - ? JsonSerializer.Serialize(config.InterestingCollections) - : "" - }; - } - - /// - /// Converts a RemotePeerEntity to a RemotePeerConfiguration domain model. - /// - /// The persisted remote peer entity to convert. 
- public static RemotePeerConfiguration ToDomain(this RemotePeerEntity entity) - { - var config = new RemotePeerConfiguration - { - NodeId = entity.NodeId, - Address = entity.Address, - Type = (PeerType)entity.Type, - IsEnabled = entity.IsEnabled - }; - - if (!string.IsNullOrEmpty(entity.InterestsJson)) - config.InterestingCollections = JsonSerializer.Deserialize>(entity.InterestsJson) ?? []; - - return config; - } - - /// - /// Converts a collection of RemotePeerEntity to RemotePeerConfiguration domain models. - /// - /// The remote peer entities to convert. - public static IEnumerable ToDomain(this IEnumerable entities) - { - return entities.Select(e => e.ToDomain()); - } - - #endregion - - #region PeerOplogConfirmationEntity Mappers - - /// - /// Converts a peer oplog confirmation domain model to a BLite entity. - /// - /// The confirmation to convert. - public static PeerOplogConfirmationEntity ToEntity(this PeerOplogConfirmation confirmation) - { - return new PeerOplogConfirmationEntity - { - Id = Guid.NewGuid().ToString(), - PeerNodeId = confirmation.PeerNodeId, - SourceNodeId = confirmation.SourceNodeId, - ConfirmedWall = confirmation.ConfirmedWall, - ConfirmedLogic = confirmation.ConfirmedLogic, - ConfirmedHash = confirmation.ConfirmedHash, - LastConfirmedUtcMs = confirmation.LastConfirmedUtc.ToUnixTimeMilliseconds(), - IsActive = confirmation.IsActive - }; - } - - /// - /// Converts a peer oplog confirmation entity to a domain model. - /// - /// The entity to convert. 
- public static PeerOplogConfirmation ToDomain(this PeerOplogConfirmationEntity entity) - { - return new PeerOplogConfirmation - { - PeerNodeId = entity.PeerNodeId, - SourceNodeId = entity.SourceNodeId, - ConfirmedWall = entity.ConfirmedWall, - ConfirmedLogic = entity.ConfirmedLogic, - ConfirmedHash = entity.ConfirmedHash, - LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(entity.LastConfirmedUtcMs), - IsActive = entity.IsActive - }; - } - - /// - /// Converts a collection of peer oplog confirmation entities to domain models. - /// - /// The entities to convert. - public static IEnumerable ToDomain(this IEnumerable entities) - { - return entities.Select(e => e.ToDomain()); - } - - #endregion -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs deleted file mode 100755 index 2a958c6..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/OplogEntity.cs +++ /dev/null @@ -1,61 +0,0 @@ -using System.ComponentModel.DataAnnotations; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -/// -/// BLite entity representing an operation log entry. -/// -public class OplogEntity -{ - /// - /// Gets or sets the unique identifier for this entity (technical key). - /// Auto-generated GUID string. - /// - [Key] - public string Id { get; set; } = ""; - - /// - /// Gets or sets the collection name. - /// - public string Collection { get; set; } = ""; - - /// - /// Gets or sets the document key. - /// - public string Key { get; set; } = ""; - - /// - /// Gets or sets the operation type (0 = Put, 1 = Delete). - /// - public int Operation { get; set; } - - /// - /// Gets or sets the payload JSON (empty string for Delete operations). - /// - public string PayloadJson { get; set; } = ""; - - /// - /// Gets or sets the physical time component of the HLC timestamp. 
- /// - public long TimestampPhysicalTime { get; set; } - - /// - /// Gets or sets the logical counter component of the HLC timestamp. - /// - public int TimestampLogicalCounter { get; set; } - - /// - /// Gets or sets the node ID component of the HLC timestamp. - /// - public string TimestampNodeId { get; set; } = ""; - - /// - /// Gets or sets the cryptographic hash of this entry (business key). - /// - public string Hash { get; set; } = ""; - - /// - /// Gets or sets the hash of the previous entry in the chain. - /// - public string PreviousHash { get; set; } = ""; -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs deleted file mode 100644 index b65c546..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/PeerOplogConfirmationEntity.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System.ComponentModel.DataAnnotations; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -/// -/// BLite entity representing a peer oplog confirmation watermark. -/// -public class PeerOplogConfirmationEntity -{ - /// - /// Gets or sets the unique technical identifier for this entity. - /// - [Key] - public string Id { get; set; } = ""; - - /// - /// Gets or sets the tracked peer node identifier. - /// - public string PeerNodeId { get; set; } = ""; - - /// - /// Gets or sets the source node identifier for this confirmation. - /// - public string SourceNodeId { get; set; } = ""; - - /// - /// Gets or sets the physical wall-clock component of the confirmed HLC timestamp. - /// - public long ConfirmedWall { get; set; } - - /// - /// Gets or sets the logical component of the confirmed HLC timestamp. - /// - public int ConfirmedLogic { get; set; } - - /// - /// Gets or sets the confirmed hash value. 
- /// - public string ConfirmedHash { get; set; } = ""; - - /// - /// Gets or sets the UTC instant of the last update as unix milliseconds. - /// - public long LastConfirmedUtcMs { get; set; } - - /// - /// Gets or sets whether the tracked peer remains active. - /// - public bool IsActive { get; set; } = true; -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs deleted file mode 100755 index 1626dd8..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/RemotePeerEntity.cs +++ /dev/null @@ -1,42 +0,0 @@ -using System.ComponentModel.DataAnnotations; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -/// -/// BLite entity representing a remote peer configuration. -/// -public class RemotePeerEntity -{ - /// - /// Gets or sets the unique identifier for this entity (technical key). - /// Auto-generated GUID string. - /// - [Key] - public string Id { get; set; } = ""; - - /// - /// Gets or sets the unique identifier for the remote peer node (business key). - /// - public string NodeId { get; set; } = ""; - - /// - /// Gets or sets the network address of the remote peer (hostname:port). - /// - public string Address { get; set; } = ""; - - /// - /// Gets or sets the type of the peer (0=LanDiscovered, 1=StaticRemote, 2=CloudRemote). - /// - public int Type { get; set; } - - /// - /// Gets or sets whether this peer is enabled for synchronization. - /// - public bool IsEnabled { get; set; } = true; - - /// - /// Gets or sets the collection interests as a JSON string. - /// Use empty string instead of null for BLite compatibility. 
- /// - public string InterestsJson { get; set; } = ""; -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs b/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs deleted file mode 100755 index facf2d7..0000000 --- a/src/ZB.MOM.WW.CBDDC.Persistence/BLite/Entities/SnapshotMetadataEntity.cs +++ /dev/null @@ -1,36 +0,0 @@ -using System.ComponentModel.DataAnnotations; - -namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities; - -/// -/// BLite entity representing snapshot metadata (oplog pruning checkpoint). -/// -public class SnapshotMetadataEntity -{ - /// - /// Gets or sets the unique identifier for this entity (technical key). - /// Auto-generated GUID string. - /// - [Key] - public string Id { get; set; } = ""; - - /// - /// Gets or sets the node identifier (business key). - /// - public string NodeId { get; set; } = ""; - - /// - /// Gets or sets the physical time component of the timestamp. - /// - public long TimestampPhysicalTime { get; set; } - - /// - /// Gets or sets the logical counter component of the timestamp. - /// - public int TimestampLogicalCounter { get; set; } - - /// - /// Gets or sets the hash of the snapshot. - /// - public string Hash { get; set; } = ""; -} \ No newline at end of file diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/README.md b/src/ZB.MOM.WW.CBDDC.Persistence/README.md index d03cc08..76becf6 100755 --- a/src/ZB.MOM.WW.CBDDC.Persistence/README.md +++ b/src/ZB.MOM.WW.CBDDC.Persistence/README.md @@ -1,10 +1,10 @@ # ZB.MOM.WW.CBDDC.Persistence -BLite persistence provider and foundational persistence implementations for **CBDDC**. +SurrealDB (embedded RocksDB) persistence provider and foundational persistence implementations for **CBDDC**. 
## What's Included -This package provides both BLite provider types and core persistence services: +This package provides Surreal provider types and core persistence services: - **OplogStore**: Base implementation for append-only operation log storage - **VectorClockService**: Thread-safe in-memory vector clock management @@ -14,7 +14,7 @@ This package provides both BLite provider types and core persistence services: ## When To Use This Package -- **As a Library User**: Install this package to use CBDDC with BLite persistence. +- **As a Library User**: Install this package to use CBDDC with Surreal embedded persistence. - **As a Provider Developer**: Reference this package to build custom persistence providers by extending the base classes diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedClient.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedClient.cs new file mode 100644 index 0000000..7a2f147 --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedClient.cs @@ -0,0 +1,142 @@ +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using SurrealDb.Embedded.Options; +using SurrealDb.Embedded.RocksDb; +using SurrealDb.Net; +using SurrealDb.Net.Models.Response; + +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Embedded RocksDB-backed Surreal client wrapper used by CBDDC persistence components. +/// +public sealed class CBDDCSurrealEmbeddedClient : ICBDDCSurrealEmbeddedClient +{ + private static readonly IReadOnlyDictionary EmptyParameters = new Dictionary(); + private readonly SemaphoreSlim _initializeGate = new(1, 1); + private readonly ILogger _logger; + private readonly CBDDCSurrealEmbeddedOptions _options; + private bool _disposed; + private bool _initialized; + + /// + /// Initializes a new instance of the class. + /// + /// Embedded Surreal options. + /// Optional logger. 
+ public CBDDCSurrealEmbeddedClient( + CBDDCSurrealEmbeddedOptions options, + ILogger? logger = null) + { + _options = options ?? throw new ArgumentNullException(nameof(options)); + _logger = logger ?? NullLogger.Instance; + + if (!string.IsNullOrWhiteSpace(_options.Endpoint) && + !_options.Endpoint.StartsWith("rocksdb://", StringComparison.OrdinalIgnoreCase)) + throw new ArgumentException( + "Embedded Surreal endpoint must use the rocksdb:// scheme.", + nameof(options)); + + if (string.IsNullOrWhiteSpace(_options.Namespace)) + throw new ArgumentException("Namespace is required.", nameof(options)); + + if (string.IsNullOrWhiteSpace(_options.Database)) + throw new ArgumentException("Database is required.", nameof(options)); + + if (string.IsNullOrWhiteSpace(_options.NamingPolicy)) + throw new ArgumentException("Naming policy is required.", nameof(options)); + + string dbPath = ResolveDatabasePath(_options.DatabasePath); + var embeddedOptionsBuilder = SurrealDbEmbeddedOptions.Create(); + if (_options.StrictMode.HasValue) + embeddedOptionsBuilder.WithStrictMode(_options.StrictMode.Value); + + Client = new SurrealDbRocksDbClient(dbPath, embeddedOptionsBuilder.Build(), _options.NamingPolicy); + } + + /// + public ISurrealDbClient Client { get; } + + /// + public async Task InitializeAsync(CancellationToken cancellationToken = default) + { + ThrowIfDisposed(); + if (_initialized) return; + + await _initializeGate.WaitAsync(cancellationToken); + try + { + if (_initialized) return; + + await Client.Connect(cancellationToken); + await Client.Use(_options.Namespace, _options.Database, cancellationToken); + _initialized = true; + + _logger.LogInformation("Surreal embedded client initialized for namespace '{Namespace}' and database '{Database}'.", + _options.Namespace, _options.Database); + } + finally + { + _initializeGate.Release(); + } + } + + /// + public async Task RawQueryAsync( + string query, + IReadOnlyDictionary? 
parameters = null, + CancellationToken cancellationToken = default) + { + ThrowIfDisposed(); + if (string.IsNullOrWhiteSpace(query)) + throw new ArgumentException("Query is required.", nameof(query)); + + await InitializeAsync(cancellationToken); + return await Client.RawQuery(query, parameters ?? EmptyParameters, cancellationToken); + } + + /// + public async Task HealthAsync(CancellationToken cancellationToken = default) + { + ThrowIfDisposed(); + await InitializeAsync(cancellationToken); + return await Client.Health(cancellationToken); + } + + /// + public void Dispose() + { + if (_disposed) return; + + _disposed = true; + Client.Dispose(); + _initializeGate.Dispose(); + } + + /// + public async ValueTask DisposeAsync() + { + if (_disposed) return; + + _disposed = true; + await Client.DisposeAsync(); + _initializeGate.Dispose(); + } + + private void ThrowIfDisposed() + { + ObjectDisposedException.ThrowIf(_disposed, this); + } + + private static string ResolveDatabasePath(string databasePath) + { + if (string.IsNullOrWhiteSpace(databasePath)) + throw new ArgumentException("DatabasePath is required.", nameof(databasePath)); + + string fullPath = Path.GetFullPath(databasePath); + string? 
directory = Path.GetDirectoryName(fullPath); + if (!string.IsNullOrWhiteSpace(directory)) Directory.CreateDirectory(directory); + + return fullPath; + } +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedExtensions.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedExtensions.cs new file mode 100644 index 0000000..aff56f4 --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedExtensions.cs @@ -0,0 +1,75 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using ZB.MOM.WW.CBDDC.Core.Network; +using SurrealDb.Net; +using ZB.MOM.WW.CBDDC.Core.Storage; +using ZB.MOM.WW.CBDDC.Core.Sync; + +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Extension methods for configuring embedded Surreal persistence for CBDDC. +/// +public static class CBDDCSurrealEmbeddedExtensions +{ + /// + /// Adds embedded Surreal infrastructure to CBDDC and registers a document store implementation. + /// + /// The concrete document store implementation. + /// The service collection to add services to. + /// Factory used to build embedded Surreal options. + /// The service collection for chaining. + public static IServiceCollection AddCBDDCSurrealEmbedded( + this IServiceCollection services, + Func optionsFactory) + where TDocumentStore : class, IDocumentStore + { + RegisterCoreServices(services, optionsFactory); + services.TryAddSingleton(); + return services; + } + + /// + /// Adds embedded Surreal infrastructure to CBDDC without registering store implementations. + /// + /// The service collection to add services to. + /// Factory used to build embedded Surreal options. + /// The service collection for chaining. + /// + /// Register store implementations separately when they become available. 
+ /// + public static IServiceCollection AddCBDDCSurrealEmbedded( + this IServiceCollection services, + Func optionsFactory) + { + RegisterCoreServices(services, optionsFactory); + return services; + } + + private static void RegisterCoreServices( + IServiceCollection services, + Func optionsFactory) + { + if (services == null) throw new ArgumentNullException(nameof(services)); + if (optionsFactory == null) throw new ArgumentNullException(nameof(optionsFactory)); + + services.TryAddSingleton(optionsFactory); + + services.TryAddSingleton(); + services.TryAddSingleton(sp => sp.GetRequiredService().Client); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + services.TryAddSingleton(); + + // SnapshotStore registration matches the other provider extension patterns. + services.TryAddSingleton(); + } +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedOptions.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedOptions.cs new file mode 100644 index 0000000..8b633f8 --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealEmbeddedOptions.cs @@ -0,0 +1,88 @@ +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Configuration for the embedded SurrealDB RocksDB provider. +/// +public sealed class CBDDCSurrealEmbeddedOptions +{ + /// + /// Logical endpoint for this provider. For embedded RocksDB this should use the rocksdb:// scheme. + /// + public string Endpoint { get; set; } = "rocksdb://local"; + + /// + /// File path used by the embedded RocksDB engine. + /// + public string DatabasePath { get; set; } = "data/cbddc-surreal.db"; + + /// + /// Surreal namespace. + /// + public string Namespace { get; set; } = "cbddc"; + + /// + /// Surreal database name inside the namespace. 
+ /// + public string Database { get; set; } = "main"; + + /// + /// Naming policy used by the Surreal .NET client serializer. + /// + public string NamingPolicy { get; set; } = "camelCase"; + + /// + /// Optional strict mode flag for embedded Surreal. + /// + public bool? StrictMode { get; set; } + + /// + /// CDC-related options used by persistence stores. + /// + public CBDDCSurrealCdcOptions Cdc { get; set; } = new(); +} + +/// +/// CDC/checkpoint configuration for the embedded Surreal provider. +/// +public sealed class CBDDCSurrealCdcOptions +{ + /// + /// Enables CDC-oriented checkpoint bookkeeping. + /// + public bool Enabled { get; set; } = true; + + /// + /// Checkpoint table name used for CDC progress tracking. + /// + public string CheckpointTable { get; set; } = "cbddc_cdc_checkpoint"; + + /// + /// Enables LIVE SELECT subscriptions as a low-latency wake-up signal for polling. + /// + public bool EnableLiveSelectAccelerator { get; set; } = true; + + /// + /// Logical consumer identifier used by checkpoint records. + /// + public string ConsumerId { get; set; } = "default"; + + /// + /// Polling interval for CDC readers that use pull-based processing. + /// + public TimeSpan PollingInterval { get; set; } = TimeSpan.FromSeconds(1); + + /// + /// Maximum number of changefeed entries fetched per poll cycle. + /// + public int BatchSize { get; set; } = 500; + + /// + /// Delay before re-subscribing LIVE SELECT after failures or closure. + /// + public TimeSpan LiveSelectReconnectDelay { get; set; } = TimeSpan.FromSeconds(2); + + /// + /// Retention window used when defining Surreal changefeed history. 
+ /// + public TimeSpan RetentionDuration { get; set; } = TimeSpan.FromDays(7); +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealReadinessProbe.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealReadinessProbe.cs new file mode 100644 index 0000000..28fe7e4 --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealReadinessProbe.cs @@ -0,0 +1,45 @@ +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; + +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Health/readiness helper for the embedded Surreal provider. +/// +public sealed class CBDDCSurrealReadinessProbe : ICBDDCSurrealReadinessProbe +{ + private readonly ICBDDCSurrealEmbeddedClient _surrealClient; + private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer; + private readonly ILogger _logger; + + /// + /// Initializes a new instance of the class. + /// + /// Surreal client abstraction. + /// Schema initializer. + /// Optional logger. + public CBDDCSurrealReadinessProbe( + ICBDDCSurrealEmbeddedClient surrealClient, + ICBDDCSurrealSchemaInitializer schemaInitializer, + ILogger? logger = null) + { + _surrealClient = surrealClient ?? throw new ArgumentNullException(nameof(surrealClient)); + _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer)); + _logger = logger ?? 
NullLogger.Instance; + } + + /// + public async Task IsReadyAsync(CancellationToken cancellationToken = default) + { + try + { + await _schemaInitializer.EnsureInitializedAsync(cancellationToken); + return await _surrealClient.HealthAsync(cancellationToken); + } + catch (Exception ex) + { + _logger.LogWarning(ex, "Surreal embedded readiness probe failed."); + return false; + } + } +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaInitializer.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaInitializer.cs new file mode 100644 index 0000000..d3123df --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaInitializer.cs @@ -0,0 +1,131 @@ +using System.Text.RegularExpressions; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; + +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Initializes Surreal schema objects required by CBDDC persistence stores. +/// +public sealed class CBDDCSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer +{ + private static readonly Regex IdentifierRegex = new("^[A-Za-z_][A-Za-z0-9_]*$", RegexOptions.Compiled); + private readonly SemaphoreSlim _initializeGate = new(1, 1); + private readonly ICBDDCSurrealEmbeddedClient _surrealClient; + private readonly ILogger _logger; + private readonly string _checkpointTable; + private readonly string _changefeedRetentionLiteral; + private bool _initialized; + + /// + /// Initializes a new instance of the class. + /// + /// Surreal client abstraction. + /// Embedded options. + /// Optional logger. + public CBDDCSurrealSchemaInitializer( + ICBDDCSurrealEmbeddedClient surrealClient, + CBDDCSurrealEmbeddedOptions options, + ILogger? logger = null) + { + _surrealClient = surrealClient ?? throw new ArgumentNullException(nameof(surrealClient)); + _logger = logger ?? 
NullLogger.Instance; + if (options == null) throw new ArgumentNullException(nameof(options)); + if (options.Cdc == null) throw new ArgumentException("CDC options are required.", nameof(options)); + + _checkpointTable = EnsureValidIdentifier(options.Cdc.CheckpointTable, nameof(options.Cdc.CheckpointTable)); + _changefeedRetentionLiteral = ToSurrealDurationLiteral( + options.Cdc.RetentionDuration, + nameof(options.Cdc.RetentionDuration)); + } + + /// + public async Task EnsureInitializedAsync(CancellationToken cancellationToken = default) + { + if (_initialized) return; + + await _initializeGate.WaitAsync(cancellationToken); + try + { + if (_initialized) return; + + string schemaSql = BuildSchemaSql(); + await _surrealClient.RawQueryAsync(schemaSql, cancellationToken: cancellationToken); + + _initialized = true; + _logger.LogInformation( + "Surreal schema initialized with checkpoint table '{CheckpointTable}'.", + _checkpointTable); + } + finally + { + _initializeGate.Release(); + } + } + + private string BuildSchemaSql() + { + return $""" + DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.OplogEntriesTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral}; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogHashIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS hash UNIQUE; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS timestampPhysicalTime, timestampLogicalCounter, timestampNodeId; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogCollectionIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS collection; + + DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral}; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.SnapshotNodeIdIndex} ON TABLE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} COLUMNS nodeId UNIQUE; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.SnapshotHlcIndex} 
ON TABLE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} COLUMNS timestampPhysicalTime, timestampLogicalCounter; + + DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral}; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerNodeIdIndex} ON TABLE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} COLUMNS nodeId UNIQUE; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerEnabledIndex} ON TABLE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} COLUMNS isEnabled; + + DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral}; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataCollectionKeyIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS collection, key UNIQUE; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS hlcPhysicalTime, hlcLogicalCounter, hlcNodeId; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataCollectionIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS collection; + + DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral}; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationPairIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS peerNodeId, sourceNodeId UNIQUE; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationActiveIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS isActive; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationSourceHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS sourceNodeId, confirmedWall, confirmedLogic; + + DEFINE TABLE OVERWRITE {_checkpointTable} SCHEMAFULL; + DEFINE INDEX OVERWRITE 
{CBDDCSurrealSchemaNames.CdcCheckpointConsumerIndex} ON TABLE {_checkpointTable} COLUMNS consumerId UNIQUE; + DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.CdcCheckpointVersionstampIndex} ON TABLE {_checkpointTable} COLUMNS versionstampCursor; + """; + } + + private static string EnsureValidIdentifier(string? identifier, string argumentName) + { + if (string.IsNullOrWhiteSpace(identifier)) + throw new ArgumentException("Surreal identifier is required.", argumentName); + + if (!IdentifierRegex.IsMatch(identifier)) + throw new ArgumentException( + $"Invalid Surreal identifier '{identifier}'. Use letters, numbers, and underscores only.", + argumentName); + + return identifier; + } + + private static string ToSurrealDurationLiteral(TimeSpan duration, string argumentName) + { + if (duration <= TimeSpan.Zero) + throw new ArgumentOutOfRangeException(argumentName, "Surreal changefeed retention duration must be positive."); + + if (duration.TotalDays >= 1 && duration.TotalDays == Math.Truncate(duration.TotalDays)) + return $"{(long)duration.TotalDays}d"; + + if (duration.TotalHours >= 1 && duration.TotalHours == Math.Truncate(duration.TotalHours)) + return $"{(long)duration.TotalHours}h"; + + if (duration.TotalMinutes >= 1 && duration.TotalMinutes == Math.Truncate(duration.TotalMinutes)) + return $"{(long)duration.TotalMinutes}m"; + + if (duration.TotalSeconds >= 1 && duration.TotalSeconds == Math.Truncate(duration.TotalSeconds)) + return $"{(long)duration.TotalSeconds}s"; + + long totalMs = checked((long)Math.Ceiling(duration.TotalMilliseconds)); + return $"{totalMs}ms"; + } +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaNames.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaNames.cs new file mode 100644 index 0000000..aa75568 --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/CBDDCSurrealSchemaNames.cs @@ -0,0 +1,29 @@ +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Surreal table and index names shared by the 
/// <summary>
/// Central registry of SurrealDB table and index names used by the embedded CBDDC
/// persistence provider, so the stores and the schema initializer stay in agreement.
/// </summary>
public static class CBDDCSurrealSchemaNames
{
    // ---- Tables ----------------------------------------------------------------
    public const string OplogEntriesTable = "cbddc_oplog_entries";
    public const string SnapshotMetadataTable = "cbddc_snapshot_metadatas";
    public const string RemotePeerConfigurationsTable = "cbddc_remote_peer_configurations";
    public const string DocumentMetadataTable = "cbddc_document_metadatas";
    public const string PeerOplogConfirmationsTable = "cbddc_peer_oplog_confirmations";

    // ---- Oplog indexes ---------------------------------------------------------
    public const string OplogHashIndex = "idx_cbddc_oplog_hash";
    public const string OplogHlcIndex = "idx_cbddc_oplog_hlc";
    public const string OplogCollectionIndex = "idx_cbddc_oplog_collection";

    // ---- Snapshot indexes ------------------------------------------------------
    public const string SnapshotNodeIdIndex = "idx_cbddc_snapshot_node";
    public const string SnapshotHlcIndex = "idx_cbddc_snapshot_hlc";

    // ---- Peer configuration indexes --------------------------------------------
    public const string PeerNodeIdIndex = "idx_cbddc_peer_node";
    public const string PeerEnabledIndex = "idx_cbddc_peer_enabled";

    // ---- Document metadata indexes ---------------------------------------------
    public const string DocumentMetadataCollectionKeyIndex = "idx_cbddc_docmeta_collection_key";
    public const string DocumentMetadataHlcIndex = "idx_cbddc_docmeta_hlc";
    public const string DocumentMetadataCollectionIndex = "idx_cbddc_docmeta_collection";

    // ---- Peer confirmation indexes ---------------------------------------------
    public const string PeerConfirmationPairIndex = "idx_cbddc_peer_confirm_pair";
    public const string PeerConfirmationActiveIndex = "idx_cbddc_peer_confirm_active";
    public const string PeerConfirmationSourceHlcIndex = "idx_cbddc_peer_confirm_source_hlc";

    // ---- CDC checkpoint indexes ------------------------------------------------
    public const string CdcCheckpointConsumerIndex = "idx_cbddc_cdc_checkpoint_consumer";
    public const string CdcCheckpointVersionstampIndex = "idx_cbddc_cdc_checkpoint_versionstamp";
}
/// <summary>
/// Abstraction over the embedded SurrealDB client used by the CBDDC persistence stores.
/// </summary>
public interface ICBDDCSurrealEmbeddedClient : IAsyncDisposable, IDisposable
{
    /// <summary>
    /// Gets the underlying Surreal client.
    /// </summary>
    ISurrealDbClient Client { get; }

    /// <summary>
    /// Connects and selects namespace/database exactly once.
    /// </summary>
    Task InitializeAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Executes a raw SurrealQL statement.
    /// </summary>
    // NOTE(review): generic type arguments were stripped in the reviewed copy; the
    // response/parameter types below are reconstructed from usage — confirm against callers.
    Task<SurrealDbResponse> RawQueryAsync(string query,
        IReadOnlyDictionary<string, object?>? parameters = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Checks whether the embedded client responds to health probes.
    /// </summary>
    Task<bool> HealthAsync(CancellationToken cancellationToken = default);
}

/// <summary>
/// Simple readiness probe for embedded Surreal infrastructure.
/// </summary>
public interface ICBDDCSurrealReadinessProbe
{
    /// <summary>
    /// Returns true when client initialization, schema initialization, and health checks pass.
    /// </summary>
    Task<bool> IsReadyAsync(CancellationToken cancellationToken = default);
}
/// <summary>
/// Ensures required Surreal schema objects exist.
/// </summary>
public interface ICBDDCSurrealSchemaInitializer
{
    /// <summary>
    /// Creates required tables/indexes/checkpoint schema for CBDDC stores.
    /// </summary>
    Task EnsureInitializedAsync(CancellationToken cancellationToken = default);
}

/// <summary>
/// Represents durable CDC progress for a logical consumer.
/// </summary>
public sealed class SurrealCdcCheckpoint
{
    /// <summary>
    /// Gets or sets the logical consumer identifier.
    /// </summary>
    public string ConsumerId { get; set; } = "";

    /// <summary>
    /// Gets or sets the last processed hybrid logical timestamp.
    /// </summary>
    public HlcTimestamp Timestamp { get; set; }

    /// <summary>
    /// Gets or sets the last processed hash in the local chain.
    /// </summary>
    public string LastHash { get; set; } = "";

    /// <summary>
    /// Gets or sets the UTC instant when the checkpoint was updated.
    /// </summary>
    public DateTimeOffset UpdatedUtc { get; set; }

    /// <summary>
    /// Gets or sets the optional changefeed versionstamp cursor associated with this checkpoint.
    /// </summary>
    public long? VersionstampCursor { get; set; }
}
/// <summary>
/// Defines persistence operations for local CDC checkpoint progress.
/// </summary>
public interface ISurrealCdcCheckpointPersistence
{
    /// <summary>
    /// Reads the checkpoint for a consumer.
    /// </summary>
    /// <param name="consumerId">Optional consumer id. Defaults to the configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <returns>The checkpoint if found; otherwise <c>null</c>.</returns>
    Task<SurrealCdcCheckpoint?> GetCheckpointAsync(
        string? consumerId = null,
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Upserts checkpoint progress for a consumer.
    /// </summary>
    /// <param name="timestamp">The last processed timestamp.</param>
    /// <param name="lastHash">The last processed hash.</param>
    /// <param name="consumerId">Optional consumer id. Defaults to the configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <param name="versionstampCursor">
    /// Optional changefeed versionstamp cursor. Declared after the token so the
    /// parameter could be appended without breaking existing positional callers.
    /// </param>
    Task UpsertCheckpointAsync(
        HlcTimestamp timestamp,
        string lastHash,
        string? consumerId = null,
        CancellationToken cancellationToken = default,
        long? versionstampCursor = null);

    /// <summary>
    /// Advances checkpoint progress from an oplog entry.
    /// </summary>
    /// <param name="entry">The oplog entry that was processed.</param>
    /// <param name="consumerId">Optional consumer id. Defaults to the configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task AdvanceCheckpointAsync(
        OplogEntry entry,
        string? consumerId = null,
        CancellationToken cancellationToken = default);
}

/// <summary>
/// Defines lifecycle controls for the durable Surreal CDC polling worker.
/// </summary>
public interface ISurrealCdcWorkerLifecycle
{
    /// <summary>
    /// Gets a value indicating whether the CDC worker is currently running.
    /// </summary>
    bool IsCdcWorkerRunning { get; }

    /// <summary>
    /// Starts the CDC worker.
    /// </summary>
    Task StartCdcWorkerAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Executes one CDC polling pass across all watched collections.
    /// </summary>
    Task PollCdcOnceAsync(CancellationToken cancellationToken = default);

    /// <summary>
    /// Stops the CDC worker.
    /// </summary>
    Task StopCdcWorkerAsync(CancellationToken cancellationToken = default);
}
/// <summary>
/// Surreal-backed persistence for CDC checkpoint progress. Checkpoint rows are stored
/// under a deterministic record id derived from the consumer id.
/// </summary>
public sealed class SurrealCdcCheckpointPersistence : ISurrealCdcCheckpointPersistence
{
    private readonly bool _enabled;
    private readonly string _checkpointTable;
    private readonly string _defaultConsumerId;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    /// <summary>
    /// Initializes a new instance of the <see cref="SurrealCdcCheckpointPersistence"/> class.
    /// </summary>
    /// <param name="surrealEmbeddedClient">The embedded Surreal client abstraction.</param>
    /// <param name="schemaInitializer">The Surreal schema initializer.</param>
    /// <param name="options">Embedded Surreal options supplying the CDC settings.</param>
    /// <exception cref="ArgumentNullException">Any argument is null.</exception>
    /// <exception cref="ArgumentException">The checkpoint table or consumer id is blank.</exception>
    public SurrealCdcCheckpointPersistence(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        CBDDCSurrealEmbeddedOptions options)
    {
        ArgumentNullException.ThrowIfNull(surrealEmbeddedClient);
        ArgumentNullException.ThrowIfNull(schemaInitializer);
        ArgumentNullException.ThrowIfNull(options);

        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer;
        _enabled = options.Cdc.Enabled;
        _checkpointTable = options.Cdc.CheckpointTable;
        _defaultConsumerId = options.Cdc.ConsumerId;

        if (string.IsNullOrWhiteSpace(_checkpointTable))
            throw new ArgumentException("CDC checkpoint table is required.", nameof(options));

        if (string.IsNullOrWhiteSpace(_defaultConsumerId))
            throw new ArgumentException("CDC consumer id is required.", nameof(options));
    }

    /// <inheritdoc />
    public async Task<SurrealCdcCheckpoint?> GetCheckpointAsync(
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        if (!_enabled) return null;

        string resolved = ResolveConsumerId(consumerId);
        var record = await FindByConsumerIdAsync(resolved, cancellationToken);
        return record?.ToDomain();
    }

    /// <inheritdoc />
    public async Task UpsertCheckpointAsync(
        HlcTimestamp timestamp,
        string lastHash,
        string? consumerId = null,
        CancellationToken cancellationToken = default,
        long? versionstampCursor = null)
    {
        if (!_enabled) return;

        string resolved = ResolveConsumerId(consumerId);
        await EnsureReadyAsync(cancellationToken);

        // When no cursor is supplied, carry forward the cursor already stored for this
        // consumer so an out-of-band upsert does not discard changefeed progress.
        long? cursor = versionstampCursor;
        if (!cursor.HasValue)
        {
            var previous = await FindByConsumerIdAsync(resolved, cancellationToken, ensureInitialized: false);
            cursor = previous?.VersionstampCursor;
        }

        RecordId recordId = RecordId.From(_checkpointTable, ComputeConsumerKey(resolved));
        var record = new SurrealCdcCheckpointRecord
        {
            ConsumerId = resolved,
            TimestampPhysicalTime = timestamp.PhysicalTime,
            TimestampLogicalCounter = timestamp.LogicalCounter,
            TimestampNodeId = timestamp.NodeId,
            LastHash = lastHash ?? string.Empty,
            UpdatedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(),
            VersionstampCursor = cursor
        };

        await _surrealClient.Upsert(recordId, record, cancellationToken);
    }

    /// <inheritdoc />
    public Task AdvanceCheckpointAsync(
        OplogEntry entry,
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(entry);
        return UpsertCheckpointAsync(entry.Timestamp, entry.Hash, consumerId, cancellationToken);
    }

    // Falls back to the configured default consumer id when none is supplied.
    private string ResolveConsumerId(string? consumerId)
    {
        string resolved = string.IsNullOrWhiteSpace(consumerId) ? _defaultConsumerId : consumerId;
        if (string.IsNullOrWhiteSpace(resolved))
            throw new ArgumentException("CDC consumer id is required.", nameof(consumerId));

        return resolved;
    }

    // Schema creation is idempotent; every read/write path funnels through here first.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    private async Task<SurrealCdcCheckpointRecord?> FindByConsumerIdAsync(
        string consumerId,
        CancellationToken cancellationToken,
        bool ensureInitialized = true)
    {
        if (ensureInitialized) await EnsureReadyAsync(cancellationToken);

        // Fast path: deterministic record id derived from the consumer id.
        RecordId deterministicId = RecordId.From(_checkpointTable, ComputeConsumerKey(consumerId));
        var direct = await _surrealClient.Select<SurrealCdcCheckpointRecord>(deterministicId, cancellationToken);
        if (direct != null &&
            string.Equals(direct.ConsumerId, consumerId, StringComparison.Ordinal))
            return direct;

        // Fallback: scan the table in case a row was written under a different record id.
        var rows = await _surrealClient.Select<SurrealCdcCheckpointRecord>(_checkpointTable, cancellationToken);
        return rows?.FirstOrDefault(r =>
            string.Equals(r.ConsumerId, consumerId, StringComparison.Ordinal));
    }

    // SHA-256 hex of the consumer id yields a stable, identifier-safe record key.
    private static string ComputeConsumerKey(string consumerId)
    {
        byte[] input = Encoding.UTF8.GetBytes(consumerId);
        return Convert.ToHexString(SHA256.HashData(input)).ToLowerInvariant();
    }
}

/// <summary>
/// Surreal row shape for a persisted CDC checkpoint.
/// </summary>
internal sealed class SurrealCdcCheckpointRecord : Record
{
    [JsonPropertyName("consumerId")]
    public string ConsumerId { get; set; } = "";

    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }

    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }

    [JsonPropertyName("timestampNodeId")]
    public string TimestampNodeId { get; set; } = "";

    [JsonPropertyName("lastHash")]
    public string LastHash { get; set; } = "";

    [JsonPropertyName("updatedUtcMs")]
    public long UpdatedUtcMs { get; set; }

    [JsonPropertyName("versionstampCursor")]
    public long? VersionstampCursor { get; set; }
}

/// <summary>
/// Mapping from the Surreal row shape back to the domain checkpoint.
/// </summary>
internal static class SurrealCdcCheckpointRecordMappers
{
    public static SurrealCdcCheckpoint ToDomain(this SurrealCdcCheckpointRecord record)
    {
        return new SurrealCdcCheckpoint
        {
            ConsumerId = record.ConsumerId,
            Timestamp = new HlcTimestamp(
                record.TimestampPhysicalTime,
                record.TimestampLogicalCounter,
                record.TimestampNodeId),
            LastHash = record.LastHash,
            UpdatedUtc = DateTimeOffset.FromUnixTimeMilliseconds(record.UpdatedUtcMs),
            VersionstampCursor = record.VersionstampCursor
        };
    }
}
/// <summary>
/// Configuration for the Surreal SHOW CHANGES polling worker.
/// </summary>
public sealed class SurrealCdcPollingOptions
{
    /// <summary>
    /// Gets or sets a value indicating whether polling is enabled. Defaults to true.
    /// </summary>
    public bool Enabled { get; set; } = true;

    /// <summary>
    /// Gets or sets the polling interval. Defaults to 250 ms.
    /// </summary>
    public TimeSpan PollInterval { get; set; } = TimeSpan.FromMilliseconds(250);

    /// <summary>
    /// Gets or sets the maximum number of changefeed rows fetched per poll. Defaults to 100.
    /// </summary>
    public int BatchSize { get; set; } = 100;

    /// <summary>
    /// Gets or sets a value indicating whether LIVE SELECT wake-ups are enabled. Defaults to true.
    /// </summary>
    public bool EnableLiveSelectAccelerator { get; set; } = true;

    /// <summary>
    /// Gets or sets the delay used before re-subscribing a failed LIVE SELECT stream.
    /// Defaults to two seconds.
    /// </summary>
    public TimeSpan LiveSelectReconnectDelay { get; set; } = TimeSpan.FromSeconds(2);
}
/// <summary>
/// Surreal-backed store for per-document replication metadata (latest HLC timestamp
/// and tombstone flag), keyed by (collection, key).
/// </summary>
/// <remarks>
/// NOTE(review): generic type arguments and the record type name were reconstructed
/// from usage in this file; confirm against the project's store record definitions.
/// </remarks>
public class SurrealDocumentMetadataStore : DocumentMetadataStore
{
    private readonly ILogger _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    /// <summary>
    /// Initializes a new instance of the <see cref="SurrealDocumentMetadataStore"/> class.
    /// </summary>
    /// <param name="surrealEmbeddedClient">The embedded Surreal client abstraction.</param>
    /// <param name="schemaInitializer">Ensures the metadata table and indexes exist.</param>
    /// <param name="logger">Optional logger; a null logger is used when omitted.</param>
    public SurrealDocumentMetadataStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger? logger = null)
    {
        ArgumentNullException.ThrowIfNull(surrealEmbeddedClient);
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger.Instance;
    }

    /// <inheritdoc />
    public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
        CancellationToken cancellationToken = default)
    {
        var record = await FindByCollectionKeyAsync(collection, key, cancellationToken);
        return record?.ToDomain();
    }

    /// <inheritdoc />
    public override async Task<IReadOnlyList<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
        CancellationToken cancellationToken = default)
    {
        // Full-table scan filtered in memory; acceptable for embedded-scale data sets.
        var rows = await SelectAllAsync(cancellationToken);
        return rows
            .Where(r => string.Equals(r.Collection, collection, StringComparison.Ordinal))
            .Select(r => r.ToDomain())
            .ToList();
    }

    /// <inheritdoc />
    public override async Task UpsertMetadataAsync(DocumentMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);

        // Reuse the stored record id when present; otherwise derive the deterministic one.
        var current = await FindByCollectionKeyAsync(metadata.Collection, metadata.Key, cancellationToken);
        RecordId recordId = current?.Id ?? SurrealStoreRecordIds.DocumentMetadata(metadata.Collection, metadata.Key);

        await _surrealClient.Upsert(recordId, metadata.ToSurrealRecord(), cancellationToken);
    }

    /// <inheritdoc />
    public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
        CancellationToken cancellationToken = default)
    {
        // Sequential upserts keep per-record id resolution simple.
        foreach (var metadata in metadatas)
            await UpsertMetadataAsync(metadata, cancellationToken);
    }

    /// <inheritdoc />
    public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
        CancellationToken cancellationToken = default)
    {
        // A tombstone is just metadata with the deleted flag set.
        var tombstone = new DocumentMetadata(collection, key, timestamp, true);
        await UpsertMetadataAsync(tombstone, cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<IReadOnlyList<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var rows = await SelectAllAsync(cancellationToken);
        HashSet<string>? collectionFilter = collections != null ? new HashSet<string>(collections) : null;

        // Strictly after `since` by (physicalTime, logicalCounter).
        // NOTE(review): the node id is not used as a tiebreak here, so entries sharing
        // since's exact (physical, counter) pair are excluded — confirm this is intended.
        return rows
            .Where(r =>
                (r.HlcPhysicalTime > since.PhysicalTime ||
                 (r.HlcPhysicalTime == since.PhysicalTime && r.HlcLogicalCounter > since.LogicalCounter)) &&
                (collectionFilter == null || collectionFilter.Contains(r.Collection)))
            .OrderBy(r => r.HlcPhysicalTime)
            .ThenBy(r => r.HlcLogicalCounter)
            .Select(r => r.ToDomain())
            .ToList();
    }

    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.DocumentMetadataTable, cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<IReadOnlyList<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var rows = await SelectAllAsync(cancellationToken);
        return rows.Select(r => r.ToDomain()).ToList();
    }

    /// <inheritdoc />
    public override async Task ImportAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items) await UpsertMetadataAsync(item, cancellationToken);
    }

    /// <inheritdoc />
    public override async Task MergeAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var current = await FindByCollectionKeyAsync(item.Collection, item.Key, cancellationToken);

            if (current == null)
            {
                await UpsertMetadataAsync(item, cancellationToken);
                continue;
            }

            // Last-writer-wins: only replace when the incoming timestamp is strictly newer.
            var currentTimestamp = new HlcTimestamp(current.HlcPhysicalTime, current.HlcLogicalCounter, current.HlcNodeId);
            if (item.UpdatedAt.CompareTo(currentTimestamp) <= 0) continue;

            RecordId recordId = current.Id ?? SurrealStoreRecordIds.DocumentMetadata(item.Collection, item.Key);
            await EnsureReadyAsync(cancellationToken);
            await _surrealClient.Upsert(recordId, item.ToSurrealRecord(), cancellationToken);
        }
    }

    // Schema creation is idempotent; every read/write path funnels through here first.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    private async Task<List<SurrealDocumentMetadataRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient.Select<SurrealDocumentMetadataRecord>(
            CBDDCSurrealSchemaNames.DocumentMetadataTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    private async Task<SurrealDocumentMetadataRecord?> FindByCollectionKeyAsync(string collection, string key,
        CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);

        // Fast path: deterministic record id derived from (collection, key).
        RecordId deterministicId = SurrealStoreRecordIds.DocumentMetadata(collection, key);
        var direct = await _surrealClient.Select<SurrealDocumentMetadataRecord>(deterministicId, cancellationToken);
        if (direct != null &&
            string.Equals(direct.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(direct.Key, key, StringComparison.Ordinal))
            return direct;

        // Fallback: scan the table in case a row was written under a different record id.
        var rows = await SelectAllAsync(cancellationToken);
        return rows.FirstOrDefault(r =>
            string.Equals(r.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(r.Key, key, StringComparison.Ordinal));
    }
}
Microsoft.Extensions.Logging.Abstractions; +using SurrealDb.Net; +using SurrealDb.Net.Models.LiveQuery; +using SurrealDb.Net.Models; +using ZB.MOM.WW.CBDDC.Core; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Storage; +using ZB.MOM.WW.CBDDC.Core.Sync; + +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Abstract base class for Surreal-backed document stores. +/// Handles local oplog/document-metadata persistence and remote-sync suppression. +/// +/// The application context type used by the concrete store. +public abstract class SurrealDocumentStore : IDocumentStore, ISurrealCdcWorkerLifecycle, IDisposable + where TContext : class +{ + private static readonly Regex SurrealIdentifierRegex = new("^[A-Za-z_][A-Za-z0-9_]*$", RegexOptions.Compiled); + private readonly List _cdcWatchers = new(); + private readonly SurrealCdcPollingOptions _cdcPollingOptions; + private readonly SemaphoreSlim _cdcWorkerLifecycleGate = new(1, 1); + private readonly SemaphoreSlim _liveSelectSignal = new(0, 1); + private readonly ISurrealCdcCheckpointPersistence? _checkpointPersistence; + private readonly object _clockLock = new(); + private readonly HashSet _registeredCollections = new(StringComparer.Ordinal); + + /// + /// Semaphore used to suppress CDC-triggered oplog entry creation during remote sync. + /// + private readonly SemaphoreSlim _remoteSyncGuard = new(1, 1); + + private readonly ConcurrentDictionary _suppressedCdcEvents = new(StringComparer.Ordinal); + private readonly ConcurrentDictionary _watchedCollections = new( + StringComparer.Ordinal); + private CancellationTokenSource? _cdcWorkerCts; + private Task? _cdcWorkerTask; + private CancellationTokenSource? 
_liveSelectCts; + private readonly List _liveSelectTasks = new(); + protected readonly IPeerNodeConfigurationProvider _configProvider; + protected readonly IConflictResolver _conflictResolver; + protected readonly TContext _context; + protected readonly ILogger> _logger; + protected readonly ICBDDCSurrealSchemaInitializer _schemaInitializer; + protected readonly ISurrealDbClient _surrealClient; + protected readonly IVectorClockService _vectorClock; + + // HLC state for local change timestamp generation. + private int _logicalCounter; + private long _lastPhysicalTime; + + /// + /// Initializes a new instance of the class. + /// + protected SurrealDocumentStore( + TContext context, + ICBDDCSurrealEmbeddedClient surrealEmbeddedClient, + ICBDDCSurrealSchemaInitializer schemaInitializer, + IPeerNodeConfigurationProvider configProvider, + IVectorClockService vectorClockService, + IConflictResolver? conflictResolver = null, + ISurrealCdcCheckpointPersistence? checkpointPersistence = null, + SurrealCdcPollingOptions? cdcPollingOptions = null, + ILogger? logger = null) + { + _context = context ?? throw new ArgumentNullException(nameof(context)); + _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient)); + _surrealClient = surrealEmbeddedClient.Client; + _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer)); + _configProvider = configProvider ?? throw new ArgumentNullException(nameof(configProvider)); + _vectorClock = vectorClockService ?? throw new ArgumentNullException(nameof(vectorClockService)); + _conflictResolver = conflictResolver ?? 
new LastWriteWinsConflictResolver(); + _checkpointPersistence = checkpointPersistence; + _cdcPollingOptions = NormalizePollingOptions(cdcPollingOptions); + _logger = CreateTypedLogger(logger); + + _lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + _logicalCounter = 0; + } + + /// + /// Releases managed resources used by this document store. + /// + public virtual void Dispose() + { + try + { + StopCdcWorkerAsync(CancellationToken.None).GetAwaiter().GetResult(); + } + catch + { + } + + foreach (var watcher in _cdcWatchers) + try + { + watcher.Dispose(); + } + catch + { + } + + _cdcWatchers.Clear(); + _cdcWorkerCts?.Dispose(); + _liveSelectCts?.Dispose(); + _liveSelectSignal.Dispose(); + _cdcWorkerLifecycleGate.Dispose(); + _remoteSyncGuard.Dispose(); + } + + private static ILogger> CreateTypedLogger(ILogger? logger) + { + if (logger is null) return NullLogger>.Instance; + if (logger is ILogger> typedLogger) return typedLogger; + return new ForwardingLogger(logger); + } + + private sealed class ForwardingLogger : ILogger> + { + private readonly ILogger _inner; + + public ForwardingLogger(ILogger inner) + { + _inner = inner; + } + + public IDisposable? BeginScope(TState state) where TState : notnull + { + return _inner.BeginScope(state); + } + + public bool IsEnabled(LogLevel logLevel) + { + return _inner.IsEnabled(logLevel); + } + + public void Log( + LogLevel logLevel, + EventId eventId, + TState state, + Exception? 
exception, + Func formatter) + { + _inner.Log(logLevel, eventId, state, exception, formatter); + } + } + + #region CDC Registration + + private static string BuildSuppressionKey(string collection, string key, OperationType operationType) + { + return $"{collection}|{key}|{(int)operationType}"; + } + + private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType) + { + string suppressionKey = BuildSuppressionKey(collection, key, operationType); + _suppressedCdcEvents.AddOrUpdate(suppressionKey, 1, (_, current) => current + 1); + } + + private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType) + { + string suppressionKey = BuildSuppressionKey(collection, key, operationType); + while (true) + { + if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out int current)) return false; + if (current <= 1) return _suppressedCdcEvents.TryRemove(suppressionKey, out _); + if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) return true; + } + } + + private bool IsCdcPollingWorkerActiveForCollection(string collection) + { + return IsCdcWorkerRunning && + _watchedCollections.ContainsKey(collection); + } + + /// + /// Registers a watchable collection for local change tracking. + /// + /// The entity type emitted by the watch source. + /// Logical collection name used by oplog and metadata records. + /// Watchable change source. + /// Function used to resolve the entity key. 
+ protected void WatchCollection( + string collectionName, + ISurrealWatchableCollection collection, + Func keySelector, + bool subscribeForInMemoryEvents = true) + where TEntity : class + { + if (string.IsNullOrWhiteSpace(collectionName)) + throw new ArgumentException("Collection name is required.", nameof(collectionName)); + ArgumentNullException.ThrowIfNull(collection); + ArgumentNullException.ThrowIfNull(keySelector); + + _registeredCollections.Add(collectionName); + string tableName = ResolveSurrealTableName(collection, collectionName); + _watchedCollections[collectionName] = new WatchedCollectionRegistration(collectionName, tableName); + + if (!subscribeForInMemoryEvents) return; + + var watcher = collection.Subscribe(new CdcObserver(collectionName, keySelector, this)); + _cdcWatchers.Add(watcher); + } + + private sealed class CdcObserver : IObserver> + where TEntity : class + { + private readonly string _collectionName; + private readonly Func _keySelector; + private readonly SurrealDocumentStore _store; + + public CdcObserver( + string collectionName, + Func keySelector, + SurrealDocumentStore store) + { + _collectionName = collectionName; + _keySelector = keySelector; + _store = store; + } + + public void OnNext(SurrealCollectionChange changeEvent) + { + if (_store.IsCdcPollingWorkerActiveForCollection(_collectionName)) return; + + var operationType = changeEvent.OperationType == OperationType.Delete + ? OperationType.Delete + : OperationType.Put; + + string entityId = changeEvent.DocumentId ?? 
""; + if (operationType == OperationType.Put && changeEvent.Entity != null) + { + string selectedKey = _keySelector(changeEvent.Entity); + if (!string.IsNullOrWhiteSpace(selectedKey)) entityId = selectedKey; + } + + if (operationType == OperationType.Delete && string.IsNullOrWhiteSpace(entityId)) return; + + if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) return; + if (_store._remoteSyncGuard.CurrentCount == 0) return; + + if (operationType == OperationType.Delete) + { + _store.OnLocalChangeDetectedAsync(_collectionName, entityId, OperationType.Delete, null) + .GetAwaiter().GetResult(); + return; + } + + if (changeEvent.Entity == null) return; + var content = JsonSerializer.SerializeToElement(changeEvent.Entity); + string key = _keySelector(changeEvent.Entity); + if (string.IsNullOrWhiteSpace(key)) key = entityId; + if (string.IsNullOrWhiteSpace(key)) return; + + _store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content) + .GetAwaiter().GetResult(); + } + + public void OnError(Exception error) + { + } + + public void OnCompleted() + { + } + } + + private static string ResolveSurrealTableName( + ISurrealWatchableCollection collection, + string fallbackCollectionName) + where TEntity : class + { + Type collectionType = collection.GetType(); + + const BindingFlags flags = BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic; + foreach (string memberName in new[] { "TableName", "_tableName", "tableName" }) + { + PropertyInfo? property = collectionType.GetProperty(memberName, flags); + if (property?.CanRead == true && + property.GetValue(collection) is string propertyValue && + !string.IsNullOrWhiteSpace(propertyValue)) + return propertyValue; + + FieldInfo? 
field = collectionType.GetField(memberName, flags); + if (field?.GetValue(collection) is string fieldValue && + !string.IsNullOrWhiteSpace(fieldValue)) + return fieldValue; + } + + return fallbackCollectionName; + } + + private static SurrealCdcPollingOptions NormalizePollingOptions(SurrealCdcPollingOptions? options) + { + TimeSpan interval = options?.PollInterval ?? TimeSpan.FromMilliseconds(250); + if (interval <= TimeSpan.Zero) interval = TimeSpan.FromMilliseconds(250); + + int batchSize = options?.BatchSize ?? 100; + if (batchSize <= 0) batchSize = 100; + + TimeSpan liveReconnectDelay = options?.LiveSelectReconnectDelay ?? TimeSpan.FromSeconds(2); + if (liveReconnectDelay <= TimeSpan.Zero) liveReconnectDelay = TimeSpan.FromSeconds(2); + + return new SurrealCdcPollingOptions + { + Enabled = options?.Enabled ?? true, + PollInterval = interval, + BatchSize = batchSize, + EnableLiveSelectAccelerator = options?.EnableLiveSelectAccelerator ?? true, + LiveSelectReconnectDelay = liveReconnectDelay + }; + } + + private readonly record struct WatchedCollectionRegistration( + string CollectionName, + string TableName); + + protected readonly record struct PendingCursorCheckpoint( + string TableName, + ulong Cursor); + + #endregion + + #region CDC Worker Lifecycle + + /// + public bool IsCdcWorkerRunning => + _cdcWorkerTask != null && + !_cdcWorkerTask.IsCompleted; + + /// + public async Task StartCdcWorkerAsync(CancellationToken cancellationToken = default) + { + if (!_cdcPollingOptions.Enabled) + { + _logger.LogDebug("Surreal CDC worker start skipped because polling is disabled."); + return; + } + + if (_checkpointPersistence == null) + { + _logger.LogDebug("Surreal CDC worker start skipped because checkpoint persistence is not configured."); + return; + } + + await _cdcWorkerLifecycleGate.WaitAsync(cancellationToken); + try + { + cancellationToken.ThrowIfCancellationRequested(); + if (IsCdcWorkerRunning) return; + + await EnsureReadyAsync(cancellationToken); + 
StartLiveSelectAcceleratorsUnsafe(); + + _cdcWorkerCts = new CancellationTokenSource(); + _cdcWorkerTask = Task.Run(() => RunCdcWorkerAsync(_cdcWorkerCts.Token), CancellationToken.None); + _logger.LogInformation( + "Started Surreal CDC worker with interval {IntervalMs} ms, batch size {BatchSize}, live accelerator {LiveAccelerator}.", + _cdcPollingOptions.PollInterval.TotalMilliseconds, + _cdcPollingOptions.BatchSize, + _cdcPollingOptions.EnableLiveSelectAccelerator); + } + finally + { + _cdcWorkerLifecycleGate.Release(); + } + } + + /// + public async Task PollCdcOnceAsync(CancellationToken cancellationToken = default) + { + if (!_cdcPollingOptions.Enabled) return; + if (_checkpointPersistence == null) return; + if (_watchedCollections.IsEmpty) return; + + await EnsureReadyAsync(cancellationToken); + await PollWatchedCollectionsOnceAsync(cancellationToken); + } + + /// + public async Task StopCdcWorkerAsync(CancellationToken cancellationToken = default) + { + Task? workerTask; + CancellationTokenSource? workerCts; + Task[] liveSelectTasks; + CancellationTokenSource? 
liveSelectCts; + + await _cdcWorkerLifecycleGate.WaitAsync(cancellationToken); + try + { + workerTask = _cdcWorkerTask; + workerCts = _cdcWorkerCts; + _cdcWorkerTask = null; + _cdcWorkerCts = null; + + liveSelectTasks = _liveSelectTasks.ToArray(); + _liveSelectTasks.Clear(); + liveSelectCts = _liveSelectCts; + _liveSelectCts = null; + } + finally + { + _cdcWorkerLifecycleGate.Release(); + } + + if (workerTask == null) + { + workerCts?.Dispose(); + if (liveSelectTasks.Length == 0) + { + liveSelectCts?.Dispose(); + return; + } + } + + try + { + workerCts?.Cancel(); + liveSelectCts?.Cancel(); + + if (workerTask != null) await workerTask.WaitAsync(cancellationToken); + if (liveSelectTasks.Length > 0) + { + Task waitAll = Task.WhenAll(liveSelectTasks); + try + { + await waitAll.WaitAsync(cancellationToken); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + } + catch + { + } + } + } + catch (OperationCanceledException) when ((workerTask?.IsCanceled ?? 
false) || cancellationToken.IsCancellationRequested) + { + } + finally + { + workerCts?.Dispose(); + liveSelectCts?.Dispose(); + } + } + + private async Task RunCdcWorkerAsync(CancellationToken cancellationToken) + { + while (!cancellationToken.IsCancellationRequested) + try + { + await PollCdcOnceAsync(cancellationToken); + + if (!_cdcPollingOptions.EnableLiveSelectAccelerator || _liveSelectCts == null || _liveSelectTasks.Count == 0) + { + await Task.Delay(_cdcPollingOptions.PollInterval, cancellationToken); + continue; + } + + Task delayTask = Task.Delay(_cdcPollingOptions.PollInterval, cancellationToken); + Task signalTask = _liveSelectSignal.WaitAsync(cancellationToken); + await Task.WhenAny(delayTask, signalTask); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + break; + } + catch (Exception exception) + { + _logger.LogError(exception, "Surreal CDC worker polling iteration failed."); + try + { + await Task.Delay(_cdcPollingOptions.PollInterval, cancellationToken); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + break; + } + } + + _logger.LogDebug("Stopped Surreal CDC worker."); + } + + private void StartLiveSelectAcceleratorsUnsafe() + { + if (!_cdcPollingOptions.EnableLiveSelectAccelerator) return; + if (_watchedCollections.IsEmpty) return; + if (_liveSelectCts != null) return; + + _liveSelectCts = new CancellationTokenSource(); + _liveSelectTasks.Clear(); + + foreach (WatchedCollectionRegistration watched in _watchedCollections.Values + .OrderBy(v => v.CollectionName, StringComparer.Ordinal)) + _liveSelectTasks.Add(Task.Run( + () => RunLiveSelectAcceleratorAsync(watched, _liveSelectCts.Token), + CancellationToken.None)); + } + + private async Task RunLiveSelectAcceleratorAsync( + WatchedCollectionRegistration watched, + CancellationToken cancellationToken) + { + while (!cancellationToken.IsCancellationRequested) + { + try + { + await using var liveQuery = + await 
_surrealClient.LiveTable(watched.TableName, false, cancellationToken); + + await foreach (SurrealDbLiveQueryResponse response in liveQuery.GetResults(cancellationToken)) + { + if (cancellationToken.IsCancellationRequested) break; + if (response is SurrealDbLiveQueryOpenResponse) continue; + + if (response is SurrealDbLiveQueryCloseResponse closeResponse) + { + _logger.LogDebug( + "LIVE SELECT stream closed for table {Table} with reason {Reason}.", + watched.TableName, + closeResponse.Reason); + break; + } + + SignalLiveSelectWake(); + } + } + catch (NotSupportedException) + { + _logger.LogDebug( + "LIVE SELECT accelerator is not supported for table {Table}; fallback remains polling-only.", + watched.TableName); + return; + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + break; + } + catch (Exception exception) + { + _logger.LogDebug( + exception, + "LIVE SELECT accelerator loop failed for table {Table}; retrying.", + watched.TableName); + } + + try + { + await Task.Delay(_cdcPollingOptions.LiveSelectReconnectDelay, cancellationToken); + } + catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) + { + break; + } + } + } + + private void SignalLiveSelectWake() + { + if (_liveSelectSignal.CurrentCount > 0) return; + try + { + _liveSelectSignal.Release(); + } + catch (SemaphoreFullException) + { + } + } + + private async Task PollWatchedCollectionsOnceAsync(CancellationToken cancellationToken) + { + if (_watchedCollections.IsEmpty) return; + + foreach (WatchedCollectionRegistration watched in _watchedCollections.Values + .OrderBy(v => v.CollectionName, StringComparer.Ordinal)) + await PollCollectionChangesAsync(watched, cancellationToken); + } + + private async Task PollCollectionChangesAsync( + WatchedCollectionRegistration watched, + CancellationToken cancellationToken) + { + if (!SurrealIdentifierRegex.IsMatch(watched.TableName)) + { + _logger.LogDebug( + "Skipping CDC polling for collection 
{Collection} because table name '{Table}' is not a valid Surreal identifier.", + watched.CollectionName, + watched.TableName); + return; + } + + ulong cursor = await ReadCursorCheckpointAsync(watched.TableName, cancellationToken); + + while (!cancellationToken.IsCancellationRequested) + { + IReadOnlyList rows; + try + { + rows = await QueryChangeRowsAsync(watched.TableName, cursor, _cdcPollingOptions.BatchSize, + cancellationToken); + } + catch (Exception exception) + { + if (cursor > 0 && IsLikelyChangefeedRetentionBoundary(exception)) + _logger.LogWarning( + exception, + "SHOW CHANGES query failed for table {Table} at cursor {Cursor}. " + + "The cursor may be outside configured changefeed retention; checkpoint remains unchanged until replay is re-established.", + watched.TableName, + cursor); + else + _logger.LogDebug( + exception, + "SHOW CHANGES query failed for table {Table}.", + watched.TableName); + return; + } + + if (rows.Count == 0) return; + + foreach (SurrealPolledChangeRow row in rows) + { + ulong nextCursor = BuildNextCursor(row.Versionstamp); + if (row.Changes.Count == 0) + { + await WriteCursorCheckpointAsync(watched.TableName, nextCursor, cancellationToken); + cursor = nextCursor; + continue; + } + + for (var i = 0; i < row.Changes.Count; i++) + { + SurrealPolledChange change = row.Changes[i]; + PendingCursorCheckpoint? pendingCursorCheckpoint = i == row.Changes.Count - 1 + ? 
new PendingCursorCheckpoint(watched.TableName, nextCursor) + : null; + + await OnLocalChangeDetectedAsync( + watched.CollectionName, + change.Key, + change.OperationType, + change.Content, + pendingCursorCheckpoint, + cancellationToken); + } + + cursor = nextCursor; + } + + if (rows.Count < _cdcPollingOptions.BatchSize) return; + } + } + + private async Task> QueryChangeRowsAsync( + string tableName, + ulong cursor, + int batchSize, + CancellationToken cancellationToken) + { + string query = $"SHOW CHANGES FOR TABLE {tableName} SINCE {cursor} LIMIT {batchSize};"; + var response = await _surrealClient.RawQuery(query, cancellationToken: cancellationToken); + response.EnsureAllOks(); + + List rows; + try + { + rows = response.GetValues(0).ToList(); + } + catch + { + return []; + } + + return SurrealShowChangesCborDecoder.DecodeRows(rows, tableName); + } + + private async Task ReadCursorCheckpointAsync(string tableName, CancellationToken cancellationToken) + { + if (_checkpointPersistence == null) return 0; + + var checkpoint = await _checkpointPersistence.GetCheckpointAsync( + BuildCursorCheckpointConsumerId(tableName), + cancellationToken); + + if (checkpoint?.VersionstampCursor is > 0) + return (ulong)checkpoint.VersionstampCursor.Value; + + if (checkpoint == null || checkpoint.Timestamp.PhysicalTime < 0) return 0; + return (ulong)checkpoint.Timestamp.PhysicalTime; + } + + private async Task WriteCursorCheckpointAsync( + string tableName, + ulong cursor, + CancellationToken cancellationToken) + { + if (_checkpointPersistence == null) return; + + long encodedCursor = cursor > long.MaxValue + ? 
long.MaxValue + : (long)cursor; + + await _checkpointPersistence.UpsertCheckpointAsync( + new HlcTimestamp(encodedCursor, 0, "surreal-cdc"), + "", + BuildCursorCheckpointConsumerId(tableName), + cancellationToken, + encodedCursor); + } + + private string BuildCursorCheckpointConsumerId(string tableName) + { + string baseConsumerId = "default"; + if (TryGetCheckpointSettings(out _, out string configuredConsumerId)) + baseConsumerId = configuredConsumerId; + + return BuildCursorCheckpointConsumerId(tableName, baseConsumerId); + } + + private static string BuildCursorCheckpointConsumerId(string tableName, string baseConsumerId) + { + return $"{baseConsumerId}:show_changes_cursor:{tableName}"; + } + + private static ulong BuildNextCursor(ulong versionstamp) + { + ulong majorCursor = versionstamp >> 16; + if (majorCursor == 0) majorCursor = versionstamp; + return majorCursor + 1; + } + + private static bool IsLikelyChangefeedRetentionBoundary(Exception exception) + { + string message = exception.ToString(); + if (string.IsNullOrWhiteSpace(message)) return false; + + string normalized = message.ToLowerInvariant(); + return normalized.Contains("retention", StringComparison.Ordinal) || + (normalized.Contains("versionstamp", StringComparison.Ordinal) && + normalized.Contains("outside", StringComparison.Ordinal)) || + (normalized.Contains("change", StringComparison.Ordinal) && + normalized.Contains("feed", StringComparison.Ordinal) && + normalized.Contains("since", StringComparison.Ordinal)) || + (normalized.Contains("history", StringComparison.Ordinal) && + normalized.Contains("change", StringComparison.Ordinal)); + } + + #endregion + + #region Abstract Methods - Implemented by subclass + + protected abstract Task ApplyContentToEntityAsync( + string collection, string key, JsonElement content, CancellationToken cancellationToken); + + protected abstract Task ApplyContentToEntitiesBatchAsync( + IEnumerable<(string Collection, string Key, JsonElement Content)> documents, + 
CancellationToken cancellationToken); + + protected abstract Task GetEntityAsJsonAsync( + string collection, string key, CancellationToken cancellationToken); + + protected abstract Task RemoveEntityAsync( + string collection, string key, CancellationToken cancellationToken); + + protected abstract Task RemoveEntitiesBatchAsync( + IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken); + + protected abstract Task> GetAllEntitiesAsJsonAsync( + string collection, CancellationToken cancellationToken); + + #endregion + + #region IDocumentStore Implementation + + /// + public IEnumerable InterestedCollection => _registeredCollections; + + /// + public async Task GetDocumentAsync( + string collection, + string key, + CancellationToken cancellationToken = default) + { + var content = await GetEntityAsJsonAsync(collection, key, cancellationToken); + if (content == null) return null; + + var timestamp = new HlcTimestamp(0, 0, ""); + return new Document(collection, key, content.Value, timestamp, false); + } + + /// + public async Task> GetDocumentsByCollectionAsync( + string collection, + CancellationToken cancellationToken = default) + { + var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); + var timestamp = new HlcTimestamp(0, 0, ""); + return entities.Select(e => new Document(collection, e.Key, e.Content, timestamp, false)); + } + + /// + public async Task> GetDocumentsAsync( + List<(string Collection, string Key)> documentKeys, + CancellationToken cancellationToken) + { + var documents = new List(); + foreach ((string collection, string key) in documentKeys) + { + var document = await GetDocumentAsync(collection, key, cancellationToken); + if (document != null) documents.Add(document); + } + + return documents; + } + + /// + public async Task PutDocumentAsync(Document document, CancellationToken cancellationToken = default) + { + await _remoteSyncGuard.WaitAsync(cancellationToken); + try + { + await 
PutDocumentInternalAsync(document, cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + + return true; + } + + private async Task PutDocumentInternalAsync(Document document, CancellationToken cancellationToken) + { + RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); + await ApplyContentToEntityAsync(document.Collection, document.Key, document.Content, cancellationToken); + } + + /// + public async Task UpdateBatchDocumentsAsync( + IEnumerable documents, + CancellationToken cancellationToken = default) + { + var documentList = documents.ToList(); + await _remoteSyncGuard.WaitAsync(cancellationToken); + try + { + foreach (var document in documentList) + RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); + + await ApplyContentToEntitiesBatchAsync( + documentList.Select(d => (d.Collection, d.Key, d.Content)), + cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + + return true; + } + + /// + public async Task InsertBatchDocumentsAsync( + IEnumerable documents, + CancellationToken cancellationToken = default) + { + var documentList = documents.ToList(); + await _remoteSyncGuard.WaitAsync(cancellationToken); + try + { + foreach (var document in documentList) + RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); + + await ApplyContentToEntitiesBatchAsync( + documentList.Select(d => (d.Collection, d.Key, d.Content)), + cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + + return true; + } + + /// + public async Task DeleteDocumentAsync( + string collection, + string key, + CancellationToken cancellationToken = default) + { + await _remoteSyncGuard.WaitAsync(cancellationToken); + try + { + await DeleteDocumentInternalAsync(collection, key, cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + + return true; + } + + private async Task DeleteDocumentInternalAsync( + string collection, + string key, + 
CancellationToken cancellationToken) + { + RegisterSuppressedCdcEvent(collection, key, OperationType.Delete); + await RemoveEntityAsync(collection, key, cancellationToken); + } + + /// + public async Task DeleteBatchDocumentsAsync( + IEnumerable documentKeys, + CancellationToken cancellationToken = default) + { + var parsedKeys = new List<(string Collection, string Key)>(); + foreach (string key in documentKeys) + { + string[] parts = key.Split('/'); + if (parts.Length == 2) + parsedKeys.Add((parts[0], parts[1])); + else + _logger.LogWarning("Invalid document key format: {Key}", key); + } + + if (parsedKeys.Count == 0) return true; + + await _remoteSyncGuard.WaitAsync(cancellationToken); + try + { + foreach ((string collection, string key) in parsedKeys) + RegisterSuppressedCdcEvent(collection, key, OperationType.Delete); + + await RemoveEntitiesBatchAsync(parsedKeys, cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + + return true; + } + + /// + public async Task MergeAsync(Document incoming, CancellationToken cancellationToken = default) + { + var existing = await GetDocumentAsync(incoming.Collection, incoming.Key, cancellationToken); + if (existing == null) + { + await PutDocumentInternalAsync(incoming, cancellationToken); + return incoming; + } + + var resolution = _conflictResolver.Resolve(existing, new OplogEntry( + incoming.Collection, + incoming.Key, + OperationType.Put, + incoming.Content, + incoming.UpdatedAt, + "")); + + if (resolution.ShouldApply && resolution.MergedDocument != null) + { + await PutDocumentInternalAsync(resolution.MergedDocument, cancellationToken); + return resolution.MergedDocument; + } + + return existing; + } + + #endregion + + #region ISnapshotable Implementation + + /// + public async Task DropAsync(CancellationToken cancellationToken = default) + { + foreach (string collection in InterestedCollection) + { + var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken); + foreach ((string 
key, var _) in entities) await RemoveEntityAsync(collection, key, cancellationToken); + } + } + + /// + public async Task> ExportAsync(CancellationToken cancellationToken = default) + { + var documents = new List(); + foreach (string collection in InterestedCollection) + { + var collectionDocuments = await GetDocumentsByCollectionAsync(collection, cancellationToken); + documents.AddRange(collectionDocuments); + } + + return documents; + } + + /// + public async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default) + { + var documents = items.ToList(); + await _remoteSyncGuard.WaitAsync(cancellationToken); + try + { + foreach (var document in documents) + RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put); + + await ApplyContentToEntitiesBatchAsync( + documents.Select(d => (d.Collection, d.Key, d.Content)), + cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + } + + /// + public async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) + { + await _remoteSyncGuard.WaitAsync(cancellationToken); + try + { + foreach (var document in items) await MergeAsync(document, cancellationToken); + } + finally + { + _remoteSyncGuard.Release(); + } + } + + #endregion + + #region Oplog Management + + /// + /// Returns true when remote sync is in progress and local CDC must be suppressed. + /// + protected bool IsRemoteSyncInProgress => _remoteSyncGuard.CurrentCount == 0; + + /// + /// Handles a local collection change and records oplog/metadata when not suppressed. + /// + protected async Task OnLocalChangeDetectedAsync( + string collection, + string key, + OperationType operationType, + JsonElement? content, + PendingCursorCheckpoint? 
pendingCursorCheckpoint = null, + CancellationToken cancellationToken = default) + { + if (string.IsNullOrWhiteSpace(collection)) return; + if (string.IsNullOrWhiteSpace(key)) return; + + if (TryConsumeSuppressedCdcEvent(collection, key, operationType)) return; + if (IsRemoteSyncInProgress) return; + await CreateOplogEntryAsync(collection, key, operationType, content, pendingCursorCheckpoint, cancellationToken); + } + + private HlcTimestamp GenerateTimestamp(string nodeId) + { + lock (_clockLock) + { + long now = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); + + if (now > _lastPhysicalTime) + { + _lastPhysicalTime = now; + _logicalCounter = 0; + } + else + { + _logicalCounter++; + } + + return new HlcTimestamp(_lastPhysicalTime, _logicalCounter, nodeId); + } + } + + private async Task CreateOplogEntryAsync( + string collection, + string key, + OperationType operationType, + JsonElement? content, + PendingCursorCheckpoint? pendingCursorCheckpoint, + CancellationToken cancellationToken) + { + await EnsureReadyAsync(cancellationToken); + + var config = await _configProvider.GetConfiguration(); + string nodeId = config.NodeId ?? ""; + + string previousHash = _vectorClock.GetLastHash(nodeId) ?? + await QueryLastHashForNodeAsync(nodeId, cancellationToken) ?? 
+ string.Empty; + + var timestamp = GenerateTimestamp(nodeId); + var oplogEntry = new OplogEntry( + collection, + key, + operationType, + content, + timestamp, + previousHash); + + var metadata = new DocumentMetadata(collection, key, timestamp, operationType == OperationType.Delete); + await PersistOplogAndMetadataAtomicallyAsync(oplogEntry, metadata, pendingCursorCheckpoint, cancellationToken); + + _vectorClock.Update(oplogEntry); + + _logger.LogDebug( + "Created local oplog entry: {Operation} {Collection}/{Key} at {Timestamp} (hash: {Hash})", + operationType, collection, key, timestamp, oplogEntry.Hash); + } + + private async Task PersistOplogAndMetadataAtomicallyAsync( + OplogEntry oplogEntry, + DocumentMetadata metadata, + PendingCursorCheckpoint? pendingCursorCheckpoint, + CancellationToken cancellationToken) + { + var parameters = new Dictionary + { + ["oplogRecordId"] = SurrealStoreRecordIds.Oplog(oplogEntry.Hash), + ["oplogRecord"] = oplogEntry.ToSurrealRecord(), + ["metadataRecordId"] = SurrealStoreRecordIds.DocumentMetadata(metadata.Collection, metadata.Key), + ["metadataRecord"] = metadata.ToSurrealRecord() + }; + + var sqlBuilder = new StringBuilder(); + sqlBuilder.AppendLine("BEGIN TRANSACTION;"); + sqlBuilder.AppendLine("UPSERT $oplogRecordId CONTENT $oplogRecord;"); + sqlBuilder.AppendLine("UPSERT $metadataRecordId CONTENT $metadataRecord;"); + + bool localCheckpointWrittenInTransaction = TryBuildCheckpointTransactionPayload( + oplogEntry, + out RecordId localCheckpointRecordId, + out Dictionary localCheckpointRecord); + if (localCheckpointWrittenInTransaction) + { + parameters["localCheckpointRecordId"] = localCheckpointRecordId; + parameters["localCheckpointRecord"] = localCheckpointRecord; + sqlBuilder.AppendLine("UPSERT $localCheckpointRecordId CONTENT $localCheckpointRecord;"); + } + + bool cursorCheckpointWrittenInTransaction = TryBuildCursorCheckpointTransactionPayload( + pendingCursorCheckpoint, + out RecordId cursorCheckpointRecordId, + out 
Dictionary cursorCheckpointRecord); + if (cursorCheckpointWrittenInTransaction) + { + parameters["cursorCheckpointRecordId"] = cursorCheckpointRecordId; + parameters["cursorCheckpointRecord"] = cursorCheckpointRecord; + sqlBuilder.AppendLine("UPSERT $cursorCheckpointRecordId CONTENT $cursorCheckpointRecord;"); + } + + sqlBuilder.AppendLine("COMMIT TRANSACTION;"); + string sql = sqlBuilder.ToString(); + + var response = await _surrealClient.RawQuery(sql, parameters, cancellationToken); + response.EnsureAllOks(); + + if (!localCheckpointWrittenInTransaction && _checkpointPersistence != null) + await _checkpointPersistence.AdvanceCheckpointAsync(oplogEntry, cancellationToken: cancellationToken); + + if (pendingCursorCheckpoint is not null && !cursorCheckpointWrittenInTransaction) + await WriteCursorCheckpointAsync( + pendingCursorCheckpoint.Value.TableName, + pendingCursorCheckpoint.Value.Cursor, + cancellationToken); + } + + private bool TryBuildCheckpointTransactionPayload( + OplogEntry oplogEntry, + out RecordId checkpointRecordId, + out Dictionary checkpointRecord) + { + checkpointRecordId = RecordId.From(CBDDCSurrealSchemaNames.DocumentMetadataTable, "__unused__"); + checkpointRecord = new Dictionary(); + if (!TryGetCheckpointSettings(out string checkpointTable, out string consumerId)) return false; + + string consumerKey = ComputeConsumerKey(consumerId); + checkpointRecordId = RecordId.From(checkpointTable, consumerKey); + checkpointRecord = new Dictionary + { + ["consumerId"] = consumerId, + ["timestampPhysicalTime"] = oplogEntry.Timestamp.PhysicalTime, + ["timestampLogicalCounter"] = oplogEntry.Timestamp.LogicalCounter, + ["timestampNodeId"] = oplogEntry.Timestamp.NodeId, + ["lastHash"] = oplogEntry.Hash, + ["updatedUtcMs"] = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + }; + + return true; + } + + private bool TryBuildCursorCheckpointTransactionPayload( + PendingCursorCheckpoint? 
pendingCursorCheckpoint, + out RecordId checkpointRecordId, + out Dictionary checkpointRecord) + { + checkpointRecordId = RecordId.From(CBDDCSurrealSchemaNames.DocumentMetadataTable, "__unused__"); + checkpointRecord = new Dictionary(); + if (pendingCursorCheckpoint is null) return false; + if (!TryGetCheckpointSettings(out string checkpointTable, out string consumerId)) return false; + + string cursorConsumerId = BuildCursorCheckpointConsumerId( + pendingCursorCheckpoint.Value.TableName, + consumerId); + + long encodedCursor = pendingCursorCheckpoint.Value.Cursor > long.MaxValue + ? long.MaxValue + : (long)pendingCursorCheckpoint.Value.Cursor; + + string consumerKey = ComputeConsumerKey(cursorConsumerId); + checkpointRecordId = RecordId.From(checkpointTable, consumerKey); + checkpointRecord = new Dictionary + { + ["consumerId"] = cursorConsumerId, + ["timestampPhysicalTime"] = encodedCursor, + ["timestampLogicalCounter"] = 0, + ["timestampNodeId"] = "surreal-cdc", + ["lastHash"] = "", + ["versionstampCursor"] = encodedCursor, + ["updatedUtcMs"] = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + }; + + return true; + } + + private bool TryGetCheckpointSettings(out string checkpointTable, out string consumerId) + { + checkpointTable = string.Empty; + consumerId = string.Empty; + if (_checkpointPersistence == null) return false; + if (!TryGetPrivateField(_checkpointPersistence, "_enabled", out bool enabled) || !enabled) return false; + if (!TryGetPrivateField(_checkpointPersistence, "_checkpointTable", out string? resolvedCheckpointTable) || + string.IsNullOrWhiteSpace(resolvedCheckpointTable)) + return false; + if (!TryGetPrivateField(_checkpointPersistence, "_defaultConsumerId", out string? 
resolvedConsumerId) || + string.IsNullOrWhiteSpace(resolvedConsumerId)) + return false; + if (!SurrealIdentifierRegex.IsMatch(resolvedCheckpointTable)) return false; + + checkpointTable = resolvedCheckpointTable; + consumerId = resolvedConsumerId; + return true; + } + + private static string ComputeConsumerKey(string consumerId) + { + byte[] bytes = Encoding.UTF8.GetBytes(consumerId); + return Convert.ToHexString(SHA256.HashData(bytes)).ToLowerInvariant(); + } + + private static bool TryGetPrivateField(object source, string fieldName, out TValue value) + { + const BindingFlags flags = BindingFlags.Instance | BindingFlags.NonPublic; + FieldInfo? fieldInfo = source.GetType().GetField(fieldName, flags); + if (fieldInfo?.GetValue(source) is TValue typedValue) + { + value = typedValue; + return true; + } + + value = default!; + return false; + } + + private async Task QueryLastHashForNodeAsync(string nodeId, CancellationToken cancellationToken) + { + var all = await _surrealClient.Select( + CBDDCSurrealSchemaNames.OplogEntriesTable, + cancellationToken); + + var latest = all? + .Where(o => string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal)) + .OrderByDescending(o => o.TimestampPhysicalTime) + .ThenByDescending(o => o.TimestampLogicalCounter) + .FirstOrDefault(); + + return latest?.Hash; + } + + private async Task EnsureReadyAsync(CancellationToken cancellationToken) + { + await _schemaInitializer.EnsureInitializedAsync(cancellationToken); + } + + /// + /// Marks the start of remote sync operations and suppresses local CDC loopback. 
+ /// + public IDisposable BeginRemoteSync() + { + _remoteSyncGuard.Wait(); + return new RemoteSyncScope(_remoteSyncGuard); + } + + private sealed class RemoteSyncScope : IDisposable + { + private readonly SemaphoreSlim _guard; + private int _disposed; + + public RemoteSyncScope(SemaphoreSlim guard) + { + _guard = guard; + } + + public void Dispose() + { + if (Interlocked.Exchange(ref _disposed, 1) == 1) return; + _guard.Release(); + } + } + + #endregion +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealDocumentStoreWatch.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealDocumentStoreWatch.cs new file mode 100644 index 0000000..b438f5f --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealDocumentStoreWatch.cs @@ -0,0 +1,144 @@ +using ZB.MOM.WW.CBDDC.Core; + +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +/// +/// Represents a single change notification emitted by a watchable collection. +/// +/// The entity type being observed. +public readonly record struct SurrealCollectionChange( + OperationType OperationType, + string? DocumentId, + TEntity? Entity) + where TEntity : class; + +/// +/// Abstraction for a collection that can publish change notifications to document-store watchers. +/// +/// The entity type being observed. +public interface ISurrealWatchableCollection where TEntity : class +{ + /// + /// Subscribes to collection change notifications. + /// + /// The observer receiving collection changes. + /// A disposable subscription. + IDisposable Subscribe(IObserver> observer); +} + +/// +/// In-memory watchable collection feed used to publish local change events. +/// +/// The entity type being observed. 
+public sealed class SurrealCollectionChangeFeed : ISurrealWatchableCollection, IDisposable + where TEntity : class +{ + private readonly object _observersGate = new(); + private readonly List>> _observers = new(); + private bool _disposed; + + /// + public IDisposable Subscribe(IObserver> observer) + { + ArgumentNullException.ThrowIfNull(observer); + + lock (_observersGate) + { + ThrowIfDisposed(); + _observers.Add(observer); + } + + return new Subscription(this, observer); + } + + /// + /// Publishes a put notification for an entity. + /// + /// The changed entity. + /// Optional explicit document identifier. + public void PublishPut(TEntity entity, string? documentId = null) + { + ArgumentNullException.ThrowIfNull(entity); + Publish(new SurrealCollectionChange(OperationType.Put, documentId, entity)); + } + + /// + /// Publishes a delete notification for an entity key. + /// + /// The document identifier that was removed. + public void PublishDelete(string documentId) + { + if (string.IsNullOrWhiteSpace(documentId)) + throw new ArgumentException("Document id is required.", nameof(documentId)); + + Publish(new SurrealCollectionChange(OperationType.Delete, documentId, null)); + } + + /// + /// Publishes a raw collection change notification. + /// + /// The change payload. 
+ public void Publish(SurrealCollectionChange change) + { + List>> snapshot; + lock (_observersGate) + { + if (_disposed) return; + snapshot = _observers.ToList(); + } + + foreach (var observer in snapshot) + observer.OnNext(change); + } + + /// + public void Dispose() + { + List>> snapshot; + lock (_observersGate) + { + if (_disposed) return; + _disposed = true; + snapshot = _observers.ToList(); + _observers.Clear(); + } + + foreach (var observer in snapshot) + observer.OnCompleted(); + } + + private void Unsubscribe(IObserver> observer) + { + lock (_observersGate) + { + if (_disposed) return; + _observers.Remove(observer); + } + } + + private void ThrowIfDisposed() + { + ObjectDisposedException.ThrowIf(_disposed, this); + } + + private sealed class Subscription : IDisposable + { + private readonly SurrealCollectionChangeFeed _owner; + private readonly IObserver> _observer; + private int _disposed; + + public Subscription( + SurrealCollectionChangeFeed owner, + IObserver> observer) + { + _owner = owner; + _observer = observer; + } + + public void Dispose() + { + if (Interlocked.Exchange(ref _disposed, 1) == 1) return; + _owner.Unsubscribe(_observer); + } + } +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealOplogStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealOplogStore.cs new file mode 100644 index 0000000..e458f58 --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealOplogStore.cs @@ -0,0 +1,272 @@ +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using SurrealDb.Net; +using SurrealDb.Net.Models; +using ZB.MOM.WW.CBDDC.Core; +using ZB.MOM.WW.CBDDC.Core.Storage; +using ZB.MOM.WW.CBDDC.Core.Sync; + +namespace ZB.MOM.WW.CBDDC.Persistence.Surreal; + +public class SurrealOplogStore : OplogStore +{ + private readonly ILogger _logger; + private readonly ICBDDCSurrealSchemaInitializer? _schemaInitializer; + private readonly ISurrealDbClient? 
_surrealClient;
+
+    public SurrealOplogStore(
+        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
+        ICBDDCSurrealSchemaInitializer schemaInitializer,
+        IDocumentStore documentStore,
+        IConflictResolver conflictResolver,
+        IVectorClockService vectorClockService,
+        ISnapshotMetadataStore? snapshotMetadataStore = null,
+        ILogger? logger = null) : base(
+        documentStore,
+        conflictResolver,
+        vectorClockService,
+        snapshotMetadataStore)
+    {
+        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
+        _surrealClient = surrealEmbeddedClient.Client;
+        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
+        _logger = logger ?? NullLogger.Instance;
+
+        _vectorClock.Invalidate();
+        // NOTE(review): InitializeVectorClock blocks on async work via GetAwaiter().GetResult()
+        // from this constructor (sync-over-async) — confirm there is no synchronization-context
+        // deadlock risk and that eager initialization at construction time is intended.
+        InitializeVectorClock();
+    }
+
+    // Returns the entries strictly after startHash up to and including endHash, restricted
+    // to the start entry's node, ordered by (physical time, logical counter).
+    public override async Task> GetChainRangeAsync(string startHash, string endHash,
+        CancellationToken cancellationToken = default)
+    {
+        var startRow = await FindByHashAsync(startHash, cancellationToken);
+        var endRow = await FindByHashAsync(endHash, cancellationToken);
+
+        if (startRow == null || endRow == null) return [];
+
+        string nodeId = startRow.TimestampNodeId;
+        var all = await SelectAllAsync(cancellationToken);
+
+        // Half-open on the start side (> start), closed on the end side (<= end).
+        return all
+            .Where(o => string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal) &&
+                        (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime ||
+                         (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime &&
+                          o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
+                        (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime ||
+                         (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime &&
+                          o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
+            .OrderBy(o => o.TimestampPhysicalTime)
+            .ThenBy(o => o.TimestampLogicalCounter)
+            .Select(o => o.ToDomain())
+            .ToList();
+    }
+
+    public override async Task GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default)
+    {
+        var existing = await FindByHashAsync(hash, cancellationToken);
+        return existing?.ToDomain();
+    }
+
+    // Entries strictly after the given HLC timestamp, optionally filtered by collection names.
+    public override async Task> GetOplogAfterAsync(HlcTimestamp timestamp,
+        IEnumerable? collections = null, CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        HashSet? collectionSet = collections != null ? new HashSet(collections) : null;
+
+        return all
+            .Where(o =>
+                (o.TimestampPhysicalTime > timestamp.PhysicalTime ||
+                 (o.TimestampPhysicalTime == timestamp.PhysicalTime &&
+                  o.TimestampLogicalCounter > timestamp.LogicalCounter)) &&
+                (collectionSet == null || collectionSet.Contains(o.Collection)))
+            .OrderBy(o => o.TimestampPhysicalTime)
+            .ThenBy(o => o.TimestampLogicalCounter)
+            .Select(o => o.ToDomain())
+            .ToList();
+    }
+
+    // Same as GetOplogAfterAsync but additionally restricted to entries authored by nodeId.
+    public override async Task> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
+        IEnumerable? collections = null, CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        HashSet? collectionSet = collections != null ? new HashSet(collections) : null;
+
+        return all
+            .Where(o =>
+                string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal) &&
+                (o.TimestampPhysicalTime > since.PhysicalTime ||
+                 (o.TimestampPhysicalTime == since.PhysicalTime &&
+                  o.TimestampLogicalCounter > since.LogicalCounter)) &&
+                (collectionSet == null || collectionSet.Contains(o.Collection)))
+            .OrderBy(o => o.TimestampPhysicalTime)
+            .ThenBy(o => o.TimestampLogicalCounter)
+            .Select(o => o.ToDomain())
+            .ToList();
+    }
+
+    // Deletes every entry at or before the cutoff (inclusive on the logical counter).
+    public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        var toDelete = all
+            .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime ||
+                        (o.TimestampPhysicalTime == cutoff.PhysicalTime &&
+                         o.TimestampLogicalCounter <= cutoff.LogicalCounter))
+            .ToList();
+
+        // NOTE(review): EnsureReadyAsync is awaited once per row inside the loop even though
+        // SelectAllAsync above already ensured readiness; rows are deleted one at a time.
+        foreach (var row in toDelete)
+        {
+            RecordId recordId = row.Id ?? SurrealStoreRecordIds.Oplog(row.Hash);
+            await EnsureReadyAsync(cancellationToken);
+            await _surrealClient!.Delete(recordId, cancellationToken);
+        }
+    }
+
+    // Deletes the whole oplog table and invalidates the cached vector clock.
+    public override async Task DropAsync(CancellationToken cancellationToken = default)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        await _surrealClient!.Delete(CBDDCSurrealSchemaNames.OplogEntriesTable, cancellationToken);
+        _vectorClock.Invalidate();
+    }
+
+    public override async Task> ExportAsync(CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        return all.Select(o => o.ToDomain()).ToList();
+    }
+
+    // Import overwrites by hash (last write wins); Merge below only inserts missing hashes.
+    public override async Task ImportAsync(IEnumerable items, CancellationToken cancellationToken = default)
+    {
+        foreach (var item in items)
+        {
+            var existing = await FindByHashAsync(item.Hash, cancellationToken);
+            RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.Oplog(item.Hash);
+            await UpsertAsync(item, recordId, cancellationToken);
+        }
+    }
+
+    public override async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default)
+    {
+        foreach (var item in items)
+        {
+            var existing = await FindByHashAsync(item.Hash, cancellationToken);
+            if (existing != null) continue;
+
+            await UpsertAsync(item, SurrealStoreRecordIds.Oplog(item.Hash), cancellationToken);
+        }
+    }
+
+    // Seeds the vector clock from snapshot metadata (best effort) and then from the latest
+    // oplog entry per node. Runs synchronously over async calls — see ctor NOTE.
+    protected override void InitializeVectorClock()
+    {
+        if (_vectorClock.IsInitialized) return;
+
+        if (_surrealClient == null || _schemaInitializer == null)
+        {
+            _vectorClock.IsInitialized = true;
+            return;
+        }
+
+        if (_snapshotMetadataStore != null)
+            try
+            {
+                var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult();
+                foreach (var snapshot in snapshots)
+                    _vectorClock.UpdateNode(
+                        snapshot.NodeId,
+                        new HlcTimestamp(
+                            snapshot.TimestampPhysicalTime,
+                            snapshot.TimestampLogicalCounter,
+                            snapshot.NodeId),
+                        snapshot.Hash ?? "");
+            }
+            catch
+            {
+                // Ignore snapshot bootstrap failures to keep oplog fallback behavior aligned.
+            }
+
+        EnsureReadyAsync(CancellationToken.None).GetAwaiter().GetResult();
+        var all = _surrealClient.Select(CBDDCSurrealSchemaNames.OplogEntriesTable, CancellationToken.None)
+                      .GetAwaiter().GetResult()
+                  ?? [];
+
+        // Newest entry per authoring node wins.
+        var latestPerNode = all
+            .Where(x => !string.IsNullOrWhiteSpace(x.TimestampNodeId))
+            .GroupBy(x => x.TimestampNodeId)
+            .Select(g => g
+                .OrderByDescending(x => x.TimestampPhysicalTime)
+                .ThenByDescending(x => x.TimestampLogicalCounter)
+                .First())
+            .ToList();
+
+        foreach (var latest in latestPerNode)
+            _vectorClock.UpdateNode(
+                latest.TimestampNodeId,
+                new HlcTimestamp(latest.TimestampPhysicalTime, latest.TimestampLogicalCounter, latest.TimestampNodeId),
+                latest.Hash ?? "");
+
+        _vectorClock.IsInitialized = true;
+    }
+
+    // Insert is idempotent per hash: an entry already present is left untouched.
+    protected override async Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default)
+    {
+        var existing = await FindByHashAsync(entry.Hash, cancellationToken);
+        if (existing != null) return;
+
+        await UpsertAsync(entry, SurrealStoreRecordIds.Oplog(entry.Hash), cancellationToken);
+    }
+
+    protected override async Task QueryLastHashForNodeAsync(string nodeId,
+        CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        var lastEntry = all
+            .Where(o => string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal))
+            .OrderByDescending(o => o.TimestampPhysicalTime)
+            .ThenByDescending(o => o.TimestampLogicalCounter)
+            .FirstOrDefault();
+        return lastEntry?.Hash;
+    }
+
+    protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
+        CancellationToken cancellationToken = default)
+    {
+        var existing = await FindByHashAsync(hash, cancellationToken);
+        if (existing == null) return null;
+        return (existing.TimestampPhysicalTime, existing.TimestampLogicalCounter);
+    }
+
+    private async Task UpsertAsync(OplogEntry entry, RecordId recordId, CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        await _surrealClient!.Upsert(
+            recordId,
+            entry.ToSurrealRecord(),
+            cancellationToken);
+    }
+
+    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
+    {
+        await _schemaInitializer!.EnsureInitializedAsync(cancellationToken);
+    }
+
+    // NOTE(review): every read path funnels through this full-table select and filters in
+    // memory — O(table size) per query; acceptable for an embedded store but worth confirming
+    // against expected oplog growth.
+    private async Task> SelectAllAsync(CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        var rows = await _surrealClient!.Select(
+            CBDDCSurrealSchemaNames.OplogEntriesTable,
+            cancellationToken);
+        return rows?.ToList() ?? [];
+    }
+
+    // Fast path: deterministic record id derived from the hash; falls back to a full scan
+    // for legacy rows whose ids do not follow the deterministic scheme.
+    private async Task FindByHashAsync(string hash, CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+
+        RecordId deterministicId = SurrealStoreRecordIds.Oplog(hash);
+        var deterministic = await _surrealClient!.Select(deterministicId, cancellationToken);
+        if (deterministic != null && string.Equals(deterministic.Hash, hash, StringComparison.Ordinal))
+            return deterministic;
+
+        var all = await SelectAllAsync(cancellationToken);
+        return all.FirstOrDefault(o => string.Equals(o.Hash, hash, StringComparison.Ordinal));
+    }
+}
diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerConfigurationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerConfigurationStore.cs
new file mode 100644
index 0000000..b1f0d35
--- /dev/null
+++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerConfigurationStore.cs
@@ -0,0 +1,111 @@
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Logging.Abstractions;
+using SurrealDb.Net;
+using SurrealDb.Net.Models;
+using ZB.MOM.WW.CBDDC.Core.Network;
+
+namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
+
+// Remote-peer configuration persistence keyed by node id, backed by embedded SurrealDB.
+public class SurrealPeerConfigurationStore : PeerConfigurationStore
+{
+    private readonly ILogger _logger;
+    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
+    private readonly ISurrealDbClient _surrealClient;
+
+    public SurrealPeerConfigurationStore(
+        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
ICBDDCSurrealSchemaInitializer schemaInitializer,
+        ILogger? logger = null)
+    {
+        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
+        _surrealClient = surrealEmbeddedClient.Client;
+        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
+        _logger = logger ?? NullLogger.Instance;
+    }
+
+    public override async Task> GetRemotePeersAsync(
+        CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        return all.Select(p => p.ToDomain()).ToList();
+    }
+
+    // NOTE(review): unlike the sibling overrides, cancellationToken has no default value
+    // here — confirm the base signature; if it allows one, add "= default" for consistency.
+    public override async Task GetRemotePeerAsync(string nodeId,
+        CancellationToken cancellationToken)
+    {
+        var existing = await FindByNodeIdAsync(nodeId, cancellationToken);
+        return existing?.ToDomain();
+    }
+
+    // Removing an unknown peer logs a warning and returns rather than throwing.
+    public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        var existing = await FindByNodeIdAsync(nodeId, cancellationToken);
+        if (existing == null)
+        {
+            _logger.LogWarning("Attempted to remove non-existent remote peer: {NodeId}", nodeId);
+            return;
+        }
+
+        RecordId recordId = existing.Id ?? SurrealStoreRecordIds.RemotePeer(nodeId);
+        await _surrealClient.Delete(recordId, cancellationToken);
+        _logger.LogInformation("Removed remote peer configuration: {NodeId}", nodeId);
+    }
+
+    // Upsert semantics: reuses the stored record id when present, otherwise the
+    // deterministic id derived from the node id.
+    public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
+        CancellationToken cancellationToken = default)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        var existing = await FindByNodeIdAsync(peer.NodeId, cancellationToken);
+        RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.RemotePeer(peer.NodeId);
+
+        await _surrealClient.Upsert(
+            recordId,
+            peer.ToSurrealRecord(),
+            cancellationToken);
+
+        _logger.LogInformation("Saved remote peer configuration: {NodeId} ({Type})", peer.NodeId, peer.Type);
+    }
+
+    // Deleting by table name clears every row in the table.
+    public override async Task DropAsync(CancellationToken cancellationToken = default)
+    {
+        _logger.LogWarning(
+            "Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
+        await EnsureReadyAsync(cancellationToken);
+        await _surrealClient.Delete(CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable, cancellationToken);
+        _logger.LogInformation("Peer configuration store dropped successfully.");
+    }
+
+    public override async Task> ExportAsync(
+        CancellationToken cancellationToken = default)
+    {
+        return await GetRemotePeersAsync(cancellationToken);
+    }
+
+    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
+    {
+        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
+    }
+
+    // Full-table select; peer configuration tables are expected to stay small.
+    private async Task> SelectAllAsync(CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        var rows = await _surrealClient.Select(
+            CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable,
+            cancellationToken);
+        return rows?.ToList() ?? [];
+    }
+
+    // Deterministic-id lookup first; full scan as a fallback for non-deterministic ids.
+    private async Task FindByNodeIdAsync(string nodeId, CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        RecordId deterministicId = SurrealStoreRecordIds.RemotePeer(nodeId);
+        var deterministic = await _surrealClient.Select(deterministicId, cancellationToken);
+        if (deterministic != null &&
+            string.Equals(deterministic.NodeId, nodeId, StringComparison.Ordinal))
+            return deterministic;
+
+        var all = await SelectAllAsync(cancellationToken);
+        return all.FirstOrDefault(p => string.Equals(p.NodeId, nodeId, StringComparison.Ordinal));
+    }
+}
diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerOplogConfirmationStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerOplogConfirmationStore.cs
new file mode 100644
index 0000000..61c749a
--- /dev/null
+++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealPeerOplogConfirmationStore.cs
@@ -0,0 +1,311 @@
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Logging.Abstractions;
+using SurrealDb.Net;
+using SurrealDb.Net.Models;
+using ZB.MOM.WW.CBDDC.Core;
+using ZB.MOM.WW.CBDDC.Core.Network;
+
+namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
+
+// Tracks per-peer, per-source oplog confirmation watermarks. A sentinel SourceNodeId
+// (RegistrationSourceNodeId) marks "peer is registered" rows that carry no confirmation data.
+public class SurrealPeerOplogConfirmationStore : PeerOplogConfirmationStore
+{
+    internal const string RegistrationSourceNodeId = "__peer_registration__";
+
+    private readonly ILogger _logger;
+    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
+    private readonly ISurrealDbClient _surrealClient;
+
+    public SurrealPeerOplogConfirmationStore(
+        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
+        ICBDDCSurrealSchemaInitializer schemaInitializer,
+        ILogger? logger = null)
+    {
+        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
+        _surrealClient = surrealEmbeddedClient.Client;
+        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
+        _logger = logger ??
NullLogger.Instance;
+    }
+
+    // Creates (or reactivates) the sentinel registration row for a peer. address/type are
+    // only logged, not persisted here.
+    public override async Task EnsurePeerRegisteredAsync(
+        string peerNodeId,
+        string address,
+        PeerType type,
+        CancellationToken cancellationToken = default)
+    {
+        if (string.IsNullOrWhiteSpace(peerNodeId))
+            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
+
+        var existing =
+            await FindByPairAsync(peerNodeId, RegistrationSourceNodeId, cancellationToken);
+
+        if (existing == null)
+        {
+            var created = new PeerOplogConfirmation
+            {
+                PeerNodeId = peerNodeId,
+                SourceNodeId = RegistrationSourceNodeId,
+                ConfirmedWall = 0,
+                ConfirmedLogic = 0,
+                ConfirmedHash = "",
+                LastConfirmedUtc = DateTimeOffset.UtcNow,
+                IsActive = true
+            };
+
+            await UpsertAsync(created, SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, RegistrationSourceNodeId),
+                cancellationToken);
+
+            _logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId,
+                address, type);
+            return;
+        }
+
+        if (existing.IsActive) return;
+
+        // Reactivation path for a previously deactivated peer.
+        existing.IsActive = true;
+        existing.LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
+        RecordId recordId =
+            existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, RegistrationSourceNodeId);
+        await UpsertAsync(existing, recordId, cancellationToken);
+    }
+
+    // Advances the confirmation watermark for (peer, source). Writes only when the incoming
+    // timestamp is newer, the hash changed at the same point, or the row needs reactivating.
+    public override async Task UpdateConfirmationAsync(
+        string peerNodeId,
+        string sourceNodeId,
+        HlcTimestamp timestamp,
+        string hash,
+        CancellationToken cancellationToken = default)
+    {
+        if (string.IsNullOrWhiteSpace(peerNodeId))
+            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
+
+        if (string.IsNullOrWhiteSpace(sourceNodeId))
+            throw new ArgumentException("Source node id is required.", nameof(sourceNodeId));
+
+        var existing = await FindByPairAsync(peerNodeId, sourceNodeId, cancellationToken);
+        long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
+
+        if (existing == null)
+        {
+            var created = new PeerOplogConfirmation
+            {
+                PeerNodeId = peerNodeId,
+                SourceNodeId = sourceNodeId,
+                ConfirmedWall = timestamp.PhysicalTime,
+                ConfirmedLogic = timestamp.LogicalCounter,
+                ConfirmedHash = hash ?? "",
+                LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(nowMs),
+                IsActive = true
+            };
+            await UpsertAsync(created, SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId),
+                cancellationToken);
+            return;
+        }
+
+        bool isNewer = IsIncomingTimestampNewer(timestamp, existing);
+        // NOTE(review): this comparison uses the raw "hash" argument (possibly null) against
+        // the stored value, while writes below coalesce null to "" — a null hash at the same
+        // HLC point therefore counts as "changed". Confirm this is intended.
+        bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
+                                    timestamp.LogicalCounter == existing.ConfirmedLogic &&
+                                    !string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal);
+
+        if (!isNewer && !samePointHashChanged && existing.IsActive) return;
+
+        existing.ConfirmedWall = timestamp.PhysicalTime;
+        existing.ConfirmedLogic = timestamp.LogicalCounter;
+        existing.ConfirmedHash = hash ?? "";
+        existing.LastConfirmedUtcMs = nowMs;
+        existing.IsActive = true;
+
+        RecordId recordId = existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId);
+        await UpsertAsync(existing, recordId, cancellationToken);
+    }
+
+    // Registration sentinel rows are filtered out of confirmation queries.
+    public override async Task> GetConfirmationsAsync(
+        CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        return all
+            .Where(c => !string.Equals(c.SourceNodeId, RegistrationSourceNodeId, StringComparison.Ordinal))
+            .Select(c => c.ToDomain())
+            .ToList();
+    }
+
+    public override async Task> GetConfirmationsForPeerAsync(
+        string peerNodeId,
+        CancellationToken cancellationToken = default)
+    {
+        if (string.IsNullOrWhiteSpace(peerNodeId))
+            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
+
+        var all = await SelectAllAsync(cancellationToken);
+        return all
+            .Where(c => string.Equals(c.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
+                        !string.Equals(c.SourceNodeId, RegistrationSourceNodeId, StringComparison.Ordinal))
+            .Select(c => c.ToDomain())
+            .ToList();
+    }
+
+    // Soft delete: rows are deactivated (IsActive = false), never removed.
+    public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default)
+    {
+        if (string.IsNullOrWhiteSpace(peerNodeId))
+            throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
+
+        var matches = (await SelectAllAsync(cancellationToken))
+            .Where(c => string.Equals(c.PeerNodeId, peerNodeId, StringComparison.Ordinal))
+            .ToList();
+
+        if (matches.Count == 0) return;
+
+        long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
+        foreach (var match in matches)
+        {
+            if (!match.IsActive) continue;
+
+            match.IsActive = false;
+            match.LastConfirmedUtcMs = nowMs;
+
+            RecordId recordId = match.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(match.PeerNodeId, match.SourceNodeId);
+            await UpsertAsync(match, recordId, cancellationToken);
+        }
+    }
+
+    public override async Task> GetActiveTrackedPeersAsync(
+        CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        return all
+            .Where(c => c.IsActive)
+            .Select(c => c.PeerNodeId)
+            .Distinct(StringComparer.Ordinal)
+            .ToList();
+    }
+
+    public override async Task DropAsync(CancellationToken cancellationToken = default)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        await _surrealClient.Delete(CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable, cancellationToken);
+    }
+
+    // NOTE(review): unlike GetConfirmationsAsync, Export does NOT filter out the
+    // registration sentinel rows — confirm an import on another node handles them.
+    public override async Task> ExportAsync(CancellationToken cancellationToken = default)
+    {
+        var all = await SelectAllAsync(cancellationToken);
+        return all.Select(c => c.ToDomain()).ToList();
+    }
+
+    // Import overwrites by (peer, source) pair unconditionally.
+    public override async Task ImportAsync(IEnumerable items,
+        CancellationToken cancellationToken = default)
+    {
+        foreach (var item in items)
+        {
+            var existing = await FindByPairAsync(item.PeerNodeId, item.SourceNodeId, cancellationToken);
+            RecordId recordId =
+                existing?.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(item.PeerNodeId, item.SourceNodeId);
+            await UpsertAsync(item, recordId, cancellationToken);
+        }
+    }
+
+    // Merge keeps the newest confirmation point, the newest LastConfirmed time, and takes
+    // the incoming IsActive flag whenever it differs; writes only when something changed.
+    public override async Task MergeAsync(IEnumerable items,
+        CancellationToken cancellationToken = default)
+    {
+        foreach (var item in items)
+        {
+            var existing = await FindByPairAsync(item.PeerNodeId, item.SourceNodeId, cancellationToken);
+            if (existing == null)
+            {
+                await UpsertAsync(item, SurrealStoreRecordIds.PeerOplogConfirmation(item.PeerNodeId, item.SourceNodeId),
+                    cancellationToken);
+                continue;
+            }
+
+            bool changed = false;
+            var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId);
+            var existingTimestamp = new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId);
+
+            if (incomingTimestamp > existingTimestamp)
+            {
+                existing.ConfirmedWall = item.ConfirmedWall;
+                existing.ConfirmedLogic = item.ConfirmedLogic;
+                existing.ConfirmedHash = item.ConfirmedHash;
+                changed = true;
+            }
+
+            long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
+            if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs)
+            {
+                existing.LastConfirmedUtcMs = incomingLastConfirmedMs;
+                changed = true;
+            }
+
+            if (existing.IsActive != item.IsActive)
+            {
+                existing.IsActive = item.IsActive;
+                changed = true;
+            }
+
+            if (!changed) continue;
+
+            RecordId recordId =
+                existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(existing.PeerNodeId, existing.SourceNodeId);
+            await UpsertAsync(existing, recordId, cancellationToken);
+        }
+    }
+
+    private async Task UpsertAsync(PeerOplogConfirmation confirmation, RecordId recordId, CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        await _surrealClient.Upsert(
+            recordId,
+            confirmation.ToSurrealRecord(),
+            cancellationToken);
+    }
+
+    // Overload for rows already in record (storage) shape.
+    private async Task UpsertAsync(SurrealPeerOplogConfirmationRecord confirmation, RecordId recordId,
+        CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        await _surrealClient.Upsert(
+            recordId,
+            confirmation,
+            cancellationToken);
+    }
+
+    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
+    {
+        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
+    }
+
+    // Full-table select; all query methods filter in memory.
+    private async Task> SelectAllAsync(CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        var rows = await _surrealClient.Select(
+            CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable,
+            cancellationToken);
+        return rows?.ToList() ?? [];
+    }
+
+    // Deterministic-id lookup first, full scan fallback (same pattern as the other stores).
+    private async Task FindByPairAsync(string peerNodeId, string sourceNodeId,
+        CancellationToken cancellationToken)
+    {
+        await EnsureReadyAsync(cancellationToken);
+        RecordId deterministicId = SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId);
+        var deterministic = await _surrealClient.Select(deterministicId, cancellationToken);
+        if (deterministic != null &&
+            string.Equals(deterministic.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
+            string.Equals(deterministic.SourceNodeId, sourceNodeId, StringComparison.Ordinal))
+            return deterministic;
+
+        var all = await SelectAllAsync(cancellationToken);
+        return all.FirstOrDefault(c =>
+            string.Equals(c.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
+            string.Equals(c.SourceNodeId, sourceNodeId, StringComparison.Ordinal));
+    }
+
+    // HLC ordering on (wall, logic) only; node id is not a tiebreaker here.
+    private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, SurrealPeerOplogConfirmationRecord existing)
+    {
+        if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true;
+
+        if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall &&
+            incomingTimestamp.LogicalCounter > existing.ConfirmedLogic)
+            return true;
+
+        return false;
+    }
+}
diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealShowChangesCborDecoder.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealShowChangesCborDecoder.cs
new file mode 100644
index 0000000..a20fd8d
--- /dev/null
+++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealShowChangesCborDecoder.cs
@@ -0,0 +1,296 @@
+using System.Text.Json;
+using Dahomey.Cbor.ObjectModel;
+using ZB.MOM.WW.CBDDC.Core;
+
+namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
+
+// One SHOW CHANGES result row: a versionstamp plus the changes it carries.
+internal readonly record struct SurrealPolledChangeRow(
+    ulong Versionstamp,
+    IReadOnlyList Changes);
+
+// A single decoded change: put/delete, record key, and (for puts) the normalized JSON body.
+internal readonly record struct SurrealPolledChange(
+    OperationType OperationType,
+    string Key,
+    JsonElement?
Content);
+
+// Decodes CBOR rows returned by SurrealDB's SHOW CHANGES into put/delete change records.
+// Rows with missing/unreadable versionstamps and changes for other tables are skipped silently.
+internal static class SurrealShowChangesCborDecoder
+{
+    // Change-object keys that all map to a Put operation.
+    private static readonly string[] PutChangeKinds = ["create", "update", "upsert", "insert", "set", "replace"];
+
+    public static IReadOnlyList DecodeRows(
+        IEnumerable rows,
+        string expectedTableName)
+    {
+        var result = new List();
+
+        foreach (var row in rows)
+        {
+            if (!TryGetProperty(row, "versionstamp", out CborValue versionstampValue)) continue;
+            if (!TryReadUInt64(versionstampValue, out ulong versionstamp)) continue;
+
+            var changes = new List();
+            if (TryGetProperty(row, "changes", out CborValue rawChanges) &&
+                rawChanges is CborArray changeArray)
+                foreach (CborValue changeValue in changeArray)
+                {
+                    if (changeValue is not CborObject changeObject) continue;
+
+                    if (TryExtractChange(changeObject, expectedTableName, out SurrealPolledChange change))
+                        changes.Add(change);
+                }
+
+            // Rows with zero decodable changes are still emitted (versionstamp advances).
+            result.Add(new SurrealPolledChangeRow(versionstamp, changes));
+        }
+
+        return result;
+    }
+
+    // Delete takes precedence; otherwise the first matching put-kind key wins.
+    private static bool TryExtractChange(
+        CborObject changeObject,
+        string expectedTableName,
+        out SurrealPolledChange change)
+    {
+        if (TryGetProperty(changeObject, "delete", out CborValue deletePayload))
+            if (TryExtractRecordKey(deletePayload, expectedTableName, out string deleteKey))
+            {
+                change = new SurrealPolledChange(OperationType.Delete, deleteKey, null);
+                return true;
+            }
+
+        foreach (string putKind in PutChangeKinds)
+            if (TryGetProperty(changeObject, putKind, out CborValue putPayload))
+                if (TryExtractRecordKey(putPayload, expectedTableName, out string putKey))
+                {
+                    JsonElement? content = BuildNormalizedJsonPayload(putPayload, putKey);
+                    change = new SurrealPolledChange(OperationType.Put, putKey, content);
+                    return true;
+                }
+
+        change = default;
+        return false;
+    }
+
+    private static bool TryExtractRecordKey(
+        CborValue payload,
+        string expectedTableName,
+        out string key)
+    {
+        key = "";
+        if (payload is not CborObject payloadObject) return false;
+        if (!TryGetProperty(payloadObject, "id", out CborValue idValue)) return false;
+
+        if (TryExtractRecordKeyFromIdValue(idValue, expectedTableName, out string extracted))
+        {
+            if (string.IsNullOrWhiteSpace(extracted)) return false;
+            key = extracted;
+            return true;
+        }
+
+        return false;
+    }
+
+    // Handles the three id encodings observed: [table, key] array, "table:key" string,
+    // and an object with tb/table + nested id. Table mismatches are rejected when an
+    // expected table name is supplied.
+    private static bool TryExtractRecordKeyFromIdValue(
+        CborValue idValue,
+        string expectedTableName,
+        out string key)
+    {
+        key = "";
+
+        if (idValue is CborArray arrayId)
+        {
+            if (arrayId.Count < 2) return false;
+            if (!TryReadString(arrayId[0], out string tableName)) return false;
+            if (!string.IsNullOrWhiteSpace(expectedTableName) &&
+                !string.Equals(tableName, expectedTableName, StringComparison.Ordinal))
+                return false;
+
+            if (!TryReadString(arrayId[1], out string recordKey)) return false;
+            key = recordKey;
+            return true;
+        }
+
+        if (idValue is CborString)
+        {
+            // NOTE(review): the string form cannot be checked against expectedTableName here —
+            // ExtractKeyFromRecordId drops the table prefix without comparing it; confirm intended.
+            if (!TryReadString(idValue, out string recordId)) return false;
+            key = ExtractKeyFromRecordId(recordId) ?? "";
+            return !string.IsNullOrWhiteSpace(key);
+        }
+
+        if (idValue is CborObject idObject)
+        {
+            string? tableName = null;
+            if (TryGetProperty(idObject, "tb", out CborValue tbValue) && TryReadString(tbValue, out string tb))
+                tableName = tb;
+            else if (TryGetProperty(idObject, "table", out CborValue tableValue) &&
+                     TryReadString(tableValue, out string table))
+                tableName = table;
+
+            if (!string.IsNullOrWhiteSpace(expectedTableName) &&
+                !string.IsNullOrWhiteSpace(tableName) &&
+                !string.Equals(tableName, expectedTableName, StringComparison.Ordinal))
+                return false;
+
+            if (TryGetProperty(idObject, "id", out CborValue nestedId))
+            {
+                if (TryReadString(nestedId, out string nestedIdValue))
+                {
+                    key = nestedIdValue;
+                    return true;
+                }
+
+                // Fallback: stringify non-string ids and strip surrounding quotes.
+                key = nestedId.ToString()?.Trim('"') ?? "";
+                return !string.IsNullOrWhiteSpace(key);
+            }
+        }
+
+        return false;
+    }
+
+    // Converts the CBOR payload to CLR objects, overwrites "id" with the plain record key,
+    // and serializes to a JsonElement.
+    private static JsonElement? BuildNormalizedJsonPayload(CborValue payload, string key)
+    {
+        object? clrValue = ConvertCborToClr(payload);
+        if (clrValue == null) return null;
+
+        if (clrValue is Dictionary payloadMap)
+            payloadMap["id"] = key;
+
+        return JsonSerializer.SerializeToElement(clrValue);
+    }
+
+    // Recursive CBOR -> CLR conversion. For scalars, types are probed in a fixed order:
+    // string, bool, long, ulong, double — note long is tried before ulong, so values that
+    // fit in long decode as long. Unreadable values fall back to their string form.
+    private static object? ConvertCborToClr(CborValue value)
+    {
+        switch (value)
+        {
+            case CborNull:
+                return null;
+
+            case CborObject cborObject:
+                var map = new Dictionary(StringComparer.Ordinal);
+                foreach ((CborValue rawKey, CborValue rawValue) in cborObject)
+                {
+                    if (!TryReadString(rawKey, out string key) || string.IsNullOrWhiteSpace(key))
+                        key = rawKey.ToString()?.Trim('"') ?? "";
+                    if (string.IsNullOrWhiteSpace(key)) continue;
+
+                    map[key] = ConvertCborToClr(rawValue);
+                }
+
+                return map;
+
+            case CborArray cborArray:
+                return cborArray.Select(ConvertCborToClr).ToList();
+
+            default:
+                if (TryReadString(value, out string stringValue)) return stringValue;
+                if (TryReadBoolean(value, out bool boolValue)) return boolValue;
+                if (TryReadInt64(value, out long intValue)) return intValue;
+                if (TryReadUInt64(value, out ulong uintValue)) return uintValue;
+                if (TryReadDouble(value, out double doubleValue)) return doubleValue;
+                return value.ToString();
+        }
+    }
+
+    private static bool TryGetProperty(CborObject source, string name, out CborValue value)
+    {
+        if (source.TryGetValue((CborValue)name, out CborValue? found))
+        {
+            value = found;
+            return true;
+        }
+
+        value = CborValue.Null;
+        return false;
+    }
+
+    // The TryRead* helpers below deliberately swallow all exceptions (Try-pattern over a
+    // CBOR library that throws on type mismatch).
+    private static bool TryReadString(CborValue value, out string result)
+    {
+        try
+        {
+            string? parsed = value.Value();
+            if (parsed == null)
+            {
+                result = "";
+                return false;
+            }
+
+            result = parsed;
+            return true;
+        }
+        catch
+        {
+            result = "";
+            return false;
+        }
+    }
+
+    private static bool TryReadBoolean(CborValue value, out bool result)
+    {
+        try
+        {
+            result = value.Value();
+            return true;
+        }
+        catch
+        {
+            result = default;
+            return false;
+        }
+    }
+
+    private static bool TryReadInt64(CborValue value, out long result)
+    {
+        try
+        {
+            result = value.Value();
+            return true;
+        }
+        catch
+        {
+            result = default;
+            return false;
+        }
+    }
+
+    private static bool TryReadUInt64(CborValue value, out ulong result)
+    {
+        try
+        {
+            result = value.Value();
+            return true;
+        }
+        catch
+        {
+            result = default;
+            return false;
+        }
+    }
+
+    private static bool TryReadDouble(CborValue value, out double result)
+    {
+        try
+        {
+            result = value.Value();
+            return true;
+        }
+        catch
+        {
+            result = default;
+            return false;
+        }
+    }
+
+    // Strips the "table:" prefix from a string record id, plus any '"' or '`' quoting
+    // around the key part. Ids without a ':' are returned as-is.
+    private static string? ExtractKeyFromRecordId(string recordId)
+    {
+        if (string.IsNullOrWhiteSpace(recordId)) return null;
+        int separator = recordId.IndexOf(':');
+        if (separator < 0) return recordId;
+
+        string key = recordId[(separator + 1)..].Trim();
+        if (key.StartsWith('"') && key.EndsWith('"') && key.Length >= 2)
+            key = key[1..^1];
+        if (key.StartsWith('`') && key.EndsWith('`') && key.Length >= 2)
+            key = key[1..^1];
+
+        return string.IsNullOrWhiteSpace(key) ? null : key;
+    }
+}
diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealSnapshotMetadataStore.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealSnapshotMetadataStore.cs
new file mode 100644
index 0000000..6a97d48
--- /dev/null
+++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealSnapshotMetadataStore.cs
@@ -0,0 +1,142 @@
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Logging.Abstractions;
+using SurrealDb.Net;
+using SurrealDb.Net.Models;
+using ZB.MOM.WW.CBDDC.Core;
+
+namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
+
+// Snapshot metadata persistence keyed by node id; same deterministic-id + fallback-scan
+// pattern as the other Surreal stores.
+public class SurrealSnapshotMetadataStore : SnapshotMetadataStore
+{
+    private readonly ILogger _logger;
+    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
+    private readonly ISurrealDbClient _surrealClient;
+
+    public SurrealSnapshotMetadataStore(
+        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
+        ICBDDCSurrealSchemaInitializer schemaInitializer,
+        ILogger? logger = null)
+    {
+        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
+        _surrealClient = surrealEmbeddedClient.Client;
+        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
+        _logger = logger ??
NullLogger.Instance; + } + + public override async Task DropAsync(CancellationToken cancellationToken = default) + { + await EnsureReadyAsync(cancellationToken); + await _surrealClient.Delete(CBDDCSurrealSchemaNames.SnapshotMetadataTable, cancellationToken); + } + + public override async Task> ExportAsync(CancellationToken cancellationToken = default) + { + var all = await SelectAllAsync(cancellationToken); + return all.Select(m => m.ToDomain()).ToList(); + } + + public override async Task GetSnapshotMetadataAsync(string nodeId, + CancellationToken cancellationToken = default) + { + var existing = await FindByNodeIdAsync(nodeId, cancellationToken); + return existing?.ToDomain(); + } + + public override async Task GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default) + { + var existing = await FindByNodeIdAsync(nodeId, cancellationToken); + return existing?.Hash; + } + + public override async Task ImportAsync(IEnumerable items, + CancellationToken cancellationToken = default) + { + foreach (var item in items) + { + var existing = await FindByNodeIdAsync(item.NodeId, cancellationToken); + RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.SnapshotMetadata(item.NodeId); + await UpsertAsync(item, recordId, cancellationToken); + } + } + + public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata, + CancellationToken cancellationToken = default) + { + var existing = await FindByNodeIdAsync(metadata.NodeId, cancellationToken); + RecordId recordId = existing?.Id ?? 
SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId); + await UpsertAsync(metadata, recordId, cancellationToken); + } + + public override async Task MergeAsync(IEnumerable items, CancellationToken cancellationToken = default) + { + foreach (var metadata in items) + { + var existing = await FindByNodeIdAsync(metadata.NodeId, cancellationToken); + if (existing == null) + { + await UpsertAsync(metadata, SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId), cancellationToken); + continue; + } + + if (metadata.TimestampPhysicalTime < existing.TimestampPhysicalTime || + (metadata.TimestampPhysicalTime == existing.TimestampPhysicalTime && + metadata.TimestampLogicalCounter <= existing.TimestampLogicalCounter)) + continue; + + RecordId recordId = existing.Id ?? SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId); + await UpsertAsync(metadata, recordId, cancellationToken); + } + } + + public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta, + CancellationToken cancellationToken) + { + var existing = await FindByNodeIdAsync(existingMeta.NodeId, cancellationToken); + if (existing == null) return; + + RecordId recordId = existing.Id ?? 
SurrealStoreRecordIds.SnapshotMetadata(existingMeta.NodeId); + await UpsertAsync(existingMeta, recordId, cancellationToken); + } + + public override async Task> GetAllSnapshotMetadataAsync( + CancellationToken cancellationToken = default) + { + return await ExportAsync(cancellationToken); + } + + private async Task UpsertAsync(SnapshotMetadata metadata, RecordId recordId, CancellationToken cancellationToken) + { + await EnsureReadyAsync(cancellationToken); + await _surrealClient.Upsert( + recordId, + metadata.ToSurrealRecord(), + cancellationToken); + } + + private async Task EnsureReadyAsync(CancellationToken cancellationToken) + { + await _schemaInitializer.EnsureInitializedAsync(cancellationToken); + } + + private async Task> SelectAllAsync(CancellationToken cancellationToken) + { + await EnsureReadyAsync(cancellationToken); + var rows = await _surrealClient.Select( + CBDDCSurrealSchemaNames.SnapshotMetadataTable, + cancellationToken); + return rows?.ToList() ?? []; + } + + private async Task FindByNodeIdAsync(string nodeId, CancellationToken cancellationToken) + { + await EnsureReadyAsync(cancellationToken); + RecordId deterministicId = SurrealStoreRecordIds.SnapshotMetadata(nodeId); + var deterministic = await _surrealClient.Select(deterministicId, cancellationToken); + if (deterministic != null && + string.Equals(deterministic.NodeId, nodeId, StringComparison.Ordinal)) + return deterministic; + + var all = await SelectAllAsync(cancellationToken); + return all.FirstOrDefault(m => string.Equals(m.NodeId, nodeId, StringComparison.Ordinal)); + } +} diff --git a/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealStoreRecords.cs b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealStoreRecords.cs new file mode 100644 index 0000000..502671a --- /dev/null +++ b/src/ZB.MOM.WW.CBDDC.Persistence/Surreal/SurrealStoreRecords.cs @@ -0,0 +1,294 @@ +using System.Security.Cryptography; +using System.Text; +using System.Text.Json; +using System.Text.Json.Serialization; +using 
/// <summary>
/// Deterministic record-id builders for all CBDDC Surreal tables. Composite keys
/// are hashed so arbitrary user strings cannot break Surreal id syntax.
/// </summary>
internal static class SurrealStoreRecordIds
{
    /// <summary>Record id for an oplog entry, keyed by its content hash.</summary>
    public static RecordId Oplog(string hash) =>
        RecordId.From(CBDDCSurrealSchemaNames.OplogEntriesTable, hash);

    /// <summary>Record id for document metadata, keyed by (collection, key).</summary>
    public static RecordId DocumentMetadata(string collection, string key) =>
        RecordId.From(
            CBDDCSurrealSchemaNames.DocumentMetadataTable,
            CompositeKey("docmeta", collection, key));

    /// <summary>Record id for a node's snapshot metadata row.</summary>
    public static RecordId SnapshotMetadata(string nodeId) =>
        RecordId.From(CBDDCSurrealSchemaNames.SnapshotMetadataTable, nodeId);

    /// <summary>Record id for a remote peer configuration row.</summary>
    public static RecordId RemotePeer(string nodeId) =>
        RecordId.From(CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable, nodeId);

    /// <summary>Record id for a peer confirmation, keyed by (peer, source).</summary>
    public static RecordId PeerOplogConfirmation(string peerNodeId, string sourceNodeId) =>
        RecordId.From(
            CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable,
            CompositeKey("peerconfirm", peerNodeId, sourceNodeId));

    // Lower-case hex SHA-256 over "prefix\nfirst\nsecond"; newline-joined so the
    // parts cannot collide by concatenation.
    private static string CompositeKey(string prefix, string first, string second)
    {
        string composite = string.Join('\n', prefix, first, second);
        byte[] digest = SHA256.HashData(Encoding.UTF8.GetBytes(composite));
        return Convert.ToHexString(digest).ToLowerInvariant();
    }
}

/// <summary>Surreal row shape for one oplog entry.</summary>
internal sealed class SurrealOplogRecord : Record
{
    [JsonPropertyName("collection")]
    public string Collection { get; set; } = "";

    [JsonPropertyName("key")]
    public string Key { get; set; } = "";

    [JsonPropertyName("operation")]
    public int Operation { get; set; }

    // Raw JSON text of the payload; empty string means "no payload".
    [JsonPropertyName("payloadJson")]
    public string PayloadJson { get; set; } = "";

    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }

    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }

    [JsonPropertyName("timestampNodeId")]
    public string TimestampNodeId { get; set; } = "";

    [JsonPropertyName("hash")]
    public string Hash { get; set; } = "";

    [JsonPropertyName("previousHash")]
    public string PreviousHash { get; set; } = "";
}

/// <summary>Surreal row shape for per-document replication metadata.</summary>
internal sealed class SurrealDocumentMetadataRecord : Record
{
    [JsonPropertyName("collection")]
    public string Collection { get; set; } = "";

    [JsonPropertyName("key")]
    public string Key { get; set; } = "";

    [JsonPropertyName("hlcPhysicalTime")]
    public long HlcPhysicalTime { get; set; }

    [JsonPropertyName("hlcLogicalCounter")]
    public int HlcLogicalCounter { get; set; }

    [JsonPropertyName("hlcNodeId")]
    public string HlcNodeId { get; set; } = "";

    [JsonPropertyName("isDeleted")]
    public bool IsDeleted { get; set; }
}

/// <summary>Surreal row shape for a configured remote peer.</summary>
internal sealed class SurrealRemotePeerRecord : Record
{
    [JsonPropertyName("nodeId")]
    public string NodeId { get; set; } = "";

    [JsonPropertyName("address")]
    public string Address { get; set; } = "";

    [JsonPropertyName("type")]
    public int Type { get; set; }

    [JsonPropertyName("isEnabled")]
    public bool IsEnabled { get; set; }

    // JSON-serialized list of interesting collections; empty string means "none".
    [JsonPropertyName("interestsJson")]
    public string InterestsJson { get; set; } = "";
}

/// <summary>Surreal row shape for a peer's oplog confirmation watermark.</summary>
internal sealed class SurrealPeerOplogConfirmationRecord : Record
{
    [JsonPropertyName("peerNodeId")]
    public string PeerNodeId { get; set; } = "";

    [JsonPropertyName("sourceNodeId")]
    public string SourceNodeId { get; set; } = "";

    [JsonPropertyName("confirmedWall")]
    public long ConfirmedWall { get; set; }

    [JsonPropertyName("confirmedLogic")]
    public int ConfirmedLogic { get; set; }

    [JsonPropertyName("confirmedHash")]
    public string ConfirmedHash { get; set; } = "";

    [JsonPropertyName("lastConfirmedUtcMs")]
    public long LastConfirmedUtcMs { get; set; }

    [JsonPropertyName("isActive")]
    public bool IsActive { get; set; }
}

/// <summary>Surreal row shape for a node's snapshot metadata.</summary>
internal sealed class SurrealSnapshotMetadataRecord : Record
{
    [JsonPropertyName("nodeId")]
    public string NodeId { get; set; } = "";

    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }

    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }

    [JsonPropertyName("hash")]
    public string Hash { get; set; } = "";
}

/// <summary>
/// Bidirectional mapping between CBDDC domain objects and their Surreal row shapes.
/// </summary>
internal static class SurrealStoreRecordMappers
{
    public static SurrealOplogRecord ToSurrealRecord(this OplogEntry entry) => new()
    {
        Collection = entry.Collection,
        Key = entry.Key,
        Operation = (int)entry.Operation,
        PayloadJson = entry.Payload?.GetRawText() ?? "",
        TimestampPhysicalTime = entry.Timestamp.PhysicalTime,
        TimestampLogicalCounter = entry.Timestamp.LogicalCounter,
        TimestampNodeId = entry.Timestamp.NodeId,
        Hash = entry.Hash,
        PreviousHash = entry.PreviousHash
    };

    public static OplogEntry ToDomain(this SurrealOplogRecord record)
    {
        JsonElement? payload = string.IsNullOrEmpty(record.PayloadJson)
            ? null
            : JsonSerializer.Deserialize<JsonElement>(record.PayloadJson);

        var timestamp = new HlcTimestamp(
            record.TimestampPhysicalTime,
            record.TimestampLogicalCounter,
            record.TimestampNodeId);

        return new OplogEntry(
            record.Collection,
            record.Key,
            (OperationType)record.Operation,
            payload,
            timestamp,
            record.PreviousHash,
            record.Hash);
    }

    public static SurrealDocumentMetadataRecord ToSurrealRecord(this DocumentMetadata metadata) => new()
    {
        Collection = metadata.Collection,
        Key = metadata.Key,
        HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime,
        HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter,
        HlcNodeId = metadata.UpdatedAt.NodeId,
        IsDeleted = metadata.IsDeleted
    };

    public static DocumentMetadata ToDomain(this SurrealDocumentMetadataRecord record) =>
        new(
            record.Collection,
            record.Key,
            new HlcTimestamp(record.HlcPhysicalTime, record.HlcLogicalCounter, record.HlcNodeId),
            record.IsDeleted);

    public static SurrealRemotePeerRecord ToSurrealRecord(this RemotePeerConfiguration peer) => new()
    {
        NodeId = peer.NodeId,
        Address = peer.Address,
        Type = (int)peer.Type,
        IsEnabled = peer.IsEnabled,
        InterestsJson = peer.InterestingCollections.Count > 0
            ? JsonSerializer.Serialize(peer.InterestingCollections)
            : ""
    };

    public static RemotePeerConfiguration ToDomain(this SurrealRemotePeerRecord record)
    {
        var peer = new RemotePeerConfiguration
        {
            NodeId = record.NodeId,
            Address = record.Address,
            Type = (PeerType)record.Type,
            IsEnabled = record.IsEnabled
        };

        // NOTE(review): element type assumed List<string> from the symmetric
        // Serialize call above — confirm against RemotePeerConfiguration.
        if (!string.IsNullOrEmpty(record.InterestsJson))
            peer.InterestingCollections =
                JsonSerializer.Deserialize<List<string>>(record.InterestsJson) ?? [];

        return peer;
    }

    public static SurrealPeerOplogConfirmationRecord ToSurrealRecord(this PeerOplogConfirmation confirmation) => new()
    {
        PeerNodeId = confirmation.PeerNodeId,
        SourceNodeId = confirmation.SourceNodeId,
        ConfirmedWall = confirmation.ConfirmedWall,
        ConfirmedLogic = confirmation.ConfirmedLogic,
        ConfirmedHash = confirmation.ConfirmedHash,
        LastConfirmedUtcMs = confirmation.LastConfirmedUtc.ToUnixTimeMilliseconds(),
        IsActive = confirmation.IsActive
    };

    public static PeerOplogConfirmation ToDomain(this SurrealPeerOplogConfirmationRecord record) => new()
    {
        PeerNodeId = record.PeerNodeId,
        SourceNodeId = record.SourceNodeId,
        ConfirmedWall = record.ConfirmedWall,
        ConfirmedLogic = record.ConfirmedLogic,
        ConfirmedHash = record.ConfirmedHash,
        LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(record.LastConfirmedUtcMs),
        IsActive = record.IsActive
    };

    public static SurrealSnapshotMetadataRecord ToSurrealRecord(this SnapshotMetadata metadata) => new()
    {
        NodeId = metadata.NodeId,
        TimestampPhysicalTime = metadata.TimestampPhysicalTime,
        TimestampLogicalCounter = metadata.TimestampLogicalCounter,
        Hash = metadata.Hash
    };

    public static SnapshotMetadata ToDomain(this SurrealSnapshotMetadataRecord record) => new()
    {
        NodeId = record.NodeId,
        TimestampPhysicalTime = record.TimestampPhysicalTime,
        TimestampLogicalCounter = record.TimestampLogicalCounter,
        Hash = record.Hash
    };
}
nodeAPort, clusterToken, @@ -160,6 +161,287 @@ public class ClusterCrudSyncE2ETests () => BuildDiagnostics(nodeA, nodeB)); } + /// + /// Verifies a reconnecting peer catches up mutations that happened while it was offline. + /// + [Fact] + public async Task PeerReconnect_ShouldCatchUpMissedChanges() + { + var clusterToken = Guid.NewGuid().ToString("N"); + int nodeAPort = GetAvailableTcpPort(); + int nodeBPort = GetAvailableTcpPort(); + while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort(); + + var nodeA = TestPeerNode.Create( + "node-a", + nodeAPort, + clusterToken, + [ + new KnownPeerConfiguration + { + NodeId = "node-b", + Host = "127.0.0.1", + Port = nodeBPort + } + ]); + + await using var nodeB = TestPeerNode.Create( + "node-b", + nodeBPort, + clusterToken, + [ + new KnownPeerConfiguration + { + NodeId = "node-a", + Host = "127.0.0.1", + Port = nodeAPort + } + ]); + + await nodeA.StartAsync(); + await nodeB.StartAsync(); + + await nodeB.StopAsync(); + + const string userId = "reconnect-user"; + await nodeA.UpsertUserAsync(new User + { + Id = userId, + Name = "Offline Create", + Age = 20, + Address = new Address { City = "Rome" } + }); + + await nodeA.UpsertUserAsync(new User + { + Id = userId, + Name = "Offline Update", + Age = 21, + Address = new Address { City = "Milan" } + }); + + await nodeA.UpsertUserAsync(new User + { + Id = userId, + Name = "Offline Final", + Age = 22, + Address = new Address { City = "Turin" } + }); + + await nodeB.StartAsync(); + + await AssertEventuallyAsync(() => + { + var replicated = nodeB.ReadUser(userId); + return replicated is not null && + replicated.Name == "Offline Final" && + replicated.Age == 22 && + replicated.Address?.City == "Turin"; + }, 60, "Node B did not catch up missed reconnect mutations.", () => BuildDiagnostics(nodeA, nodeB)); + } + + /// + /// Verifies a burst of rapid multi-node mutations converges to a deterministic final state. 
+ /// + [Fact] + public async Task MultiChangeBurst_ShouldConvergeDeterministically() + { + var clusterToken = Guid.NewGuid().ToString("N"); + int nodeAPort = GetAvailableTcpPort(); + int nodeBPort = GetAvailableTcpPort(); + while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort(); + + await using var nodeA = TestPeerNode.Create( + "node-a", + nodeAPort, + clusterToken, + [ + new KnownPeerConfiguration + { + NodeId = "node-b", + Host = "127.0.0.1", + Port = nodeBPort + } + ]); + + await using var nodeB = TestPeerNode.Create( + "node-b", + nodeBPort, + clusterToken, + [ + new KnownPeerConfiguration + { + NodeId = "node-a", + Host = "127.0.0.1", + Port = nodeAPort + } + ]); + + await nodeA.StartAsync(); + await nodeB.StartAsync(); + + const int burstCount = 8; + for (var i = 0; i < burstCount; i++) + { + string aId = $"burst-a-{i:D2}"; + string bId = $"burst-b-{i:D2}"; + + await nodeA.UpsertUserAsync(new User + { + Id = aId, + Name = $"A-{i}", + Age = 30 + i, + Address = new Address { City = $"CityA-{i}" } + }); + + await nodeB.UpsertUserAsync(new User + { + Id = bId, + Name = $"B-{i}", + Age = 40 + i, + Address = new Address { City = $"CityB-{i}" } + }); + } + + await AssertEventuallyAsync( + () => nodeA.Context.Users.FindAll().Count() == burstCount * 2 && + nodeB.Context.Users.FindAll().Count() == burstCount * 2, + 60, + "Burst convergence did not reach expected document counts.", + () => BuildDiagnostics(nodeA, nodeB)); + + await AssertEventuallyAsync(() => + { + for (var i = 0; i < burstCount; i++) + { + var aOnB = nodeB.ReadUser($"burst-a-{i:D2}"); + var bOnA = nodeA.ReadUser($"burst-b-{i:D2}"); + if (aOnB is null || bOnA is null) return false; + if (aOnB.Name != $"A-{i}" || bOnA.Name != $"B-{i}") return false; + } + + return true; + }, 60, "Burst convergence content mismatch.", () => BuildDiagnostics(nodeA, nodeB)); + } + + /// + /// Verifies recovery safety when a process crashes after oplog commit but before checkpoint advance. 
+ /// + [Fact] + public async Task CrashBetweenOplogAndCheckpoint_ShouldReplaySafelyOnRestart() + { + var clusterToken = Guid.NewGuid().ToString("N"); + int nodeAPort = GetAvailableTcpPort(); + int nodeBPort = GetAvailableTcpPort(); + while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort(); + + string sharedWorkDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-crash-{Guid.NewGuid():N}"); + Directory.CreateDirectory(sharedWorkDir); + + await using var nodeA = TestPeerNode.Create( + "node-a", + nodeAPort, + clusterToken, + [ + new KnownPeerConfiguration + { + NodeId = "node-b", + Host = "127.0.0.1", + Port = nodeBPort + } + ], + workDirOverride: sharedWorkDir, + preserveWorkDirOnDispose: true, + useFaultInjectedCheckpointStore: true); + bool nodeADisposed = false; + + try + { + await using var nodeB = TestPeerNode.Create( + "node-b", + nodeBPort, + clusterToken, + [ + new KnownPeerConfiguration + { + NodeId = "node-a", + Host = "127.0.0.1", + Port = nodeAPort + } + ]); + + await nodeA.StartAsync(); + await nodeB.StartAsync(); + + const string userId = "crash-window-user"; + var payload = new User + { + Id = userId, + Name = "Crash Recovered", + Age = 45, + Address = new Address { City = "Naples" } + }; + + await Should.ThrowAsync(() => nodeA.UpsertUserAsync(payload)); + nodeA.ReadUser(userId).ShouldNotBeNull(); + nodeA.GetLocalOplogCountForKey("Users", userId).ShouldBe(1); + + await nodeA.StopAsync(); + await nodeA.DisposeAsync(); + nodeADisposed = true; + + TestPeerNode? 
recoveredNodeA = null; + for (var attempt = 0; attempt < 10; attempt++) + try + { + recoveredNodeA = TestPeerNode.Create( + "node-a", + nodeAPort, + clusterToken, + [ + new KnownPeerConfiguration + { + NodeId = "node-b", + Host = "127.0.0.1", + Port = nodeBPort + } + ], + workDirOverride: sharedWorkDir); + break; + } + catch (Exception ex) when (IsRocksDbLockContention(ex) && attempt < 9) + { + await Task.Delay(100); + } + + recoveredNodeA.ShouldNotBeNull(); + await using (recoveredNodeA) + { + await recoveredNodeA.StartAsync(); + + await AssertEventuallyAsync(() => + { + var replicated = nodeB.ReadUser(userId); + return replicated is not null + && replicated.Name == payload.Name + && replicated.Age == payload.Age + && replicated.Address?.City == payload.Address?.City; + }, 60, "Node B did not converge after crash-window recovery.", () => BuildDiagnostics(recoveredNodeA, nodeB)); + + await AssertEventuallyAsync( + () => recoveredNodeA.GetOplogCountForKey("Users", userId) == 1 && + nodeB.GetOplogCountForKey("Users", userId) == 1, + 60, + "Crash-window recovery created duplicate oplog entries.", + () => BuildDiagnostics(recoveredNodeA, nodeB)); + } + } + finally + { + if (!nodeADisposed) await nodeA.DisposeAsync(); + } + } + private static async Task AssertEventuallyAsync( Func predicate, int timeoutSeconds, @@ -222,6 +504,11 @@ public class ClusterCrudSyncE2ETests return ((IPEndPoint)listener.LocalEndpoint).Port; } + private static bool IsRocksDbLockContention(Exception exception) + { + return exception.ToString().Contains("No locks available", StringComparison.OrdinalIgnoreCase); + } + private sealed class TestPeerNode : IAsyncDisposable { private readonly InMemoryLogSink _logSink; @@ -230,6 +517,7 @@ public class ClusterCrudSyncE2ETests private readonly IOplogStore _oplogStore; private readonly ServiceProvider _services; private readonly string _workDir; + private readonly bool _preserveWorkDirOnDispose; private long _lastPhysicalTime = 
DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(); private int _logicalCounter; private bool _started; @@ -241,7 +529,8 @@ public class ClusterCrudSyncE2ETests SampleDbContext context, InMemoryLogSink logSink, string workDir, - string nodeId) + string nodeId, + bool preserveWorkDirOnDispose) { _services = services; _node = node; @@ -250,10 +539,11 @@ public class ClusterCrudSyncE2ETests _logSink = logSink; _workDir = workDir; _nodeId = nodeId; + _preserveWorkDirOnDispose = preserveWorkDirOnDispose; } /// - /// Gets the LiteDB-backed context used by this test peer. + /// Gets the Surreal-backed context used by this test peer. /// public SampleDbContext Context { get; } @@ -269,7 +559,7 @@ public class ClusterCrudSyncE2ETests } _services.Dispose(); - TryDeleteDirectory(_workDir); + if (!_preserveWorkDirOnDispose) TryDeleteDirectory(_workDir); } /// @@ -284,11 +574,15 @@ public class ClusterCrudSyncE2ETests string nodeId, int tcpPort, string authToken, - IReadOnlyList knownPeers) + IReadOnlyList knownPeers, + string? workDirOverride = null, + bool preserveWorkDirOnDispose = false, + bool useFaultInjectedCheckpointStore = false) { - string workDir = Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}"); + string workDir = workDirOverride ?? 
Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}"); Directory.CreateDirectory(workDir); - string dbPath = Path.Combine(workDir, "node.blite"); + string dbPath = Path.Combine(workDir, "node.rocksdb"); + string surrealDatabase = nodeId.Replace("-", "_", StringComparison.Ordinal); var configProvider = new StaticPeerNodeConfigurationProvider(new PeerNodeConfiguration { @@ -304,9 +598,33 @@ public class ClusterCrudSyncE2ETests services.AddLogging(builder => builder.SetMinimumLevel(LogLevel.Debug)); services.AddSingleton(configProvider); services.AddSingleton(configProvider); - services.AddCBDDCCore() - .AddCBDDCBLite(_ => new SampleDbContext(dbPath)) - .AddCBDDCNetwork(false); + services.AddSingleton(); + services.AddSingleton(); + var surrealOptionsFactory = new Func(_ => new CBDDCSurrealEmbeddedOptions + { + Endpoint = "rocksdb://local", + DatabasePath = dbPath, + Namespace = "cbddc_e2e", + Database = surrealDatabase, + Cdc = new CBDDCSurrealCdcOptions + { + Enabled = true, + ConsumerId = $"{nodeId}-main" + } + }); + + var coreBuilder = services.AddCBDDCCore(); + if (useFaultInjectedCheckpointStore) + { + services.AddSingleton(); + coreBuilder.AddCBDDCSurrealEmbedded(surrealOptionsFactory) + .AddCBDDCNetwork(false); + } + else + { + coreBuilder.AddCBDDCSurrealEmbedded(surrealOptionsFactory) + .AddCBDDCNetwork(false); + } // Deterministic tests: sync uses explicit known peers, so disable UDP discovery. 
services.AddSingleton(); @@ -317,7 +635,15 @@ public class ClusterCrudSyncE2ETests var oplogStore = provider.GetRequiredService(); var context = provider.GetRequiredService(); var logSink = provider.GetRequiredService(); - return new TestPeerNode(provider, node, oplogStore, context, logSink, workDir, nodeId); + return new TestPeerNode( + provider, + node, + oplogStore, + context, + logSink, + workDir, + nodeId, + preserveWorkDirOnDispose); } /// @@ -340,7 +666,17 @@ public class ClusterCrudSyncE2ETests { if (!_started) return; - await _node.Stop(); + try + { + await _node.Stop(); + } + catch (ObjectDisposedException) + { + } + catch (AggregateException ex) when (ex.InnerExceptions.All(e => e is ObjectDisposedException)) + { + } + _started = false; } @@ -354,6 +690,23 @@ public class ClusterCrudSyncE2ETests return Context.Users.Find(u => u.Id == userId).FirstOrDefault(); } + public int GetLocalOplogCountForKey(string collection, string key) + { + return Context.OplogEntries.FindAll() + .Count(e => + string.Equals(e.Collection, collection, StringComparison.Ordinal) && + string.Equals(e.Key, key, StringComparison.Ordinal) && + string.Equals(e.TimestampNodeId, _nodeId, StringComparison.Ordinal)); + } + + public int GetOplogCountForKey(string collection, string key) + { + return Context.OplogEntries.FindAll() + .Count(e => + string.Equals(e.Collection, collection, StringComparison.Ordinal) && + string.Equals(e.Key, key, StringComparison.Ordinal)); + } + /// /// Inserts or updates a user and persists the matching oplog entry. 
/// @@ -466,6 +819,183 @@ public class ClusterCrudSyncE2ETests } } + private sealed class FaultInjectedSampleDocumentStore : SurrealDocumentStore + { + private const string UsersCollection = "Users"; + private const string TodoListsCollection = "TodoLists"; + + public FaultInjectedSampleDocumentStore( + SampleDbContext context, + IPeerNodeConfigurationProvider configProvider, + IVectorClockService vectorClockService, + ISurrealCdcCheckpointPersistence checkpointPersistence, + ILogger? logger = null) + : base( + context, + context.SurrealEmbeddedClient, + context.SchemaInitializer, + configProvider, + vectorClockService, + new LastWriteWinsConflictResolver(), + checkpointPersistence, + new SurrealCdcPollingOptions + { + Enabled = false, + EnableLiveSelectAccelerator = false + }, + logger) + { + WatchCollection(UsersCollection, context.Users, u => u.Id); + WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id); + } + + protected override async Task ApplyContentToEntityAsync( + string collection, + string key, + JsonElement content, + CancellationToken cancellationToken) + { + await UpsertEntityAsync(collection, key, content, cancellationToken); + } + + protected override async Task ApplyContentToEntitiesBatchAsync( + IEnumerable<(string Collection, string Key, JsonElement Content)> documents, + CancellationToken cancellationToken) + { + foreach ((string collection, string key, var content) in documents) + await UpsertEntityAsync(collection, key, content, cancellationToken); + } + + protected override async Task GetEntityAsJsonAsync( + string collection, + string key, + CancellationToken cancellationToken) + { + return collection switch + { + UsersCollection => SerializeEntity(await _context.Users.FindByIdAsync(key, cancellationToken)), + TodoListsCollection => SerializeEntity(await _context.TodoLists.FindByIdAsync(key, cancellationToken)), + _ => null + }; + } + + protected override async Task RemoveEntityAsync( + string collection, + string key, + 
CancellationToken cancellationToken) + { + await DeleteEntityAsync(collection, key, cancellationToken); + } + + protected override async Task RemoveEntitiesBatchAsync( + IEnumerable<(string Collection, string Key)> documents, + CancellationToken cancellationToken) + { + foreach ((string collection, string key) in documents) + await DeleteEntityAsync(collection, key, cancellationToken); + } + + protected override async Task> GetAllEntitiesAsJsonAsync( + string collection, + CancellationToken cancellationToken) + { + return collection switch + { + UsersCollection => (await _context.Users.FindAllAsync(cancellationToken)) + .Select(u => (u.Id, SerializeEntity(u)!.Value)) + .ToList(), + TodoListsCollection => (await _context.TodoLists.FindAllAsync(cancellationToken)) + .Select(t => (t.Id, SerializeEntity(t)!.Value)) + .ToList(), + _ => [] + }; + } + + private async Task UpsertEntityAsync( + string collection, + string key, + JsonElement content, + CancellationToken cancellationToken) + { + switch (collection) + { + case UsersCollection: + var user = content.Deserialize() ?? + throw new InvalidOperationException("Failed to deserialize user."); + user.Id = key; + if (await _context.Users.FindByIdAsync(key, cancellationToken) == null) + await _context.Users.InsertAsync(user, cancellationToken); + else + await _context.Users.UpdateAsync(user, cancellationToken); + break; + + case TodoListsCollection: + var todo = content.Deserialize() ?? 
+ throw new InvalidOperationException("Failed to deserialize todo list."); + todo.Id = key; + if (await _context.TodoLists.FindByIdAsync(key, cancellationToken) == null) + await _context.TodoLists.InsertAsync(todo, cancellationToken); + else + await _context.TodoLists.UpdateAsync(todo, cancellationToken); + break; + + default: + throw new NotSupportedException($"Collection '{collection}' is not supported for sync."); + } + } + + private async Task DeleteEntityAsync(string collection, string key, CancellationToken cancellationToken) + { + switch (collection) + { + case UsersCollection: + await _context.Users.DeleteAsync(key, cancellationToken); + break; + case TodoListsCollection: + await _context.TodoLists.DeleteAsync(key, cancellationToken); + break; + } + } + + private static JsonElement? SerializeEntity(T? entity) where T : class + { + return entity == null ? null : JsonSerializer.SerializeToElement(entity); + } + } + + private sealed class CrashAfterFirstAdvanceCheckpointPersistence : ISurrealCdcCheckpointPersistence + { + private int _failOnNextAdvance = 1; + + public Task GetCheckpointAsync( + string? consumerId = null, + CancellationToken cancellationToken = default) + { + return Task.FromResult(null); + } + + public Task UpsertCheckpointAsync( + HlcTimestamp timestamp, + string lastHash, + string? consumerId = null, + CancellationToken cancellationToken = default, + long? versionstampCursor = null) + { + return Task.CompletedTask; + } + + public Task AdvanceCheckpointAsync( + OplogEntry entry, + string? 
consumerId = null, + CancellationToken cancellationToken = default) + { + if (Interlocked.Exchange(ref _failOnNextAdvance, 0) == 1) + throw new InvalidOperationException("Injected crash between oplog commit and checkpoint advance."); + + return Task.CompletedTask; + } + } + private sealed class PassiveDiscoveryService : IDiscoveryService { /// @@ -637,4 +1167,4 @@ public class ClusterCrudSyncE2ETests { } } -} \ No newline at end of file +} diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs index 4de6cdf..7d94350 100755 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs @@ -1,47 +1,54 @@ using System.Text.Json; using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence; -using ZB.MOM.WW.CBDDC.Persistence.BLite; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Sync; +using ZB.MOM.WW.CBDDC.Persistence; +using ZB.MOM.WW.CBDDC.Persistence.Surreal; namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests; -/// -/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations. -/// -public class BLiteStoreExportImportTests : IDisposable -{ +/// +/// Tests for Surreal persistence stores: Export, Import, Merge, Drop operations. 
+/// +public class SurrealStoreExportImportTests : IDisposable +{ private readonly IPeerNodeConfigurationProvider _configProvider; private readonly SampleDbContext _context; private readonly SampleDocumentStore _documentStore; - private readonly BLiteOplogStore _oplogStore; - private readonly BLitePeerConfigurationStore _peerConfigStore; - private readonly BLiteSnapshotMetadataStore _snapshotMetadataStore; + private readonly SurrealOplogStore _oplogStore; + private readonly SurrealPeerConfigurationStore _peerConfigStore; + private readonly SurrealSnapshotMetadataStore _snapshotMetadataStore; private readonly string _testDbPath; - /// - /// Initializes a new instance of the class. - /// - public BLiteStoreExportImportTests() - { - _testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.blite"); - _context = new SampleDbContext(_testDbPath); - _configProvider = CreateConfigProvider("test-node"); - var vectorClock = new VectorClockService(); - - _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, - NullLogger.Instance); - _snapshotMetadataStore = new BLiteSnapshotMetadataStore( - _context, NullLogger>.Instance); - _oplogStore = new BLiteOplogStore( - _context, _documentStore, new LastWriteWinsConflictResolver(), - vectorClock, - _snapshotMetadataStore, - NullLogger>.Instance); - _peerConfigStore = new BLitePeerConfigurationStore( - _context, NullLogger>.Instance); + /// + /// Initializes a new instance of the class. 
+ /// + public SurrealStoreExportImportTests() + { + _testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.rocksdb"); + _context = new SampleDbContext(_testDbPath); + _configProvider = CreateConfigProvider("test-node"); + var vectorClock = new VectorClockService(); + + _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, + logger: NullLogger.Instance); + _snapshotMetadataStore = new SurrealSnapshotMetadataStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + NullLogger.Instance); + _oplogStore = new SurrealOplogStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + _documentStore, + new LastWriteWinsConflictResolver(), + vectorClock, + _snapshotMetadataStore, + NullLogger.Instance); + _peerConfigStore = new SurrealPeerConfigurationStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + NullLogger.Instance); } /// @@ -52,13 +59,13 @@ public class BLiteStoreExportImportTests : IDisposable _documentStore?.Dispose(); _context?.Dispose(); - if (File.Exists(_testDbPath)) - try - { - File.Delete(_testDbPath); - } - catch - { + if (Directory.Exists(_testDbPath)) + try + { + Directory.Delete(_testDbPath, true); + } + catch + { } } @@ -506,4 +513,4 @@ public class BLiteStoreExportImportTests : IDisposable } #endregion -} \ No newline at end of file +} diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs index 56aeb3f..81b60b4 100644 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/PeerOplogConfirmationStoreTests.cs @@ -1,14 +1,14 @@ using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Persistence.BLite; +using ZB.MOM.WW.CBDDC.Persistence.Surreal; namespace 
ZB.MOM.WW.CBDDC.Sample.Console.Tests; public class PeerOplogConfirmationStoreTests : IDisposable { private readonly SampleDbContext _context; - private readonly BLitePeerOplogConfirmationStore _store; + private readonly SurrealPeerOplogConfirmationStore _store; private readonly string _testDbPath; /// @@ -16,21 +16,22 @@ public class PeerOplogConfirmationStoreTests : IDisposable /// public PeerOplogConfirmationStoreTests() { - _testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.blite"); + _testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.rocksdb"); _context = new SampleDbContext(_testDbPath); - _store = new BLitePeerOplogConfirmationStore( - _context, - NullLogger>.Instance); + _store = new SurrealPeerOplogConfirmationStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + NullLogger.Instance); } /// public void Dispose() { _context?.Dispose(); - if (File.Exists(_testDbPath)) + if (Directory.Exists(_testDbPath)) try { - File.Delete(_testDbPath); + Directory.Delete(_testDbPath, true); } catch { @@ -109,4 +110,4 @@ public class PeerOplogConfirmationStoreTests : IDisposable peerARows.ShouldNotBeEmpty(); peerARows.All(x => !x.IsActive).ShouldBeTrue(); } -} \ No newline at end of file +} diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs index e719797..568d168 100755 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs @@ -10,7 +10,7 @@ public class SampleDbContextTests : IDisposable /// public SampleDbContextTests() { - _dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.db"); + _dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.rocksdb"); _context = new SampleDbContext(_dbPath); } @@ -20,10 +20,10 @@ public class SampleDbContextTests : 
IDisposable public void Dispose() { _context?.Dispose(); - if (File.Exists(_dbPath)) + if (Directory.Exists(_dbPath)) try { - File.Delete(_dbPath); + Directory.Delete(_dbPath, true); } catch { @@ -38,9 +38,9 @@ public class SampleDbContextTests : IDisposable { // Verifica che le collezioni siano state inizializzate _context.ShouldNotBeNull(); - _context.Users.ShouldNotBeNull("Users collection should be initialized by BLite"); - _context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by BLite"); - _context.OplogEntries.ShouldNotBeNull("OplogEntries collection should be initialized by BLite"); + _context.Users.ShouldNotBeNull("Users collection should be initialized by Surreal context"); + _context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by Surreal context"); + _context.OplogEntries.ShouldNotBeNull("OplogEntries view should be initialized by Surreal context"); } /// @@ -220,4 +220,4 @@ public class SampleDbContextTests : IDisposable adults.Select(u => u.Name).ShouldContain("Adult"); adults.Select(u => u.Name).ShouldContain("Senior"); } -} \ No newline at end of file +} diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs index 788d039..0f36e01 100755 --- a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs @@ -2,21 +2,21 @@ using System.Text.Json; using System.Text.Json.Nodes; using Microsoft.Extensions.Logging.Abstractions; using ZB.MOM.WW.CBDDC.Core; -using ZB.MOM.WW.CBDDC.Core.Network; -using ZB.MOM.WW.CBDDC.Core.Sync; -using ZB.MOM.WW.CBDDC.Persistence; -using ZB.MOM.WW.CBDDC.Persistence.BLite; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Sync; +using ZB.MOM.WW.CBDDC.Persistence; +using ZB.MOM.WW.CBDDC.Persistence.Surreal; namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests; public class SnapshotStoreTests : IDisposable { - 
private readonly IPeerNodeConfigurationProvider _configProvider; - private readonly SampleDbContext _context; - private readonly SampleDocumentStore _documentStore; - private readonly BLiteOplogStore _oplogStore; - private readonly BLitePeerConfigurationStore _peerConfigStore; - private readonly BLitePeerOplogConfirmationStore _peerConfirmationStore; + private readonly IPeerNodeConfigurationProvider _configProvider; + private readonly SampleDbContext _context; + private readonly SampleDocumentStore _documentStore; + private readonly SurrealOplogStore _oplogStore; + private readonly SurrealPeerConfigurationStore _peerConfigStore; + private readonly SurrealPeerOplogConfirmationStore _peerConfirmationStore; private readonly SnapshotStore _snapshotStore; private readonly string _testDbPath; @@ -25,29 +25,33 @@ public class SnapshotStoreTests : IDisposable /// public SnapshotStoreTests() { - _testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.blite"); - _context = new SampleDbContext(_testDbPath); - _configProvider = CreateConfigProvider("test-node"); - var vectorClock = new VectorClockService(); - - _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, - NullLogger.Instance); - var snapshotMetadataStore = new BLiteSnapshotMetadataStore( - _context, - NullLogger>.Instance); - _oplogStore = new BLiteOplogStore( - _context, - _documentStore, - new LastWriteWinsConflictResolver(), - vectorClock, - snapshotMetadataStore, - NullLogger>.Instance); - _peerConfigStore = new BLitePeerConfigurationStore( - _context, - NullLogger>.Instance); - _peerConfirmationStore = new BLitePeerOplogConfirmationStore( - _context, - NullLogger>.Instance); + _testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.rocksdb"); + _context = new SampleDbContext(_testDbPath); + _configProvider = CreateConfigProvider("test-node"); + var vectorClock = new VectorClockService(); + + _documentStore = new 
SampleDocumentStore(_context, _configProvider, vectorClock, + logger: NullLogger.Instance); + var snapshotMetadataStore = new SurrealSnapshotMetadataStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + NullLogger.Instance); + _oplogStore = new SurrealOplogStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + _documentStore, + new LastWriteWinsConflictResolver(), + vectorClock, + snapshotMetadataStore, + NullLogger.Instance); + _peerConfigStore = new SurrealPeerConfigurationStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + NullLogger.Instance); + _peerConfirmationStore = new SurrealPeerOplogConfirmationStore( + _context.SurrealEmbeddedClient, + _context.SchemaInitializer, + NullLogger.Instance); _snapshotStore = new SnapshotStore( _documentStore, @@ -66,13 +70,13 @@ public class SnapshotStoreTests : IDisposable _documentStore?.Dispose(); _context?.Dispose(); - if (File.Exists(_testDbPath)) - try - { - File.Delete(_testDbPath); - } - catch - { + if (Directory.Exists(_testDbPath)) + try + { + Directory.Delete(_testDbPath, true); + } + catch + { } } @@ -170,26 +174,34 @@ public class SnapshotStoreTests : IDisposable snapshotStream.Position = 0; // Create a new context/stores (simulating a different node) - string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite"); + string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.rocksdb"); try { using var newContext = new SampleDbContext(newDbPath); - var newConfigProvider = CreateConfigProvider("test-new-node"); - var newVectorClock = new VectorClockService(); - var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock, - NullLogger.Instance); - var newSnapshotMetaStore = new BLiteSnapshotMetadataStore( - newContext, NullLogger>.Instance); - var newOplogStore = new BLiteOplogStore( - newContext, newDocStore, new LastWriteWinsConflictResolver(), - 
newVectorClock, - newSnapshotMetaStore, - NullLogger>.Instance); - var newPeerStore = new BLitePeerConfigurationStore( - newContext, NullLogger>.Instance); - var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore( - newContext, - NullLogger>.Instance); + var newConfigProvider = CreateConfigProvider("test-new-node"); + var newVectorClock = new VectorClockService(); + var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock, + logger: NullLogger.Instance); + var newSnapshotMetaStore = new SurrealSnapshotMetadataStore( + newContext.SurrealEmbeddedClient, + newContext.SchemaInitializer, + NullLogger.Instance); + var newOplogStore = new SurrealOplogStore( + newContext.SurrealEmbeddedClient, + newContext.SchemaInitializer, + newDocStore, + new LastWriteWinsConflictResolver(), + newVectorClock, + newSnapshotMetaStore, + NullLogger.Instance); + var newPeerStore = new SurrealPeerConfigurationStore( + newContext.SurrealEmbeddedClient, + newContext.SchemaInitializer, + NullLogger.Instance); + var newPeerConfirmationStore = new SurrealPeerOplogConfirmationStore( + newContext.SurrealEmbeddedClient, + newContext.SchemaInitializer, + NullLogger.Instance); var newSnapshotStore = new SnapshotStore( newDocStore, @@ -218,14 +230,14 @@ public class SnapshotStoreTests : IDisposable } finally { - if (File.Exists(newDbPath)) - try - { - File.Delete(newDbPath); - } - catch - { - } + if (Directory.Exists(newDbPath)) + try + { + Directory.Delete(newDbPath, true); + } + catch + { + } } } @@ -250,7 +262,7 @@ public class SnapshotStoreTests : IDisposable await _context.SaveChangesAsync(); // Create snapshot with different data - string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite"); + string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.rocksdb"); MemoryStream snapshotStream; try @@ -259,22 +271,30 @@ public class SnapshotStoreTests : IDisposable await 
sourceContext.Users.InsertAsync(new User { Id = "new-user", Name = "New User", Age = 25 }); await sourceContext.SaveChangesAsync(); - var sourceConfigProvider = CreateConfigProvider("test-source-node"); - var sourceVectorClock = new VectorClockService(); - var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock, - NullLogger.Instance); - var sourceSnapshotMetaStore = new BLiteSnapshotMetadataStore( - sourceContext, NullLogger>.Instance); - var sourceOplogStore = new BLiteOplogStore( - sourceContext, sourceDocStore, new LastWriteWinsConflictResolver(), - sourceVectorClock, - sourceSnapshotMetaStore, - NullLogger>.Instance); - var sourcePeerStore = new BLitePeerConfigurationStore( - sourceContext, NullLogger>.Instance); - var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore( - sourceContext, - NullLogger>.Instance); + var sourceConfigProvider = CreateConfigProvider("test-source-node"); + var sourceVectorClock = new VectorClockService(); + var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock, + logger: NullLogger.Instance); + var sourceSnapshotMetaStore = new SurrealSnapshotMetadataStore( + sourceContext.SurrealEmbeddedClient, + sourceContext.SchemaInitializer, + NullLogger.Instance); + var sourceOplogStore = new SurrealOplogStore( + sourceContext.SurrealEmbeddedClient, + sourceContext.SchemaInitializer, + sourceDocStore, + new LastWriteWinsConflictResolver(), + sourceVectorClock, + sourceSnapshotMetaStore, + NullLogger.Instance); + var sourcePeerStore = new SurrealPeerConfigurationStore( + sourceContext.SurrealEmbeddedClient, + sourceContext.SchemaInitializer, + NullLogger.Instance); + var sourcePeerConfirmationStore = new SurrealPeerOplogConfirmationStore( + sourceContext.SurrealEmbeddedClient, + sourceContext.SchemaInitializer, + NullLogger.Instance); await sourcePeerConfirmationStore.UpdateConfirmationAsync( "peer-merge", "source-a", @@ -300,13 +320,13 @@ 
public class SnapshotStoreTests : IDisposable } finally { - if (File.Exists(sourceDbPath)) - try - { - File.Delete(sourceDbPath); - } - catch - { + if (Directory.Exists(sourceDbPath)) + try + { + Directory.Delete(sourceDbPath, true); + } + catch + { } } @@ -447,4 +467,4 @@ public class SnapshotStoreTests : IDisposable }); return configProvider; } -} \ No newline at end of file +} diff --git a/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SurrealCdcDurabilityTests.cs b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SurrealCdcDurabilityTests.cs new file mode 100644 index 0000000..7f759a3 --- /dev/null +++ b/tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SurrealCdcDurabilityTests.cs @@ -0,0 +1,580 @@ +using System.Text.Json; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using ZB.MOM.WW.CBDDC.Core; +using ZB.MOM.WW.CBDDC.Core.Network; +using ZB.MOM.WW.CBDDC.Core.Storage; +using ZB.MOM.WW.CBDDC.Core.Sync; +using ZB.MOM.WW.CBDDC.Persistence; +using ZB.MOM.WW.CBDDC.Persistence.Surreal; + +namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests; + +[Collection("SurrealCdcDurability")] +public class SurrealCdcDurabilityTests +{ + [Fact] + public async Task CheckpointPersistence_ShouldTrackLatestLocalChange_AndPersistPerConsumer() + { + string dbPath = CreateTemporaryDatabasePath(); + const string nodeId = "node-checkpoint"; + const string defaultConsumer = "consumer-default"; + const string secondaryConsumer = "consumer-secondary"; + + try + { + HlcTimestamp expectedTimestamp = default; + string expectedHash = ""; + DateTimeOffset previousUpdatedUtc = DateTimeOffset.MinValue; + + await using (var harness = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, defaultConsumer)) + { + var user = CreateUser("checkpoint-user", "Alice", 30, "Austin"); + await harness.Context.Users.InsertAsync(user); + await harness.Context.SaveChangesAsync(); + await harness.PollAsync(); + + user.Age = 31; + user.Address = new Address { City = "Dallas" }; + await 
harness.Context.Users.UpdateAsync(user); + await harness.Context.SaveChangesAsync(); + await harness.PollAsync(); + + await WaitForConditionAsync( + async () => (await harness.GetEntriesByKeyAsync("Users", "checkpoint-user")).Count >= 2, + "Timed out waiting for checkpoint-user oplog entries."); + var entries = await harness.GetEntriesByKeyAsync("Users", "checkpoint-user"); + entries.Count.ShouldBe(2); + + expectedTimestamp = entries[^1].Timestamp; + expectedHash = entries[^1].Hash; + + var checkpoint = await harness.CheckpointPersistence.GetCheckpointAsync(); + checkpoint.ShouldNotBeNull(); + checkpoint!.Timestamp.ShouldBe(expectedTimestamp); + checkpoint.LastHash.ShouldBe(expectedHash); + previousUpdatedUtc = checkpoint.UpdatedUtc; + + await harness.CheckpointPersistence.UpsertCheckpointAsync( + entries[0].Timestamp, + entries[0].Hash, + secondaryConsumer); + + var secondary = await harness.CheckpointPersistence.GetCheckpointAsync(secondaryConsumer); + secondary.ShouldNotBeNull(); + secondary!.Timestamp.ShouldBe(entries[0].Timestamp); + secondary.LastHash.ShouldBe(entries[0].Hash); + } + + await using (var restarted = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, defaultConsumer)) + { + var restoredDefault = await restarted.CheckpointPersistence.GetCheckpointAsync(); + restoredDefault.ShouldNotBeNull(); + restoredDefault!.Timestamp.ShouldBe(expectedTimestamp); + restoredDefault.LastHash.ShouldBe(expectedHash); + restoredDefault.UpdatedUtc.ShouldBe(previousUpdatedUtc); + + var restoredSecondary = await restarted.CheckpointPersistence.GetCheckpointAsync(secondaryConsumer); + restoredSecondary.ShouldNotBeNull(); + restoredSecondary!.LastHash.ShouldNotBe(restoredDefault.LastHash); + } + } + finally + { + await DeleteDirectoryWithRetriesAsync(dbPath); + } + } + + [Fact] + public async Task RestartRecovery_ShouldResumeCatchUpFromPersistedCheckpoint_InRocksDb() + { + string dbPath = CreateTemporaryDatabasePath(); + const string nodeId = "node-resume"; + 
const string consumerId = "consumer-resume"; + HlcTimestamp resumeTimestamp = default; + string resumeHash = ""; + string expectedFinalHash = ""; + + try + { + await using (var initial = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId)) + { + await initial.Context.Users.InsertAsync(CreateUser("resume-1", "User One", 18, "Rome")); + await initial.Context.SaveChangesAsync(); + await initial.PollAsync(); + await initial.Context.Users.InsertAsync(CreateUser("resume-2", "User Two", 19, "Milan")); + await initial.Context.SaveChangesAsync(); + await initial.PollAsync(); + + await WaitForConditionAsync( + async () => (await initial.GetEntriesByCollectionAsync("Users")).Count >= 2, + "Timed out waiting for resume oplog entries."); + var entries = await initial.GetEntriesByCollectionAsync("Users"); + entries.Count.ShouldBe(2); + + resumeTimestamp = entries[0].Timestamp; + resumeHash = entries[0].Hash; + expectedFinalHash = entries[1].Hash; + + await initial.CheckpointPersistence.UpsertCheckpointAsync(resumeTimestamp, resumeHash); + } + + await using (var restarted = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId)) + { + var checkpoint = await restarted.CheckpointPersistence.GetCheckpointAsync(); + checkpoint.ShouldNotBeNull(); + checkpoint!.Timestamp.ShouldBe(resumeTimestamp); + checkpoint.LastHash.ShouldBe(resumeHash); + + var catchUp = (await restarted.OplogStore.GetOplogAfterAsync(checkpoint.Timestamp)) + .OrderBy(e => e.Timestamp.PhysicalTime) + .ThenBy(e => e.Timestamp.LogicalCounter) + .ToList(); + + catchUp.Count.ShouldBe(1); + catchUp[0].Hash.ShouldBe(expectedFinalHash); + + await restarted.CheckpointPersistence.AdvanceCheckpointAsync(catchUp[0]); + } + + await using (var recovered = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId)) + { + var finalCheckpoint = await recovered.CheckpointPersistence.GetCheckpointAsync(); + finalCheckpoint.ShouldNotBeNull(); + 
finalCheckpoint!.LastHash.ShouldBe(expectedFinalHash); + + var remaining = await recovered.OplogStore.GetOplogAfterAsync(finalCheckpoint.Timestamp); + remaining.ShouldBeEmpty(); + } + } + finally + { + await DeleteDirectoryWithRetriesAsync(dbPath); + } + } + + [Fact] + public async Task RemoteApply_ShouldBeIdempotentAcrossDuplicateWindow_WithoutLoopbackEntries() + { + string dbPath = CreateTemporaryDatabasePath(); + const string localNodeId = "node-local"; + const string remoteNodeId = "node-remote"; + + try + { + await using var harness = await CdcTestHarness.OpenWithRetriesAsync( + dbPath, + localNodeId, + "consumer-loopback"); + + await harness.Context.Users.InsertAsync(CreateUser("loopback-user", "Loopback", 40, "Boston")); + await harness.Context.SaveChangesAsync(); + await harness.PollAsync(); + + await WaitForConditionAsync( + async () => (await harness.GetEntriesByKeyAsync("Users", "loopback-user")).Count >= 1, + "Timed out waiting for loopback-user insert oplog entry."); + var localEntries = await harness.GetEntriesByKeyAsync("Users", "loopback-user"); + localEntries.Count.ShouldBe(1); + localEntries[0].Operation.ShouldBe(OperationType.Put); + localEntries[0].Timestamp.NodeId.ShouldBe(localNodeId); + + var remoteDelete = new OplogEntry( + "Users", + "loopback-user", + OperationType.Delete, + null, + new HlcTimestamp(localEntries[0].Timestamp.PhysicalTime + 10, 0, remoteNodeId), + localEntries[0].Hash); + + var duplicateWindow = new[] { remoteDelete, remoteDelete }; + + await harness.OplogStore.ApplyBatchAsync(duplicateWindow); + await harness.OplogStore.ApplyBatchAsync(duplicateWindow); + + harness.Context.Users.FindById("loopback-user").ShouldBeNull(); + + var allEntries = await harness.GetEntriesByKeyAsync("Users", "loopback-user"); + allEntries.Count(e => e.Hash == remoteDelete.Hash).ShouldBe(1); + allEntries.Count(e => e.Operation == OperationType.Delete && e.Timestamp.NodeId == localNodeId) + .ShouldBe(0); + allEntries.Count(e => e.Operation == 
OperationType.Delete && e.Timestamp.NodeId == remoteNodeId) + .ShouldBe(1); + } + finally + { + await DeleteDirectoryWithRetriesAsync(dbPath); + } + } + + [Fact] + public async Task LocalDelete_ShouldPersistTombstoneMetadata_AndAdvanceCheckpoint() + { + string dbPath = CreateTemporaryDatabasePath(); + const string nodeId = "node-tombstone"; + + try + { + await using var harness = await CdcTestHarness.OpenWithRetriesAsync( + dbPath, + nodeId, + "consumer-tombstone"); + + await harness.Context.Users.InsertAsync(CreateUser("tombstone-user", "Before Delete", 28, "Turin")); + await harness.Context.SaveChangesAsync(); + await harness.PollAsync(); + await harness.Context.Users.DeleteAsync("tombstone-user"); + await harness.Context.SaveChangesAsync(); + await harness.PollAsync(); + + harness.Context.Users.FindById("tombstone-user").ShouldBeNull(); + + await WaitForConditionAsync( + async () => (await harness.GetEntriesByKeyAsync("Users", "tombstone-user")).Count >= 2, + "Timed out waiting for tombstone-user oplog entries."); + var entries = await harness.GetEntriesByKeyAsync("Users", "tombstone-user"); + entries.Count.ShouldBe(2); + var deleteEntry = entries.Last(e => e.Operation == OperationType.Delete); + + var metadata = await harness.MetadataStore.GetMetadataAsync("Users", "tombstone-user"); + metadata.ShouldNotBeNull(); + metadata!.IsDeleted.ShouldBeTrue(); + metadata.UpdatedAt.ShouldBe(deleteEntry.Timestamp); + + var checkpoint = await harness.CheckpointPersistence.GetCheckpointAsync(); + checkpoint.ShouldNotBeNull(); + checkpoint!.LastHash.ShouldBe(deleteEntry.Hash); + checkpoint.Timestamp.ShouldBe(deleteEntry.Timestamp); + } + finally + { + await DeleteDirectoryWithRetriesAsync(dbPath); + } + } + + private static User CreateUser(string id, string name, int age, string city) + { + return new User + { + Id = id, + Name = name, + Age = age, + Address = new Address { City = city } + }; + } + + private static string CreateTemporaryDatabasePath() + { + return 
Path.Combine(Path.GetTempPath(), $"cbddc-cdc-{Guid.NewGuid():N}.rocksdb"); + } + + private static async Task DeleteDirectoryWithRetriesAsync(string path) + { + for (var attempt = 0; attempt < 5; attempt++) + try + { + if (Directory.Exists(path)) Directory.Delete(path, true); + return; + } + catch when (attempt < 4) + { + await Task.Delay(50); + } + } + + private static async Task WaitForConditionAsync( + Func> predicate, + string failureMessage, + int timeoutMs = 6000, + int pollMs = 50) + { + DateTimeOffset deadline = DateTimeOffset.UtcNow.AddMilliseconds(timeoutMs); + while (DateTimeOffset.UtcNow < deadline) + { + if (await predicate()) return; + await Task.Delay(pollMs); + } + + throw new TimeoutException(failureMessage); + } +} + +[CollectionDefinition("SurrealCdcDurability", DisableParallelization = true)] +public sealed class SurrealCdcDurabilityCollection; + +internal sealed class CdcTestHarness : IAsyncDisposable +{ + private readonly VectorClockService _vectorClock; + private readonly CBDDCSurrealEmbeddedOptions _options; + + private CdcTestHarness(string databasePath, string nodeId, string consumerId) + { + _options = new CBDDCSurrealEmbeddedOptions + { + Cdc = new CBDDCSurrealCdcOptions + { + Enabled = true, + ConsumerId = consumerId, + CheckpointTable = "cbddc_cdc_checkpoint" + } + }; + + Context = new SampleDbContext(databasePath); + _vectorClock = new VectorClockService(); + + var configProvider = Substitute.For(); + configProvider.GetConfiguration().Returns(new PeerNodeConfiguration + { + NodeId = nodeId, + AuthToken = "test-token", + TcpPort = 0 + }); + + CheckpointPersistence = new SurrealCdcCheckpointPersistence( + Context.SurrealEmbeddedClient, + Context.SchemaInitializer, + _options); + + DocumentStore = new CheckpointedSampleDocumentStore( + Context, + configProvider, + _vectorClock, + CheckpointPersistence, + _options, + NullLogger.Instance); + + OplogStore = new SurrealOplogStore( + Context.SurrealEmbeddedClient, + Context.SchemaInitializer, 
+ DocumentStore, + new LastWriteWinsConflictResolver(), + _vectorClock, + null, + NullLogger.Instance); + + MetadataStore = new SurrealDocumentMetadataStore( + Context.SurrealEmbeddedClient, + Context.SchemaInitializer, + NullLogger.Instance); + } + + public SampleDbContext Context { get; } + + public CheckpointedSampleDocumentStore DocumentStore { get; } + + public SurrealOplogStore OplogStore { get; } + + public SurrealDocumentMetadataStore MetadataStore { get; } + + public ISurrealCdcCheckpointPersistence CheckpointPersistence { get; } + + public async Task PollAsync() + { + await DocumentStore.PollCdcOnceAsync(); + } + + public static async Task OpenWithRetriesAsync( + string databasePath, + string nodeId, + string consumerId) + { + for (var attempt = 0; attempt < 8; attempt++) + try + { + return new CdcTestHarness(databasePath, nodeId, consumerId); + } + catch (Exception ex) when (IsLockContention(ex) && attempt < 7) + { + await Task.Delay(75); + } + + throw new InvalidOperationException("Unable to acquire RocksDB lock for test harness."); + } + + public async Task> GetEntriesByCollectionAsync(string collection) + { + return (await OplogStore.ExportAsync()) + .Where(e => string.Equals(e.Collection, collection, StringComparison.Ordinal)) + .OrderBy(e => e.Timestamp.PhysicalTime) + .ThenBy(e => e.Timestamp.LogicalCounter) + .ToList(); + } + + public async Task> GetEntriesByKeyAsync(string collection, string key) + { + return (await OplogStore.ExportAsync()) + .Where(e => string.Equals(e.Collection, collection, StringComparison.Ordinal) && + string.Equals(e.Key, key, StringComparison.Ordinal)) + .OrderBy(e => e.Timestamp.PhysicalTime) + .ThenBy(e => e.Timestamp.LogicalCounter) + .ToList(); + } + + public async ValueTask DisposeAsync() + { + DocumentStore.Dispose(); + Context.Dispose(); + await Task.Delay(75); + } + + private static bool IsLockContention(Exception exception) + { + return exception.ToString().Contains("No locks available", 
/// <summary>
/// Sample-app document store that projects CBDDC sync operations onto the
/// SampleDbContext entity sets ("Users" and "TodoLists"), persisting CDC
/// checkpoints through <see cref="ISurrealCdcCheckpointPersistence"/>.
/// NOTE(review): generic type arguments were reconstructed from usage — the
/// extracted source had them stripped; confirm against the repository.
/// </summary>
internal sealed class CheckpointedSampleDocumentStore : SurrealDocumentStore<SampleDbContext>
{
    private const string UsersCollection = "Users";
    private const string TodoListsCollection = "TodoLists";

    public CheckpointedSampleDocumentStore(
        SampleDbContext context,
        IPeerNodeConfigurationProvider configProvider,
        IVectorClockService vectorClockService,
        ISurrealCdcCheckpointPersistence checkpointPersistence,
        CBDDCSurrealEmbeddedOptions? surrealOptions = null,
        ILogger? logger = null)
        : base(
            context,
            context.SurrealEmbeddedClient,
            context.SchemaInitializer,
            configProvider,
            vectorClockService,
            new LastWriteWinsConflictResolver(),
            checkpointPersistence,
            BuildPollingOptions(surrealOptions),
            logger)
    {
        // Changes are observed via CDC polling, so no in-memory event subscription.
        WatchCollection(UsersCollection, context.Users, u => u.Id, subscribeForInMemoryEvents: false);
        WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id, subscribeForInMemoryEvents: false);
    }

    /// <summary>Applies one replicated document to the matching entity set.</summary>
    protected override async Task ApplyContentToEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
    {
        await UpsertEntityAsync(collection, key, content, cancellationToken);
    }

    /// <summary>Applies a batch of replicated documents sequentially.</summary>
    protected override async Task ApplyContentToEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
        CancellationToken cancellationToken)
    {
        foreach ((string collection, string key, JsonElement content) in documents)
            await UpsertEntityAsync(collection, key, content, cancellationToken);
    }

    /// <summary>Serializes the entity for <paramref name="key"/>; null when absent or the collection is unknown.</summary>
    protected override async Task<JsonElement?> GetEntityAsJsonAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
    {
        return collection switch
        {
            UsersCollection => SerializeEntity(await _context.Users.FindByIdAsync(key, cancellationToken)),
            TodoListsCollection => SerializeEntity(await _context.TodoLists.FindByIdAsync(key, cancellationToken)),
            _ => null
        };
    }

    protected override async Task RemoveEntityAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
    {
        await DeleteEntityAsync(collection, key, cancellationToken);
    }

    protected override async Task RemoveEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key)> documents,
        CancellationToken cancellationToken)
    {
        foreach ((string collection, string key) in documents)
            await DeleteEntityAsync(collection, key, cancellationToken);
    }

    /// <summary>Serializes every entity in the collection; empty for unknown collections.</summary>
    protected override async Task<IReadOnlyList<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
        string collection,
        CancellationToken cancellationToken)
    {
        return collection switch
        {
            UsersCollection => (await _context.Users.FindAllAsync(cancellationToken))
                .Select(u => (u.Id, SerializeEntity(u)!.Value))
                .ToList(),
            TodoListsCollection => (await _context.TodoLists.FindAllAsync(cancellationToken))
                .Select(t => (t.Id, SerializeEntity(t)!.Value))
                .ToList(),
            _ => []
        };
    }

    // Insert-or-update; the sync key always wins over any id embedded in the payload.
    private async Task UpsertEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
    {
        switch (collection)
        {
            case UsersCollection:
                var user = content.Deserialize<User>() ??
                           throw new InvalidOperationException("Failed to deserialize user.");
                user.Id = key;
                if (await _context.Users.FindByIdAsync(key, cancellationToken) == null)
                    await _context.Users.InsertAsync(user, cancellationToken);
                else
                    await _context.Users.UpdateAsync(user, cancellationToken);
                break;

            case TodoListsCollection:
                var todo = content.Deserialize<TodoList>() ??
                           throw new InvalidOperationException("Failed to deserialize todo list.");
                todo.Id = key;
                if (await _context.TodoLists.FindByIdAsync(key, cancellationToken) == null)
                    await _context.TodoLists.InsertAsync(todo, cancellationToken);
                else
                    await _context.TodoLists.UpdateAsync(todo, cancellationToken);
                break;

            default:
                throw new NotSupportedException($"Collection '{collection}' is not supported for sync.");
        }
    }

    // Deletes tolerate unknown collections (no-op), unlike upserts which throw.
    private async Task DeleteEntityAsync(string collection, string key, CancellationToken cancellationToken)
    {
        switch (collection)
        {
            case UsersCollection:
                await _context.Users.DeleteAsync(key, cancellationToken);
                break;

            case TodoListsCollection:
                await _context.TodoLists.DeleteAsync(key, cancellationToken);
                break;
        }
    }

    private static JsonElement? SerializeEntity<T>(T? entity) where T : class
    {
        return entity == null ? null : JsonSerializer.SerializeToElement(entity);
    }

    /// <summary>Maps embedded-client CDC options onto store polling options; null keeps store defaults.</summary>
    private static SurrealCdcPollingOptions? BuildPollingOptions(CBDDCSurrealEmbeddedOptions? options)
    {
        if (options == null) return null;

        return new SurrealCdcPollingOptions
        {
            Enabled = options.Cdc.Enabled,
            PollInterval = options.Cdc.PollingInterval,
            BatchSize = options.Cdc.BatchSize,
            EnableLiveSelectAccelerator = options.Cdc.EnableLiveSelectAccelerator,
            LiveSelectReconnectDelay = options.Cdc.LiveSelectReconnectDelay
        };
    }
}
/// <summary>
/// CDC matrix-completion tests: retention-boundary classification, exactly-once
/// local oplog emission, and checkpoint non-advancement on atomic write failure.
/// NOTE(review): mock/type generic arguments were reconstructed from usage — the
/// extracted source had them stripped; confirm against the real ctor signatures.
/// </summary>
public class SurrealCdcMatrixCompletionTests
{
    [Theory]
    [InlineData("versionstamp is outside the configured retention window", true)]
    [InlineData("change feed history since cursor is unavailable", true)]
    [InlineData("socket closed unexpectedly", false)]
    public void RetentionBoundaryClassifier_ShouldDetectExpectedPatterns(string message, bool expected)
    {
        // The classifier is a private static on the generic store; close it over
        // an arbitrary type argument to reflect on the concrete method.
        var closedType = typeof(SurrealDocumentStore<>).MakeGenericType(typeof(object));
        var classifier = closedType.GetMethod(
            "IsLikelyChangefeedRetentionBoundary",
            BindingFlags.NonPublic | BindingFlags.Static);

        classifier.ShouldNotBeNull();
        bool actual = (bool)classifier!.Invoke(null, [new InvalidOperationException(message)])!;
        actual.ShouldBe(expected);
    }

    [Fact]
    public async Task LocalWrite_ShouldEmitExactlyOneOplogEntry()
    {
        string dbPath = Path.Combine(Path.GetTempPath(), $"cbddc-cdc-matrix-{Guid.NewGuid():N}.rocksdb");

        try
        {
            await using var harness = await CdcTestHarness.OpenWithRetriesAsync(dbPath, "node-single-write", "consumer-single");

            await harness.Context.Users.InsertAsync(new User
            {
                Id = "single-write-user",
                Name = "Single Write",
                Age = 25,
                Address = new Address { City = "Bologna" }
            });
            await harness.Context.SaveChangesAsync();
            await harness.PollAsync();

            await WaitForConditionAsync(
                async () => (await harness.GetEntriesByKeyAsync("Users", "single-write-user")).Count == 1,
                "Timed out waiting for exactly one local oplog entry.");

            var entries = await harness.GetEntriesByKeyAsync("Users", "single-write-user");
            entries.Count.ShouldBe(1);
            entries[0].Operation.ShouldBe(OperationType.Put);
            entries[0].Timestamp.NodeId.ShouldBe("node-single-write");
        }
        finally
        {
            await DeleteDirectoryWithRetriesAsync(dbPath);
        }
    }

    [Fact]
    public async Task Checkpoint_ShouldNotAdvance_WhenAtomicWriteFails()
    {
        // Force the underlying Surreal query to fail so the atomic
        // oplog-write + checkpoint step cannot complete.
        var surrealClient = Substitute.For<ISurrealDbClient>();
        surrealClient.RawQuery(
                Arg.Any<string>(),
                Arg.Any<IReadOnlyDictionary<string, object?>>(),
                Arg.Any<CancellationToken>())
            .Returns(Task.FromException<SurrealDbResponse>(new InvalidOperationException("forced atomic write failure")));

        var embeddedClient = Substitute.For<ICBDDCSurrealEmbeddedClient>();
        embeddedClient.Client.Returns(surrealClient);

        var schemaInitializer = Substitute.For<ICBDDCSurrealSchemaInitializer>();
        schemaInitializer.EnsureInitializedAsync(Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);

        var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
        configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = "node-failure",
            TcpPort = 0,
            AuthToken = "test-token"
        });

        var checkpointPersistence = Substitute.For<ISurrealCdcCheckpointPersistence>();
        var vectorClock = Substitute.For<IVectorClockService>();
        vectorClock.GetLastHash(Arg.Any<string>()).Returns("seed-hash");

        var store = new FailureInjectedDocumentStore(
            embeddedClient,
            schemaInitializer,
            configProvider,
            vectorClock,
            checkpointPersistence);

        var payload = JsonSerializer.SerializeToElement(new { Id = "failure-user", Value = "x" });

        await Should.ThrowAsync<InvalidOperationException>(
            () => store.TriggerLocalChangeAsync("Users", "failure-user", OperationType.Put, payload));

        // The checkpoint must not have been touched at all on failure.
        checkpointPersistence.ReceivedCalls().ShouldBeEmpty();
    }

    // Polls the predicate until it holds or the timeout elapses.
    private static async Task WaitForConditionAsync(
        Func<Task<bool>> predicate,
        string failureMessage,
        int timeoutMs = 6000,
        int pollMs = 50)
    {
        DateTimeOffset deadline = DateTimeOffset.UtcNow.AddMilliseconds(timeoutMs);
        while (DateTimeOffset.UtcNow < deadline)
        {
            if (await predicate()) return;
            await Task.Delay(pollMs);
        }

        throw new TimeoutException(failureMessage);
    }

    // RocksDB may hold file handles briefly after dispose; retry deletion.
    private static async Task DeleteDirectoryWithRetriesAsync(string path)
    {
        for (var attempt = 0; attempt < 5; attempt++)
            try
            {
                if (Directory.Exists(path)) Directory.Delete(path, true);
                return;
            }
            catch when (attempt < 4)
            {
                await Task.Delay(50);
            }
    }
}
/// <summary>
/// Minimal concrete <see cref="SurrealDocumentStore{TContext}"/> used to drive
/// the local-change path directly in tests; all entity hooks are no-ops and CDC
/// polling is disabled. NOTE(review): generic arguments reconstructed from the
/// reflection test (`SurrealDocumentStore&lt;&gt;` closed over object) — confirm.
/// </summary>
internal sealed class FailureInjectedDocumentStore : SurrealDocumentStore<object>
{
    public FailureInjectedDocumentStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        IPeerNodeConfigurationProvider configProvider,
        IVectorClockService vectorClockService,
        ISurrealCdcCheckpointPersistence checkpointPersistence)
        : base(
            new object(),
            surrealEmbeddedClient,
            schemaInitializer,
            configProvider,
            vectorClockService,
            new LastWriteWinsConflictResolver(),
            checkpointPersistence,
            new SurrealCdcPollingOptions { Enabled = false },
            NullLogger.Instance)
    {
    }

    /// <summary>Exposes the protected local-change hook to the test.</summary>
    public Task TriggerLocalChangeAsync(
        string collection,
        string key,
        OperationType operationType,
        JsonElement? content,
        CancellationToken cancellationToken = default)
    {
        return OnLocalChangeDetectedAsync(
            collection,
            key,
            operationType,
            content,
            pendingCursorCheckpoint: null,
            cancellationToken);
    }

    protected override Task ApplyContentToEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    protected override Task ApplyContentToEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
        CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    protected override Task<JsonElement?> GetEntityAsJsonAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
    {
        return Task.FromResult<JsonElement?>(null);
    }

    protected override Task RemoveEntityAsync(string collection, string key, CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    protected override Task RemoveEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key)> documents,
        CancellationToken cancellationToken)
    {
        return Task.CompletedTask;
    }

    protected override Task<IReadOnlyList<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
        string collection,
        CancellationToken cancellationToken)
    {
        return Task.FromResult<IReadOnlyList<(string Key, JsonElement Content)>>([]);
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealOplogStore"/>: append, chain-range and
/// after-timestamp queries, idempotent merge, last-hash caching vs persistence,
/// and drop.
/// </summary>
public class SurrealOplogStoreContractTests
{
    [Fact]
    public async Task OplogStore_AppendQueryMergeDrop_AndLastHash_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateOplogStore();

        var first = CreateOplogEntry("Users", "u1", "node-a", 100, 0, "");
        var second = CreateOplogEntry("Users", "u2", "node-a", 110, 0, first.Hash);
        var third = CreateOplogEntry("Users", "u3", "node-a", 120, 1, second.Hash);
        var foreign = CreateOplogEntry("Users", "u4", "node-b", 115, 0, "");

        await store.AppendOplogEntryAsync(first);
        await store.AppendOplogEntryAsync(second);
        await store.AppendOplogEntryAsync(third);
        await store.AppendOplogEntryAsync(foreign);

        // Chain range excludes the start hash and includes the end hash.
        var chainRange = (await store.GetChainRangeAsync(first.Hash, third.Hash)).ToList();
        chainRange.Select(x => x.Hash).ToList().ShouldBe(new[] { second.Hash, third.Hash });

        // After-timestamp queries span all nodes, ordered by HLC.
        var after = (await store.GetOplogAfterAsync(new HlcTimestamp(100, 0, "node-a"))).ToList();
        after.Select(x => x.Hash).ToList().ShouldBe(new[] { second.Hash, foreign.Hash, third.Hash });

        // Merge is idempotent for known entries and inserts unknown ones.
        var merged = CreateOplogEntry("Users", "u5", "node-a", 130, 0, third.Hash);
        await store.MergeAsync(new[] { second, merged });

        var exported = (await store.ExportAsync()).ToList();
        exported.Count.ShouldBe(5);
        exported.Count(x => x.Hash == second.Hash).ShouldBe(1);

        // The in-memory cache still reflects the last *appended* hash …
        var cachedLastNodeAHash = await store.GetLastEntryHashAsync("node-a");
        cachedLastNodeAHash.ShouldBe(third.Hash);

        // … while a fresh store instance reads the merged hash from disk.
        var rehydratedStore = harness.CreateOplogStore();
        var persistedLastNodeAHash = await rehydratedStore.GetLastEntryHashAsync("node-a");
        persistedLastNodeAHash.ShouldBe(merged.Hash);

        await store.DropAsync();
        (await store.ExportAsync()).ShouldBeEmpty();
    }

    // Builds a Put entry whose JSON payload simply echoes the key.
    private static OplogEntry CreateOplogEntry(
        string collection,
        string key,
        string nodeId,
        long wall,
        int logic,
        string previousHash) =>
        new(
            collection,
            key,
            OperationType.Put,
            JsonSerializer.SerializeToElement(new { key }),
            new HlcTimestamp(wall, logic, nodeId),
            previousHash);
}
/// <summary>
/// Contract tests for <see cref="SurrealDocumentMetadataStore"/>: upsert,
/// tombstoning, after-timestamp queries, and newest-wins merge.
/// </summary>
public class SurrealDocumentMetadataStoreContractTests
{
    [Fact]
    public async Task DocumentMetadataStore_UpsertMarkDeletedGetAfterAndMergeNewer_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateDocumentMetadataStore();

        // Seed two documents, then tombstone the first at a later timestamp.
        await store.UpsertMetadataAsync(new DocumentMetadata("Users", "doc-1", new HlcTimestamp(100, 0, "node-a")));
        await store.UpsertMetadataAsync(new DocumentMetadata("Users", "doc-2", new HlcTimestamp(105, 0, "node-a")));
        await store.MarkDeletedAsync("Users", "doc-1", new HlcTimestamp(110, 1, "node-a"));

        var tombstoned = await store.GetMetadataAsync("Users", "doc-1");
        tombstoned.ShouldNotBeNull();
        tombstoned.IsDeleted.ShouldBeTrue();
        tombstoned.UpdatedAt.ShouldBe(new HlcTimestamp(110, 1, "node-a"));

        // After-queries exclude the given timestamp and order by update time.
        var newerThanSeed = (await store.GetMetadataAfterAsync(new HlcTimestamp(100, 0, "node-a"), new[] { "Users" })).ToList();
        newerThanSeed.Select(x => x.Key).ToList().ShouldBe(new[] { "doc-2", "doc-1" });

        // Merge keeps only the newest record per document; the stale tombstone loses.
        await store.MergeAsync(new[]
        {
            new DocumentMetadata("Users", "doc-1", new HlcTimestamp(109, 0, "node-a"), true),
            new DocumentMetadata("Users", "doc-1", new HlcTimestamp(120, 0, "node-a"), false),
            new DocumentMetadata("Users", "doc-3", new HlcTimestamp(130, 0, "node-b"), false)
        });

        var revived = await store.GetMetadataAsync("Users", "doc-1");
        revived.ShouldNotBeNull();
        revived.UpdatedAt.ShouldBe(new HlcTimestamp(120, 0, "node-a"));
        revived.IsDeleted.ShouldBeFalse();

        var exported = (await store.ExportAsync()).ToList();
        exported.Count.ShouldBe(3);
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealPeerConfigurationStore"/>: save, get,
/// remove, and merge semantics (merge must not clobber locally-saved peers).
/// </summary>
public class SurrealPeerConfigurationStoreContractTests
{
    [Fact]
    public async Task PeerConfigurationStore_SaveGetRemoveAndMerge_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerConfigurationStore();

        await store.SaveRemotePeerAsync(CreatePeer("peer-1", "10.0.0.1:5000", true));

        var peer1 = await store.GetRemotePeerAsync("peer-1", CancellationToken.None);
        peer1.ShouldNotBeNull();
        peer1.Address.ShouldBe("10.0.0.1:5000");

        // Local save supersedes the original registration.
        await store.SaveRemotePeerAsync(CreatePeer("peer-1", "10.0.0.1:6000", false));

        // Merge adds unknown peers but must not overwrite existing ones.
        await store.MergeAsync(new[]
        {
            CreatePeer("peer-1", "10.0.0.1:7000", true),
            CreatePeer("peer-2", "10.0.0.2:5000", true)
        });

        var afterMergePeer1 = await store.GetRemotePeerAsync("peer-1", CancellationToken.None);
        var afterMergePeer2 = await store.GetRemotePeerAsync("peer-2", CancellationToken.None);

        afterMergePeer1.ShouldNotBeNull();
        afterMergePeer1.Address.ShouldBe("10.0.0.1:6000");
        afterMergePeer1.IsEnabled.ShouldBeFalse();

        afterMergePeer2.ShouldNotBeNull();
        afterMergePeer2.Address.ShouldBe("10.0.0.2:5000");

        await store.RemoveRemotePeerAsync("peer-1");

        var removedPeer = await store.GetRemotePeerAsync("peer-1", CancellationToken.None);
        removedPeer.ShouldBeNull();

        var peers = (await store.GetRemotePeersAsync()).ToList();
        peers.Count.ShouldBe(1);
        peers[0].NodeId.ShouldBe("peer-2");
    }

    /// <summary>Builds a static-remote peer interested in the "Users" collection.</summary>
    private static RemotePeerConfiguration CreatePeer(string nodeId, string address, bool enabled)
    {
        return new RemotePeerConfiguration
        {
            NodeId = nodeId,
            Address = address,
            Type = PeerType.StaticRemote,
            IsEnabled = enabled,
            // Restored stripped generic argument ("new List { ... }" in the extract).
            InterestingCollections = new List<string> { "Users" }
        };
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealPeerOplogConfirmationStore"/>:
/// idempotent registration, monotonic confirmation updates, deactivation, and
/// newer-wins merge semantics.
/// </summary>
public class SurrealPeerOplogConfirmationStoreContractTests
{
    [Fact]
    public async Task PeerOplogConfirmationStore_EnsureUpdateAndDeactivate_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerOplogConfirmationStore();

        // Registration is idempotent — a second call leaves one sentinel row.
        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);

        // Older confirmations are ignored; equal timestamps update the hash.
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(90, 0, "source-1"), "hash-old");
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-2");

        var tracked = (await store.GetConfirmationsForPeerAsync("peer-a")).ToList();
        tracked.Count.ShouldBe(1);
        tracked[0].ConfirmedWall.ShouldBe(100);
        tracked[0].ConfirmedLogic.ShouldBe(1);
        tracked[0].ConfirmedHash.ShouldBe("hash-2");

        var exportedForPeer = (await store.ExportAsync()).Where(x => x.PeerNodeId == "peer-a").ToList();
        exportedForPeer.Count(x => x.SourceNodeId == "__peer_registration__").ShouldBe(1);

        // Removing tracking deactivates rows instead of deleting them.
        await store.RemovePeerTrackingAsync("peer-a");

        var activePeers = (await store.GetActiveTrackedPeersAsync()).ToList();
        activePeers.ShouldNotContain("peer-a");

        var afterDeactivate = (await store.ExportAsync()).Where(x => x.PeerNodeId == "peer-a").ToList();
        afterDeactivate.All(x => !x.IsActive).ShouldBeTrue();
    }

    [Fact]
    public async Task PeerOplogConfirmationStore_Merge_UsesNewerAndActiveStateSemantics()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerOplogConfirmationStore();

        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");

        var existing = (await store.ExportAsync())
            .Single(x => x.PeerNodeId == "peer-a" && x.SourceNodeId == "source-1");

        // One stale record (ignored), one newer record that also flips IsActive,
        // and one record for a brand-new source (inserted).
        await store.MergeAsync(new[]
        {
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-1",
                ConfirmedWall = 90,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-old",
                LastConfirmedUtc = existing.LastConfirmedUtc.AddMinutes(-5),
                IsActive = true
            },
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-1",
                ConfirmedWall = 130,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-2",
                LastConfirmedUtc = existing.LastConfirmedUtc.AddMinutes(5),
                IsActive = false
            },
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-2",
                ConfirmedWall = 50,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-3",
                LastConfirmedUtc = existing.LastConfirmedUtc.AddMinutes(5),
                IsActive = true
            }
        });

        var confirmations = (await store.ExportAsync())
            .Where(x => x.PeerNodeId == "peer-a" && x.SourceNodeId != "__peer_registration__")
            .OrderBy(x => x.SourceNodeId)
            .ToList();

        confirmations.Count.ShouldBe(2);

        var source1 = confirmations.Single(x => x.SourceNodeId == "source-1");
        source1.ConfirmedWall.ShouldBe(130);
        source1.ConfirmedLogic.ShouldBe(0);
        source1.ConfirmedHash.ShouldBe("hash-2");
        source1.IsActive.ShouldBeFalse();

        var source2 = confirmations.Single(x => x.SourceNodeId == "source-2");
        source2.ConfirmedWall.ShouldBe(50);
        source2.ConfirmedHash.ShouldBe("hash-3");
        source2.IsActive.ShouldBeTrue();
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealSnapshotMetadataStore"/>: insert,
/// update, HLC-ordered merge (newest wins), and hash lookup.
/// </summary>
public class SurrealSnapshotMetadataStoreContractTests
{
    [Fact]
    public async Task SnapshotMetadataStore_InsertUpdateMergeAndHashLookup_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateSnapshotMetadataStore();

        await store.InsertSnapshotMetadataAsync(new SnapshotMetadata
        {
            NodeId = "node-a",
            TimestampPhysicalTime = 100,
            TimestampLogicalCounter = 0,
            Hash = "hash-1"
        });

        (await store.GetSnapshotHashAsync("node-a")).ShouldBe("hash-1");

        await store.UpdateSnapshotMetadataAsync(new SnapshotMetadata
        {
            NodeId = "node-a",
            TimestampPhysicalTime = 120,
            TimestampLogicalCounter = 1,
            Hash = "hash-2"
        }, CancellationToken.None);

        (await store.GetSnapshotHashAsync("node-a")).ShouldBe("hash-2");

        // Merge compares physical-then-logical time: (119,9) loses to the
        // current (120,1); (130,0) wins; node-b is simply inserted.
        await store.MergeAsync(new[]
        {
            new SnapshotMetadata
            {
                NodeId = "node-a",
                TimestampPhysicalTime = 119,
                TimestampLogicalCounter = 9,
                Hash = "hash-old"
            },
            new SnapshotMetadata
            {
                NodeId = "node-a",
                TimestampPhysicalTime = 130,
                TimestampLogicalCounter = 0,
                Hash = "hash-3"
            },
            new SnapshotMetadata
            {
                NodeId = "node-b",
                TimestampPhysicalTime = 140,
                TimestampLogicalCounter = 0,
                Hash = "hash-b"
            }
        });

        var finalNodeA = await store.GetSnapshotMetadataAsync("node-a");
        finalNodeA.ShouldNotBeNull();
        finalNodeA.Hash.ShouldBe("hash-3");
        finalNodeA.TimestampPhysicalTime.ShouldBe(130);

        var everything = (await store.GetAllSnapshotMetadataAsync()).OrderBy(x => x.NodeId).ToList();
        everything.Count.ShouldBe(2);
        everything[0].NodeId.ShouldBe("node-a");
        everything[1].NodeId.ShouldBe("node-b");
    }
}
/// <summary>
/// Spins up an isolated embedded SurrealDB (RocksDB-backed) in a unique temp
/// directory with a per-instance namespace/database, and constructs the CBDDC
/// stores under test against it. Disposal tears down the client and retries
/// directory deletion (RocksDB may hold handles briefly).
/// </summary>
internal sealed class SurrealTestHarness : IAsyncDisposable
{
    private readonly CBDDCSurrealEmbeddedClient _client;
    private readonly string _rootPath;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;

    public SurrealTestHarness()
    {
        string suffix = Guid.NewGuid().ToString("N");
        _rootPath = Path.Combine(Path.GetTempPath(), "cbddc-surreal-tests", suffix);
        string databasePath = Path.Combine(_rootPath, "rocksdb");

        var options = new CBDDCSurrealEmbeddedOptions
        {
            Endpoint = "rocksdb://local",
            DatabasePath = databasePath,
            Namespace = $"cbddc_tests_{suffix}",
            Database = $"main_{suffix}"
        };

        _client = new CBDDCSurrealEmbeddedClient(options, NullLogger.Instance);
        _schemaInitializer = new TestSurrealSchemaInitializer(_client);
    }

    public SurrealDocumentMetadataStore CreateDocumentMetadataStore()
    {
        return new SurrealDocumentMetadataStore(
            _client,
            _schemaInitializer,
            NullLogger.Instance);
    }

    public SurrealOplogStore CreateOplogStore()
    {
        return new SurrealOplogStore(
            _client,
            _schemaInitializer,
            // Restored stripped type argument — presumably a node-configuration
            // provider; TODO confirm against the SurrealOplogStore ctor.
            Substitute.For<IPeerNodeConfigurationProvider>(),
            new LastWriteWinsConflictResolver(),
            new VectorClockService(),
            null,
            NullLogger.Instance);
    }

    public SurrealPeerConfigurationStore CreatePeerConfigurationStore()
    {
        return new SurrealPeerConfigurationStore(
            _client,
            _schemaInitializer,
            NullLogger.Instance);
    }

    public SurrealPeerOplogConfirmationStore CreatePeerOplogConfirmationStore()
    {
        return new SurrealPeerOplogConfirmationStore(
            _client,
            _schemaInitializer,
            NullLogger.Instance);
    }

    public SurrealSnapshotMetadataStore CreateSnapshotMetadataStore()
    {
        return new SurrealSnapshotMetadataStore(
            _client,
            _schemaInitializer,
            NullLogger.Instance);
    }

    public async ValueTask DisposeAsync()
    {
        await _client.DisposeAsync();
        await DeleteDirectoryWithRetriesAsync(_rootPath);
    }

    private static async Task DeleteDirectoryWithRetriesAsync(string path)
    {
        for (var attempt = 0; attempt < 5; attempt++)
            try
            {
                if (Directory.Exists(path)) Directory.Delete(path, true);
                return;
            }
            catch when (attempt < 4)
            {
                await Task.Delay(50);
            }
    }
}
/// <summary>
/// Test schema initializer that runs client initialization exactly once and
/// lets every caller await its completion.
/// Fix: the previous Interlocked fast-path returned immediately for a racing
/// second caller *before* initialization had finished, and a failed first
/// attempt was never surfaced to later callers; sharing a single lazy task
/// fixes both.
/// </summary>
internal sealed class TestSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer
{
    private readonly ICBDDCSurrealEmbeddedClient _client;
    private readonly object _gate = new();
    private Task? _initialization;

    public TestSurrealSchemaInitializer(ICBDDCSurrealEmbeddedClient client)
    {
        _client = client;
    }

    public Task EnsureInitializedAsync(CancellationToken cancellationToken = default)
    {
        lock (_gate)
        {
            // The first caller's token drives initialization; later callers
            // share (and await) the same task, including its failure.
            _initialization ??= _client.InitializeAsync(cancellationToken);
            return _initialization;
        }
    }
}