Replace BLite with Surreal embedded persistence
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m21s

This commit is contained in:
Joseph Doherty
2026-02-22 05:21:53 -05:00
parent 7ebc2cb567
commit 9c2a77dc3c
56 changed files with 6613 additions and 3177 deletions

View File

@@ -4,7 +4,7 @@ Core abstractions and logic for **CBDDC**, a peer-to-peer data synchronization m
## What Is CBDDC?
CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (BLite) and enables automatic
CBDDC is **not** a database — it's a sync layer that plugs into your existing data store (for example SurrealDB) and enables automatic
P2P replication across nodes in a mesh network. Your application reads and writes to its database as usual; CBDDC
handles synchronization in the background.
@@ -28,19 +28,18 @@ dotnet add package ZB.MOM.WW.CBDDC.Network
## Quick Start
```csharp
// 1. Define your DbContext
public class MyDbContext : CBDDCDocumentDbContext
{
public DocumentCollection<string, User> Users { get; private set; }
public MyDbContext(string path) : base(path) { }
}
// 2. Create your DocumentStore (the sync bridge)
public class MyDocumentStore : BLiteDocumentStore<MyDbContext>
// 1. Define your context exposing watchable collections
public class MyDbContext
{
public MySurrealCollection<User> Users { get; }
}
// 2. Create your DocumentStore (the sync bridge)
public class MyDocumentStore : SurrealDocumentStore<MyDbContext>
{
public MyDocumentStore(MyDbContext ctx, IPeerNodeConfigurationProvider cfg,
IVectorClockService vc, ILogger<MyDocumentStore>? log = null)
: base(ctx, cfg, vc, logger: log)
: base(ctx, ctx.SurrealEmbeddedClient, ctx.SchemaInitializer, cfg, vc, logger: log)
{
WatchCollection("Users", ctx.Users, u => u.Id);
}
@@ -50,19 +49,24 @@ public class MyDocumentStore : BLiteDocumentStore<MyDbContext>
{
var user = content.Deserialize<User>()!;
user.Id = key;
var existing = _context.Users.Find(u => u.Id == key).FirstOrDefault();
if (existing != null) _context.Users.Update(user);
else _context.Users.Insert(user);
await _context.SaveChangesAsync(ct);
if (await _context.Users.FindByIdAsync(key, ct) is null)
await _context.Users.InsertAsync(user, ct);
else
await _context.Users.UpdateAsync(user, ct);
}
// ... implement other abstract methods
}
// 3. Register and use
builder.Services.AddCBDDCCore()
.AddCBDDCBLite<MyDbContext, MyDocumentStore>(
sp => new MyDbContext("data.blite"))
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>();
builder.Services.AddCBDDCCore()
.AddCBDDCSurrealEmbedded<MyDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = "data/cbddc.rocksdb",
Namespace = "cbddc",
Database = "main"
})
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>();
```
## Key Concepts
@@ -93,7 +97,7 @@ Your App → DbContext.SaveChangesAsync()
## Related Packages
- **ZB.MOM.WW.CBDDC.Persistence** — BLite embedded provider (.NET 10+)
- **ZB.MOM.WW.CBDDC.Persistence** — Surreal embedded RocksDB provider (.NET 10+)
- **ZB.MOM.WW.CBDDC.Network** — P2P networking (UDP discovery, TCP sync, Gossip)
## Documentation

View File

@@ -20,10 +20,15 @@ dotnet add package ZB.MOM.WW.CBDDC.Hosting
```csharp
var builder = WebApplication.CreateBuilder(args);
// Add CBDDC core + BLite persistence (custom DbContext + DocumentStore required)
// Add CBDDC core + Surreal embedded persistence (custom DocumentStore required)
builder.Services.AddCBDDCCore()
.AddCBDDCBLite<MyDbContext, MyDocumentStore>(
sp => new MyDbContext("/var/lib/cbddc/data.blite"));
.AddCBDDCSurrealEmbedded<MyDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
DatabasePath = "/var/lib/cbddc/data.rocksdb",
Namespace = "cbddc",
Database = "main"
});
// Add ASP.NET integration (cluster mode)
builder.Services.AddCBDDCHosting(options =>
@@ -80,10 +85,10 @@ CBDDC servers operate in respond-only mode:
## Production Checklist
- Store BLite database files on durable storage in production
- Store Surreal RocksDB data files on durable storage in production
- Configure health checks for load balancer
- Set up proper logging and monitoring
- Configure backup/restore for BLite database files
- Configure backup/restore for Surreal RocksDB data files
- Configure proper firewall rules for TCP port
- Set unique NodeId per instance
- Test failover scenarios

View File

@@ -1,238 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// BLite implementation of document metadata storage for sync tracking.
/// Persists one <see cref="DocumentMetadataEntity" /> row per (collection, key) pair, carrying the
/// hybrid logical clock (HLC) timestamp and tombstone flag used by the sync layer.
/// </summary>
/// <typeparam name="TDbContext">The type of CBDDCDocumentDbContext.</typeparam>
public class BLiteDocumentMetadataStore<TDbContext> : DocumentMetadataStore where TDbContext : CBDDCDocumentDbContext
{
    private readonly TDbContext _context;
    private readonly ILogger<BLiteDocumentMetadataStore<TDbContext>> _logger;

    /// <summary>
    /// Initializes a new instance of the <see cref="BLiteDocumentMetadataStore{TDbContext}" /> class.
    /// </summary>
    /// <param name="context">The BLite document database context.</param>
    /// <param name="logger">The optional logger instance.</param>
    public BLiteDocumentMetadataStore(TDbContext context,
        ILogger<BLiteDocumentMetadataStore<TDbContext>>? logger = null)
    {
        _context = context ?? throw new ArgumentNullException(nameof(context));
        _logger = logger ?? NullLogger<BLiteDocumentMetadataStore<TDbContext>>.Instance;
    }

    /// <inheritdoc />
    public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
        CancellationToken cancellationToken = default)
    {
        var entity = FindEntity(collection, key);
        return entity != null ? ToDomain(entity) : null;
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
        CancellationToken cancellationToken = default)
    {
        return _context.DocumentMetadatas
            .Find(m => m.Collection == collection)
            .Select(ToDomain)
            .ToList();
    }

    /// <inheritdoc />
    public override async Task UpsertMetadataAsync(DocumentMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        await UpsertCoreAsync(metadata);
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
        CancellationToken cancellationToken = default)
    {
        foreach (var metadata in metadatas)
        {
            // Honor cancellation between items; the batch is committed in a single SaveChanges below.
            cancellationToken.ThrowIfCancellationRequested();
            await UpsertCoreAsync(metadata);
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
        CancellationToken cancellationToken = default)
    {
        var existing = FindEntity(collection, key);
        if (existing == null)
        {
            // No row yet: insert a tombstone so the deletion still replicates to peers.
            await _context.DocumentMetadatas.InsertAsync(new DocumentMetadataEntity
            {
                Id = Guid.NewGuid().ToString(),
                Collection = collection,
                Key = key,
                HlcPhysicalTime = timestamp.PhysicalTime,
                HlcLogicalCounter = timestamp.LogicalCounter,
                HlcNodeId = timestamp.NodeId,
                IsDeleted = true
            });
        }
        else
        {
            Apply(existing, timestamp, isDeleted: true);
            await _context.DocumentMetadatas.UpdateAsync(existing);
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        // HLC ordering: strictly later physical time, or equal physical time with a larger logical counter.
        // NOTE(review): entries tied on (physical, logical) but differing in NodeId are excluded - confirm intended.
        var query = _context.DocumentMetadatas.AsQueryable()
            .Where(m => m.HlcPhysicalTime > since.PhysicalTime ||
                        (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter));
        if (collections != null)
        {
            var collectionSet = new HashSet<string>(collections);
            query = query.Where(m => collectionSet.Contains(m.Collection));
        }
        return query
            .OrderBy(m => m.HlcPhysicalTime)
            .ThenBy(m => m.HlcLogicalCounter)
            .Select(ToDomain)
            .ToList();
    }

    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        var allIds = _context.DocumentMetadatas.FindAll().Select(m => m.Id).ToList();
        await _context.DocumentMetadatas.DeleteBulkAsync(allIds);
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        return _context.DocumentMetadatas.FindAll().Select(ToDomain).ToList();
    }

    /// <inheritdoc />
    public override async Task ImportAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        // Import is a blind insert; use MergeAsync when existing rows must be reconciled.
        foreach (var item in items)
        {
            cancellationToken.ThrowIfCancellationRequested();
            await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task MergeAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            cancellationToken.ThrowIfCancellationRequested();
            var existing = FindEntity(item.Collection, item.Key);
            if (existing == null)
            {
                await _context.DocumentMetadatas.InsertAsync(ToEntity(item));
            }
            else
            {
                // Update only if the incoming timestamp is strictly newer (last-write-wins merge).
                var existingTs = new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter,
                    existing.HlcNodeId);
                if (item.UpdatedAt.CompareTo(existingTs) > 0)
                {
                    Apply(existing, item.UpdatedAt, item.IsDeleted);
                    await _context.DocumentMetadatas.UpdateAsync(existing);
                }
            }
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    #region Helpers
    // Looks up the single entity for a (collection, key) pair, or null when absent.
    private DocumentMetadataEntity? FindEntity(string collection, string key)
    {
        return _context.DocumentMetadatas
            .Find(m => m.Collection == collection && m.Key == key)
            .FirstOrDefault();
    }

    // Shared insert-or-update used by the single and batch upsert paths; does not commit.
    private async Task UpsertCoreAsync(DocumentMetadata metadata)
    {
        var existing = FindEntity(metadata.Collection, metadata.Key);
        if (existing == null)
        {
            await _context.DocumentMetadatas.InsertAsync(ToEntity(metadata));
        }
        else
        {
            Apply(existing, metadata.UpdatedAt, metadata.IsDeleted);
            await _context.DocumentMetadatas.UpdateAsync(existing);
        }
    }

    // Copies an HLC timestamp and tombstone flag onto an existing entity.
    private static void Apply(DocumentMetadataEntity entity, HlcTimestamp timestamp, bool isDeleted)
    {
        entity.HlcPhysicalTime = timestamp.PhysicalTime;
        entity.HlcLogicalCounter = timestamp.LogicalCounter;
        entity.HlcNodeId = timestamp.NodeId;
        entity.IsDeleted = isDeleted;
    }
    #endregion

    #region Mappers
    // Entity -> domain model.
    private static DocumentMetadata ToDomain(DocumentMetadataEntity entity)
    {
        return new DocumentMetadata(
            entity.Collection,
            entity.Key,
            new HlcTimestamp(entity.HlcPhysicalTime, entity.HlcLogicalCounter, entity.HlcNodeId),
            entity.IsDeleted
        );
    }

    // Domain model -> new entity row (fresh random Id).
    private static DocumentMetadataEntity ToEntity(DocumentMetadata metadata)
    {
        return new DocumentMetadataEntity
        {
            Id = Guid.NewGuid().ToString(),
            Collection = metadata.Collection,
            Key = metadata.Key,
            HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime,
            HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter,
            HlcNodeId = metadata.UpdatedAt.NodeId,
            IsDeleted = metadata.IsDeleted
        };
    }
    #endregion
}

View File

@@ -1,214 +0,0 @@
# BLiteDocumentStore - Usage Guide
## Overview
`BLiteDocumentStore<TDbContext>` is an abstract base class that simplifies creating document stores for CBDDC with BLite
persistence. It handles all Oplog management internally, so you only need to implement entity-to-JSON mapping methods.
## Key Features
- ✅ **Automatic Oplog Creation** - Local changes automatically create Oplog entries
- ✅ **Remote Sync Handling** - AsyncLocal flag suppresses Oplog during sync (prevents duplicates)
- ✅ **No CDC Events Needed** - Direct Oplog management eliminates event loops
- ✅ **Simple API** - Only 4 abstract methods to implement
## Architecture
```
User Code → SampleDocumentStore (extends BLiteDocumentStore)
    ↓
BLiteDocumentStore
├── _context.Users / TodoLists (read/write entities)
└── _context.OplogEntries (write oplog directly)
Remote Sync → OplogStore.ApplyBatchAsync()
    ↓
BLiteDocumentStore.PutDocumentAsync(fromSync=true)
├── _context.Users / TodoLists (write only)
└── _context.OplogEntries (skip - already exists)
**Key Advantage**: No circular dependency! `BLiteDocumentStore` writes directly to `CBDDCDocumentDbContext.OplogEntries`
collection.
## Implementation Example
```csharp
public class SampleDocumentStore : BLiteDocumentStore<SampleDbContext>
{
public SampleDocumentStore(
SampleDbContext context,
IPeerNodeConfigurationProvider configProvider,
ILogger<SampleDocumentStore>? logger = null)
: base(context, configProvider, new LastWriteWinsConflictResolver(), logger)
{
}
public override IEnumerable<string> InterestedCollection => new[] { "Users", "TodoLists" };
protected override async Task ApplyContentToEntityAsync(
string collection, string key, JsonElement content, CancellationToken ct)
{
switch (collection)
{
case "Users":
var user = content.Deserialize<User>()!;
user.Id = key;
var existingUser = _context.Users.FindById(key);
if (existingUser != null)
await _context.Users.UpdateAsync(user);
else
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync(ct);
break;
case "TodoLists":
var todoList = content.Deserialize<TodoList>()!;
todoList.Id = key;
var existingTodoList = _context.TodoLists.FindById(key);
if (existingTodoList != null)
await _context.TodoLists.UpdateAsync(todoList);
else
await _context.TodoLists.InsertAsync(todoList);
await _context.SaveChangesAsync(ct);
break;
default:
throw new NotSupportedException($"Collection '{collection}' is not supported");
}
}
protected override Task<JsonElement?> GetEntityAsJsonAsync(
string collection, string key, CancellationToken ct)
{
return Task.FromResult<JsonElement?>(collection switch
{
"Users" => SerializeEntity(_context.Users.FindById(key)),
"TodoLists" => SerializeEntity(_context.TodoLists.FindById(key)),
_ => null
});
}
protected override async Task RemoveEntityAsync(
string collection, string key, CancellationToken ct)
{
switch (collection)
{
case "Users":
await _context.Users.DeleteAsync(key);
await _context.SaveChangesAsync(ct);
break;
case "TodoLists":
await _context.TodoLists.DeleteAsync(key);
await _context.SaveChangesAsync(ct);
break;
}
}
protected override async Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
string collection, CancellationToken ct)
{
return await Task.Run(() => collection switch
{
"Users" => _context.Users.FindAll()
.Select(u => (u.Id, SerializeEntity(u)!.Value)),
"TodoLists" => _context.TodoLists.FindAll()
.Select(t => (t.Id, SerializeEntity(t)!.Value)),
_ => Enumerable.Empty<(string, JsonElement)>()
}, ct);
}
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
{
if (entity == null) return null;
return JsonSerializer.SerializeToElement(entity);
}
}
```
## Usage in Application
### Setup (DI Container)
```csharp
services.AddSingleton<SampleDbContext>(sp =>
new SampleDbContext("data/sample.blite"));
// No OplogStore dependency needed!
services.AddSingleton<IDocumentStore, SampleDocumentStore>();
services.AddSingleton<IOplogStore, BLiteOplogStore<SampleDbContext>>();
```
### Local Changes (User operations)
```csharp
// User inserts a new user
var user = new User { Id = "user-1", Name = "Alice" };
await _context.Users.InsertAsync(user);
await _context.SaveChangesAsync();
// The application then needs to notify the DocumentStore:
var document = new Document(
"Users",
"user-1",
JsonSerializer.SerializeToElement(user),
new HlcTimestamp(0, 0, ""),
false);
await documentStore.PutDocumentAsync(document);
// ← This creates an OplogEntry automatically
```
### Remote Sync (Automatic)
```csharp
// When OplogStore.ApplyBatchAsync receives remote changes:
await oplogStore.ApplyBatchAsync(remoteEntries, cancellationToken);
// Internally, this calls:
using (documentStore.BeginRemoteSync()) // ← Suppresses Oplog creation
{
foreach (var entry in remoteEntries)
{
await documentStore.PutDocumentAsync(entryAsDocument);
// ← Writes to DB only, no Oplog duplication
}
}
```
## Migration from Old CDC-based Approach
### Before (with CDC Events)
```csharp
// SampleDocumentStore subscribes to BLite CDC
// CDC emits events → OplogCoordinator creates Oplog
// Problem: Remote sync also triggers CDC → duplicate Oplog entries
```
### After (with BLiteDocumentStore)
```csharp
// Direct Oplog management in DocumentStore
// AsyncLocal flag prevents duplicates during sync
// No CDC events needed
```
## Benefits
1. **No Event Loops** - Direct control over Oplog creation
2. **Thread-Safe** - AsyncLocal handles concurrent operations
3. **Simpler** - Only 4 methods to implement vs full CDC subscription
4. **Transparent** - Oplog management is hidden from user code
## Next Steps
After implementing your DocumentStore:
1. Remove CDC subscriptions from your code
2. Remove `OplogCoordinator` from DI (no longer needed)
3. Test local operations create Oplog entries
4. Test remote sync doesn't create duplicate entries

View File

@@ -1,783 +0,0 @@
using System.Collections.Concurrent;
using System.Text.Json;
using BLite.Core.CDC;
using BLite.Core.Collections;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
using BLiteOperationType = BLite.Core.Transactions.OperationType;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Abstract base class for BLite-based document stores.
/// Handles Oplog creation internally - subclasses only implement entity mapping.
/// </summary>
/// <typeparam name="TDbContext">The BLite DbContext type.</typeparam>
public abstract class BLiteDocumentStore<TDbContext> : IDocumentStore, IDisposable
where TDbContext : CBDDCDocumentDbContext
{
private readonly List<IDisposable> _cdcWatchers = new(); // CDC subscriptions created by WatchCollection; disposed in Dispose()
private readonly object _clockLock = new(); // guards the HLC fields (_lastPhysicalTime/_logicalCounter)
protected readonly IPeerNodeConfigurationProvider _configProvider;
protected readonly IConflictResolver _conflictResolver;
protected readonly TDbContext _context;
protected readonly ILogger<BLiteDocumentStore<TDbContext>> _logger;
private readonly HashSet<string> _registeredCollections = new(); // names registered via WatchCollection, exposed by InterestedCollection
/// <summary>
/// Semaphore used to suppress CDC-triggered OplogEntry creation during remote sync.
/// CurrentCount == 0 -> sync in progress, CDC must skip.
/// CurrentCount == 1 -> no sync, CDC creates OplogEntry.
/// </summary>
private readonly SemaphoreSlim _remoteSyncGuard = new(1, 1);
// Per-(collection|key|op) counters of CDC events expected from this store's own writes; ordinal keys.
private readonly ConcurrentDictionary<string, int> _suppressedCdcEvents = new(StringComparer.Ordinal);
protected readonly IVectorClockService _vectorClock;
// HLC state for generating timestamps for local changes
private long _lastPhysicalTime;
private int _logicalCounter;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteDocumentStore{TDbContext}" /> class.
/// </summary>
/// <param name="context">The BLite database context.</param>
/// <param name="configProvider">The peer node configuration provider.</param>
/// <param name="vectorClockService">The vector clock service.</param>
/// <param name="conflictResolver">The conflict resolver to use for merges; defaults to last-write-wins when null.</param>
/// <param name="logger">The logger instance; wrapped or replaced by a null logger when absent.</param>
protected BLiteDocumentStore(
TDbContext context,
IPeerNodeConfigurationProvider configProvider,
IVectorClockService vectorClockService,
IConflictResolver? conflictResolver = null,
ILogger? logger = null)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
_configProvider = configProvider ?? throw new ArgumentNullException(nameof(configProvider));
_vectorClock = vectorClockService ?? throw new ArgumentNullException(nameof(vectorClockService));
_conflictResolver = conflictResolver ?? new LastWriteWinsConflictResolver();
_logger = CreateTypedLogger(logger);
// Seed the HLC with the current wall clock; the logical counter disambiguates same-millisecond events.
_lastPhysicalTime = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
_logicalCounter = 0;
}
/// <summary>
/// Releases managed resources used by this document store:
/// unsubscribes all CDC watchers and disposes the remote-sync guard.
/// </summary>
public virtual void Dispose()
{
    foreach (var watcher in _cdcWatchers)
        try
        {
            watcher.Dispose();
        }
        catch
        {
            // Best-effort cleanup: one failing watcher must not prevent the rest from being disposed.
        }
    _cdcWatchers.Clear();
    _remoteSyncGuard.Dispose();
    // CA1816: suppress finalization in case a derived type adds a finalizer.
    GC.SuppressFinalize(this);
}
// Adapts an arbitrary logger to the typed category this store logs under:
// reuse it when already typed, wrap it otherwise, or fall back to a no-op logger.
private static ILogger<BLiteDocumentStore<TDbContext>> CreateTypedLogger(ILogger? logger)
{
    return logger switch
    {
        null => NullLogger<BLiteDocumentStore<TDbContext>>.Instance,
        ILogger<BLiteDocumentStore<TDbContext>> typed => typed,
        _ => new ForwardingLogger(logger)
    };
}
/// <summary>
/// Thin adapter that exposes an untyped <see cref="ILogger" /> under this store's typed category.
/// Every member simply delegates to the wrapped logger.
/// </summary>
private sealed class ForwardingLogger : ILogger<BLiteDocumentStore<TDbContext>>
{
    private readonly ILogger _inner;

    /// <summary>
    /// Initializes a new instance of the <see cref="ForwardingLogger" /> class.
    /// </summary>
    /// <param name="inner">The underlying logger instance.</param>
    public ForwardingLogger(ILogger inner) => _inner = inner;

    /// <inheritdoc />
    public IDisposable? BeginScope<TState>(TState state) where TState : notnull => _inner.BeginScope(state);

    /// <inheritdoc />
    public bool IsEnabled(LogLevel logLevel) => _inner.IsEnabled(logLevel);

    /// <inheritdoc />
    public void Log<TState>(LogLevel logLevel, EventId eventId, TState state, Exception? exception,
        Func<TState, Exception?, string> formatter) =>
        _inner.Log(logLevel, eventId, state, exception, formatter);
}
#region CDC Registration
// Composite suppression-table key: "collection|key|opcode" uniquely identifies one expected CDC event.
private static string BuildSuppressionKey(string collection, string key, OperationType operationType)
{
    int opCode = (int)operationType;
    return $"{collection}|{key}|{opCode}";
}
// Records that one more CDC event for this (collection, key, op) tuple should be ignored,
// because it will be raised by a write this store is about to perform itself.
private void RegisterSuppressedCdcEvent(string collection, string key, OperationType operationType)
{
    var suppressionKey = BuildSuppressionKey(collection, key, operationType);
    _suppressedCdcEvents.AddOrUpdate(suppressionKey, _ => 1, (_, count) => count + 1);
}
// Atomically consumes one pending suppression for this (collection, key, op) tuple.
// Returns true when a suppression existed (the CDC event must be skipped), false otherwise.
private bool TryConsumeSuppressedCdcEvent(string collection, string key, OperationType operationType)
{
    string suppressionKey = BuildSuppressionKey(collection, key, operationType);
    while (true)
    {
        if (!_suppressedCdcEvents.TryGetValue(suppressionKey, out int current)) return false;
        if (current <= 1)
        {
            // Remove only if the counter is still 'current'. The previous keyless
            // TryRemove(key, out _) could delete a counter another thread had just
            // incremented (losing a registered suppression), and a failed remove
            // returned false instead of retrying. Retry the CAS loop on contention.
            if (_suppressedCdcEvents.TryRemove(new KeyValuePair<string, int>(suppressionKey, current)))
                return true;
            continue;
        }
        if (_suppressedCdcEvents.TryUpdate(suppressionKey, current - 1, current)) return true;
    }
}
/// <summary>
/// Registers a BLite collection for CDC tracking.
/// Call in subclass constructor for each collection to sync.
/// The collection name is also advertised via <see cref="InterestedCollection" />.
/// </summary>
/// <typeparam name="TEntity">The entity type.</typeparam>
/// <param name="collectionName">The logical collection name used in Oplog.</param>
/// <param name="collection">The BLite DocumentCollection.</param>
/// <param name="keySelector">Function to extract the entity key.</param>
protected void WatchCollection<TEntity>(
string collectionName,
DocumentCollection<string, TEntity> collection,
Func<TEntity, string> keySelector)
where TEntity : class
{
_registeredCollections.Add(collectionName);
// Watch(true) - presumably requests full-entity payloads in change events; TODO confirm against BLite docs.
// The subscription is retained so Dispose() can unhook it.
var watcher = collection.Watch(true)
.Subscribe(new CdcObserver<TEntity>(collectionName, keySelector, this));
_cdcWatchers.Add(watcher);
}
/// <summary>
/// Generic CDC observer. Forwards BLite change events to OnLocalChangeDetectedAsync.
/// Events produced by this store's own sync writes are consumed from the suppression table,
/// and events raised while a remote sync holds the guard are dropped entirely.
/// </summary>
private class CdcObserver<TEntity> : IObserver<ChangeStreamEvent<string, TEntity>>
    where TEntity : class
{
    private readonly string _collectionName;
    private readonly Func<TEntity, string> _keySelector;
    private readonly BLiteDocumentStore<TDbContext> _store;

    /// <summary>
    /// Initializes a new instance of the <see cref="CdcObserver{TEntity}" /> class.
    /// </summary>
    /// <param name="collectionName">The logical collection name.</param>
    /// <param name="keySelector">The key selector for observed entities.</param>
    /// <param name="store">The owning document store instance.</param>
    public CdcObserver(
        string collectionName,
        Func<TEntity, string> keySelector,
        BLiteDocumentStore<TDbContext> store)
    {
        _collectionName = collectionName;
        _keySelector = keySelector;
        _store = store;
    }

    /// <summary>
    /// Handles a change stream event from BLite CDC.
    /// </summary>
    /// <param name="changeEvent">The change event payload.</param>
    public void OnNext(ChangeStreamEvent<string, TEntity> changeEvent)
    {
        var operationType = changeEvent.Type == BLiteOperationType.Delete
            ? OperationType.Delete
            : OperationType.Put;
        string entityId = changeEvent.DocumentId ?? "";
        if (operationType == OperationType.Put && changeEvent.Entity != null)
            entityId = _keySelector(changeEvent.Entity);
        // Skip events this store caused by its own sync writes.
        if (_store.TryConsumeSuppressedCdcEvent(_collectionName, entityId, operationType)) return;
        // Guard held (count == 0) means a remote sync is in progress - no oplog entry.
        if (_store._remoteSyncGuard.CurrentCount == 0) return;
        // NOTE(review): sync-over-async below can deadlock if BLite raises CDC events on a
        // context-bound thread - confirm callbacks arrive on thread-pool threads.
        if (changeEvent.Type == BLiteOperationType.Delete)
        {
            _store.OnLocalChangeDetectedAsync(_collectionName, entityId, OperationType.Delete, null)
                .GetAwaiter().GetResult();
        }
        else if (changeEvent.Entity != null)
        {
            var content = JsonSerializer.SerializeToElement(changeEvent.Entity);
            string key = _keySelector(changeEvent.Entity);
            _store.OnLocalChangeDetectedAsync(_collectionName, key, OperationType.Put, content)
                .GetAwaiter().GetResult();
        }
    }

    /// <summary>
    /// Handles CDC observer errors by logging them.
    /// Previously these were silently swallowed, hiding change-stream failures.
    /// </summary>
    /// <param name="error">The observed exception.</param>
    public void OnError(Exception error)
    {
        _store._logger.LogError(error,
            "CDC change stream for collection '{Collection}' reported an error", _collectionName);
    }

    /// <summary>
    /// Handles completion of the CDC stream. No action required.
    /// </summary>
    public void OnCompleted()
    {
    }
}
#endregion
#region Abstract Methods - Implemented by subclass
/// <summary>
/// Applies JSON content to a single entity (insert or update) and commits changes.
/// Called for single-document operations.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="content">The document content to apply.</param>
/// <param name="cancellationToken">The cancellation token.</param>
protected abstract Task ApplyContentToEntityAsync(
string collection, string key, JsonElement content, CancellationToken cancellationToken);
/// <summary>
/// Applies JSON content to multiple entities (insert or update) with a single commit.
/// Called for batch operations. Must commit all changes in a single SaveChanges.
/// </summary>
/// <param name="documents">The documents to apply in one batch.</param>
/// <param name="cancellationToken">The cancellation token.</param>
protected abstract Task ApplyContentToEntitiesBatchAsync(
IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
CancellationToken cancellationToken);
/// <summary>
/// Reads an entity from the DbContext and returns it as JsonElement.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The serialized entity, or <see langword="null" /> when it does not exist.</returns>
protected abstract Task<JsonElement?> GetEntityAsJsonAsync(
string collection, string key, CancellationToken cancellationToken);
/// <summary>
/// Removes a single entity from the DbContext and commits changes.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
protected abstract Task RemoveEntityAsync(
string collection, string key, CancellationToken cancellationToken);
/// <summary>
/// Removes multiple entities from the DbContext with a single commit.
/// </summary>
/// <param name="documents">The documents to remove in one batch.</param>
/// <param name="cancellationToken">The cancellation token.</param>
protected abstract Task RemoveEntitiesBatchAsync(
IEnumerable<(string Collection, string Key)> documents, CancellationToken cancellationToken);
/// <summary>
/// Reads all entities from a collection as JsonElements.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>All (key, content) pairs currently stored in the collection.</returns>
protected abstract Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
string collection, CancellationToken cancellationToken);
#endregion
#region IDocumentStore Implementation
/// <summary>
/// Returns the collections registered via WatchCollection.
/// Note: exposes the live backing set, not a snapshot; callers must not mutate it.
/// </summary>
public IEnumerable<string> InterestedCollection => _registeredCollections;
/// <summary>
/// Gets a document by collection and key.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The matching document, or <see langword="null" /> when not found.</returns>
public async Task<Document?> GetDocumentAsync(string collection, string key,
    CancellationToken cancellationToken = default)
{
    var content = await GetEntityAsJsonAsync(collection, key, cancellationToken);
    if (content is null) return null;
    // Zero HLC placeholder; callers needing the real timestamp read it from the metadata store.
    var placeholder = new HlcTimestamp(0, 0, "");
    return new Document(collection, key, content.Value, placeholder, false);
}
/// <summary>
/// Gets all documents for a collection.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents in the specified collection.</returns>
public async Task<IEnumerable<Document>> GetDocumentsByCollectionAsync(string collection,
    CancellationToken cancellationToken = default)
{
    // Zero HLC placeholder, as in GetDocumentAsync.
    var placeholder = new HlcTimestamp(0, 0, "");
    var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
    return entities.Select(entity => new Document(collection, entity.Key, entity.Content, placeholder, false));
}
/// <summary>
/// Gets documents for the specified collection and key pairs.
/// Missing documents are silently omitted from the result.
/// </summary>
/// <param name="documentKeys">The collection and key pairs to resolve.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The documents that were found.</returns>
public async Task<IEnumerable<Document>> GetDocumentsAsync(List<(string Collection, string Key)> documentKeys,
    CancellationToken cancellationToken)
{
    var results = new List<Document>(documentKeys.Count);
    foreach (var (collection, key) in documentKeys)
    {
        // Sequential lookups keep the underlying context access single-threaded.
        if (await GetDocumentAsync(collection, key, cancellationToken) is { } document)
            results.Add(document);
    }
    return results;
}
/// <summary>
/// Inserts or updates a single document.
/// </summary>
/// <param name="document">The document to persist.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> PutDocumentAsync(Document document, CancellationToken cancellationToken = default)
{
    // Hold the guard so concurrent CDC callbacks see CurrentCount == 0 and skip oplog creation.
    await _remoteSyncGuard.WaitAsync(cancellationToken);
    try
    {
        await PutDocumentInternalAsync(document, cancellationToken);
        return true;
    }
    finally
    {
        _remoteSyncGuard.Release();
    }
}
// Applies a single document write. A suppression is pre-registered so the CDC event
// raised by this write does not create a duplicate OplogEntry.
private async Task PutDocumentInternalAsync(Document document, CancellationToken cancellationToken)
{
RegisterSuppressedCdcEvent(document.Collection, document.Key, OperationType.Put);
await ApplyContentToEntityAsync(document.Collection, document.Key, document.Content, cancellationToken);
}
/// <summary>
/// Updates a batch of documents with a single underlying commit.
/// </summary>
/// <param name="documents">The documents to update.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> UpdateBatchDocumentsAsync(IEnumerable<Document> documents,
    CancellationToken cancellationToken = default)
{
    var batch = documents.ToList();
    await _remoteSyncGuard.WaitAsync(cancellationToken);
    try
    {
        // Pre-register one suppression per document so the CDC events raised by this batch are ignored.
        batch.ForEach(d => RegisterSuppressedCdcEvent(d.Collection, d.Key, OperationType.Put));
        await ApplyContentToEntitiesBatchAsync(
            batch.Select(d => (d.Collection, d.Key, d.Content)), cancellationToken);
        return true;
    }
    finally
    {
        _remoteSyncGuard.Release();
    }
}
/// <summary>
/// Inserts a batch of documents.
/// </summary>
/// <param name="documents">The documents to insert.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
/// <remarks>
/// The original insert and update implementations were byte-identical: both
/// register Put suppression and apply the batch through
/// ApplyContentToEntitiesBatchAsync. Delegating keeps the guard/suppression
/// logic in one place so the two paths cannot drift apart.
/// </remarks>
public Task<bool> InsertBatchDocumentsAsync(IEnumerable<Document> documents,
    CancellationToken cancellationToken = default)
{
    return UpdateBatchDocumentsAsync(documents, cancellationToken);
}
/// <summary>
/// Deletes a single document.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteDocumentAsync(string collection, string key,
    CancellationToken cancellationToken = default)
{
    // Hold the remote-sync guard so the delete is not echoed into the Oplog.
    await _remoteSyncGuard.WaitAsync(cancellationToken);
    try
    {
        await DeleteDocumentInternalAsync(collection, key, cancellationToken);
        return true;
    }
    finally
    {
        _remoteSyncGuard.Release();
    }
}
// Applies a remote delete without taking the remote-sync guard; mirrors
// PutDocumentInternalAsync for the delete path.
private async Task DeleteDocumentInternalAsync(string collection, string key, CancellationToken cancellationToken)
{
    // Register suppression BEFORE removal so the resulting change event is ignored.
    RegisterSuppressedCdcEvent(collection, key, OperationType.Delete);
    await RemoveEntityAsync(collection, key, cancellationToken);
}
/// <summary>
/// Deletes a batch of documents by composite keys.
/// </summary>
/// <param name="documentKeys">The document keys in collection/key format.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns><see langword="true" /> when the operation succeeds.</returns>
public async Task<bool> DeleteBatchDocumentsAsync(IEnumerable<string> documentKeys,
    CancellationToken cancellationToken = default)
{
    var targets = new List<(string Collection, string Key)>();
    foreach (string compositeKey in documentKeys)
    {
        // NOTE(review): a key whose key part itself contains '/' splits into
        // more than two segments and is dropped here — confirm keys can never
        // contain '/'.
        string[] segments = compositeKey.Split('/');
        if (segments.Length == 2)
        {
            targets.Add((segments[0], segments[1]));
        }
        else
        {
            _logger.LogWarning("Invalid document key format: {Key}", compositeKey);
        }
    }
    if (targets.Count == 0) return true;
    await _remoteSyncGuard.WaitAsync(cancellationToken);
    try
    {
        foreach (var (collection, key) in targets)
            RegisterSuppressedCdcEvent(collection, key, OperationType.Delete);
        await RemoveEntitiesBatchAsync(targets, cancellationToken);
    }
    finally
    {
        _remoteSyncGuard.Release();
    }
    return true;
}
/// <summary>
/// Merges an incoming document with the current stored document.
/// </summary>
/// <param name="incoming">The incoming document.</param>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The stored document after merge resolution.</returns>
/// <remarks>
/// NOTE(review): this method does not acquire the remote-sync guard itself;
/// when called directly (not via the batch MergeAsync, which holds the guard)
/// only the per-event CDC suppression prevents Oplog echo — confirm that is
/// sufficient for direct callers.
/// </remarks>
public async Task<Document> MergeAsync(Document incoming, CancellationToken cancellationToken = default)
{
    var existing = await GetDocumentAsync(incoming.Collection, incoming.Key, cancellationToken);
    if (existing == null)
    {
        // No local version: the incoming document wins unconditionally.
        // Use internal method - guard not acquired yet in single-document merge
        await PutDocumentInternalAsync(incoming, cancellationToken);
        return incoming;
    }
    // Use conflict resolver to merge; the incoming document is wrapped as a
    // synthetic Put oplog entry with an empty previous-hash.
    var resolution = _conflictResolver.Resolve(existing, new OplogEntry(
        incoming.Collection,
        incoming.Key,
        OperationType.Put,
        incoming.Content,
        incoming.UpdatedAt,
        ""));
    if (resolution.ShouldApply && resolution.MergedDocument != null)
    {
        await PutDocumentInternalAsync(resolution.MergedDocument, cancellationToken);
        return resolution.MergedDocument;
    }
    // Resolver rejected the incoming change: the existing document stands.
    return existing;
}
#endregion
#region ISnapshotable Implementation
/// <summary>
/// Removes all tracked documents from registered collections.
/// </summary>
/// <param name="cancellationToken">The cancellation token.</param>
/// <remarks>
/// NOTE(review): unlike the delete APIs, this neither takes the remote-sync
/// guard nor registers CDC suppression, so the removals may surface as local
/// change events — confirm callers wrap this in BeginRemoteSync when that is
/// not desired.
/// </remarks>
public async Task DropAsync(CancellationToken cancellationToken = default)
{
    foreach (string collection in InterestedCollection)
    {
        // The entities are loaded only to discover their keys; delete them in
        // a single batch instead of one round trip per entity.
        var entities = await GetAllEntitiesAsJsonAsync(collection, cancellationToken);
        var keys = new List<(string Collection, string Key)>();
        foreach ((string key, _) in entities) keys.Add((collection, key));
        if (keys.Count > 0)
            await RemoveEntitiesBatchAsync(keys, cancellationToken);
    }
}
/// <summary>
/// Exports all tracked documents from registered collections.
/// </summary>
/// <param name="cancellationToken">The cancellation token.</param>
/// <returns>The exported documents.</returns>
public async Task<IEnumerable<Document>> ExportAsync(CancellationToken cancellationToken = default)
{
    var exported = new List<Document>();
    foreach (string collection in InterestedCollection)
        exported.AddRange(await GetDocumentsByCollectionAsync(collection, cancellationToken));
    return exported;
}
/// <summary>
/// Imports a batch of documents.
/// </summary>
/// <param name="items">The documents to import.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public async Task ImportAsync(IEnumerable<Document> items, CancellationToken cancellationToken = default)
{
    // Materialize once: the sequence is enumerated twice below.
    var batch = items.ToList();
    await _remoteSyncGuard.WaitAsync(cancellationToken);
    try
    {
        // Suppress CDC echo for every imported document before writing.
        foreach (var doc in batch)
            RegisterSuppressedCdcEvent(doc.Collection, doc.Key, OperationType.Put);
        var payload = batch.Select(d => (d.Collection, d.Key, d.Content));
        await ApplyContentToEntitiesBatchAsync(payload, cancellationToken);
    }
    finally
    {
        _remoteSyncGuard.Release();
    }
}
/// <summary>
/// Merges a batch of incoming documents.
/// </summary>
/// <param name="items">The incoming documents.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public async Task MergeAsync(IEnumerable<Document> items, CancellationToken cancellationToken = default)
{
    // Hold the guard for the whole batch so no Oplog entries are created
    // while the remote documents are merged in.
    await _remoteSyncGuard.WaitAsync(cancellationToken);
    try
    {
        foreach (var incoming in items)
            await MergeAsync(incoming, cancellationToken);
    }
    finally
    {
        _remoteSyncGuard.Release();
    }
}
#endregion
#region Oplog Management
/// <summary>
/// Returns true if a remote sync operation is in progress (guard acquired).
/// CDC listeners should check this before creating OplogEntry.
/// </summary>
/// <remarks>
/// The guard is a single-slot <see cref="SemaphoreSlim" />; a CurrentCount of
/// zero means some remote-sync path currently holds it.
/// </remarks>
protected bool IsRemoteSyncInProgress => _remoteSyncGuard.CurrentCount == 0;
/// <summary>
/// Called by subclass CDC listeners when a local change is detected.
/// Creates OplogEntry + DocumentMetadata only if no remote sync is in progress.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="operationType">The detected operation type.</param>
/// <param name="content">The document content when available.</param>
/// <param name="cancellationToken">The cancellation token.</param>
protected async Task OnLocalChangeDetectedAsync(
    string collection,
    string key,
    OperationType operationType,
    JsonElement? content,
    CancellationToken cancellationToken = default)
{
    // Changes written by a remote-sync path must not re-enter the Oplog.
    if (!IsRemoteSyncInProgress)
        await CreateOplogEntryAsync(collection, key, operationType, content, cancellationToken);
}
// Produces the next hybrid-logical-clock timestamp for this node, monotonic
// even when the wall clock stalls or regresses.
private HlcTimestamp GenerateTimestamp(string nodeId)
{
    lock (_clockLock)
    {
        long wallClock = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
        if (wallClock > _lastPhysicalTime)
        {
            // Wall clock advanced: adopt it and reset the tie-breaker.
            _lastPhysicalTime = wallClock;
            _logicalCounter = 0;
        }
        else
        {
            // Same or regressed wall clock: bump the logical counter instead.
            _logicalCounter++;
        }
        return new HlcTimestamp(_lastPhysicalTime, _logicalCounter, nodeId);
    }
}
/// <summary>
/// Creates a hash-chained oplog entry for a local change, upserts the
/// per-document sync metadata, persists both, and notifies the vector clock.
/// </summary>
/// <param name="collection">The logical collection name.</param>
/// <param name="key">The document key.</param>
/// <param name="operationType">The local operation type.</param>
/// <param name="content">The document content when available.</param>
/// <param name="cancellationToken">The cancellation token.</param>
private async Task CreateOplogEntryAsync(
    string collection,
    string key,
    OperationType operationType,
    JsonElement? content,
    CancellationToken cancellationToken)
{
    var config = await _configProvider.GetConfiguration();
    string nodeId = config.NodeId;
    // Get last hash from OplogEntries collection directly; the newest entry
    // for this node anchors the hash chain.
    var lastEntry = _context.OplogEntries
        .Find(e => e.TimestampNodeId == nodeId)
        .OrderByDescending(e => e.TimestampPhysicalTime)
        .ThenByDescending(e => e.TimestampLogicalCounter)
        .FirstOrDefault();
    // Empty previous-hash marks the first entry of this node's chain.
    string previousHash = lastEntry?.Hash ?? string.Empty;
    var timestamp = GenerateTimestamp(nodeId);
    var oplogEntry = new OplogEntry(
        collection,
        key,
        operationType,
        content,
        timestamp,
        previousHash);
    // Write directly to OplogEntries collection
    await _context.OplogEntries.InsertAsync(oplogEntry.ToEntity());
    // Write DocumentMetadata for sync tracking
    // NOTE(review): docMetadata is built even when an existing row is updated
    // below, in which case it is discarded unused.
    var docMetadata = EntityMappers.CreateDocumentMetadata(
        collection,
        key,
        timestamp,
        operationType == OperationType.Delete);
    var existingMetadata = _context.DocumentMetadatas
        .Find(m => m.Collection == collection && m.Key == key)
        .FirstOrDefault();
    if (existingMetadata != null)
    {
        // Update existing metadata in place with the new HLC point and tombstone flag.
        existingMetadata.HlcPhysicalTime = timestamp.PhysicalTime;
        existingMetadata.HlcLogicalCounter = timestamp.LogicalCounter;
        existingMetadata.HlcNodeId = timestamp.NodeId;
        existingMetadata.IsDeleted = operationType == OperationType.Delete;
        await _context.DocumentMetadatas.UpdateAsync(existingMetadata);
    }
    else
    {
        await _context.DocumentMetadatas.InsertAsync(docMetadata);
    }
    await _context.SaveChangesAsync(cancellationToken);
    // Notify VectorClockService so sync sees local changes
    _vectorClock.Update(oplogEntry);
    _logger.LogDebug(
        "Created Oplog entry: {Operation} {Collection}/{Key} at {Timestamp} (hash: {Hash})",
        operationType, collection, key, timestamp, oplogEntry.Hash);
}
/// <summary>
/// Marks the start of remote sync operations (suppresses CDC-triggered Oplog creation).
/// Use in using statement: using (store.BeginRemoteSync()) { ... }
/// </summary>
/// <returns>A scope whose disposal releases the remote-sync guard.</returns>
public IDisposable BeginRemoteSync()
{
    // Synchronous Wait: blocks the calling thread until the guard slot is free.
    _remoteSyncGuard.Wait();
    return new RemoteSyncScope(_remoteSyncGuard);
}
/// <summary>
/// Disposable scope that releases the remote-sync guard acquired by
/// <see cref="BeginRemoteSync" />.
/// </summary>
private sealed class RemoteSyncScope : IDisposable
{
    private readonly SemaphoreSlim _guard;

    // 0 = live, 1 = released; makes Dispose idempotent so a double Dispose
    // cannot over-release the single-slot semaphore.
    private int _disposed;

    /// <summary>
    /// Initializes a new instance of the <see cref="RemoteSyncScope" /> class.
    /// </summary>
    /// <param name="guard">The semaphore guarding remote sync operations.</param>
    public RemoteSyncScope(SemaphoreSlim guard)
    {
        _guard = guard;
    }

    /// <summary>
    /// Releases the remote sync guard at most once.
    /// </summary>
    public void Dispose()
    {
        if (Interlocked.Exchange(ref _disposed, 1) == 0)
            _guard.Release();
    }
}
#endregion
}

View File

@@ -1,253 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// BLite-backed oplog store that persists <see cref="OplogEntry" /> rows through a
/// <typeparamref name="TDbContext" /> and keeps the vector clock in sync.
/// </summary>
/// <typeparam name="TDbContext">The BLite context type.</typeparam>
public class BLiteOplogStore<TDbContext> : OplogStore where TDbContext : CBDDCDocumentDbContext
{
    /// <summary>The BLite database context used for all oplog persistence.</summary>
    protected readonly TDbContext _context;

    /// <summary>Logger for diagnostic output.</summary>
    protected readonly ILogger<BLiteOplogStore<TDbContext>> _logger;

    /// <summary>
    /// Initializes a new instance of the <see cref="BLiteOplogStore{TDbContext}" /> class.
    /// </summary>
    /// <param name="dbContext">The BLite database context.</param>
    /// <param name="documentStore">The document store used by the oplog store.</param>
    /// <param name="conflictResolver">The conflict resolver used during merges.</param>
    /// <param name="vectorClockService">The vector clock service used for timestamp coordination.</param>
    /// <param name="snapshotMetadataStore">Optional snapshot metadata store used for initialization.</param>
    /// <param name="logger">Optional logger instance.</param>
    public BLiteOplogStore(
        TDbContext dbContext,
        IDocumentStore documentStore,
        IConflictResolver conflictResolver,
        IVectorClockService vectorClockService,
        ISnapshotMetadataStore? snapshotMetadataStore = null,
        ILogger<BLiteOplogStore<TDbContext>>? logger = null) : base(documentStore, conflictResolver, vectorClockService,
        snapshotMetadataStore)
    {
        _context = dbContext ?? throw new ArgumentNullException(nameof(dbContext));
        _logger = logger ?? NullLogger<BLiteOplogStore<TDbContext>>.Instance;
    }

    /// <inheritdoc />
    public override async Task ApplyBatchAsync(IEnumerable<OplogEntry> oplogEntries,
        CancellationToken cancellationToken = default)
    {
        // BLite transactions are committed by each SaveChangesAsync internally.
        // Wrapping in an explicit transaction causes "Cannot rollback committed transaction"
        // because PutDocumentAsync → SaveChangesAsync already commits.
        await base.ApplyBatchAsync(oplogEntries, cancellationToken);
    }

    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        // Use Id (technical key) for deletion, not Hash (business key)
        await _context.OplogEntries.DeleteBulkAsync(_context.OplogEntries.FindAll().Select(e => e.Id));
        await _context.SaveChangesAsync(cancellationToken);
        _vectorClock.Invalidate();
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<OplogEntry>> ExportAsync(CancellationToken cancellationToken = default)
    {
        // Materialize so callers never enumerate a deferred query over the live collection.
        return _context.OplogEntries.FindAll().ToDomain().ToList();
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
        CancellationToken cancellationToken = default)
    {
        // 1. Resolve both chain endpoints; an unknown hash yields an empty range.
        var startRow = _context.OplogEntries.Find(o => o.Hash == startHash).FirstOrDefault();
        var endRow = _context.OplogEntries.Find(o => o.Hash == endHash).FirstOrDefault();
        if (startRow == null || endRow == null) return [];
        string nodeId = startRow.TimestampNodeId;
        // 2. Fetch range (Start < Entry <= End)
        var entities = _context.OplogEntries
            .Find(o => o.TimestampNodeId == nodeId &&
                       (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime ||
                        (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime &&
                         o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
                       (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime ||
                        (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime &&
                         o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .ToList();
        return entities.ToDomain();
    }

    /// <inheritdoc />
    public override async Task<OplogEntry?> GetEntryByHashAsync(string hash,
        CancellationToken cancellationToken = default)
    {
        // Hash is now a regular indexed property, not the Key
        return _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault()?.ToDomain();
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var query = _context.OplogEntries
            .Find(o => o.TimestampPhysicalTime > timestamp.PhysicalTime ||
                       (o.TimestampPhysicalTime == timestamp.PhysicalTime &&
                        o.TimestampLogicalCounter > timestamp.LogicalCounter));
        if (collections != null)
        {
            var collectionSet = new HashSet<string>(collections);
            query = query.Where(o => collectionSet.Contains(o.Collection));
        }
        return query
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .ToDomain()
            .ToList();
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var query = _context.OplogEntries.AsQueryable()
            .Where(o => o.TimestampNodeId == nodeId &&
                        (o.TimestampPhysicalTime > since.PhysicalTime ||
                         (o.TimestampPhysicalTime == since.PhysicalTime &&
                          o.TimestampLogicalCounter > since.LogicalCounter)));
        if (collections != null)
        {
            var collectionSet = new HashSet<string>(collections);
            query = query.Where(o => collectionSet.Contains(o.Collection));
        }
        return query
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .ToDomain()
            .ToList();
    }

    /// <inheritdoc />
    public override async Task ImportAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items) await _context.OplogEntries.InsertAsync(item.ToEntity());
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task MergeAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            // Hash is now a regular indexed property, not the Key
            var existing = _context.OplogEntries.Find(o => o.Hash == item.Hash).FirstOrDefault();
            if (existing == null) await _context.OplogEntries.InsertAsync(item.ToEntity());
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default)
    {
        // FIX: select the technical Id for DeleteBulkAsync, consistent with DropAsync
        // ("Use Id (technical key) for deletion, not Hash (business key)"); the
        // previous code passed Hash values, so the bulk delete targeted the wrong key.
        var toDelete = _context.OplogEntries.AsQueryable()
            .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime ||
                        (o.TimestampPhysicalTime == cutoff.PhysicalTime &&
                         o.TimestampLogicalCounter <= cutoff.LogicalCounter))
            .Select(o => o.Id)
            .ToList();
        await _context.OplogEntries.DeleteBulkAsync(toDelete);
        // Persist the prune like every other mutating operation in this store.
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    protected override void InitializeVectorClock()
    {
        if (_vectorClock.IsInitialized) return;
        // Early check: if context or OplogEntries is null, skip initialization
        if (_context?.OplogEntries == null)
        {
            _vectorClock.IsInitialized = true;
            return;
        }
        // Step 1: Load from SnapshotMetadata FIRST (base state after prune)
        if (_snapshotMetadataStore != null)
            try
            {
                var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult();
                foreach (var snapshot in snapshots)
                    _vectorClock.UpdateNode(
                        snapshot.NodeId,
                        new HlcTimestamp(snapshot.TimestampPhysicalTime, snapshot.TimestampLogicalCounter,
                            snapshot.NodeId),
                        snapshot.Hash ?? "");
            }
            catch
            {
                // Ignore errors during initialization - oplog data will be used as fallback
            }
        // Step 2: Load from Oplog (Latest State - Overrides Snapshot if newer)
        var latestPerNode = _context.OplogEntries.AsQueryable()
            .GroupBy(o => o.TimestampNodeId)
            .Select(g => new
            {
                NodeId = g.Key,
                MaxEntry = g.OrderByDescending(o => o.TimestampPhysicalTime)
                    .ThenByDescending(o => o.TimestampLogicalCounter)
                    .FirstOrDefault()
            })
            .ToList()
            .Where(x => x.MaxEntry != null)
            .ToList();
        foreach (var node in latestPerNode)
            if (node.MaxEntry != null)
                _vectorClock.UpdateNode(
                    node.NodeId,
                    new HlcTimestamp(node.MaxEntry.TimestampPhysicalTime, node.MaxEntry.TimestampLogicalCounter,
                        node.MaxEntry.TimestampNodeId),
                    node.MaxEntry.Hash ?? "");
        _vectorClock.IsInitialized = true;
    }

    /// <inheritdoc />
    protected override async Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default)
    {
        await _context.OplogEntries.InsertAsync(entry.ToEntity());
    }

    /// <inheritdoc />
    protected override async Task<string?> QueryLastHashForNodeAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        var lastEntry = _context.OplogEntries.AsQueryable()
            .Where(o => o.TimestampNodeId == nodeId)
            .OrderByDescending(o => o.TimestampPhysicalTime)
            .ThenByDescending(o => o.TimestampLogicalCounter)
            .FirstOrDefault();
        return lastEntry?.Hash;
    }

    /// <inheritdoc />
    protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
        CancellationToken cancellationToken = default)
    {
        // Hash is now a regular indexed property, not the Key
        var entry = _context.OplogEntries.Find(o => o.Hash == hash).FirstOrDefault();
        if (entry == null) return null;
        return (entry.TimestampPhysicalTime, entry.TimestampLogicalCounter);
    }
}

View File

@@ -1,131 +0,0 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Provides a peer configuration store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations.
/// </summary>
/// <remarks>
/// This class enables storage, retrieval, and management of remote peer configurations using the provided
/// database context. It is typically used in scenarios where peer configurations need to be persisted in a document
/// database.
/// </remarks>
/// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing peer configurations. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLitePeerConfigurationStore<TDbContext> : PeerConfigurationStore where TDbContext : CBDDCDocumentDbContext
{
    /// <summary>
    /// Represents the database context used for data access operations within the derived class.
    /// </summary>
    protected readonly TDbContext _context;

    /// <summary>
    /// Provides logging capabilities for the BLitePeerConfigurationStore operations.
    /// </summary>
    protected readonly ILogger<BLitePeerConfigurationStore<TDbContext>> _logger;

    /// <summary>
    /// Initializes a new instance of the BLitePeerConfigurationStore class using the specified database context and
    /// optional logger.
    /// </summary>
    /// <param name="context">The database context used to access and manage peer configuration data. Cannot be null.</param>
    /// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param>
    /// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception>
    public BLitePeerConfigurationStore(TDbContext context,
        ILogger<BLitePeerConfigurationStore<TDbContext>>? logger = null)
    {
        _context = context ?? throw new ArgumentNullException(nameof(context));
        _logger = logger ?? NullLogger<BLitePeerConfigurationStore<TDbContext>>.Instance;
    }

    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        _logger.LogWarning(
            "Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
        // Use Id (technical key) for deletion, not NodeId (business key)
        var allIds = await Task.Run(() => _context.RemotePeerConfigurations.FindAll().Select(p => p.Id).ToList(),
            cancellationToken);
        await _context.RemotePeerConfigurations.DeleteBulkAsync(allIds);
        await _context.SaveChangesAsync(cancellationToken);
        _logger.LogInformation("Peer configuration store dropped successfully.");
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
        CancellationToken cancellationToken = default)
    {
        // Export is exactly the full peer list; delegate so the two previously
        // duplicated code paths cannot drift apart.
        return await GetRemotePeersAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
        CancellationToken cancellationToken)
    {
        // NodeId is now a regular indexed property, not the Key
        return await Task.Run(
            () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault()?.ToDomain(),
            cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
        CancellationToken cancellationToken = default)
    {
        return await Task.Run(() => _context.RemotePeerConfigurations.FindAll().ToDomain().ToList(), cancellationToken);
    }

    /// <inheritdoc />
    public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
    {
        // NodeId is now a regular indexed property, not the Key
        var peer = await Task.Run(
            () => _context.RemotePeerConfigurations.Find(p => p.NodeId == nodeId).FirstOrDefault(), cancellationToken);
        if (peer != null)
        {
            await _context.RemotePeerConfigurations.DeleteAsync(peer.Id);
            await _context.SaveChangesAsync(cancellationToken);
            _logger.LogInformation("Removed remote peer configuration: {NodeId}", nodeId);
        }
        else
        {
            _logger.LogWarning("Attempted to remove non-existent remote peer: {NodeId}", nodeId);
        }
    }

    /// <inheritdoc />
    public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
        CancellationToken cancellationToken = default)
    {
        // NodeId is now a regular indexed property, not the Key
        var existing =
            await Task.Run(() => _context.RemotePeerConfigurations.Find(p => p.NodeId == peer.NodeId).FirstOrDefault(),
                cancellationToken);
        if (existing == null)
        {
            await _context.RemotePeerConfigurations.InsertAsync(peer.ToEntity());
        }
        else
        {
            // The row was located by NodeId == peer.NodeId, so the previous
            // self-assignment of NodeId was redundant and has been removed.
            existing.Address = peer.Address;
            existing.Type = (int)peer.Type;
            existing.IsEnabled = peer.IsEnabled;
            existing.InterestsJson = peer.InterestingCollections.Count > 0
                ? JsonSerializer.Serialize(peer.InterestingCollections)
                : "";
            await _context.RemotePeerConfigurations.UpdateAsync(existing);
        }
        await _context.SaveChangesAsync(cancellationToken);
        _logger.LogInformation("Saved remote peer configuration: {NodeId} ({Type})", peer.NodeId, peer.Type);
    }
}

View File

@@ -1,300 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// BLite-backed peer oplog confirmation store.
/// </summary>
/// <typeparam name="TDbContext">The BLite context type.</typeparam>
public class BLitePeerOplogConfirmationStore<TDbContext> : PeerOplogConfirmationStore
where TDbContext : CBDDCDocumentDbContext
{
internal const string RegistrationSourceNodeId = "__peer_registration__";
private readonly TDbContext _context;
private readonly ILogger<BLitePeerOplogConfirmationStore<TDbContext>> _logger;
/// <summary>
/// Initializes a new instance of the <see cref="BLitePeerOplogConfirmationStore{TDbContext}" /> class.
/// </summary>
/// <param name="context">The BLite context.</param>
/// <param name="logger">An optional logger.</param>
public BLitePeerOplogConfirmationStore(
TDbContext context,
ILogger<BLitePeerOplogConfirmationStore<TDbContext>>? logger = null)
{
_context = context ?? throw new ArgumentNullException(nameof(context));
_logger = logger ?? NullLogger<BLitePeerOplogConfirmationStore<TDbContext>>.Instance;
}
/// <inheritdoc />
public override async Task EnsurePeerRegisteredAsync(
string peerNodeId,
string address,
PeerType type,
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == RegistrationSourceNodeId)
.FirstOrDefault();
if (existing == null)
{
await _context.PeerOplogConfirmations.InsertAsync(new PeerOplogConfirmationEntity
{
Id = Guid.NewGuid().ToString(),
PeerNodeId = peerNodeId,
SourceNodeId = RegistrationSourceNodeId,
ConfirmedWall = 0,
ConfirmedLogic = 0,
ConfirmedHash = "",
LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(),
IsActive = true
});
await _context.SaveChangesAsync(cancellationToken);
_logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId,
address, type);
return;
}
if (!existing.IsActive)
{
existing.IsActive = true;
existing.LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
await _context.PeerOplogConfirmations.UpdateAsync(existing);
await _context.SaveChangesAsync(cancellationToken);
}
}
/// <inheritdoc />
public override async Task UpdateConfirmationAsync(
string peerNodeId,
string sourceNodeId,
HlcTimestamp timestamp,
string hash,
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
if (string.IsNullOrWhiteSpace(sourceNodeId))
throw new ArgumentException("Source node id is required.", nameof(sourceNodeId));
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId == sourceNodeId)
.FirstOrDefault();
long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
if (existing == null)
{
await _context.PeerOplogConfirmations.InsertAsync(new PeerOplogConfirmationEntity
{
Id = Guid.NewGuid().ToString(),
PeerNodeId = peerNodeId,
SourceNodeId = sourceNodeId,
ConfirmedWall = timestamp.PhysicalTime,
ConfirmedLogic = timestamp.LogicalCounter,
ConfirmedHash = hash ?? "",
LastConfirmedUtcMs = nowMs,
IsActive = true
});
await _context.SaveChangesAsync(cancellationToken);
return;
}
bool isNewer = IsIncomingTimestampNewer(timestamp, existing);
bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
timestamp.LogicalCounter == existing.ConfirmedLogic &&
!string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal);
if (!isNewer && !samePointHashChanged && existing.IsActive) return;
existing.ConfirmedWall = timestamp.PhysicalTime;
existing.ConfirmedLogic = timestamp.LogicalCounter;
existing.ConfirmedHash = hash ?? "";
existing.LastConfirmedUtcMs = nowMs;
existing.IsActive = true;
await _context.PeerOplogConfirmations.UpdateAsync(existing);
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(
CancellationToken cancellationToken = default)
{
var confirmations = _context.PeerOplogConfirmations
.Find(c => c.SourceNodeId != RegistrationSourceNodeId)
.ToDomain()
.ToList();
return Task.FromResult<IEnumerable<PeerOplogConfirmation>>(confirmations);
}
/// <inheritdoc />
public override Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsForPeerAsync(
string peerNodeId,
CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
var confirmations = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId && c.SourceNodeId != RegistrationSourceNodeId)
.ToDomain()
.ToList();
return Task.FromResult<IEnumerable<PeerOplogConfirmation>>(confirmations);
}
/// <inheritdoc />
public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default)
{
if (string.IsNullOrWhiteSpace(peerNodeId))
throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
var matches = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == peerNodeId)
.ToList();
if (matches.Count == 0) return;
long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
foreach (var match in matches)
{
if (!match.IsActive) continue;
match.IsActive = false;
match.LastConfirmedUtcMs = nowMs;
await _context.PeerOplogConfirmations.UpdateAsync(match);
}
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override Task<IEnumerable<string>> GetActiveTrackedPeersAsync(CancellationToken cancellationToken = default)
{
var peers = _context.PeerOplogConfirmations
.Find(c => c.IsActive)
.Select(c => c.PeerNodeId)
.Distinct(StringComparer.Ordinal)
.ToList();
return Task.FromResult<IEnumerable<string>>(peers);
}
/// <inheritdoc />
public override async Task DropAsync(CancellationToken cancellationToken = default)
{
var allIds = _context.PeerOplogConfirmations.FindAll().Select(c => c.Id).ToList();
await _context.PeerOplogConfirmations.DeleteBulkAsync(allIds);
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override Task<IEnumerable<PeerOplogConfirmation>> ExportAsync(CancellationToken cancellationToken = default)
{
var exported = _context.PeerOplogConfirmations
.FindAll()
.ToDomain()
.ToList();
return Task.FromResult<IEnumerable<PeerOplogConfirmation>>(exported);
}
/// <inheritdoc />
public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
CancellationToken cancellationToken = default)
{
foreach (var item in items)
{
var existing = _context.PeerOplogConfirmations
.Find(c => c.PeerNodeId == item.PeerNodeId && c.SourceNodeId == item.SourceNodeId)
.FirstOrDefault();
if (existing == null)
{
await _context.PeerOplogConfirmations.InsertAsync(item.ToEntity());
continue;
}
existing.ConfirmedWall = item.ConfirmedWall;
existing.ConfirmedLogic = item.ConfirmedLogic;
existing.ConfirmedHash = item.ConfirmedHash;
existing.LastConfirmedUtcMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
existing.IsActive = item.IsActive;
await _context.PeerOplogConfirmations.UpdateAsync(existing);
}
await _context.SaveChangesAsync(cancellationToken);
}
/// <inheritdoc />
public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
    CancellationToken cancellationToken = default)
{
    foreach (var incoming in items)
    {
        var current = _context.PeerOplogConfirmations
            .Find(c => c.PeerNodeId == incoming.PeerNodeId && c.SourceNodeId == incoming.SourceNodeId)
            .FirstOrDefault();
        if (current == null)
        {
            // Unknown (peer, source) pair: adopt the incoming row as-is.
            await _context.PeerOplogConfirmations.InsertAsync(incoming.ToEntity());
            continue;
        }

        var dirty = false;

        // Advance the confirmed HLC watermark only when the incoming one is strictly newer.
        var incomingHlc = new HlcTimestamp(incoming.ConfirmedWall, incoming.ConfirmedLogic, incoming.SourceNodeId);
        var currentHlc = new HlcTimestamp(current.ConfirmedWall, current.ConfirmedLogic, current.SourceNodeId);
        if (incomingHlc > currentHlc)
        {
            current.ConfirmedWall = incoming.ConfirmedWall;
            current.ConfirmedLogic = incoming.ConfirmedLogic;
            current.ConfirmedHash = incoming.ConfirmedHash;
            dirty = true;
        }

        // Keep the most recent wall-clock confirmation instant.
        var incomingConfirmedMs = incoming.LastConfirmedUtc.ToUnixTimeMilliseconds();
        if (incomingConfirmedMs > current.LastConfirmedUtcMs)
        {
            current.LastConfirmedUtcMs = incomingConfirmedMs;
            dirty = true;
        }

        // The activity flag follows the incoming record whenever it differs.
        if (current.IsActive != incoming.IsActive)
        {
            current.IsActive = incoming.IsActive;
            dirty = true;
        }

        if (dirty) await _context.PeerOplogConfirmations.UpdateAsync(current);
    }
    await _context.SaveChangesAsync(cancellationToken);
}
/// <summary>
/// Determines whether an incoming HLC timestamp is strictly newer than the watermark
/// stored on a confirmation entity, ordering by wall-clock time first and logical
/// counter second.
/// </summary>
/// <param name="incomingTimestamp">The candidate timestamp to compare.</param>
/// <param name="existing">The stored confirmation row whose watermark is compared against.</param>
/// <returns><c>true</c> when the incoming timestamp is strictly newer; otherwise <c>false</c>.</returns>
/// <remarks>
/// NOTE(review): this helper compares only the wall and logic components, with no node-id
/// tiebreak, and it appears to have no callers in this file (MergeAsync builds
/// HlcTimestamp values and uses the &gt; operator instead) — confirm whether it is dead code.
/// </remarks>
private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, PeerOplogConfirmationEntity existing)
{
    if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true;
    if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall &&
        incomingTimestamp.LogicalCounter > existing.ConfirmedLogic)
        return true;
    return false;
}
}

View File

@@ -1,167 +0,0 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Provides a snapshot metadata store implementation that uses a specified CBDDCDocumentDbContext for persistence
/// operations.
/// </summary>
/// <remarks>
/// This class enables storage, retrieval, and management of snapshot metadata using the provided
/// database context. It is typically used in scenarios where snapshot metadata needs to be persisted in a document
/// database. The class supports bulk operations and incremental updates, and can be extended for custom database
/// contexts. Thread safety depends on the underlying context implementation.
/// </remarks>
/// <typeparam name="TDbContext">
/// The type of the document database context used for accessing and managing snapshot metadata. Must inherit from
/// CBDDCDocumentDbContext.
/// </typeparam>
public class BLiteSnapshotMetadataStore<TDbContext> : SnapshotMetadataStore where TDbContext : CBDDCDocumentDbContext
{
    /// <summary>
    /// Represents the database context used for data access operations within the derived class.
    /// </summary>
    /// <remarks>
    /// Intended for use by derived classes to interact with the underlying database. The context
    /// should be properly disposed of according to the application's lifetime management strategy.
    /// </remarks>
    protected readonly TDbContext _context;

    /// <summary>
    /// Provides logging capabilities for the BLiteSnapshotMetadataStore operations.
    /// Never null; a no-op logger is substituted when none is supplied.
    /// </summary>
    protected readonly ILogger<BLiteSnapshotMetadataStore<TDbContext>> _logger;

    /// <summary>
    /// Initializes a new instance of the BLiteSnapshotMetadataStore class using the specified database context and
    /// optional logger.
    /// </summary>
    /// <param name="context">The database context to be used for accessing snapshot metadata. Cannot be null.</param>
    /// <param name="logger">An optional logger for logging diagnostic messages. If null, a no-op logger is used.</param>
    /// <exception cref="ArgumentNullException">Thrown if the context parameter is null.</exception>
    public BLiteSnapshotMetadataStore(TDbContext context,
        ILogger<BLiteSnapshotMetadataStore<TDbContext>>? logger = null)
    {
        _context = context ?? throw new ArgumentNullException(nameof(context));
        _logger = logger ?? NullLogger<BLiteSnapshotMetadataStore<TDbContext>>.Instance;
    }

    /// <inheritdoc />
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        // Use Id (technical key) for deletion, not NodeId (business key)
        var allIds = await Task.Run(() => _context.SnapshotMetadatas.FindAll().Select(s => s.Id).ToList(),
            cancellationToken);
        await _context.SnapshotMetadatas.DeleteBulkAsync(allIds);
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<SnapshotMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<string?> GetSnapshotHashAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        // NodeId is a regular indexed property, not the technical key
        var snapshot = await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault(),
            cancellationToken);
        return snapshot?.Hash;
    }

    /// <inheritdoc />
    /// <remarks>
    /// Upserts by NodeId. NodeId carries a unique index in the model configuration, so a blind
    /// insert would violate the constraint when a snapshot for the same node already exists.
    /// Import is authoritative, so existing rows are overwritten.
    /// </remarks>
    public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var metadata in items)
        {
            var existing = await Task.Run(
                () => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(),
                cancellationToken);
            if (existing == null)
            {
                await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
            }
            else
            {
                existing.TimestampPhysicalTime = metadata.TimestampPhysicalTime;
                existing.TimestampLogicalCounter = metadata.TimestampLogicalCounter;
                existing.Hash = metadata.Hash;
                await _context.SnapshotMetadatas.UpdateAsync(existing);
            }
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var metadata in items)
        {
            // NodeId is a regular indexed property, not the technical key
            var existing =
                await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == metadata.NodeId).FirstOrDefault(),
                    cancellationToken);
            if (existing == null)
            {
                await _context.SnapshotMetadatas.InsertAsync(metadata.ToEntity());
            }
            else
            {
                // Update only if the incoming timestamp is strictly newer (HLC ordering:
                // physical time first, logical counter as tiebreaker).
                if (metadata.TimestampPhysicalTime > existing.TimestampPhysicalTime ||
                    (metadata.TimestampPhysicalTime == existing.TimestampPhysicalTime &&
                     metadata.TimestampLogicalCounter > existing.TimestampLogicalCounter))
                {
                    // NodeId is unchanged by construction: rows were matched on it above.
                    existing.TimestampPhysicalTime = metadata.TimestampPhysicalTime;
                    existing.TimestampLogicalCounter = metadata.TimestampLogicalCounter;
                    existing.Hash = metadata.Hash;
                    await _context.SnapshotMetadatas.UpdateAsync(existing);
                }
            }
        }
        await _context.SaveChangesAsync(cancellationToken);
    }

    /// <inheritdoc />
    public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta,
        CancellationToken cancellationToken)
    {
        // Locate the row through its NodeId business key.
        var existing =
            await Task.Run(() => _context.SnapshotMetadatas.Find(s => s.NodeId == existingMeta.NodeId).FirstOrDefault(),
                cancellationToken);
        if (existing != null)
        {
            existing.TimestampPhysicalTime = existingMeta.TimestampPhysicalTime;
            existing.TimestampLogicalCounter = existingMeta.TimestampLogicalCounter;
            existing.Hash = existingMeta.Hash;
            await _context.SnapshotMetadatas.UpdateAsync(existing);
            await _context.SaveChangesAsync(cancellationToken);
        }
        else
        {
            // Previously a silent no-op; surface it so missing rows are diagnosable.
            _logger.LogDebug(
                "UpdateSnapshotMetadataAsync found no snapshot metadata for NodeId {NodeId}; nothing was updated.",
                existingMeta.NodeId);
        }
    }

    /// <inheritdoc />
    public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        // NodeId is a regular indexed property, not the technical key
        return await Task.Run(
            () => _context.SnapshotMetadatas.Find(s => s.NodeId == nodeId).FirstOrDefault()?.ToDomain(),
            cancellationToken);
    }

    /// <inheritdoc />
    public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(
        CancellationToken cancellationToken = default)
    {
        return await Task.Run(() => _context.SnapshotMetadatas.FindAll().ToDomain().ToList(), cancellationToken);
    }
}

View File

@@ -1,102 +0,0 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
/// <summary>
/// Extension methods for configuring BLite persistence for ZB.MOM.WW.CBDDC.
/// </summary>
public static class CBDDCBLiteExtensions
{
    /// <summary>
    /// Adds BLite persistence to CBDDC using a custom DbContext and DocumentStore implementation.
    /// </summary>
    /// <typeparam name="TDbContext">The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext.</typeparam>
    /// <typeparam name="TDocumentStore">The type of the document store implementation. Must implement IDocumentStore.</typeparam>
    /// <param name="services">The service collection to add the services to.</param>
    /// <param name="contextFactory">A factory function that creates the DbContext instance.</param>
    /// <returns>The service collection for chaining.</returns>
    public static IServiceCollection AddCBDDCBLite<TDbContext, TDocumentStore>(
        this IServiceCollection services,
        Func<IServiceProvider, TDbContext> contextFactory)
        where TDbContext : CBDDCDocumentDbContext
        where TDocumentStore : class, IDocumentStore
    {
        // All shared registrations (context, conflict resolver, vector clock, BLite stores,
        // snapshot service) live in the single-generic overload so the two entry points
        // cannot drift apart. Argument validation also happens there.
        services.AddCBDDCBLite(contextFactory);
        // Register the DocumentStore implementation on top of the shared set.
        services.TryAddSingleton<IDocumentStore, TDocumentStore>();
        return services;
    }

    /// <summary>
    /// Adds BLite persistence to CBDDC using a custom DbContext (without explicit DocumentStore type).
    /// </summary>
    /// <typeparam name="TDbContext">The type of the BLite document database context. Must inherit from CBDDCDocumentDbContext.</typeparam>
    /// <param name="services">The service collection to add the services to.</param>
    /// <param name="contextFactory">A factory function that creates the DbContext instance.</param>
    /// <returns>The service collection for chaining.</returns>
    /// <remarks>You must manually register IDocumentStore after calling this method.</remarks>
    public static IServiceCollection AddCBDDCBLite<TDbContext>(
        this IServiceCollection services,
        Func<IServiceProvider, TDbContext> contextFactory)
        where TDbContext : CBDDCDocumentDbContext
    {
        if (services == null) throw new ArgumentNullException(nameof(services));
        if (contextFactory == null) throw new ArgumentNullException(nameof(contextFactory));
        // Register the DbContext as singleton (must match store lifetime)
        services.TryAddSingleton<TDbContext>(contextFactory);
        services.TryAddSingleton<CBDDCDocumentDbContext>(sp => sp.GetRequiredService<TDbContext>());
        // Default Conflict Resolver (Last Write Wins) if none is provided
        services.TryAddSingleton<IConflictResolver, LastWriteWinsConflictResolver>();
        // Vector Clock Service (shared between DocumentStore and OplogStore).
        // Previously only the two-generic overload registered this; both paths now do,
        // which is backward compatible because TryAddSingleton never replaces an
        // existing registration.
        services.TryAddSingleton<IVectorClockService, VectorClockService>();
        // Register BLite Stores (all Singleton)
        services.TryAddSingleton<IOplogStore, BLiteOplogStore<TDbContext>>();
        services.TryAddSingleton<IPeerConfigurationStore, BLitePeerConfigurationStore<TDbContext>>();
        services.TryAddSingleton<IPeerOplogConfirmationStore, BLitePeerOplogConfirmationStore<TDbContext>>();
        services.TryAddSingleton<ISnapshotMetadataStore, BLiteSnapshotMetadataStore<TDbContext>>();
        services.TryAddSingleton<IDocumentMetadataStore, BLiteDocumentMetadataStore<TDbContext>>();
        // Register the SnapshotService (uses the generic SnapshotStore from ZB.MOM.WW.CBDDC.Persistence)
        services.TryAddSingleton<ISnapshotService, SnapshotStore>();
        return services;
    }
}
/// <summary>
/// Configuration options for the BLite persistence provider.
/// </summary>
public class BLiteOptions
{
    /// <summary>
    /// Gets or sets the file system path of the BLite database file. Defaults to an empty string.
    /// </summary>
    public string DatabasePath { get; set; } = string.Empty;
}

View File

@@ -1,117 +0,0 @@
using BLite.Core;
using BLite.Core.Collections;
using BLite.Core.Metadata;
using BLite.Core.Storage;
using ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite;
public partial class CBDDCDocumentDbContext : DocumentDbContext
{
    /// <summary>
    /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database file path.
    /// </summary>
    /// <param name="databasePath">
    /// The file system path to the database file to be used by the context. Cannot be null or
    /// empty.
    /// </param>
    public CBDDCDocumentDbContext(string databasePath) : base(databasePath)
    {
    }
    /// <summary>
    /// Initializes a new instance of the CBDDCDocumentDbContext class using the specified database path and page file
    /// configuration.
    /// </summary>
    /// <param name="databasePath">The file system path to the database file. This value cannot be null or empty.</param>
    /// <param name="config">
    /// The configuration settings for the page file. Specifies options that control how the database
    /// pages are managed.
    /// </param>
    public CBDDCDocumentDbContext(string databasePath, PageFileConfig config) : base(databasePath, config)
    {
    }
    /// <summary>
    /// Gets the collection of operation log entries associated with this instance.
    /// </summary>
    /// <remarks>
    /// The collection provides access to all recorded operation log (oplog) entries, which can be
    /// used to track changes or replicate operations. The collection is read-only; entries cannot be added or removed
    /// directly through this property.
    /// </remarks>
    public DocumentCollection<string, OplogEntity> OplogEntries { get; private set; } = null!;
    /// <summary>
    /// Gets the collection of snapshot metadata associated with the document.
    /// </summary>
    public DocumentCollection<string, SnapshotMetadataEntity> SnapshotMetadatas { get; private set; } = null!;
    /// <summary>
    /// Gets the collection of remote peer configurations associated with this instance.
    /// </summary>
    /// <remarks>
    /// Use this collection to access or enumerate the configuration settings for each remote peer.
    /// The collection is read-only; to modify peer configurations, use the appropriate methods provided by the
    /// containing class.
    /// </remarks>
    public DocumentCollection<string, RemotePeerEntity> RemotePeerConfigurations { get; private set; } = null!;
    /// <summary>
    /// Gets the collection of document metadata for sync tracking.
    /// </summary>
    /// <remarks>
    /// Stores HLC timestamps and deleted state for each document without modifying application entities.
    /// Used to track document versions for incremental sync instead of full snapshots.
    /// </remarks>
    public DocumentCollection<string, DocumentMetadataEntity> DocumentMetadatas { get; private set; } = null!;
    /// <summary>
    /// Gets the collection of peer oplog confirmation records for pruning safety tracking.
    /// </summary>
    public DocumentCollection<string, PeerOplogConfirmationEntity> PeerOplogConfirmations { get; private set; } = null!;
    /// <inheritdoc />
    /// <remarks>
    /// Every entity uses an auto-generated GUID <c>Id</c> as its technical key and declares its
    /// business identity through a unique secondary index (Hash, NodeId, or a composite), so
    /// business keys can change without rewriting primary keys.
    /// </remarks>
    protected override void OnModelCreating(ModelBuilder modelBuilder)
    {
        base.OnModelCreating(modelBuilder);
        // OplogEntries: Use Id as technical key, Hash as unique business key
        modelBuilder.Entity<OplogEntity>()
            .ToCollection("OplogEntries")
            .HasKey(e => e.Id)
            .HasIndex(e => e.Hash, unique: true) // Hash is unique business key
            .HasIndex(e => new { e.TimestampPhysicalTime, e.TimestampLogicalCounter, e.TimestampNodeId })
            .HasIndex(e => e.Collection);
        // SnapshotMetadatas: Use Id as technical key, NodeId as unique business key
        modelBuilder.Entity<SnapshotMetadataEntity>()
            .ToCollection("SnapshotMetadatas")
            .HasKey(e => e.Id)
            .HasIndex(e => e.NodeId, unique: true) // NodeId is unique business key
            .HasIndex(e => new { e.TimestampPhysicalTime, e.TimestampLogicalCounter });
        // RemotePeerConfigurations: Use Id as technical key, NodeId as unique business key
        modelBuilder.Entity<RemotePeerEntity>()
            .ToCollection("RemotePeerConfigurations")
            .HasKey(e => e.Id)
            .HasIndex(e => e.NodeId, unique: true) // NodeId is unique business key
            .HasIndex(e => e.IsEnabled);
        // DocumentMetadatas: Use Id as technical key, Collection+Key as unique composite business key
        modelBuilder.Entity<DocumentMetadataEntity>()
            .ToCollection("DocumentMetadatas")
            .HasKey(e => e.Id)
            .HasIndex(e => new { e.Collection, e.Key }, unique: true) // Composite business key
            .HasIndex(e => new { e.HlcPhysicalTime, e.HlcLogicalCounter, e.HlcNodeId })
            .HasIndex(e => e.Collection);
        // PeerOplogConfirmations: Use Id as technical key, PeerNodeId+SourceNodeId as unique business key
        modelBuilder.Entity<PeerOplogConfirmationEntity>()
            .ToCollection("PeerOplogConfirmations")
            .HasKey(e => e.Id)
            .HasIndex(e => new { e.PeerNodeId, e.SourceNodeId }, unique: true)
            .HasIndex(e => e.IsActive)
            .HasIndex(e => new { e.SourceNodeId, e.ConfirmedWall, e.ConfirmedLogic });
    }
}

View File

@@ -1,47 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity that records per-document sync-tracking metadata — the document's HLC
/// timestamp components and its tombstone flag — without modifying application entities.
/// </summary>
public class DocumentMetadataEntity
{
    /// <summary>Technical primary key; an auto-generated GUID string.</summary>
    [Key]
    public string Id { get; set; } = string.Empty;

    /// <summary>Owning collection name (first half of the composite business key).</summary>
    public string Collection { get; set; } = string.Empty;

    /// <summary>Document key within <see cref="Collection"/> (second half of the business key).</summary>
    public string Key { get; set; } = string.Empty;

    /// <summary>Physical (wall-clock) component of the HLC timestamp.</summary>
    public long HlcPhysicalTime { get; set; }

    /// <summary>Logical counter component of the HLC timestamp.</summary>
    public int HlcLogicalCounter { get; set; }

    /// <summary>Identifier of the node that last modified this document.</summary>
    public string HlcNodeId { get; set; } = string.Empty;

    /// <summary>Tombstone flag: <c>true</c> when the document is marked as deleted.</summary>
    public bool IsDeleted { get; set; }
}

View File

@@ -1,240 +0,0 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// Provides extension methods for mapping between BLite entities and domain models.
/// </summary>
public static class EntityMappers
{
    #region DocumentMetadataEntity Helpers
    /// <summary>
    /// Creates a DocumentMetadataEntity from collection, key, timestamp, and deleted state.
    /// Used for tracking document sync state.
    /// </summary>
    /// <param name="collection">The collection name that owns the document.</param>
    /// <param name="key">The document key within the collection.</param>
    /// <param name="timestamp">The hybrid logical clock timestamp for the document state.</param>
    /// <param name="isDeleted">Indicates whether the document is marked as deleted.</param>
    /// <returns>A new entity carrying a freshly generated GUID technical Id.</returns>
    public static DocumentMetadataEntity CreateDocumentMetadata(string collection, string key, HlcTimestamp timestamp,
        bool isDeleted = false)
    {
        return new DocumentMetadataEntity
        {
            Id = Guid.NewGuid().ToString(),
            Collection = collection,
            Key = key,
            HlcPhysicalTime = timestamp.PhysicalTime,
            HlcLogicalCounter = timestamp.LogicalCounter,
            HlcNodeId = timestamp.NodeId,
            IsDeleted = isDeleted
        };
    }
    #endregion
    #region OplogEntity Mappers
    /// <summary>
    /// Converts an OplogEntry domain model to an OplogEntity for persistence.
    /// </summary>
    /// <param name="entry">The oplog entry to convert.</param>
    /// <returns>A persistable entity with a newly generated technical Id.</returns>
    /// <remarks>
    /// NOTE(review): every ToEntity call mints a fresh GUID Id, so re-mapping an already-stored
    /// row produces a new technical identity; update paths must reuse the stored entity.
    /// </remarks>
    public static OplogEntity ToEntity(this OplogEntry entry)
    {
        return new OplogEntity
        {
            Id = Guid.NewGuid().ToString(), // Auto-generate technical key
            Collection = entry.Collection,
            Key = entry.Key,
            Operation = (int)entry.Operation,
            // Use empty string instead of null to avoid BLite BSON serialization issues
            PayloadJson = entry.Payload?.GetRawText() ?? "",
            TimestampPhysicalTime = entry.Timestamp.PhysicalTime,
            TimestampLogicalCounter = entry.Timestamp.LogicalCounter,
            TimestampNodeId = entry.Timestamp.NodeId,
            Hash = entry.Hash,
            PreviousHash = entry.PreviousHash
        };
    }
    /// <summary>
    /// Converts an OplogEntity to an OplogEntry domain model.
    /// </summary>
    /// <param name="entity">The persisted oplog entity to convert.</param>
    /// <returns>The reconstructed domain entry; an empty PayloadJson becomes a null payload.</returns>
    public static OplogEntry ToDomain(this OplogEntity entity)
    {
        JsonElement? payload = null;
        // Treat empty string as null payload (Delete operations)
        if (!string.IsNullOrEmpty(entity.PayloadJson))
            payload = JsonSerializer.Deserialize<JsonElement>(entity.PayloadJson);
        return new OplogEntry(
            entity.Collection,
            entity.Key,
            (OperationType)entity.Operation,
            payload,
            new HlcTimestamp(entity.TimestampPhysicalTime, entity.TimestampLogicalCounter, entity.TimestampNodeId),
            entity.PreviousHash,
            entity.Hash);
    }
    /// <summary>
    /// Converts a collection of OplogEntity to OplogEntry domain models.
    /// </summary>
    /// <param name="entities">The oplog entities to convert.</param>
    /// <returns>A lazily evaluated sequence of domain entries.</returns>
    public static IEnumerable<OplogEntry> ToDomain(this IEnumerable<OplogEntity> entities)
    {
        return entities.Select(e => e.ToDomain());
    }
    #endregion
    #region SnapshotMetadataEntity Mappers
    /// <summary>
    /// Converts a SnapshotMetadata domain model to a SnapshotMetadataEntity for persistence.
    /// </summary>
    /// <param name="metadata">The snapshot metadata to convert.</param>
    /// <returns>A persistable entity with a newly generated technical Id.</returns>
    public static SnapshotMetadataEntity ToEntity(this SnapshotMetadata metadata)
    {
        return new SnapshotMetadataEntity
        {
            Id = Guid.NewGuid().ToString(), // Auto-generate technical key
            NodeId = metadata.NodeId,
            TimestampPhysicalTime = metadata.TimestampPhysicalTime,
            TimestampLogicalCounter = metadata.TimestampLogicalCounter,
            Hash = metadata.Hash
        };
    }
    /// <summary>
    /// Converts a SnapshotMetadataEntity to a SnapshotMetadata domain model.
    /// </summary>
    /// <param name="entity">The persisted snapshot metadata entity to convert.</param>
    /// <returns>The reconstructed domain model (the technical Id is not carried over).</returns>
    public static SnapshotMetadata ToDomain(this SnapshotMetadataEntity entity)
    {
        return new SnapshotMetadata
        {
            NodeId = entity.NodeId,
            TimestampPhysicalTime = entity.TimestampPhysicalTime,
            TimestampLogicalCounter = entity.TimestampLogicalCounter,
            Hash = entity.Hash
        };
    }
    /// <summary>
    /// Converts a collection of SnapshotMetadataEntity to SnapshotMetadata domain models.
    /// </summary>
    /// <param name="entities">The snapshot metadata entities to convert.</param>
    /// <returns>A lazily evaluated sequence of domain models.</returns>
    public static IEnumerable<SnapshotMetadata> ToDomain(this IEnumerable<SnapshotMetadataEntity> entities)
    {
        return entities.Select(e => e.ToDomain());
    }
    #endregion
    #region RemotePeerEntity Mappers
    /// <summary>
    /// Converts a RemotePeerConfiguration domain model to a RemotePeerEntity for persistence.
    /// </summary>
    /// <param name="config">The remote peer configuration to convert.</param>
    /// <returns>A persistable entity; an empty interest list is stored as an empty string.</returns>
    public static RemotePeerEntity ToEntity(this RemotePeerConfiguration config)
    {
        return new RemotePeerEntity
        {
            Id = Guid.NewGuid().ToString(), // Auto-generate technical key
            NodeId = config.NodeId,
            Address = config.Address,
            Type = (int)config.Type,
            IsEnabled = config.IsEnabled,
            InterestsJson = config.InterestingCollections.Count > 0
                ? JsonSerializer.Serialize(config.InterestingCollections)
                : ""
        };
    }
    /// <summary>
    /// Converts a RemotePeerEntity to a RemotePeerConfiguration domain model.
    /// </summary>
    /// <param name="entity">The persisted remote peer entity to convert.</param>
    /// <returns>The reconstructed configuration; an empty InterestsJson yields no interests.</returns>
    public static RemotePeerConfiguration ToDomain(this RemotePeerEntity entity)
    {
        var config = new RemotePeerConfiguration
        {
            NodeId = entity.NodeId,
            Address = entity.Address,
            Type = (PeerType)entity.Type,
            IsEnabled = entity.IsEnabled
        };
        if (!string.IsNullOrEmpty(entity.InterestsJson))
            config.InterestingCollections = JsonSerializer.Deserialize<List<string>>(entity.InterestsJson) ?? [];
        return config;
    }
    /// <summary>
    /// Converts a collection of RemotePeerEntity to RemotePeerConfiguration domain models.
    /// </summary>
    /// <param name="entities">The remote peer entities to convert.</param>
    /// <returns>A lazily evaluated sequence of configurations.</returns>
    public static IEnumerable<RemotePeerConfiguration> ToDomain(this IEnumerable<RemotePeerEntity> entities)
    {
        return entities.Select(e => e.ToDomain());
    }
    #endregion
    #region PeerOplogConfirmationEntity Mappers
    /// <summary>
    /// Converts a peer oplog confirmation domain model to a BLite entity.
    /// </summary>
    /// <param name="confirmation">The confirmation to convert.</param>
    /// <returns>A persistable entity; LastConfirmedUtc is stored as unix milliseconds.</returns>
    public static PeerOplogConfirmationEntity ToEntity(this PeerOplogConfirmation confirmation)
    {
        return new PeerOplogConfirmationEntity
        {
            Id = Guid.NewGuid().ToString(),
            PeerNodeId = confirmation.PeerNodeId,
            SourceNodeId = confirmation.SourceNodeId,
            ConfirmedWall = confirmation.ConfirmedWall,
            ConfirmedLogic = confirmation.ConfirmedLogic,
            ConfirmedHash = confirmation.ConfirmedHash,
            LastConfirmedUtcMs = confirmation.LastConfirmedUtc.ToUnixTimeMilliseconds(),
            IsActive = confirmation.IsActive
        };
    }
    /// <summary>
    /// Converts a peer oplog confirmation entity to a domain model.
    /// </summary>
    /// <param name="entity">The entity to convert.</param>
    /// <returns>The reconstructed domain model with LastConfirmedUtc restored from unix milliseconds.</returns>
    public static PeerOplogConfirmation ToDomain(this PeerOplogConfirmationEntity entity)
    {
        return new PeerOplogConfirmation
        {
            PeerNodeId = entity.PeerNodeId,
            SourceNodeId = entity.SourceNodeId,
            ConfirmedWall = entity.ConfirmedWall,
            ConfirmedLogic = entity.ConfirmedLogic,
            ConfirmedHash = entity.ConfirmedHash,
            LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(entity.LastConfirmedUtcMs),
            IsActive = entity.IsActive
        };
    }
    /// <summary>
    /// Converts a collection of peer oplog confirmation entities to domain models.
    /// </summary>
    /// <param name="entities">The entities to convert.</param>
    /// <returns>A lazily evaluated sequence of domain models.</returns>
    public static IEnumerable<PeerOplogConfirmation> ToDomain(this IEnumerable<PeerOplogConfirmationEntity> entities)
    {
        return entities.Select(e => e.ToDomain());
    }
    #endregion
}

View File

@@ -1,61 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity persisting a single operation log record: the targeted document, the
/// serialized payload, the HLC timestamp components, and the hash-chain linkage.
/// </summary>
public class OplogEntity
{
    /// <summary>Technical primary key; an auto-generated GUID string.</summary>
    [Key]
    public string Id { get; set; } = string.Empty;

    /// <summary>Name of the collection the logged operation targets.</summary>
    public string Collection { get; set; } = string.Empty;

    /// <summary>Key of the document the logged operation targets.</summary>
    public string Key { get; set; } = string.Empty;

    /// <summary>Operation discriminator (0 = Put, 1 = Delete).</summary>
    public int Operation { get; set; }

    /// <summary>Serialized JSON payload; an empty string for Delete operations.</summary>
    public string PayloadJson { get; set; } = string.Empty;

    /// <summary>Physical (wall-clock) component of the HLC timestamp.</summary>
    public long TimestampPhysicalTime { get; set; }

    /// <summary>Logical counter component of the HLC timestamp.</summary>
    public int TimestampLogicalCounter { get; set; }

    /// <summary>Originating node id component of the HLC timestamp.</summary>
    public string TimestampNodeId { get; set; } = string.Empty;

    /// <summary>Cryptographic hash of this entry; serves as the unique business key.</summary>
    public string Hash { get; set; } = string.Empty;

    /// <summary>Hash of the preceding entry in the oplog chain.</summary>
    public string PreviousHash { get; set; } = string.Empty;
}

View File

@@ -1,50 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity persisting a peer oplog confirmation watermark for a
/// (peer node, source node) pair.
/// </summary>
public class PeerOplogConfirmationEntity
{
    /// <summary>Technical primary key; a unique identifier string.</summary>
    [Key]
    public string Id { get; set; } = string.Empty;

    /// <summary>Identifier of the tracked peer node.</summary>
    public string PeerNodeId { get; set; } = string.Empty;

    /// <summary>Identifier of the source node this confirmation refers to.</summary>
    public string SourceNodeId { get; set; } = string.Empty;

    /// <summary>Physical wall-clock component of the confirmed HLC timestamp.</summary>
    public long ConfirmedWall { get; set; }

    /// <summary>Logical component of the confirmed HLC timestamp.</summary>
    public int ConfirmedLogic { get; set; }

    /// <summary>Hash value that was confirmed.</summary>
    public string ConfirmedHash { get; set; } = string.Empty;

    /// <summary>UTC instant of the last update, stored as unix milliseconds.</summary>
    public long LastConfirmedUtcMs { get; set; }

    /// <summary>Whether the tracked peer is still active; defaults to <c>true</c>.</summary>
    public bool IsActive { get; set; } = true;
}

View File

@@ -1,42 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity persisting the configuration of one remote peer.
/// </summary>
public class RemotePeerEntity
{
    /// <summary>Technical primary key; an auto-generated GUID string.</summary>
    [Key]
    public string Id { get; set; } = string.Empty;

    /// <summary>Unique identifier of the remote peer node; acts as the business key.</summary>
    public string NodeId { get; set; } = string.Empty;

    /// <summary>Network address of the remote peer in <c>hostname:port</c> form.</summary>
    public string Address { get; set; } = string.Empty;

    /// <summary>Peer type discriminator (0 = LanDiscovered, 1 = StaticRemote, 2 = CloudRemote).</summary>
    public int Type { get; set; }

    /// <summary>Whether this peer participates in synchronization; defaults to <c>true</c>.</summary>
    public bool IsEnabled { get; set; } = true;

    /// <summary>
    /// Collection interests serialized as JSON; an empty string (never null) when there are
    /// none, for BLite compatibility.
    /// </summary>
    public string InterestsJson { get; set; } = string.Empty;
}

View File

@@ -1,36 +0,0 @@
using System.ComponentModel.DataAnnotations;
namespace ZB.MOM.WW.CBDDC.Persistence.BLite.Entities;
/// <summary>
/// BLite entity persisting snapshot metadata, which serves as the oplog pruning checkpoint.
/// </summary>
public class SnapshotMetadataEntity
{
    /// <summary>Technical primary key; an auto-generated GUID string.</summary>
    [Key]
    public string Id { get; set; } = string.Empty;

    /// <summary>Node identifier; acts as the business key.</summary>
    public string NodeId { get; set; } = string.Empty;

    /// <summary>Physical (wall-clock) component of the snapshot timestamp.</summary>
    public long TimestampPhysicalTime { get; set; }

    /// <summary>Logical counter component of the snapshot timestamp.</summary>
    public int TimestampLogicalCounter { get; set; }

    /// <summary>Hash of the snapshot.</summary>
    public string Hash { get; set; } = string.Empty;
}

View File

@@ -1,10 +1,10 @@
# ZB.MOM.WW.CBDDC.Persistence
BLite persistence provider and foundational persistence implementations for **CBDDC**.
SurrealDB (embedded RocksDB) persistence provider and foundational persistence implementations for **CBDDC**.
## What's Included
This package provides both BLite provider types and core persistence services:
This package provides Surreal provider types and core persistence services:
- **OplogStore**: Base implementation for append-only operation log storage
- **VectorClockService**: Thread-safe in-memory vector clock management
@@ -14,7 +14,7 @@ This package provides both BLite provider types and core persistence services:
## When To Use This Package
- **As a Library User**: Install this package to use CBDDC with BLite persistence.
- **As a Library User**: Install this package to use CBDDC with Surreal embedded persistence.
- **As a Provider Developer**: Reference this package to build custom persistence providers by extending the base
classes

View File

@@ -0,0 +1,142 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Embedded.Options;
using SurrealDb.Embedded.RocksDb;
using SurrealDb.Net;
using SurrealDb.Net.Models.Response;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Embedded RocksDB-backed Surreal client wrapper used by CBDDC persistence components.
/// </summary>
/// <remarks>
/// Owns the underlying <c>SurrealDbRocksDbClient</c> and guarantees Connect/Use run at
/// most once before any query or health call. Both sync and async disposal are idempotent.
/// </remarks>
public sealed class CBDDCSurrealEmbeddedClient : ICBDDCSurrealEmbeddedClient
{
    // Shared empty parameter set so RawQueryAsync does not allocate a dictionary per call.
    private static readonly IReadOnlyDictionary<string, object?> EmptyParameters = new Dictionary<string, object?>();
    // Serializes first-time initialization (Connect + Use) across concurrent callers.
    private readonly SemaphoreSlim _initializeGate = new(1, 1);
    private readonly ILogger<CBDDCSurrealEmbeddedClient> _logger;
    private readonly CBDDCSurrealEmbeddedOptions _options;
    private bool _disposed;
    // Fast-path flag read without synchronization; the gate provides the authoritative
    // double-check inside InitializeAsync, so a stale read only costs an extra wait.
    private bool _initialized;
    /// <summary>
    /// Initializes a new instance of the <see cref="CBDDCSurrealEmbeddedClient" /> class.
    /// </summary>
    /// <param name="options">Embedded Surreal options.</param>
    /// <param name="logger">Optional logger.</param>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="options" /> is null.</exception>
    /// <exception cref="ArgumentException">Thrown when a required option value is missing or invalid.</exception>
    public CBDDCSurrealEmbeddedClient(
        CBDDCSurrealEmbeddedOptions options,
        ILogger<CBDDCSurrealEmbeddedClient>? logger = null)
    {
        _options = options ?? throw new ArgumentNullException(nameof(options));
        _logger = logger ?? NullLogger<CBDDCSurrealEmbeddedClient>.Instance;
        // A blank endpoint is tolerated; a non-blank endpoint must use the embedded scheme.
        if (!string.IsNullOrWhiteSpace(_options.Endpoint) &&
            !_options.Endpoint.StartsWith("rocksdb://", StringComparison.OrdinalIgnoreCase))
            throw new ArgumentException(
                "Embedded Surreal endpoint must use the rocksdb:// scheme.",
                nameof(options));
        if (string.IsNullOrWhiteSpace(_options.Namespace))
            throw new ArgumentException("Namespace is required.", nameof(options));
        if (string.IsNullOrWhiteSpace(_options.Database))
            throw new ArgumentException("Database is required.", nameof(options));
        if (string.IsNullOrWhiteSpace(_options.NamingPolicy))
            throw new ArgumentException("Naming policy is required.", nameof(options));
        // Resolving also creates the parent directory so RocksDB can open the path.
        string dbPath = ResolveDatabasePath(_options.DatabasePath);
        var embeddedOptionsBuilder = SurrealDbEmbeddedOptions.Create();
        if (_options.StrictMode.HasValue)
            embeddedOptionsBuilder.WithStrictMode(_options.StrictMode.Value);
        Client = new SurrealDbRocksDbClient(dbPath, embeddedOptionsBuilder.Build(), _options.NamingPolicy);
    }
    /// <inheritdoc />
    public ISurrealDbClient Client { get; }
    /// <inheritdoc />
    public async Task InitializeAsync(CancellationToken cancellationToken = default)
    {
        ThrowIfDisposed();
        if (_initialized) return;
        await _initializeGate.WaitAsync(cancellationToken);
        try
        {
            // Re-check under the gate: another caller may have finished initialization
            // while this one was waiting.
            if (_initialized) return;
            await Client.Connect(cancellationToken);
            await Client.Use(_options.Namespace, _options.Database, cancellationToken);
            _initialized = true;
            _logger.LogInformation("Surreal embedded client initialized for namespace '{Namespace}' and database '{Database}'.",
                _options.Namespace, _options.Database);
        }
        finally
        {
            _initializeGate.Release();
        }
    }
    /// <inheritdoc />
    /// <exception cref="ArgumentException">Thrown when <paramref name="query" /> is null or whitespace.</exception>
    public async Task<SurrealDbResponse> RawQueryAsync(
        string query,
        IReadOnlyDictionary<string, object?>? parameters = null,
        CancellationToken cancellationToken = default)
    {
        ThrowIfDisposed();
        if (string.IsNullOrWhiteSpace(query))
            throw new ArgumentException("Query is required.", nameof(query));
        // Lazy initialization: callers never need to invoke InitializeAsync explicitly.
        await InitializeAsync(cancellationToken);
        return await Client.RawQuery(query, parameters ?? EmptyParameters, cancellationToken);
    }
    /// <inheritdoc />
    public async Task<bool> HealthAsync(CancellationToken cancellationToken = default)
    {
        ThrowIfDisposed();
        await InitializeAsync(cancellationToken);
        return await Client.Health(cancellationToken);
    }
    /// <inheritdoc />
    public void Dispose()
    {
        // Idempotent: a second Dispose (or a Dispose after DisposeAsync) is a no-op.
        if (_disposed) return;
        _disposed = true;
        Client.Dispose();
        _initializeGate.Dispose();
    }
    /// <inheritdoc />
    public async ValueTask DisposeAsync()
    {
        if (_disposed) return;
        _disposed = true;
        await Client.DisposeAsync();
        _initializeGate.Dispose();
    }
    // Guard used by every public member; relies on the .NET 7+ throw helper.
    private void ThrowIfDisposed()
    {
        ObjectDisposedException.ThrowIf(_disposed, this);
    }
    /// <summary>
    /// Normalizes the configured database path and ensures its parent directory exists.
    /// </summary>
    /// <param name="databasePath">Configured (possibly relative) path.</param>
    /// <returns>The absolute database path.</returns>
    /// <exception cref="ArgumentException">Thrown when the path is null or whitespace.</exception>
    private static string ResolveDatabasePath(string databasePath)
    {
        if (string.IsNullOrWhiteSpace(databasePath))
            throw new ArgumentException("DatabasePath is required.", nameof(databasePath));
        string fullPath = Path.GetFullPath(databasePath);
        string? directory = Path.GetDirectoryName(fullPath);
        if (!string.IsNullOrWhiteSpace(directory)) Directory.CreateDirectory(directory);
        return fullPath;
    }
}

View File

@@ -0,0 +1,75 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using ZB.MOM.WW.CBDDC.Core.Network;
using SurrealDb.Net;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Extension methods for configuring embedded Surreal persistence for CBDDC.
/// </summary>
public static class CBDDCSurrealEmbeddedExtensions
{
    /// <summary>
    /// Adds embedded Surreal infrastructure to CBDDC and registers a document store implementation.
    /// </summary>
    /// <typeparam name="TDocumentStore">The concrete document store implementation.</typeparam>
    /// <param name="services">The service collection to add services to.</param>
    /// <param name="optionsFactory">Factory used to build embedded Surreal options.</param>
    /// <returns>The service collection for chaining.</returns>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="services" /> or <paramref name="optionsFactory" /> is null.
    /// </exception>
    public static IServiceCollection AddCBDDCSurrealEmbedded<TDocumentStore>(
        this IServiceCollection services,
        Func<IServiceProvider, CBDDCSurrealEmbeddedOptions> optionsFactory)
        where TDocumentStore : class, IDocumentStore
    {
        RegisterCoreServices(services, optionsFactory);
        services.TryAddSingleton<IDocumentStore, TDocumentStore>();
        return services;
    }
    /// <summary>
    /// Adds embedded Surreal infrastructure to CBDDC without registering store implementations.
    /// </summary>
    /// <param name="services">The service collection to add services to.</param>
    /// <param name="optionsFactory">Factory used to build embedded Surreal options.</param>
    /// <returns>The service collection for chaining.</returns>
    /// <exception cref="ArgumentNullException">
    /// Thrown when <paramref name="services" /> or <paramref name="optionsFactory" /> is null.
    /// </exception>
    /// <remarks>
    /// Register store implementations separately when they become available.
    /// </remarks>
    public static IServiceCollection AddCBDDCSurrealEmbedded(
        this IServiceCollection services,
        Func<IServiceProvider, CBDDCSurrealEmbeddedOptions> optionsFactory)
    {
        RegisterCoreServices(services, optionsFactory);
        return services;
    }
    /// <summary>
    /// Registers the shared embedded-Surreal services used by both overloads.
    /// </summary>
    /// <param name="services">The service collection to add services to.</param>
    /// <param name="optionsFactory">Factory used to build embedded Surreal options.</param>
    private static void RegisterCoreServices(
        IServiceCollection services,
        Func<IServiceProvider, CBDDCSurrealEmbeddedOptions> optionsFactory)
    {
        // Throw helpers keep argument validation consistent with the rest of the provider,
        // which already uses ArgumentNullException.ThrowIfNull / ObjectDisposedException.ThrowIf.
        ArgumentNullException.ThrowIfNull(services);
        ArgumentNullException.ThrowIfNull(optionsFactory);
        // TryAdd* keeps caller-supplied registrations authoritative: nothing registered
        // here overrides a service the host registered first.
        services.TryAddSingleton(optionsFactory);
        services.TryAddSingleton<ICBDDCSurrealEmbeddedClient, CBDDCSurrealEmbeddedClient>();
        // Expose the raw Surreal client so stores can resolve ISurrealDbClient directly.
        services.TryAddSingleton<ISurrealDbClient>(sp => sp.GetRequiredService<ICBDDCSurrealEmbeddedClient>().Client);
        services.TryAddSingleton<ICBDDCSurrealSchemaInitializer, CBDDCSurrealSchemaInitializer>();
        services.TryAddSingleton<ICBDDCSurrealReadinessProbe, CBDDCSurrealReadinessProbe>();
        services.TryAddSingleton<ISurrealCdcCheckpointPersistence, SurrealCdcCheckpointPersistence>();
        services.TryAddSingleton<IConflictResolver, LastWriteWinsConflictResolver>();
        services.TryAddSingleton<IVectorClockService, VectorClockService>();
        services.TryAddSingleton<IPeerConfigurationStore, SurrealPeerConfigurationStore>();
        services.TryAddSingleton<IPeerOplogConfirmationStore, SurrealPeerOplogConfirmationStore>();
        services.TryAddSingleton<ISnapshotMetadataStore, SurrealSnapshotMetadataStore>();
        services.TryAddSingleton<IDocumentMetadataStore, SurrealDocumentMetadataStore>();
        services.TryAddSingleton<IOplogStore, SurrealOplogStore>();
        // SnapshotStore registration matches the other provider extension patterns.
        services.TryAddSingleton<ISnapshotService, SnapshotStore>();
    }
}

View File

@@ -0,0 +1,88 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Configuration for the embedded SurrealDB RocksDB provider.
/// </summary>
public sealed class CBDDCSurrealEmbeddedOptions
{
    /// <summary>
    /// Logical endpoint for this provider. For embedded RocksDB this should use the <c>rocksdb://</c> scheme.
    /// Defaults to <c>rocksdb://local</c>.
    /// </summary>
    public string Endpoint { get; set; } = "rocksdb://local";
    /// <summary>
    /// File path used by the embedded RocksDB engine. May be relative; the client
    /// resolves it to an absolute path and creates the parent directory.
    /// </summary>
    public string DatabasePath { get; set; } = "data/cbddc-surreal.db";
    /// <summary>
    /// Surreal namespace. Required (validated by the client).
    /// </summary>
    public string Namespace { get; set; } = "cbddc";
    /// <summary>
    /// Surreal database name inside the namespace. Required (validated by the client).
    /// </summary>
    public string Database { get; set; } = "main";
    /// <summary>
    /// Naming policy used by the Surreal .NET client serializer.
    /// </summary>
    public string NamingPolicy { get; set; } = "camelCase";
    /// <summary>
    /// Optional strict mode flag for embedded Surreal. When null, the engine default is used.
    /// </summary>
    public bool? StrictMode { get; set; }
    /// <summary>
    /// CDC-related options used by persistence stores.
    /// </summary>
    public CBDDCSurrealCdcOptions Cdc { get; set; } = new();
}
/// <summary>
/// CDC/checkpoint configuration for the embedded Surreal provider.
/// </summary>
public sealed class CBDDCSurrealCdcOptions
{
    /// <summary>
    /// Enables CDC-oriented checkpoint bookkeeping. When false, checkpoint reads return
    /// null and writes become no-ops.
    /// </summary>
    public bool Enabled { get; set; } = true;
    /// <summary>
    /// Checkpoint table name used for CDC progress tracking. Must be a valid Surreal
    /// identifier (letters, digits, underscores).
    /// </summary>
    public string CheckpointTable { get; set; } = "cbddc_cdc_checkpoint";
    /// <summary>
    /// Enables LIVE SELECT subscriptions as a low-latency wake-up signal for polling.
    /// </summary>
    public bool EnableLiveSelectAccelerator { get; set; } = true;
    /// <summary>
    /// Logical consumer identifier used by checkpoint records.
    /// </summary>
    public string ConsumerId { get; set; } = "default";
    /// <summary>
    /// Polling interval for CDC readers that use pull-based processing.
    /// </summary>
    public TimeSpan PollingInterval { get; set; } = TimeSpan.FromSeconds(1);
    /// <summary>
    /// Maximum number of changefeed entries fetched per poll cycle.
    /// </summary>
    public int BatchSize { get; set; } = 500;
    /// <summary>
    /// Delay before re-subscribing LIVE SELECT after failures or closure.
    /// </summary>
    public TimeSpan LiveSelectReconnectDelay { get; set; } = TimeSpan.FromSeconds(2);
    /// <summary>
    /// Retention window used when defining Surreal changefeed history. Must be positive;
    /// it is rendered into the CHANGEFEED clause of each table definition.
    /// </summary>
    public TimeSpan RetentionDuration { get; set; } = TimeSpan.FromDays(7);
}

View File

@@ -0,0 +1,45 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Health/readiness helper for the embedded Surreal provider.
/// </summary>
/// <remarks>
/// Ready means: schema initialization completed and the embedded client answers
/// its health probe. Any failure is logged and reported as "not ready" rather
/// than propagated.
/// </remarks>
public sealed class CBDDCSurrealReadinessProbe : ICBDDCSurrealReadinessProbe
{
    private readonly ICBDDCSurrealEmbeddedClient _client;
    private readonly ICBDDCSurrealSchemaInitializer _schema;
    private readonly ILogger<CBDDCSurrealReadinessProbe> _log;

    /// <summary>
    /// Initializes a new instance of the <see cref="CBDDCSurrealReadinessProbe" /> class.
    /// </summary>
    /// <param name="surrealClient">Surreal client abstraction.</param>
    /// <param name="schemaInitializer">Schema initializer.</param>
    /// <param name="logger">Optional logger.</param>
    public CBDDCSurrealReadinessProbe(
        ICBDDCSurrealEmbeddedClient surrealClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<CBDDCSurrealReadinessProbe>? logger = null)
    {
        _client = surrealClient ?? throw new ArgumentNullException(nameof(surrealClient));
        _schema = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _log = logger ?? NullLogger<CBDDCSurrealReadinessProbe>.Instance;
    }

    /// <inheritdoc />
    public async Task<bool> IsReadyAsync(CancellationToken cancellationToken = default)
    {
        bool ready;
        try
        {
            // Schema must exist before the health check is meaningful.
            await _schema.EnsureInitializedAsync(cancellationToken);
            ready = await _client.HealthAsync(cancellationToken);
        }
        catch (Exception ex)
        {
            // A probe never throws; failures simply report "not ready".
            _log.LogWarning(ex, "Surreal embedded readiness probe failed.");
            ready = false;
        }
        return ready;
    }
}

View File

@@ -0,0 +1,131 @@
using System.Text.RegularExpressions;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Initializes Surreal schema objects required by CBDDC persistence stores.
/// </summary>
/// <remarks>
/// Idempotent: all definitions use <c>OVERWRITE</c>, and the whole script runs at most
/// once per instance (guarded by a semaphore-based double check).
/// </remarks>
public sealed class CBDDCSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer
{
    // Whitelist for configurable identifiers (e.g. the checkpoint table name); prevents
    // SurrealQL injection since identifiers are interpolated into the schema script.
    private static readonly Regex IdentifierRegex = new("^[A-Za-z_][A-Za-z0-9_]*$", RegexOptions.Compiled);
    // Serializes first-time schema creation across concurrent callers.
    private readonly SemaphoreSlim _initializeGate = new(1, 1);
    private readonly ICBDDCSurrealEmbeddedClient _surrealClient;
    private readonly ILogger<CBDDCSurrealSchemaInitializer> _logger;
    private readonly string _checkpointTable;
    // Pre-rendered CHANGEFEED duration literal (e.g. "7d"), validated at construction.
    private readonly string _changefeedRetentionLiteral;
    private bool _initialized;
    /// <summary>
    /// Initializes a new instance of the <see cref="CBDDCSurrealSchemaInitializer" /> class.
    /// </summary>
    /// <param name="surrealClient">Surreal client abstraction.</param>
    /// <param name="options">Embedded options.</param>
    /// <param name="logger">Optional logger.</param>
    /// <exception cref="ArgumentNullException">Thrown when a required dependency is null.</exception>
    /// <exception cref="ArgumentException">Thrown when CDC options are missing or invalid.</exception>
    public CBDDCSurrealSchemaInitializer(
        ICBDDCSurrealEmbeddedClient surrealClient,
        CBDDCSurrealEmbeddedOptions options,
        ILogger<CBDDCSurrealSchemaInitializer>? logger = null)
    {
        _surrealClient = surrealClient ?? throw new ArgumentNullException(nameof(surrealClient));
        _logger = logger ?? NullLogger<CBDDCSurrealSchemaInitializer>.Instance;
        if (options == null) throw new ArgumentNullException(nameof(options));
        if (options.Cdc == null) throw new ArgumentException("CDC options are required.", nameof(options));
        // Validate config-sourced values eagerly so a bad table name or retention fails fast.
        _checkpointTable = EnsureValidIdentifier(options.Cdc.CheckpointTable, nameof(options.Cdc.CheckpointTable));
        _changefeedRetentionLiteral = ToSurrealDurationLiteral(
            options.Cdc.RetentionDuration,
            nameof(options.Cdc.RetentionDuration));
    }
    /// <inheritdoc />
    public async Task EnsureInitializedAsync(CancellationToken cancellationToken = default)
    {
        if (_initialized) return;
        await _initializeGate.WaitAsync(cancellationToken);
        try
        {
            // Re-check under the gate: another caller may have initialized while waiting.
            if (_initialized) return;
            string schemaSql = BuildSchemaSql();
            await _surrealClient.RawQueryAsync(schemaSql, cancellationToken: cancellationToken);
            _initialized = true;
            _logger.LogInformation(
                "Surreal schema initialized with checkpoint table '{CheckpointTable}'.",
                _checkpointTable);
        }
        finally
        {
            _initializeGate.Release();
        }
    }
    // Builds the full idempotent schema script. Interpolated names are either compile-time
    // constants (CBDDCSurrealSchemaNames) or values validated by EnsureValidIdentifier /
    // ToSurrealDurationLiteral, so the script contains no unvalidated input.
    // Note the checkpoint table is defined WITHOUT a CHANGEFEED clause.
    private string BuildSchemaSql()
    {
        return $"""
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.OplogEntriesTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogHashIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS hash UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS timestampPhysicalTime, timestampLogicalCounter, timestampNodeId;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.OplogCollectionIndex} ON TABLE {CBDDCSurrealSchemaNames.OplogEntriesTable} COLUMNS collection;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.SnapshotNodeIdIndex} ON TABLE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} COLUMNS nodeId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.SnapshotHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.SnapshotMetadataTable} COLUMNS timestampPhysicalTime, timestampLogicalCounter;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerNodeIdIndex} ON TABLE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} COLUMNS nodeId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerEnabledIndex} ON TABLE {CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable} COLUMNS isEnabled;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataCollectionKeyIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS collection, key UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS hlcPhysicalTime, hlcLogicalCounter, hlcNodeId;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.DocumentMetadataCollectionIndex} ON TABLE {CBDDCSurrealSchemaNames.DocumentMetadataTable} COLUMNS collection;
            DEFINE TABLE OVERWRITE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} SCHEMAFULL CHANGEFEED {_changefeedRetentionLiteral};
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationPairIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS peerNodeId, sourceNodeId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationActiveIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS isActive;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.PeerConfirmationSourceHlcIndex} ON TABLE {CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable} COLUMNS sourceNodeId, confirmedWall, confirmedLogic;
            DEFINE TABLE OVERWRITE {_checkpointTable} SCHEMAFULL;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.CdcCheckpointConsumerIndex} ON TABLE {_checkpointTable} COLUMNS consumerId UNIQUE;
            DEFINE INDEX OVERWRITE {CBDDCSurrealSchemaNames.CdcCheckpointVersionstampIndex} ON TABLE {_checkpointTable} COLUMNS versionstampCursor;
            """;
    }
    /// <summary>
    /// Validates a configuration-sourced Surreal identifier against a strict whitelist.
    /// </summary>
    /// <param name="identifier">The candidate identifier.</param>
    /// <param name="argumentName">Argument name used in exceptions.</param>
    /// <returns>The validated identifier.</returns>
    /// <exception cref="ArgumentException">Thrown when the identifier is missing or invalid.</exception>
    private static string EnsureValidIdentifier(string? identifier, string argumentName)
    {
        if (string.IsNullOrWhiteSpace(identifier))
            throw new ArgumentException("Surreal identifier is required.", argumentName);
        if (!IdentifierRegex.IsMatch(identifier))
            throw new ArgumentException(
                $"Invalid Surreal identifier '{identifier}'. Use letters, numbers, and underscores only.",
                argumentName);
        return identifier;
    }
    /// <summary>
    /// Renders a <see cref="TimeSpan" /> as a Surreal duration literal, choosing the
    /// largest unit (d/h/m/s) that represents the value exactly; anything else rounds
    /// up to whole milliseconds.
    /// </summary>
    /// <param name="duration">The duration to render. Must be positive.</param>
    /// <param name="argumentName">Argument name used in exceptions.</param>
    /// <returns>A literal such as <c>7d</c>, <c>36h</c>, or <c>500ms</c>.</returns>
    /// <exception cref="ArgumentOutOfRangeException">Thrown when the duration is not positive.</exception>
    private static string ToSurrealDurationLiteral(TimeSpan duration, string argumentName)
    {
        if (duration <= TimeSpan.Zero)
            throw new ArgumentOutOfRangeException(argumentName, "Surreal changefeed retention duration must be positive.");
        if (duration.TotalDays >= 1 && duration.TotalDays == Math.Truncate(duration.TotalDays))
            return $"{(long)duration.TotalDays}d";
        if (duration.TotalHours >= 1 && duration.TotalHours == Math.Truncate(duration.TotalHours))
            return $"{(long)duration.TotalHours}h";
        if (duration.TotalMinutes >= 1 && duration.TotalMinutes == Math.Truncate(duration.TotalMinutes))
            return $"{(long)duration.TotalMinutes}m";
        if (duration.TotalSeconds >= 1 && duration.TotalSeconds == Math.Truncate(duration.TotalSeconds))
            return $"{(long)duration.TotalSeconds}s";
        // Sub-second (or non-whole) durations: round UP so retention never shrinks.
        long totalMs = checked((long)Math.Ceiling(duration.TotalMilliseconds));
        return $"{totalMs}ms";
    }
}

View File

@@ -0,0 +1,29 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal table and index names shared by the embedded CBDDC provider.
/// </summary>
public static class CBDDCSurrealSchemaNames
{
    /// <summary>Table holding oplog entries.</summary>
    public const string OplogEntriesTable = "cbddc_oplog_entries";
    /// <summary>Table holding snapshot metadata (oplog pruning checkpoints).</summary>
    public const string SnapshotMetadataTable = "cbddc_snapshot_metadatas";
    /// <summary>Table holding remote peer configurations.</summary>
    public const string RemotePeerConfigurationsTable = "cbddc_remote_peer_configurations";
    /// <summary>Table holding per-document metadata.</summary>
    public const string DocumentMetadataTable = "cbddc_document_metadatas";
    /// <summary>Table holding per-peer oplog confirmations.</summary>
    public const string PeerOplogConfirmationsTable = "cbddc_peer_oplog_confirmations";
    /// <summary>Unique index on oplog entry hash.</summary>
    public const string OplogHashIndex = "idx_cbddc_oplog_hash";
    /// <summary>Index over the oplog HLC columns (physical time, logical counter, node id).</summary>
    public const string OplogHlcIndex = "idx_cbddc_oplog_hlc";
    /// <summary>Index over the oplog collection column.</summary>
    public const string OplogCollectionIndex = "idx_cbddc_oplog_collection";
    /// <summary>Unique index on snapshot node id.</summary>
    public const string SnapshotNodeIdIndex = "idx_cbddc_snapshot_node";
    /// <summary>Index over the snapshot HLC columns.</summary>
    public const string SnapshotHlcIndex = "idx_cbddc_snapshot_hlc";
    /// <summary>Unique index on peer node id.</summary>
    public const string PeerNodeIdIndex = "idx_cbddc_peer_node";
    /// <summary>Index over the peer isEnabled column.</summary>
    public const string PeerEnabledIndex = "idx_cbddc_peer_enabled";
    /// <summary>Unique index over document metadata (collection, key).</summary>
    public const string DocumentMetadataCollectionKeyIndex = "idx_cbddc_docmeta_collection_key";
    /// <summary>Index over the document metadata HLC columns.</summary>
    public const string DocumentMetadataHlcIndex = "idx_cbddc_docmeta_hlc";
    /// <summary>Index over the document metadata collection column.</summary>
    public const string DocumentMetadataCollectionIndex = "idx_cbddc_docmeta_collection";
    /// <summary>Unique index over confirmation (peerNodeId, sourceNodeId) pairs.</summary>
    public const string PeerConfirmationPairIndex = "idx_cbddc_peer_confirm_pair";
    /// <summary>Index over the confirmation isActive column.</summary>
    public const string PeerConfirmationActiveIndex = "idx_cbddc_peer_confirm_active";
    /// <summary>Index over confirmation (sourceNodeId, confirmedWall, confirmedLogic).</summary>
    public const string PeerConfirmationSourceHlcIndex = "idx_cbddc_peer_confirm_source_hlc";
    /// <summary>Unique index on the CDC checkpoint consumerId column.</summary>
    public const string CdcCheckpointConsumerIndex = "idx_cbddc_cdc_checkpoint_consumer";
    /// <summary>Index over the CDC checkpoint versionstampCursor column.</summary>
    public const string CdcCheckpointVersionstampIndex = "idx_cbddc_cdc_checkpoint_versionstamp";
}

View File

@@ -0,0 +1,32 @@
using SurrealDb.Net;
using SurrealDb.Net.Models.Response;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Abstraction over the embedded Surreal client used by CBDDC persistence stores.
/// </summary>
public interface ICBDDCSurrealEmbeddedClient : IAsyncDisposable, IDisposable
{
    /// <summary>
    /// Gets the underlying Surreal client.
    /// </summary>
    ISurrealDbClient Client { get; }
    /// <summary>
    /// Connects and selects namespace/database exactly once. Safe to call repeatedly
    /// and from concurrent callers; later calls are no-ops.
    /// </summary>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task InitializeAsync(CancellationToken cancellationToken = default);
    /// <summary>
    /// Executes a raw SurrealQL statement, initializing the client first if needed.
    /// </summary>
    /// <param name="query">The SurrealQL statement to run. Required.</param>
    /// <param name="parameters">Optional named query parameters.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <returns>The raw Surreal response.</returns>
    Task<SurrealDbResponse> RawQueryAsync(string query,
        IReadOnlyDictionary<string, object?>? parameters = null,
        CancellationToken cancellationToken = default);
    /// <summary>
    /// Checks whether the embedded client responds to health probes.
    /// </summary>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <returns><see langword="true" /> when healthy; otherwise <see langword="false" />.</returns>
    Task<bool> HealthAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,12 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Simple readiness probe for embedded Surreal infrastructure.
/// </summary>
public interface ICBDDCSurrealReadinessProbe
{
    /// <summary>
    /// Returns true when client initialization, schema initialization, and health checks pass.
    /// Implementations are expected not to throw; failures report as not ready.
    /// </summary>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task<bool> IsReadyAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,12 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Ensures required Surreal schema objects exist.
/// </summary>
public interface ICBDDCSurrealSchemaInitializer
{
    /// <summary>
    /// Creates required tables/indexes/checkpoint schema for CBDDC stores.
    /// Idempotent: safe to call repeatedly and from concurrent callers.
    /// </summary>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task EnsureInitializedAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,76 @@
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Represents durable CDC progress for a logical consumer.
/// </summary>
public sealed class SurrealCdcCheckpoint
{
    /// <summary>
    /// Gets or sets the logical consumer identifier.
    /// </summary>
    public string ConsumerId { get; set; } = "";
    /// <summary>
    /// Gets or sets the last processed hybrid logical timestamp.
    /// </summary>
    public HlcTimestamp Timestamp { get; set; }
    /// <summary>
    /// Gets or sets the last processed hash in the local chain.
    /// </summary>
    public string LastHash { get; set; } = "";
    /// <summary>
    /// Gets or sets the UTC instant when the checkpoint was updated.
    /// </summary>
    public DateTimeOffset UpdatedUtc { get; set; }
    /// <summary>
    /// Gets or sets the optional changefeed versionstamp cursor associated with this checkpoint.
    /// Null when no changefeed position has been recorded yet.
    /// </summary>
    public long? VersionstampCursor { get; set; }
}
/// <summary>
/// Defines persistence operations for local CDC checkpoint progress.
/// </summary>
public interface ISurrealCdcCheckpointPersistence
{
    /// <summary>
    /// Reads the checkpoint for a consumer.
    /// </summary>
    /// <param name="consumerId">Optional consumer id. Defaults to configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <returns>The checkpoint if found; otherwise <see langword="null" />.</returns>
    Task<SurrealCdcCheckpoint?> GetCheckpointAsync(
        string? consumerId = null,
        CancellationToken cancellationToken = default);
    /// <summary>
    /// Upserts checkpoint progress for a consumer.
    /// </summary>
    /// <param name="timestamp">The last processed timestamp.</param>
    /// <param name="lastHash">The last processed hash.</param>
    /// <param name="consumerId">Optional consumer id. Defaults to configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    /// <param name="versionstampCursor">Optional changefeed versionstamp cursor.</param>
    /// <remarks>
    /// NOTE(review): <paramref name="cancellationToken" /> is not the last parameter here,
    /// which deviates from the usual .NET convention of placing the token last; reordering
    /// would break positional callers, so it is documented rather than changed.
    /// </remarks>
    Task UpsertCheckpointAsync(
        HlcTimestamp timestamp,
        string lastHash,
        string? consumerId = null,
        CancellationToken cancellationToken = default,
        long? versionstampCursor = null);
    /// <summary>
    /// Advances checkpoint progress from an oplog entry.
    /// </summary>
    /// <param name="entry">The oplog entry that was processed.</param>
    /// <param name="consumerId">Optional consumer id. Defaults to configured CDC consumer id.</param>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task AdvanceCheckpointAsync(
        OplogEntry entry,
        string? consumerId = null,
        CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,27 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Defines lifecycle controls for the durable Surreal CDC polling worker.
/// </summary>
public interface ISurrealCdcWorkerLifecycle
{
    /// <summary>
    /// Gets a value indicating whether the CDC worker is currently running.
    /// </summary>
    bool IsCdcWorkerRunning { get; }
    /// <summary>
    /// Starts the CDC worker.
    /// </summary>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task StartCdcWorkerAsync(CancellationToken cancellationToken = default);
    /// <summary>
    /// Executes one CDC polling pass across all watched collections.
    /// </summary>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task PollCdcOnceAsync(CancellationToken cancellationToken = default);
    /// <summary>
    /// Stops the CDC worker.
    /// </summary>
    /// <param name="cancellationToken">A cancellation token.</param>
    Task StopCdcWorkerAsync(CancellationToken cancellationToken = default);
}

View File

@@ -0,0 +1,191 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json.Serialization;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal-backed persistence for CDC checkpoint progress.
/// </summary>
/// <remarks>
/// Records are stored under a deterministic id derived from the consumer id (SHA-256),
/// so repeated upserts for the same consumer target a stable record. When CDC is disabled
/// via options, reads return null and writes are no-ops.
/// </remarks>
public sealed class SurrealCdcCheckpointPersistence : ISurrealCdcCheckpointPersistence
{
    private readonly bool _enabled;
    private readonly string _checkpointTable;
    private readonly string _defaultConsumerId;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;
    /// <summary>
    /// Initializes a new instance of the <see cref="SurrealCdcCheckpointPersistence" /> class.
    /// </summary>
    /// <param name="surrealEmbeddedClient">The embedded Surreal client abstraction.</param>
    /// <param name="schemaInitializer">The Surreal schema initializer.</param>
    /// <param name="options">Embedded Surreal options.</param>
    /// <exception cref="ArgumentNullException">Thrown when a required dependency is null.</exception>
    /// <exception cref="ArgumentException">Thrown when required CDC option values are missing.</exception>
    public SurrealCdcCheckpointPersistence(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        CBDDCSurrealEmbeddedOptions options)
    {
        // Only the inner ISurrealDbClient is retained; the wrapper is just validated here.
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        if (options == null) throw new ArgumentNullException(nameof(options));
        _enabled = options.Cdc.Enabled;
        _checkpointTable = options.Cdc.CheckpointTable;
        _defaultConsumerId = options.Cdc.ConsumerId;
        if (string.IsNullOrWhiteSpace(_checkpointTable))
            throw new ArgumentException("CDC checkpoint table is required.", nameof(options));
        if (string.IsNullOrWhiteSpace(_defaultConsumerId))
            throw new ArgumentException("CDC consumer id is required.", nameof(options));
    }
    /// <inheritdoc />
    public async Task<SurrealCdcCheckpoint?> GetCheckpointAsync(
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        // CDC disabled => behave as if no checkpoint exists.
        if (!_enabled) return null;
        string resolvedConsumerId = ResolveConsumerId(consumerId);
        var existing = await FindByConsumerIdAsync(resolvedConsumerId, cancellationToken);
        return existing?.ToDomain();
    }
    /// <inheritdoc />
    public async Task UpsertCheckpointAsync(
        HlcTimestamp timestamp,
        string lastHash,
        string? consumerId = null,
        CancellationToken cancellationToken = default,
        long? versionstampCursor = null)
    {
        if (!_enabled) return;
        string resolvedConsumerId = ResolveConsumerId(consumerId);
        await EnsureReadyAsync(cancellationToken);
        // When the caller supplies no cursor, carry forward any previously stored cursor
        // so an upsert without cursor information does not erase changefeed progress.
        long? effectiveVersionstampCursor = versionstampCursor;
        if (!effectiveVersionstampCursor.HasValue)
        {
            var existing = await FindByConsumerIdAsync(
                resolvedConsumerId,
                cancellationToken,
                ensureInitialized: false);
            effectiveVersionstampCursor = existing?.VersionstampCursor;
        }
        // Deterministic id: upserts for the same consumer always target the same record.
        RecordId recordId = RecordId.From(_checkpointTable, ComputeConsumerKey(resolvedConsumerId));
        var record = new SurrealCdcCheckpointRecord
        {
            ConsumerId = resolvedConsumerId,
            TimestampPhysicalTime = timestamp.PhysicalTime,
            TimestampLogicalCounter = timestamp.LogicalCounter,
            TimestampNodeId = timestamp.NodeId,
            LastHash = lastHash ?? string.Empty,
            UpdatedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(),
            VersionstampCursor = effectiveVersionstampCursor
        };
        await _surrealClient.Upsert<SurrealCdcCheckpointRecord, SurrealCdcCheckpointRecord>(
            recordId,
            record,
            cancellationToken);
    }
    /// <inheritdoc />
    public Task AdvanceCheckpointAsync(
        OplogEntry entry,
        string? consumerId = null,
        CancellationToken cancellationToken = default)
    {
        ArgumentNullException.ThrowIfNull(entry);
        // No cursor argument here, so UpsertCheckpointAsync preserves any stored cursor.
        return UpsertCheckpointAsync(entry.Timestamp, entry.Hash, consumerId, cancellationToken);
    }
    // Falls back to the configured default consumer id when none is supplied.
    private string ResolveConsumerId(string? consumerId)
    {
        string resolved = string.IsNullOrWhiteSpace(consumerId) ? _defaultConsumerId : consumerId;
        if (string.IsNullOrWhiteSpace(resolved))
            throw new ArgumentException("CDC consumer id is required.", nameof(consumerId));
        return resolved;
    }
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }
    // Looks up the checkpoint record, preferring the deterministic record id; falls back
    // to a full table scan when that misses — presumably to find records written under
    // non-deterministic ids (e.g. older data). TODO confirm the fallback is still needed.
    private async Task<SurrealCdcCheckpointRecord?> FindByConsumerIdAsync(
        string consumerId,
        CancellationToken cancellationToken,
        bool ensureInitialized = true)
    {
        if (ensureInitialized) await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = RecordId.From(_checkpointTable, ComputeConsumerKey(consumerId));
        var deterministic = await _surrealClient.Select<SurrealCdcCheckpointRecord>(deterministicId, cancellationToken);
        if (deterministic != null &&
            string.Equals(deterministic.ConsumerId, consumerId, StringComparison.Ordinal))
            return deterministic;
        var all = await _surrealClient.Select<SurrealCdcCheckpointRecord>(_checkpointTable, cancellationToken);
        return all?.FirstOrDefault(c =>
            string.Equals(c.ConsumerId, consumerId, StringComparison.Ordinal));
    }
    // SHA-256 of the consumer id, lower-case hex: a stable, id-safe record key for any
    // consumer id value.
    private static string ComputeConsumerKey(string consumerId)
    {
        byte[] input = Encoding.UTF8.GetBytes(consumerId);
        return Convert.ToHexString(SHA256.HashData(input)).ToLowerInvariant();
    }
}
/// <summary>
/// Surreal storage shape for a CDC checkpoint; HLC components are flattened into
/// separate columns so they can be indexed.
/// </summary>
internal sealed class SurrealCdcCheckpointRecord : Record
{
    // Logical consumer identifier (also hashed into the deterministic record id).
    [JsonPropertyName("consumerId")]
    public string ConsumerId { get; set; } = "";
    // HLC physical-time component of the last processed timestamp.
    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }
    // HLC logical-counter component of the last processed timestamp.
    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }
    // HLC node-id component of the last processed timestamp.
    [JsonPropertyName("timestampNodeId")]
    public string TimestampNodeId { get; set; } = "";
    // Last processed hash in the local chain.
    [JsonPropertyName("lastHash")]
    public string LastHash { get; set; } = "";
    // Update instant as Unix epoch milliseconds (UTC).
    [JsonPropertyName("updatedUtcMs")]
    public long UpdatedUtcMs { get; set; }
    // Optional changefeed versionstamp cursor; null when none recorded.
    [JsonPropertyName("versionstampCursor")]
    public long? VersionstampCursor { get; set; }
}
/// <summary>
/// Mapping helpers between the persisted checkpoint record and the domain
/// checkpoint model.
/// </summary>
internal static class SurrealCdcCheckpointRecordMappers
{
    /// <summary>
    /// Projects a persisted checkpoint row onto its domain shape, rehydrating
    /// the HLC timestamp from its three stored components.
    /// </summary>
    public static SurrealCdcCheckpoint ToDomain(this SurrealCdcCheckpointRecord record) => new()
    {
        ConsumerId = record.ConsumerId,
        Timestamp = new HlcTimestamp(
            record.TimestampPhysicalTime,
            record.TimestampLogicalCounter,
            record.TimestampNodeId),
        LastHash = record.LastHash,
        UpdatedUtc = DateTimeOffset.FromUnixTimeMilliseconds(record.UpdatedUtcMs),
        VersionstampCursor = record.VersionstampCursor
    };
}

View File

@@ -0,0 +1,32 @@
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Configuration for the Surreal SHOW CHANGES polling worker.
/// </summary>
public sealed class SurrealCdcPollingOptions
{
    /// <summary>
    /// Gets or sets a value indicating whether polling is enabled. Defaults to true.
    /// </summary>
    public bool Enabled { get; set; } = true;

    /// <summary>
    /// Gets or sets the polling interval between SHOW CHANGES queries. Defaults to 250 ms.
    /// </summary>
    public TimeSpan PollInterval { get; set; } = TimeSpan.FromMilliseconds(250);

    /// <summary>
    /// Gets or sets the maximum number of changefeed rows fetched per poll. Defaults to 100.
    /// </summary>
    public int BatchSize { get; set; } = 100;

    /// <summary>
    /// Gets or sets a value indicating whether LIVE SELECT wake-ups are enabled,
    /// allowing a poll to run as soon as a change is observed instead of waiting
    /// for the next interval. Defaults to true.
    /// </summary>
    public bool EnableLiveSelectAccelerator { get; set; } = true;

    /// <summary>
    /// Gets or sets the delay used before re-subscribing a failed LIVE SELECT
    /// stream. Defaults to 2 seconds.
    /// </summary>
    public TimeSpan LiveSelectReconnectDelay { get; set; } = TimeSpan.FromSeconds(2);
}

View File

@@ -0,0 +1,164 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// SurrealDB-backed <see cref="DocumentMetadataStore"/>. Rows are addressed by a
/// deterministic record id derived from (collection, key); queries the record id
/// cannot answer fall back to reading the whole metadata table and filtering /
/// sorting in memory.
/// </summary>
public class SurrealDocumentMetadataStore : DocumentMetadataStore
{
    // Retained for diagnostics; not used by the current code paths.
    private readonly ILogger<SurrealDocumentMetadataStore> _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    /// <summary>
    /// Wires the store to the embedded Surreal client; schema creation is
    /// deferred to <see cref="EnsureReadyAsync"/> ahead of each operation.
    /// </summary>
    public SurrealDocumentMetadataStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<SurrealDocumentMetadataStore>? logger = null)
    {
        // Null-checked and discarded: only the inner client is retained.
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealDocumentMetadataStore>.Instance;
    }

    /// <summary>Returns the metadata for one (collection, key) pair, or null when absent.</summary>
    public override async Task<DocumentMetadata?> GetMetadataAsync(string collection, string key,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByCollectionKeyAsync(collection, key, cancellationToken);
        return existing?.ToDomain();
    }

    /// <summary>Returns all metadata rows for a collection (full read, in-memory filter).</summary>
    public override async Task<IEnumerable<DocumentMetadata>> GetMetadataByCollectionAsync(string collection,
        CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all
            .Where(m => string.Equals(m.Collection, collection, StringComparison.Ordinal))
            .Select(m => m.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Creates or overwrites the row for the metadata's (collection, key),
    /// reusing the stored record id when one already exists.
    /// </summary>
    public override async Task UpsertMetadataAsync(DocumentMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        var existing = await FindByCollectionKeyAsync(metadata.Collection, metadata.Key, cancellationToken);
        RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.DocumentMetadata(metadata.Collection, metadata.Key);
        await _surrealClient.Upsert<SurrealDocumentMetadataRecord, SurrealDocumentMetadataRecord>(
            recordId,
            metadata.ToSurrealRecord(),
            cancellationToken);
    }

    /// <summary>Upserts a batch sequentially; no transaction or parallelism.</summary>
    public override async Task UpsertMetadataBatchAsync(IEnumerable<DocumentMetadata> metadatas,
        CancellationToken cancellationToken = default)
    {
        foreach (var metadata in metadatas)
            await UpsertMetadataAsync(metadata, cancellationToken);
    }

    /// <summary>
    /// Records a tombstone for a document. The trailing <c>true</c> presumably
    /// marks the metadata as deleted — confirm against the DocumentMetadata ctor.
    /// </summary>
    public override async Task MarkDeletedAsync(string collection, string key, HlcTimestamp timestamp,
        CancellationToken cancellationToken = default)
    {
        var metadata = new DocumentMetadata(collection, key, timestamp, true);
        await UpsertMetadataAsync(metadata, cancellationToken);
    }

    /// <summary>
    /// Returns metadata strictly newer than <paramref name="since"/> in HLC order
    /// (physical time, then logical counter; node id is not compared), optionally
    /// restricted to the given collections.
    /// </summary>
    public override async Task<IEnumerable<DocumentMetadata>> GetMetadataAfterAsync(HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        HashSet<string>? collectionSet = collections != null ? new HashSet<string>(collections) : null;
        return all
            .Where(m =>
                (m.HlcPhysicalTime > since.PhysicalTime ||
                 (m.HlcPhysicalTime == since.PhysicalTime && m.HlcLogicalCounter > since.LogicalCounter)) &&
                (collectionSet == null || collectionSet.Contains(m.Collection)))
            .OrderBy(m => m.HlcPhysicalTime)
            .ThenBy(m => m.HlcLogicalCounter)
            .Select(m => m.ToDomain())
            .ToList();
    }

    /// <summary>Deletes every row in the metadata table.</summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.DocumentMetadataTable, cancellationToken);
    }

    /// <summary>Dumps every stored row as domain metadata.</summary>
    public override async Task<IEnumerable<DocumentMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all.Select(m => m.ToDomain()).ToList();
    }

    /// <summary>Imports with last-write-wins: each item overwrites any stored row.</summary>
    public override async Task ImportAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items) await UpsertMetadataAsync(item, cancellationToken);
    }

    /// <summary>
    /// Merges items, keeping whichever side carries the newer HLC timestamp.
    /// Ties and older incoming items leave the stored row untouched.
    /// </summary>
    public override async Task MergeAsync(IEnumerable<DocumentMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByCollectionKeyAsync(item.Collection, item.Key, cancellationToken);
            if (existing == null)
            {
                await UpsertMetadataAsync(item, cancellationToken);
                continue;
            }
            var existingTimestamp =
                new HlcTimestamp(existing.HlcPhysicalTime, existing.HlcLogicalCounter, existing.HlcNodeId);
            // Ordering semantics come from HlcTimestamp.CompareTo; skip unless
            // the incoming item is strictly newer.
            if (item.UpdatedAt.CompareTo(existingTimestamp) <= 0) continue;
            RecordId recordId = existing.Id ?? SurrealStoreRecordIds.DocumentMetadata(item.Collection, item.Key);
            await EnsureReadyAsync(cancellationToken);
            await _surrealClient.Upsert<SurrealDocumentMetadataRecord, SurrealDocumentMetadataRecord>(
                recordId,
                item.ToSurrealRecord(),
                cancellationToken);
        }
    }

    // Awaits schema/table initialization before touching the database.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    // Reads the entire metadata table; callers filter/sort in memory.
    private async Task<List<SurrealDocumentMetadataRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient.Select<SurrealDocumentMetadataRecord>(
            CBDDCSurrealSchemaNames.DocumentMetadataTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    // Deterministic-id lookup first; on a miss, or when the id points at a row
    // for a different (collection, key), falls back to scanning the table.
    private async Task<SurrealDocumentMetadataRecord?> FindByCollectionKeyAsync(string collection, string key,
        CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = SurrealStoreRecordIds.DocumentMetadata(collection, key);
        var deterministic = await _surrealClient.Select<SurrealDocumentMetadataRecord>(deterministicId, cancellationToken);
        if (deterministic != null &&
            string.Equals(deterministic.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(deterministic.Key, key, StringComparison.Ordinal))
            return deterministic;
        var all = await SelectAllAsync(cancellationToken);
        return all.FirstOrDefault(m =>
            string.Equals(m.Collection, collection, StringComparison.Ordinal) &&
            string.Equals(m.Key, key, StringComparison.Ordinal));
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,144 @@
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Represents a single change notification emitted by a watchable collection.
/// </summary>
/// <typeparam name="TEntity">The entity type being observed.</typeparam>
/// <param name="OperationType">Whether the change is a put or a delete.</param>
/// <param name="DocumentId">The document key; optional for puts, required for deletes.</param>
/// <param name="Entity">The changed entity for puts; null for deletes.</param>
public readonly record struct SurrealCollectionChange<TEntity>(
    OperationType OperationType,
    string? DocumentId,
    TEntity? Entity)
    where TEntity : class;
/// <summary>
/// Abstraction for a collection that can publish change notifications to
/// document-store watchers (mirrors the <see cref="IObservable{T}"/> pattern).
/// </summary>
/// <typeparam name="TEntity">The entity type being observed.</typeparam>
public interface ISurrealWatchableCollection<TEntity> where TEntity : class
{
    /// <summary>
    /// Subscribes to collection change notifications.
    /// </summary>
    /// <param name="observer">The observer receiving collection changes.</param>
    /// <returns>A disposable subscription; disposing it detaches the observer.</returns>
    IDisposable Subscribe(IObserver<SurrealCollectionChange<TEntity>> observer);
}
/// <summary>
/// In-memory watchable collection feed used to publish local change events.
/// Thread-safe: subscription bookkeeping is guarded by a private lock, while
/// observer callbacks always run outside it.
/// </summary>
/// <typeparam name="TEntity">The entity type being observed.</typeparam>
public sealed class SurrealCollectionChangeFeed<TEntity> : ISurrealWatchableCollection<TEntity>, IDisposable
    where TEntity : class
{
    private readonly object _gate = new();
    private readonly List<IObserver<SurrealCollectionChange<TEntity>>> _subscribers = new();
    private bool _disposed;

    /// <inheritdoc />
    public IDisposable Subscribe(IObserver<SurrealCollectionChange<TEntity>> observer)
    {
        ArgumentNullException.ThrowIfNull(observer);

        lock (_gate)
        {
            // New subscriptions are rejected once the feed has completed.
            ObjectDisposedException.ThrowIf(_disposed, this);
            _subscribers.Add(observer);
        }

        return new Subscription(this, observer);
    }

    /// <summary>
    /// Publishes a put notification for an entity.
    /// </summary>
    /// <param name="entity">The changed entity.</param>
    /// <param name="documentId">Optional explicit document identifier.</param>
    public void PublishPut(TEntity entity, string? documentId = null)
    {
        ArgumentNullException.ThrowIfNull(entity);
        Publish(new SurrealCollectionChange<TEntity>(OperationType.Put, documentId, entity));
    }

    /// <summary>
    /// Publishes a delete notification for an entity key.
    /// </summary>
    /// <param name="documentId">The document identifier that was removed.</param>
    public void PublishDelete(string documentId)
    {
        if (string.IsNullOrWhiteSpace(documentId))
            throw new ArgumentException("Document id is required.", nameof(documentId));

        Publish(new SurrealCollectionChange<TEntity>(OperationType.Delete, documentId, null));
    }

    /// <summary>
    /// Publishes a raw collection change notification. No-op after disposal.
    /// </summary>
    /// <param name="change">The change payload.</param>
    public void Publish(SurrealCollectionChange<TEntity> change)
    {
        // Snapshot under the lock, notify outside it, so observer callbacks
        // cannot deadlock against Subscribe/Dispose.
        IObserver<SurrealCollectionChange<TEntity>>[] targets;
        lock (_gate)
        {
            if (_disposed) return;
            targets = _subscribers.ToArray();
        }

        foreach (var target in targets)
            target.OnNext(change);
    }

    /// <inheritdoc />
    public void Dispose()
    {
        IObserver<SurrealCollectionChange<TEntity>>[] targets;
        lock (_gate)
        {
            if (_disposed) return;
            _disposed = true;
            targets = _subscribers.ToArray();
            _subscribers.Clear();
        }

        // Each observer is completed exactly once; later publishes are ignored.
        foreach (var target in targets)
            target.OnCompleted();
    }

    // Detaches a single observer; harmless after disposal (list already cleared).
    private void Unsubscribe(IObserver<SurrealCollectionChange<TEntity>> observer)
    {
        lock (_gate)
        {
            if (!_disposed)
                _subscribers.Remove(observer);
        }
    }

    // Idempotent handle returned by Subscribe; only the first Dispose detaches.
    private sealed class Subscription : IDisposable
    {
        private readonly SurrealCollectionChangeFeed<TEntity> _feed;
        private readonly IObserver<SurrealCollectionChange<TEntity>> _target;
        private int _state;

        public Subscription(
            SurrealCollectionChangeFeed<TEntity> feed,
            IObserver<SurrealCollectionChange<TEntity>> target)
        {
            _feed = feed;
            _target = target;
        }

        public void Dispose()
        {
            if (Interlocked.CompareExchange(ref _state, 1, 0) == 0)
                _feed.Unsubscribe(_target);
        }
    }
}

View File

@@ -0,0 +1,272 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// SurrealDB-backed oplog store. Entries are keyed by their chain hash via a
/// deterministic record id; HLC range/ordering queries are evaluated in memory
/// over a full table read because the ordering spans two columns.
/// </summary>
public class SurrealOplogStore : OplogStore
{
    // Retained for diagnostics; not used by the current code paths.
    private readonly ILogger<SurrealOplogStore> _logger;
    // Nullable (unlike the sibling stores) so InitializeVectorClock can bail out
    // safely if it runs before these fields are assigned — presumably guarding
    // against a virtual call from the base ctor; confirm against OplogStore.
    private readonly ICBDDCSurrealSchemaInitializer? _schemaInitializer;
    private readonly ISurrealDbClient? _surrealClient;

    /// <summary>
    /// Creates the store and synchronously bootstraps the in-memory vector clock
    /// from snapshot metadata (when available) and the persisted oplog.
    /// </summary>
    public SurrealOplogStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        IDocumentStore documentStore,
        IConflictResolver conflictResolver,
        IVectorClockService vectorClockService,
        ISnapshotMetadataStore? snapshotMetadataStore = null,
        ILogger<SurrealOplogStore>? logger = null) : base(
        documentStore,
        conflictResolver,
        vectorClockService,
        snapshotMetadataStore)
    {
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealOplogStore>.Instance;
        // Invalidate first so a re-run of InitializeVectorClock (e.g. after a
        // base-ctor invocation) rebuilds from this store's data.
        _vectorClock.Invalidate();
        InitializeVectorClock();
    }

    /// <summary>
    /// Returns the entries on <paramref name="startHash"/>'s node strictly after
    /// the start entry and up to and including the end entry, in HLC order.
    /// Empty when either endpoint hash is unknown.
    /// </summary>
    public override async Task<IEnumerable<OplogEntry>> GetChainRangeAsync(string startHash, string endHash,
        CancellationToken cancellationToken = default)
    {
        var startRow = await FindByHashAsync(startHash, cancellationToken);
        var endRow = await FindByHashAsync(endHash, cancellationToken);
        if (startRow == null || endRow == null) return [];
        string nodeId = startRow.TimestampNodeId;
        var all = await SelectAllAsync(cancellationToken);
        return all
            .Where(o => string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal) &&
                        (o.TimestampPhysicalTime > startRow.TimestampPhysicalTime ||
                         (o.TimestampPhysicalTime == startRow.TimestampPhysicalTime &&
                          o.TimestampLogicalCounter > startRow.TimestampLogicalCounter)) &&
                        (o.TimestampPhysicalTime < endRow.TimestampPhysicalTime ||
                         (o.TimestampPhysicalTime == endRow.TimestampPhysicalTime &&
                          o.TimestampLogicalCounter <= endRow.TimestampLogicalCounter)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .Select(o => o.ToDomain())
            .ToList();
    }

    /// <summary>Returns the entry with the given chain hash, or null when absent.</summary>
    public override async Task<OplogEntry?> GetEntryByHashAsync(string hash, CancellationToken cancellationToken = default)
    {
        var existing = await FindByHashAsync(hash, cancellationToken);
        return existing?.ToDomain();
    }

    /// <summary>
    /// Returns entries strictly newer than <paramref name="timestamp"/> in HLC
    /// order (node id not compared), optionally limited to collections.
    /// </summary>
    public override async Task<IEnumerable<OplogEntry>> GetOplogAfterAsync(HlcTimestamp timestamp,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        HashSet<string>? collectionSet = collections != null ? new HashSet<string>(collections) : null;
        return all
            .Where(o =>
                (o.TimestampPhysicalTime > timestamp.PhysicalTime ||
                 (o.TimestampPhysicalTime == timestamp.PhysicalTime &&
                  o.TimestampLogicalCounter > timestamp.LogicalCounter)) &&
                (collectionSet == null || collectionSet.Contains(o.Collection)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .Select(o => o.ToDomain())
            .ToList();
    }

    /// <summary>Same as <see cref="GetOplogAfterAsync"/> but limited to one origin node.</summary>
    public override async Task<IEnumerable<OplogEntry>> GetOplogForNodeAfterAsync(string nodeId, HlcTimestamp since,
        IEnumerable<string>? collections = null, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        HashSet<string>? collectionSet = collections != null ? new HashSet<string>(collections) : null;
        return all
            .Where(o =>
                string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal) &&
                (o.TimestampPhysicalTime > since.PhysicalTime ||
                 (o.TimestampPhysicalTime == since.PhysicalTime &&
                  o.TimestampLogicalCounter > since.LogicalCounter)) &&
                (collectionSet == null || collectionSet.Contains(o.Collection)))
            .OrderBy(o => o.TimestampPhysicalTime)
            .ThenBy(o => o.TimestampLogicalCounter)
            .Select(o => o.ToDomain())
            .ToList();
    }

    /// <summary>
    /// Deletes entries at or before the cutoff (inclusive), one record at a time.
    /// </summary>
    public override async Task PruneOplogAsync(HlcTimestamp cutoff, CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        var toDelete = all
            .Where(o => o.TimestampPhysicalTime < cutoff.PhysicalTime ||
                        (o.TimestampPhysicalTime == cutoff.PhysicalTime &&
                         o.TimestampLogicalCounter <= cutoff.LogicalCounter))
            .ToList();
        foreach (var row in toDelete)
        {
            RecordId recordId = row.Id ?? SurrealStoreRecordIds.Oplog(row.Hash);
            await EnsureReadyAsync(cancellationToken);
            await _surrealClient!.Delete(recordId, cancellationToken);
        }
    }

    /// <summary>Deletes the whole oplog table and invalidates the vector clock.</summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient!.Delete(CBDDCSurrealSchemaNames.OplogEntriesTable, cancellationToken);
        _vectorClock.Invalidate();
    }

    /// <summary>Dumps every stored entry as a domain object.</summary>
    public override async Task<IEnumerable<OplogEntry>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all.Select(o => o.ToDomain()).ToList();
    }

    /// <summary>Imports entries, overwriting any stored row with the same hash.</summary>
    public override async Task ImportAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByHashAsync(item.Hash, cancellationToken);
            RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.Oplog(item.Hash);
            await UpsertAsync(item, recordId, cancellationToken);
        }
    }

    /// <summary>Merges entries; hashes already present are left untouched.</summary>
    public override async Task MergeAsync(IEnumerable<OplogEntry> items, CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            var existing = await FindByHashAsync(item.Hash, cancellationToken);
            if (existing != null) continue;
            await UpsertAsync(item, SurrealStoreRecordIds.Oplog(item.Hash), cancellationToken);
        }
    }

    /// <summary>
    /// Seeds the vector clock from snapshot metadata (best effort) and then from
    /// the newest persisted oplog entry per node.
    /// NOTE(review): runs synchronously over async calls (GetAwaiter().GetResult())
    /// because it executes inside the constructor — a known sync-over-async
    /// trade-off; keep off sync-context threads.
    /// </summary>
    protected override void InitializeVectorClock()
    {
        if (_vectorClock.IsInitialized) return;
        // Fields may be unassigned if invoked before this type's ctor body ran;
        // in that case mark initialized and let the explicit ctor call (after
        // Invalidate) redo the work.
        if (_surrealClient == null || _schemaInitializer == null)
        {
            _vectorClock.IsInitialized = true;
            return;
        }
        if (_snapshotMetadataStore != null)
            try
            {
                var snapshots = _snapshotMetadataStore.GetAllSnapshotMetadataAsync().GetAwaiter().GetResult();
                foreach (var snapshot in snapshots)
                    _vectorClock.UpdateNode(
                        snapshot.NodeId,
                        new HlcTimestamp(
                            snapshot.TimestampPhysicalTime,
                            snapshot.TimestampLogicalCounter,
                            snapshot.NodeId),
                        snapshot.Hash ?? "");
            }
            catch
            {
                // Ignore snapshot bootstrap failures to keep oplog fallback behavior aligned.
            }
        EnsureReadyAsync(CancellationToken.None).GetAwaiter().GetResult();
        var all = _surrealClient.Select<SurrealOplogRecord>(CBDDCSurrealSchemaNames.OplogEntriesTable, CancellationToken.None)
                      .GetAwaiter().GetResult()
                  ?? [];
        // Newest entry per node wins (physical time, then logical counter).
        var latestPerNode = all
            .Where(x => !string.IsNullOrWhiteSpace(x.TimestampNodeId))
            .GroupBy(x => x.TimestampNodeId)
            .Select(g => g
                .OrderByDescending(x => x.TimestampPhysicalTime)
                .ThenByDescending(x => x.TimestampLogicalCounter)
                .First())
            .ToList();
        foreach (var latest in latestPerNode)
            _vectorClock.UpdateNode(
                latest.TimestampNodeId,
                new HlcTimestamp(latest.TimestampPhysicalTime, latest.TimestampLogicalCounter, latest.TimestampNodeId),
                latest.Hash ?? "");
        _vectorClock.IsInitialized = true;
    }

    /// <summary>Inserts an entry unless its hash is already stored (idempotent).</summary>
    protected override async Task InsertOplogEntryAsync(OplogEntry entry, CancellationToken cancellationToken = default)
    {
        var existing = await FindByHashAsync(entry.Hash, cancellationToken);
        if (existing != null) return;
        await UpsertAsync(entry, SurrealStoreRecordIds.Oplog(entry.Hash), cancellationToken);
    }

    /// <summary>Returns the hash of the newest entry originating from a node.</summary>
    protected override async Task<string?> QueryLastHashForNodeAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        var lastEntry = all
            .Where(o => string.Equals(o.TimestampNodeId, nodeId, StringComparison.Ordinal))
            .OrderByDescending(o => o.TimestampPhysicalTime)
            .ThenByDescending(o => o.TimestampLogicalCounter)
            .FirstOrDefault();
        return lastEntry?.Hash;
    }

    /// <summary>Returns the (wall, logic) timestamp stored for a hash, or null when unknown.</summary>
    protected override async Task<(long Wall, int Logic)?> QueryLastHashTimestampFromOplogAsync(string hash,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByHashAsync(hash, cancellationToken);
        if (existing == null) return null;
        return (existing.TimestampPhysicalTime, existing.TimestampLogicalCounter);
    }

    // Writes (creates or overwrites) the record for an entry.
    private async Task UpsertAsync(OplogEntry entry, RecordId recordId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient!.Upsert<SurrealOplogRecord, SurrealOplogRecord>(
            recordId,
            entry.ToSurrealRecord(),
            cancellationToken);
    }

    // Awaits schema/table initialization before any database access.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer!.EnsureInitializedAsync(cancellationToken);
    }

    // Reads the entire oplog table; callers filter/sort in memory.
    private async Task<List<SurrealOplogRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient!.Select<SurrealOplogRecord>(
            CBDDCSurrealSchemaNames.OplogEntriesTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    // Deterministic-id lookup first; on a miss or hash mismatch, falls back to
    // scanning the table.
    private async Task<SurrealOplogRecord?> FindByHashAsync(string hash, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = SurrealStoreRecordIds.Oplog(hash);
        var deterministic = await _surrealClient!.Select<SurrealOplogRecord>(deterministicId, cancellationToken);
        if (deterministic != null && string.Equals(deterministic.Hash, hash, StringComparison.Ordinal))
            return deterministic;
        var all = await SelectAllAsync(cancellationToken);
        return all.FirstOrDefault(o => string.Equals(o.Hash, hash, StringComparison.Ordinal));
    }
}

View File

@@ -0,0 +1,111 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Surreal-backed store for remote peer configurations. Each peer is persisted
/// under a deterministic record id derived from its node id, with a table scan
/// as a fallback lookup path.
/// </summary>
public class SurrealPeerConfigurationStore : PeerConfigurationStore
{
    private readonly ILogger<SurrealPeerConfigurationStore> _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    /// <summary>
    /// Wires the store to the embedded Surreal client and schema initializer.
    /// </summary>
    public SurrealPeerConfigurationStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<SurrealPeerConfigurationStore>? logger = null)
    {
        ArgumentNullException.ThrowIfNull(surrealEmbeddedClient);
        ArgumentNullException.ThrowIfNull(schemaInitializer);

        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer;
        _logger = logger ?? NullLogger<SurrealPeerConfigurationStore>.Instance;
    }

    /// <summary>Returns every stored remote peer configuration.</summary>
    public override async Task<IEnumerable<RemotePeerConfiguration>> GetRemotePeersAsync(
        CancellationToken cancellationToken = default)
    {
        var rows = await SelectAllAsync(cancellationToken);
        var peers = new List<RemotePeerConfiguration>(rows.Count);
        foreach (var row in rows)
            peers.Add(row.ToDomain());
        return peers;
    }

    /// <summary>Returns the configuration for one node id, or null when unknown.</summary>
    public override async Task<RemotePeerConfiguration?> GetRemotePeerAsync(string nodeId,
        CancellationToken cancellationToken)
    {
        var row = await FindByNodeIdAsync(nodeId, cancellationToken);
        return row?.ToDomain();
    }

    /// <summary>Removes a peer's configuration; a missing peer is logged and ignored.</summary>
    public override async Task RemoveRemotePeerAsync(string nodeId, CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);

        var row = await FindByNodeIdAsync(nodeId, cancellationToken);
        if (row is null)
        {
            _logger.LogWarning("Attempted to remove non-existent remote peer: {NodeId}", nodeId);
            return;
        }

        await _surrealClient.Delete(row.Id ?? SurrealStoreRecordIds.RemotePeer(nodeId), cancellationToken);
        _logger.LogInformation("Removed remote peer configuration: {NodeId}", nodeId);
    }

    /// <summary>Creates or overwrites the configuration row for a peer.</summary>
    public override async Task SaveRemotePeerAsync(RemotePeerConfiguration peer,
        CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);

        var row = await FindByNodeIdAsync(peer.NodeId, cancellationToken);
        RecordId target = row?.Id ?? SurrealStoreRecordIds.RemotePeer(peer.NodeId);
        await _surrealClient.Upsert<SurrealRemotePeerRecord, SurrealRemotePeerRecord>(
            target,
            peer.ToSurrealRecord(),
            cancellationToken);
        _logger.LogInformation("Saved remote peer configuration: {NodeId} ({Type})", peer.NodeId, peer.Type);
    }

    /// <summary>Deletes every stored peer configuration.</summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        _logger.LogWarning(
            "Dropping peer configuration store - all remote peer configurations will be permanently deleted!");
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable, cancellationToken);
        _logger.LogInformation("Peer configuration store dropped successfully.");
    }

    /// <summary>Export is simply the full peer listing.</summary>
    public override async Task<IEnumerable<RemotePeerConfiguration>> ExportAsync(
        CancellationToken cancellationToken = default)
        => await GetRemotePeersAsync(cancellationToken);

    // Awaits schema/table initialization before any database access.
    private Task EnsureReadyAsync(CancellationToken cancellationToken)
        => _schemaInitializer.EnsureInitializedAsync(cancellationToken);

    // Reads the whole table; lookups filter in memory.
    private async Task<List<SurrealRemotePeerRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient.Select<SurrealRemotePeerRecord>(
            CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable,
            cancellationToken);
        return rows is null ? [] : rows.ToList();
    }

    // Deterministic-id lookup first; falls back to a table scan when the id
    // misses or points at a row for a different node.
    private async Task<SurrealRemotePeerRecord?> FindByNodeIdAsync(string nodeId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);

        var byId = await _surrealClient.Select<SurrealRemotePeerRecord>(
            SurrealStoreRecordIds.RemotePeer(nodeId),
            cancellationToken);
        if (byId is { } hit && string.Equals(hit.NodeId, nodeId, StringComparison.Ordinal))
            return hit;

        foreach (var row in await SelectAllAsync(cancellationToken))
        {
            if (string.Equals(row.NodeId, nodeId, StringComparison.Ordinal))
                return row;
        }
        return null;
    }
}

View File

@@ -0,0 +1,311 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
public class SurrealPeerOplogConfirmationStore : PeerOplogConfirmationStore
{
internal const string RegistrationSourceNodeId = "__peer_registration__";
private readonly ILogger<SurrealPeerOplogConfirmationStore> _logger;
private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
private readonly ISurrealDbClient _surrealClient;
/// <summary>
/// Wires the store to the embedded Surreal client; schema creation is deferred
/// to the initializer awaited before each operation.
/// </summary>
public SurrealPeerOplogConfirmationStore(
    ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
    ICBDDCSurrealSchemaInitializer schemaInitializer,
    ILogger<SurrealPeerOplogConfirmationStore>? logger = null)
{
    // Null-checked and discarded: only the inner client is retained.
    _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
    _surrealClient = surrealEmbeddedClient.Client;
    _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
    _logger = logger ?? NullLogger<SurrealPeerOplogConfirmationStore>.Instance;
}
/// <summary>
/// Ensures a peer has a registration marker row (SourceNodeId set to the
/// synthetic <see cref="RegistrationSourceNodeId"/>). Creates an inactive-free,
/// zeroed marker when none exists; reactivates a previously soft-deleted one.
/// <paramref name="address"/> and <paramref name="type"/> are only logged, not
/// persisted here.
/// </summary>
public override async Task EnsurePeerRegisteredAsync(
    string peerNodeId,
    string address,
    PeerType type,
    CancellationToken cancellationToken = default)
{
    if (string.IsNullOrWhiteSpace(peerNodeId))
        throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
    var existing =
        await FindByPairAsync(peerNodeId, RegistrationSourceNodeId, cancellationToken);
    if (existing == null)
    {
        // Fresh registration: marker row with a zero confirmation point.
        // NOTE(review): this branch upserts the domain object while the
        // reactivation branch upserts the stored record — UpsertAsync (defined
        // further down the file) apparently accepts both; confirm.
        var created = new PeerOplogConfirmation
        {
            PeerNodeId = peerNodeId,
            SourceNodeId = RegistrationSourceNodeId,
            ConfirmedWall = 0,
            ConfirmedLogic = 0,
            ConfirmedHash = "",
            LastConfirmedUtc = DateTimeOffset.UtcNow,
            IsActive = true
        };
        await UpsertAsync(created, SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, RegistrationSourceNodeId),
            cancellationToken);
        _logger.LogDebug("Registered peer confirmation tracking for {PeerNodeId} ({Address}, {Type}).", peerNodeId,
            address, type);
        return;
    }
    // Already registered and active: nothing to do.
    if (existing.IsActive) return;
    // Reactivate a soft-deleted marker and refresh its touch time.
    existing.IsActive = true;
    existing.LastConfirmedUtcMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
    RecordId recordId =
        existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, RegistrationSourceNodeId);
    await UpsertAsync(existing, recordId, cancellationToken);
}
/// <summary>
/// Records the confirmation point (HLC timestamp + hash) a peer has acknowledged
/// for one source node. Creates the row if missing; otherwise writes only when
/// the incoming timestamp is newer, the hash changed at the same point, or the
/// row needs reactivating — keeping confirmations monotonic.
/// </summary>
public override async Task UpdateConfirmationAsync(
    string peerNodeId,
    string sourceNodeId,
    HlcTimestamp timestamp,
    string hash,
    CancellationToken cancellationToken = default)
{
    if (string.IsNullOrWhiteSpace(peerNodeId))
        throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
    if (string.IsNullOrWhiteSpace(sourceNodeId))
        throw new ArgumentException("Source node id is required.", nameof(sourceNodeId));
    var existing = await FindByPairAsync(peerNodeId, sourceNodeId, cancellationToken);
    long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
    if (existing == null)
    {
        // First confirmation for this (peer, source) pair.
        var created = new PeerOplogConfirmation
        {
            PeerNodeId = peerNodeId,
            SourceNodeId = sourceNodeId,
            ConfirmedWall = timestamp.PhysicalTime,
            ConfirmedLogic = timestamp.LogicalCounter,
            ConfirmedHash = hash ?? "",
            LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(nowMs),
            IsActive = true
        };
        await UpsertAsync(created, SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId),
            cancellationToken);
        return;
    }
    bool isNewer = IsIncomingTimestampNewer(timestamp, existing);
    // Same HLC point but a different hash still warrants a write (chain repair).
    bool samePointHashChanged = timestamp.PhysicalTime == existing.ConfirmedWall &&
                                timestamp.LogicalCounter == existing.ConfirmedLogic &&
                                !string.Equals(existing.ConfirmedHash, hash, StringComparison.Ordinal);
    // Skip the write when nothing would change and the row is already active.
    if (!isNewer && !samePointHashChanged && existing.IsActive) return;
    existing.ConfirmedWall = timestamp.PhysicalTime;
    existing.ConfirmedLogic = timestamp.LogicalCounter;
    existing.ConfirmedHash = hash ?? "";
    existing.LastConfirmedUtcMs = nowMs;
    existing.IsActive = true;
    RecordId recordId = existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId);
    await UpsertAsync(existing, recordId, cancellationToken);
}
/// <summary>
/// Returns every real confirmation row, excluding the synthetic registration
/// marker rows (those whose source node id is the registration sentinel).
/// </summary>
public override async Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsAsync(
    CancellationToken cancellationToken = default)
{
    var rows = await SelectAllAsync(cancellationToken);
    var confirmations = new List<PeerOplogConfirmation>();
    foreach (var row in rows)
    {
        if (string.Equals(row.SourceNodeId, RegistrationSourceNodeId, StringComparison.Ordinal))
            continue;
        confirmations.Add(row.ToDomain());
    }
    return confirmations;
}
/// <summary>
/// Returns the real confirmation rows for one peer, excluding its synthetic
/// registration marker row.
/// </summary>
public override async Task<IEnumerable<PeerOplogConfirmation>> GetConfirmationsForPeerAsync(
    string peerNodeId,
    CancellationToken cancellationToken = default)
{
    if (string.IsNullOrWhiteSpace(peerNodeId))
        throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));

    var rows = await SelectAllAsync(cancellationToken);
    return rows
        .Where(row => string.Equals(row.PeerNodeId, peerNodeId, StringComparison.Ordinal))
        .Where(row => !string.Equals(row.SourceNodeId, RegistrationSourceNodeId, StringComparison.Ordinal))
        .Select(row => row.ToDomain())
        .ToList();
}
/// <summary>
/// Soft-deletes all tracking rows for a peer: rows are kept but flagged
/// inactive (with the touch time refreshed) rather than physically removed,
/// so confirmation history survives a peer being re-added.
/// </summary>
public override async Task RemovePeerTrackingAsync(string peerNodeId, CancellationToken cancellationToken = default)
{
    if (string.IsNullOrWhiteSpace(peerNodeId))
        throw new ArgumentException("Peer node id is required.", nameof(peerNodeId));
    var matches = (await SelectAllAsync(cancellationToken))
        .Where(c => string.Equals(c.PeerNodeId, peerNodeId, StringComparison.Ordinal))
        .ToList();
    if (matches.Count == 0) return;
    long nowMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();
    foreach (var match in matches)
    {
        // Already-inactive rows are left untouched (no redundant writes).
        if (!match.IsActive) continue;
        match.IsActive = false;
        match.LastConfirmedUtcMs = nowMs;
        RecordId recordId = match.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(match.PeerNodeId, match.SourceNodeId);
        await UpsertAsync(match, recordId, cancellationToken);
    }
}
/// <summary>
/// Returns the distinct node ids of peers that still have at least one active
/// tracking row, in first-seen order.
/// </summary>
public override async Task<IEnumerable<string>> GetActiveTrackedPeersAsync(
    CancellationToken cancellationToken = default)
{
    var rows = await SelectAllAsync(cancellationToken);
    var seen = new HashSet<string>(StringComparer.Ordinal);
    var peers = new List<string>();
    foreach (var row in rows)
    {
        if (row.IsActive && seen.Add(row.PeerNodeId))
            peers.Add(row.PeerNodeId);
    }
    return peers;
}
/// <summary>
/// Hard-deletes every confirmation row, including registration markers.
/// </summary>
public override async Task DropAsync(CancellationToken cancellationToken = default)
{
    await EnsureReadyAsync(cancellationToken);
    await _surrealClient.Delete(CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable, cancellationToken);
}
/// <summary>
/// Exports every stored confirmation (including registration marker rows) as
/// domain objects, e.g. for snapshot transfer.
/// </summary>
public override async Task<IEnumerable<PeerOplogConfirmation>> ExportAsync(CancellationToken cancellationToken = default)
{
    var rows = await SelectAllAsync(cancellationToken);
    var exported = new List<PeerOplogConfirmation>(rows.Count);
    foreach (var row in rows)
        exported.Add(row.ToDomain());
    return exported;
}
/// <summary>
/// Inserts or overwrites confirmations from an external snapshot. Unlike
/// <c>MergeAsync</c>, incoming values win unconditionally.
/// </summary>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="items"/> is null.</exception>
public override async Task ImportAsync(IEnumerable<PeerOplogConfirmation> items,
    CancellationToken cancellationToken = default)
{
    // Fail fast with an explicit argument exception instead of an NRE from the
    // enumerator, matching the validation style used elsewhere in this store.
    ArgumentNullException.ThrowIfNull(items);
    foreach (var item in items)
    {
        // Reuse the record id already stored for this (peer, source) pair so
        // the row is overwritten in place; otherwise derive the deterministic id.
        var existing = await FindByPairAsync(item.PeerNodeId, item.SourceNodeId, cancellationToken);
        RecordId recordId =
            existing?.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(item.PeerNodeId, item.SourceNodeId);
        await UpsertAsync(item, recordId, cancellationToken);
    }
}
/// <summary>
/// Merges confirmations from a peer with last-writer-wins semantics per
/// (peer, source) pair: the newer HLC confirmation position wins,
/// LastConfirmedUtc only moves forward, and IsActive always adopts the
/// incoming value. Unknown pairs are inserted as-is.
/// </summary>
public override async Task MergeAsync(IEnumerable<PeerOplogConfirmation> items,
    CancellationToken cancellationToken = default)
{
    foreach (var item in items)
    {
        var existing = await FindByPairAsync(item.PeerNodeId, item.SourceNodeId, cancellationToken);
        if (existing == null)
        {
            // First sighting of this pair: insert under its deterministic id.
            await UpsertAsync(item, SurrealStoreRecordIds.PeerOplogConfirmation(item.PeerNodeId, item.SourceNodeId),
                cancellationToken);
            continue;
        }
        bool changed = false;
        // Compare confirmation positions via HlcTimestamp's ordering operator
        // (presumably physical time, then logical counter, then node id —
        // TODO confirm against HlcTimestamp's comparison implementation).
        var incomingTimestamp = new HlcTimestamp(item.ConfirmedWall, item.ConfirmedLogic, item.SourceNodeId);
        var existingTimestamp = new HlcTimestamp(existing.ConfirmedWall, existing.ConfirmedLogic, existing.SourceNodeId);
        if (incomingTimestamp > existingTimestamp)
        {
            existing.ConfirmedWall = item.ConfirmedWall;
            existing.ConfirmedLogic = item.ConfirmedLogic;
            existing.ConfirmedHash = item.ConfirmedHash;
            changed = true;
        }
        // Wall-clock "last confirmed" is monotonic: advance only, never rewind.
        long incomingLastConfirmedMs = item.LastConfirmedUtc.ToUnixTimeMilliseconds();
        if (incomingLastConfirmedMs > existing.LastConfirmedUtcMs)
        {
            existing.LastConfirmedUtcMs = incomingLastConfirmedMs;
            changed = true;
        }
        // The active flag mirrors the incoming state in both directions
        // (covers deactivation as well as re-activation).
        if (existing.IsActive != item.IsActive)
        {
            existing.IsActive = item.IsActive;
            changed = true;
        }
        if (!changed) continue;
        // Prefer the stored record id; fall back to the deterministic id for
        // rows that were imported without one.
        RecordId recordId =
            existing.Id ?? SurrealStoreRecordIds.PeerOplogConfirmation(existing.PeerNodeId, existing.SourceNodeId);
        await UpsertAsync(existing, recordId, cancellationToken);
    }
}
// Converts the domain confirmation to its Surreal row shape and upserts it
// under the supplied record id.
private async Task UpsertAsync(PeerOplogConfirmation confirmation, RecordId recordId, CancellationToken cancellationToken)
{
    await EnsureReadyAsync(cancellationToken);
    await _surrealClient.Upsert<SurrealPeerOplogConfirmationRecord, SurrealPeerOplogConfirmationRecord>(
        recordId,
        confirmation.ToSurrealRecord(),
        cancellationToken);
}
// Overload for rows already in Surreal record shape (used when mutating rows
// loaded from the table, so no domain round-trip is needed).
private async Task UpsertAsync(SurrealPeerOplogConfirmationRecord confirmation, RecordId recordId,
    CancellationToken cancellationToken)
{
    await EnsureReadyAsync(cancellationToken);
    await _surrealClient.Upsert<SurrealPeerOplogConfirmationRecord, SurrealPeerOplogConfirmationRecord>(
        recordId,
        confirmation,
        cancellationToken);
}
// Ensures the schema (tables/indexes) exists before any read or write; the
// initializer is presumably idempotent after the first call — verify.
private async Task EnsureReadyAsync(CancellationToken cancellationToken)
{
    await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
}
// Loads the entire confirmations table; a null result from the driver is
// treated as an empty table.
private async Task<List<SurrealPeerOplogConfirmationRecord>> SelectAllAsync(CancellationToken cancellationToken)
{
    await EnsureReadyAsync(cancellationToken);
    var rows = await _surrealClient.Select<SurrealPeerOplogConfirmationRecord>(
        CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable,
        cancellationToken);
    return rows == null ? [] : rows.ToList();
}
// Locates the row for a (peer, source) pair. Fast path: direct lookup by the
// deterministic record id (fields are re-verified defensively). Slow path:
// full table scan, needed for rows imported under a foreign record id.
private async Task<SurrealPeerOplogConfirmationRecord?> FindByPairAsync(string peerNodeId, string sourceNodeId,
    CancellationToken cancellationToken)
{
    await EnsureReadyAsync(cancellationToken);
    RecordId expectedId = SurrealStoreRecordIds.PeerOplogConfirmation(peerNodeId, sourceNodeId);
    var byId = await _surrealClient.Select<SurrealPeerOplogConfirmationRecord>(expectedId, cancellationToken);
    bool idHit = byId != null &&
                 string.Equals(byId.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
                 string.Equals(byId.SourceNodeId, sourceNodeId, StringComparison.Ordinal);
    if (idHit) return byId;
    var all = await SelectAllAsync(cancellationToken);
    foreach (var row in all)
        if (string.Equals(row.PeerNodeId, peerNodeId, StringComparison.Ordinal) &&
            string.Equals(row.SourceNodeId, sourceNodeId, StringComparison.Ordinal))
            return row;
    return null;
}
// (physical, logical) strict-ordering check with no node-id tie-break.
// NOTE(review): appears unused — MergeAsync compares via HlcTimestamp's own
// operator instead; candidate for removal once confirmed nothing else
// (e.g. a partial class or reflection) calls it.
private static bool IsIncomingTimestampNewer(HlcTimestamp incomingTimestamp, SurrealPeerOplogConfirmationRecord existing)
{
    if (incomingTimestamp.PhysicalTime > existing.ConfirmedWall) return true;
    if (incomingTimestamp.PhysicalTime == existing.ConfirmedWall &&
        incomingTimestamp.LogicalCounter > existing.ConfirmedLogic)
        return true;
    return false;
}
}

View File

@@ -0,0 +1,296 @@
using System.Text.Json;
using Dahomey.Cbor.ObjectModel;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
// One row of a SurrealDB `SHOW CHANGES` result: the table versionstamp plus
// the decoded changes that occurred at that versionstamp.
internal readonly record struct SurrealPolledChangeRow(
    ulong Versionstamp,
    IReadOnlyList<SurrealPolledChange> Changes);
// A single decoded change: Put changes carry the normalized JSON payload,
// Delete changes carry only the record key (Content is null).
internal readonly record struct SurrealPolledChange(
    OperationType OperationType,
    string Key,
    JsonElement? Content);
/// <summary>
/// Decodes CBOR rows produced by SurrealDB's `SHOW CHANGES FOR TABLE` change
/// feed into (versionstamp, change-list) pairs. Changes belonging to other
/// tables are filtered out; write payloads are normalized to JSON with the
/// plain record key injected under "id".
/// </summary>
internal static class SurrealShowChangesCborDecoder
{
    // Property names SurrealDB may use to label a write-type change; probed in
    // this order, first match wins.
    private static readonly string[] PutChangeKinds = ["create", "update", "upsert", "insert", "set", "replace"];

    /// <summary>
    /// Decodes every row, skipping rows without a readable versionstamp.
    /// Rows with a missing or empty change list are still emitted so callers
    /// can advance their polling cursor.
    /// </summary>
    public static IReadOnlyList<SurrealPolledChangeRow> DecodeRows(
        IEnumerable<CborObject> rows,
        string expectedTableName)
    {
        var result = new List<SurrealPolledChangeRow>();
        foreach (var row in rows)
        {
            if (!TryGetProperty(row, "versionstamp", out CborValue versionstampValue)) continue;
            if (!TryReadUInt64(versionstampValue, out ulong versionstamp)) continue;
            var changes = new List<SurrealPolledChange>();
            if (TryGetProperty(row, "changes", out CborValue rawChanges) &&
                rawChanges is CborArray changeArray)
                foreach (CborValue changeValue in changeArray)
                {
                    // Non-object change entries are silently ignored.
                    if (changeValue is not CborObject changeObject) continue;
                    if (TryExtractChange(changeObject, expectedTableName, out SurrealPolledChange change))
                        changes.Add(change);
                }
            result.Add(new SurrealPolledChangeRow(versionstamp, changes));
        }
        return result;
    }

    // Classifies one change object as a Delete or a Put. Returns false when the
    // change targets another table or no record key can be extracted.
    private static bool TryExtractChange(
        CborObject changeObject,
        string expectedTableName,
        out SurrealPolledChange change)
    {
        // Delete is checked first; delete payloads have no content to normalize.
        if (TryGetProperty(changeObject, "delete", out CborValue deletePayload))
            if (TryExtractRecordKey(deletePayload, expectedTableName, out string deleteKey))
            {
                change = new SurrealPolledChange(OperationType.Delete, deleteKey, null);
                return true;
            }
        foreach (string putKind in PutChangeKinds)
            if (TryGetProperty(changeObject, putKind, out CborValue putPayload))
                if (TryExtractRecordKey(putPayload, expectedTableName, out string putKey))
                {
                    JsonElement? content = BuildNormalizedJsonPayload(putPayload, putKey);
                    change = new SurrealPolledChange(OperationType.Put, putKey, content);
                    return true;
                }
        change = default;
        return false;
    }

    // Extracts the record key from a change payload's "id" property; rejects
    // payloads that are not objects, lack an id, or yield a blank key.
    private static bool TryExtractRecordKey(
        CborValue payload,
        string expectedTableName,
        out string key)
    {
        key = "";
        if (payload is not CborObject payloadObject) return false;
        if (!TryGetProperty(payloadObject, "id", out CborValue idValue)) return false;
        if (TryExtractRecordKeyFromIdValue(idValue, expectedTableName, out string extracted))
        {
            if (string.IsNullOrWhiteSpace(extracted)) return false;
            key = extracted;
            return true;
        }
        return false;
    }

    // Handles the three id encodings observed from SurrealDB:
    //   1. array form  [table, key]
    //   2. string form "table:key"
    //   3. object form { tb|table: ..., id: ... }
    // Table-name filtering is applied where a table name is present.
    private static bool TryExtractRecordKeyFromIdValue(
        CborValue idValue,
        string expectedTableName,
        out string key)
    {
        key = "";
        if (idValue is CborArray arrayId)
        {
            if (arrayId.Count < 2) return false;
            if (!TryReadString(arrayId[0], out string tableName)) return false;
            if (!string.IsNullOrWhiteSpace(expectedTableName) &&
                !string.Equals(tableName, expectedTableName, StringComparison.Ordinal))
                return false;
            if (!TryReadString(arrayId[1], out string recordKey)) return false;
            key = recordKey;
            return true;
        }
        if (idValue is CborString)
        {
            // String form carries the table prefix inside the string itself;
            // note no table filter is applied here (ExtractKeyFromRecordId
            // strips whatever prefix is present).
            if (!TryReadString(idValue, out string recordId)) return false;
            key = ExtractKeyFromRecordId(recordId) ?? "";
            return !string.IsNullOrWhiteSpace(key);
        }
        if (idValue is CborObject idObject)
        {
            string? tableName = null;
            if (TryGetProperty(idObject, "tb", out CborValue tbValue) && TryReadString(tbValue, out string tb))
                tableName = tb;
            else if (TryGetProperty(idObject, "table", out CborValue tableValue) &&
                     TryReadString(tableValue, out string table))
                tableName = table;
            // Only reject on a table mismatch when a table name is actually present.
            if (!string.IsNullOrWhiteSpace(expectedTableName) &&
                !string.IsNullOrWhiteSpace(tableName) &&
                !string.Equals(tableName, expectedTableName, StringComparison.Ordinal))
                return false;
            if (TryGetProperty(idObject, "id", out CborValue nestedId))
            {
                if (TryReadString(nestedId, out string nestedIdValue))
                {
                    key = nestedIdValue;
                    return true;
                }
                // Non-string nested ids (e.g. numbers) fall back to their
                // textual form with surrounding quotes trimmed.
                key = nestedId.ToString()?.Trim('"') ?? "";
                return !string.IsNullOrWhiteSpace(key);
            }
        }
        return false;
    }

    // Converts the CBOR payload to plain CLR values and serializes to a
    // JsonElement; forces "id" to the plain record key so downstream JSON
    // consumers see a stable, table-free identifier.
    private static JsonElement? BuildNormalizedJsonPayload(CborValue payload, string key)
    {
        object? clrValue = ConvertCborToClr(payload);
        if (clrValue == null) return null;
        if (clrValue is Dictionary<string, object?> payloadMap)
            payloadMap["id"] = key;
        return JsonSerializer.SerializeToElement(clrValue);
    }

    // Recursively maps CBOR values to CLR objects (maps -> Dictionary,
    // arrays -> List). Scalars are probed in order: string, bool, int64,
    // uint64, double; anything else degrades to its ToString() form.
    private static object? ConvertCborToClr(CborValue value)
    {
        switch (value)
        {
            case CborNull:
                return null;
            case CborObject cborObject:
                var map = new Dictionary<string, object?>(StringComparer.Ordinal);
                foreach ((CborValue rawKey, CborValue rawValue) in cborObject)
                {
                    // Non-string map keys fall back to their textual form;
                    // keys that still end up blank are dropped.
                    if (!TryReadString(rawKey, out string key) || string.IsNullOrWhiteSpace(key))
                        key = rawKey.ToString()?.Trim('"') ?? "";
                    if (string.IsNullOrWhiteSpace(key)) continue;
                    map[key] = ConvertCborToClr(rawValue);
                }
                return map;
            case CborArray cborArray:
                return cborArray.Select(ConvertCborToClr).ToList();
            default:
                if (TryReadString(value, out string stringValue)) return stringValue;
                if (TryReadBoolean(value, out bool boolValue)) return boolValue;
                if (TryReadInt64(value, out long intValue)) return intValue;
                if (TryReadUInt64(value, out ulong uintValue)) return uintValue;
                if (TryReadDouble(value, out double doubleValue)) return doubleValue;
                return value.ToString();
        }
    }

    // Map lookup with a string key; `value` is CborValue.Null (not CLR null)
    // when the property is absent.
    private static bool TryGetProperty(CborObject source, string name, out CborValue value)
    {
        if (source.TryGetValue((CborValue)name, out CborValue? found))
        {
            value = found;
            return true;
        }
        value = CborValue.Null;
        return false;
    }

    // The TryRead* helpers below wrap Dahomey's Value<T>() conversions;
    // any conversion failure maps to a false return rather than an exception.
    private static bool TryReadString(CborValue value, out string result)
    {
        try
        {
            string? parsed = value.Value<string>();
            if (parsed == null)
            {
                result = "";
                return false;
            }
            result = parsed;
            return true;
        }
        catch
        {
            result = "";
            return false;
        }
    }

    private static bool TryReadBoolean(CborValue value, out bool result)
    {
        try
        {
            result = value.Value<bool>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    private static bool TryReadInt64(CborValue value, out long result)
    {
        try
        {
            result = value.Value<long>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    private static bool TryReadUInt64(CborValue value, out ulong result)
    {
        try
        {
            result = value.Value<ulong>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    private static bool TryReadDouble(CborValue value, out double result)
    {
        try
        {
            result = value.Value<double>();
            return true;
        }
        catch
        {
            result = default;
            return false;
        }
    }

    // Parses the "table:key" string form of a record id and strips one layer
    // of double-quote or backtick escaping around the key portion.
    private static string? ExtractKeyFromRecordId(string recordId)
    {
        if (string.IsNullOrWhiteSpace(recordId)) return null;
        int separator = recordId.IndexOf(':');
        // No separator: the whole string is treated as the key.
        if (separator < 0) return recordId;
        string key = recordId[(separator + 1)..].Trim();
        if (key.StartsWith('"') && key.EndsWith('"') && key.Length >= 2)
            key = key[1..^1];
        if (key.StartsWith('`') && key.EndsWith('`') && key.Length >= 2)
            key = key[1..^1];
        return string.IsNullOrWhiteSpace(key) ? null : key;
    }
}

View File

@@ -0,0 +1,142 @@
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// SurrealDB-backed snapshot-metadata store. One row per node id, addressed by
/// a deterministic record id (see <c>SurrealStoreRecordIds.SnapshotMetadata</c>)
/// with a full-table-scan fallback for rows imported under foreign ids.
/// </summary>
public class SurrealSnapshotMetadataStore : SnapshotMetadataStore
{
    private readonly ILogger<SurrealSnapshotMetadataStore> _logger;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;
    private readonly ISurrealDbClient _surrealClient;

    /// <summary>Wires up the embedded Surreal client and schema initializer.</summary>
    /// <exception cref="ArgumentNullException">Thrown when a required dependency is null.</exception>
    public SurrealSnapshotMetadataStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        ILogger<SurrealSnapshotMetadataStore>? logger = null)
    {
        _ = surrealEmbeddedClient ?? throw new ArgumentNullException(nameof(surrealEmbeddedClient));
        _surrealClient = surrealEmbeddedClient.Client;
        _schemaInitializer = schemaInitializer ?? throw new ArgumentNullException(nameof(schemaInitializer));
        _logger = logger ?? NullLogger<SurrealSnapshotMetadataStore>.Instance;
    }

    /// <summary>Hard-deletes every row in the snapshot-metadata table.</summary>
    public override async Task DropAsync(CancellationToken cancellationToken = default)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Delete(CBDDCSurrealSchemaNames.SnapshotMetadataTable, cancellationToken);
    }

    /// <summary>Exports all stored snapshot metadata as domain objects.</summary>
    public override async Task<IEnumerable<SnapshotMetadata>> ExportAsync(CancellationToken cancellationToken = default)
    {
        var all = await SelectAllAsync(cancellationToken);
        return all.Select(m => m.ToDomain()).ToList();
    }

    /// <summary>Returns the metadata stored for a node, or null when absent.</summary>
    public override async Task<SnapshotMetadata?> GetSnapshotMetadataAsync(string nodeId,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByNodeIdAsync(nodeId, cancellationToken);
        return existing?.ToDomain();
    }

    /// <summary>Returns just the stored snapshot hash for a node, or null when absent.</summary>
    public override async Task<string?> GetSnapshotHashAsync(string nodeId, CancellationToken cancellationToken = default)
    {
        var existing = await FindByNodeIdAsync(nodeId, cancellationToken);
        return existing?.Hash;
    }

    /// <summary>
    /// Inserts or overwrites metadata from an external snapshot; incoming
    /// values win unconditionally (contrast with <see cref="MergeAsync"/>).
    /// </summary>
    public override async Task ImportAsync(IEnumerable<SnapshotMetadata> items,
        CancellationToken cancellationToken = default)
    {
        foreach (var item in items)
        {
            // Reuse the stored record id when present so the row is updated in place.
            var existing = await FindByNodeIdAsync(item.NodeId, cancellationToken);
            RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.SnapshotMetadata(item.NodeId);
            await UpsertAsync(item, recordId, cancellationToken);
        }
    }

    /// <summary>Upserts metadata for a node (insert-or-replace despite the name).</summary>
    public override async Task InsertSnapshotMetadataAsync(SnapshotMetadata metadata,
        CancellationToken cancellationToken = default)
    {
        var existing = await FindByNodeIdAsync(metadata.NodeId, cancellationToken);
        RecordId recordId = existing?.Id ?? SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId);
        await UpsertAsync(metadata, recordId, cancellationToken);
    }

    /// <summary>
    /// Merges metadata per node with HLC last-writer-wins: the incoming row is
    /// applied only when its (physical, logical) timestamp is strictly newer
    /// than the stored one; ties keep the existing row.
    /// </summary>
    public override async Task MergeAsync(IEnumerable<SnapshotMetadata> items, CancellationToken cancellationToken = default)
    {
        foreach (var metadata in items)
        {
            var existing = await FindByNodeIdAsync(metadata.NodeId, cancellationToken);
            if (existing == null)
            {
                await UpsertAsync(metadata, SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId), cancellationToken);
                continue;
            }
            // Skip stale or equal timestamps (strict (physical, logical) ordering).
            if (metadata.TimestampPhysicalTime < existing.TimestampPhysicalTime ||
                (metadata.TimestampPhysicalTime == existing.TimestampPhysicalTime &&
                 metadata.TimestampLogicalCounter <= existing.TimestampLogicalCounter))
                continue;
            RecordId recordId = existing.Id ?? SurrealStoreRecordIds.SnapshotMetadata(metadata.NodeId);
            await UpsertAsync(metadata, recordId, cancellationToken);
        }
    }

    /// <summary>
    /// Overwrites the stored row for a node. No-op when the node has no row yet
    /// (unlike Insert/Import, this never creates one).
    /// </summary>
    public override async Task UpdateSnapshotMetadataAsync(SnapshotMetadata existingMeta,
        CancellationToken cancellationToken)
    {
        var existing = await FindByNodeIdAsync(existingMeta.NodeId, cancellationToken);
        if (existing == null) return;
        RecordId recordId = existing.Id ?? SurrealStoreRecordIds.SnapshotMetadata(existingMeta.NodeId);
        await UpsertAsync(existingMeta, recordId, cancellationToken);
    }

    /// <summary>Alias for <see cref="ExportAsync"/>.</summary>
    public override async Task<IEnumerable<SnapshotMetadata>> GetAllSnapshotMetadataAsync(
        CancellationToken cancellationToken = default)
    {
        return await ExportAsync(cancellationToken);
    }

    // Converts to the Surreal row shape and upserts under the given record id.
    private async Task UpsertAsync(SnapshotMetadata metadata, RecordId recordId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        await _surrealClient.Upsert<SurrealSnapshotMetadataRecord, SurrealSnapshotMetadataRecord>(
            recordId,
            metadata.ToSurrealRecord(),
            cancellationToken);
    }

    // Ensures the schema exists before any read or write.
    private async Task EnsureReadyAsync(CancellationToken cancellationToken)
    {
        await _schemaInitializer.EnsureInitializedAsync(cancellationToken);
    }

    // Loads the whole table; a null driver result is treated as empty.
    private async Task<List<SurrealSnapshotMetadataRecord>> SelectAllAsync(CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        var rows = await _surrealClient.Select<SurrealSnapshotMetadataRecord>(
            CBDDCSurrealSchemaNames.SnapshotMetadataTable,
            cancellationToken);
        return rows?.ToList() ?? [];
    }

    // Fast path: deterministic record-id lookup (node id re-verified);
    // slow path: full-table scan for rows imported under foreign ids.
    private async Task<SurrealSnapshotMetadataRecord?> FindByNodeIdAsync(string nodeId, CancellationToken cancellationToken)
    {
        await EnsureReadyAsync(cancellationToken);
        RecordId deterministicId = SurrealStoreRecordIds.SnapshotMetadata(nodeId);
        var deterministic = await _surrealClient.Select<SurrealSnapshotMetadataRecord>(deterministicId, cancellationToken);
        if (deterministic != null &&
            string.Equals(deterministic.NodeId, nodeId, StringComparison.Ordinal))
            return deterministic;
        var all = await SelectAllAsync(cancellationToken);
        return all.FirstOrDefault(m => string.Equals(m.NodeId, nodeId, StringComparison.Ordinal));
    }
}

View File

@@ -0,0 +1,294 @@
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using System.Text.Json.Serialization;
using SurrealDb.Net.Models;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Persistence.Surreal;
/// <summary>
/// Deterministic record-id construction for the CBDDC Surreal tables, so the
/// same logical entity always maps to the same record.
/// </summary>
internal static class SurrealStoreRecordIds
{
    public static RecordId Oplog(string hash) =>
        RecordId.From(CBDDCSurrealSchemaNames.OplogEntriesTable, hash);

    public static RecordId DocumentMetadata(string collection, string key) =>
        RecordId.From(
            CBDDCSurrealSchemaNames.DocumentMetadataTable,
            CompositeKey("docmeta", collection, key));

    public static RecordId SnapshotMetadata(string nodeId) =>
        RecordId.From(CBDDCSurrealSchemaNames.SnapshotMetadataTable, nodeId);

    public static RecordId RemotePeer(string nodeId) =>
        RecordId.From(CBDDCSurrealSchemaNames.RemotePeerConfigurationsTable, nodeId);

    public static RecordId PeerOplogConfirmation(string peerNodeId, string sourceNodeId) =>
        RecordId.From(
            CBDDCSurrealSchemaNames.PeerOplogConfirmationsTable,
            CompositeKey("peerconfirm", peerNodeId, sourceNodeId));

    // Lower-case hex SHA-256 over "prefix\nfirst\nsecond" — newline-delimited
    // so ("a","bc") and ("ab","c") cannot collide.
    private static string CompositeKey(string prefix, string first, string second)
    {
        byte[] material = Encoding.UTF8.GetBytes(string.Join('\n', prefix, first, second));
        byte[] digest = SHA256.HashData(material);
        return Convert.ToHexString(digest).ToLowerInvariant();
    }
}
// Surreal row shape for one oplog entry. JSON property names are part of the
// stored schema — do not rename. The HLC timestamp is stored as three flat
// fields (wall clock / logical counter / node id).
internal sealed class SurrealOplogRecord : Record
{
    [JsonPropertyName("collection")]
    public string Collection { get; set; } = "";
    [JsonPropertyName("key")]
    public string Key { get; set; } = "";
    // Stored as the integer value of Core's OperationType enum.
    [JsonPropertyName("operation")]
    public int Operation { get; set; }
    // Raw JSON text of the payload; empty string means "no payload".
    [JsonPropertyName("payloadJson")]
    public string PayloadJson { get; set; } = "";
    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }
    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }
    [JsonPropertyName("timestampNodeId")]
    public string TimestampNodeId { get; set; } = "";
    // Hash chain: Hash of this entry plus the hash of the preceding entry.
    [JsonPropertyName("hash")]
    public string Hash { get; set; } = "";
    [JsonPropertyName("previousHash")]
    public string PreviousHash { get; set; } = "";
}
// Surreal row shape for per-document sync metadata (last-write HLC and
// tombstone flag). JSON property names are part of the stored schema.
internal sealed class SurrealDocumentMetadataRecord : Record
{
    [JsonPropertyName("collection")]
    public string Collection { get; set; } = "";
    [JsonPropertyName("key")]
    public string Key { get; set; } = "";
    [JsonPropertyName("hlcPhysicalTime")]
    public long HlcPhysicalTime { get; set; }
    [JsonPropertyName("hlcLogicalCounter")]
    public int HlcLogicalCounter { get; set; }
    [JsonPropertyName("hlcNodeId")]
    public string HlcNodeId { get; set; } = "";
    // True when the document has been deleted (tombstone).
    [JsonPropertyName("isDeleted")]
    public bool IsDeleted { get; set; }
}
// Surreal row shape for a configured remote peer. JSON property names are part
// of the stored schema.
internal sealed class SurrealRemotePeerRecord : Record
{
    [JsonPropertyName("nodeId")]
    public string NodeId { get; set; } = "";
    [JsonPropertyName("address")]
    public string Address { get; set; } = "";
    // Stored as the integer value of Core.Network's PeerType enum.
    [JsonPropertyName("type")]
    public int Type { get; set; }
    [JsonPropertyName("isEnabled")]
    public bool IsEnabled { get; set; }
    // JSON-serialized List<string> of collection names the peer is interested
    // in; empty string means "no interests recorded".
    [JsonPropertyName("interestsJson")]
    public string InterestsJson { get; set; } = "";
}
// Surreal row shape for a peer's confirmed oplog position, keyed by the
// (peerNodeId, sourceNodeId) pair. JSON property names are part of the schema.
internal sealed class SurrealPeerOplogConfirmationRecord : Record
{
    [JsonPropertyName("peerNodeId")]
    public string PeerNodeId { get; set; } = "";
    [JsonPropertyName("sourceNodeId")]
    public string SourceNodeId { get; set; } = "";
    // Confirmed HLC position: wall clock + logical counter + the hash of the
    // last confirmed entry.
    [JsonPropertyName("confirmedWall")]
    public long ConfirmedWall { get; set; }
    [JsonPropertyName("confirmedLogic")]
    public int ConfirmedLogic { get; set; }
    [JsonPropertyName("confirmedHash")]
    public string ConfirmedHash { get; set; } = "";
    // Unix-epoch milliseconds of the most recent confirmation (monotonic).
    [JsonPropertyName("lastConfirmedUtcMs")]
    public long LastConfirmedUtcMs { get; set; }
    // False once tracking for the peer has been soft-deleted.
    [JsonPropertyName("isActive")]
    public bool IsActive { get; set; }
}
// Surreal row shape for per-node snapshot metadata (HLC position and snapshot
// hash). JSON property names are part of the stored schema.
internal sealed class SurrealSnapshotMetadataRecord : Record
{
    [JsonPropertyName("nodeId")]
    public string NodeId { get; set; } = "";
    [JsonPropertyName("timestampPhysicalTime")]
    public long TimestampPhysicalTime { get; set; }
    [JsonPropertyName("timestampLogicalCounter")]
    public int TimestampLogicalCounter { get; set; }
    [JsonPropertyName("hash")]
    public string Hash { get; set; } = "";
}
/// <summary>
/// Bidirectional mappers between CBDDC domain objects and their Surreal row
/// shapes. Enums are persisted as ints, HLC timestamps as flat fields, and
/// nested payloads/lists as JSON text.
/// </summary>
internal static class SurrealStoreRecordMappers
{
    public static SurrealOplogRecord ToSurrealRecord(this OplogEntry entry)
    {
        return new SurrealOplogRecord
        {
            Collection = entry.Collection,
            Key = entry.Key,
            Operation = (int)entry.Operation,
            // Null payload round-trips via empty string (see ToDomain below).
            PayloadJson = entry.Payload?.GetRawText() ?? "",
            TimestampPhysicalTime = entry.Timestamp.PhysicalTime,
            TimestampLogicalCounter = entry.Timestamp.LogicalCounter,
            TimestampNodeId = entry.Timestamp.NodeId,
            Hash = entry.Hash,
            PreviousHash = entry.PreviousHash
        };
    }

    public static OplogEntry ToDomain(this SurrealOplogRecord record)
    {
        // Empty PayloadJson maps back to a null JsonElement payload.
        JsonElement? payload = null;
        if (!string.IsNullOrEmpty(record.PayloadJson))
            payload = JsonSerializer.Deserialize<JsonElement>(record.PayloadJson);
        return new OplogEntry(
            record.Collection,
            record.Key,
            (OperationType)record.Operation,
            payload,
            new HlcTimestamp(record.TimestampPhysicalTime, record.TimestampLogicalCounter, record.TimestampNodeId),
            record.PreviousHash,
            record.Hash);
    }

    public static SurrealDocumentMetadataRecord ToSurrealRecord(this DocumentMetadata metadata)
    {
        return new SurrealDocumentMetadataRecord
        {
            Collection = metadata.Collection,
            Key = metadata.Key,
            HlcPhysicalTime = metadata.UpdatedAt.PhysicalTime,
            HlcLogicalCounter = metadata.UpdatedAt.LogicalCounter,
            HlcNodeId = metadata.UpdatedAt.NodeId,
            IsDeleted = metadata.IsDeleted
        };
    }

    public static DocumentMetadata ToDomain(this SurrealDocumentMetadataRecord record)
    {
        return new DocumentMetadata(
            record.Collection,
            record.Key,
            new HlcTimestamp(record.HlcPhysicalTime, record.HlcLogicalCounter, record.HlcNodeId),
            record.IsDeleted);
    }

    public static SurrealRemotePeerRecord ToSurrealRecord(this RemotePeerConfiguration peer)
    {
        return new SurrealRemotePeerRecord
        {
            NodeId = peer.NodeId,
            Address = peer.Address,
            Type = (int)peer.Type,
            IsEnabled = peer.IsEnabled,
            // Empty interests list is stored as "" rather than "[]".
            InterestsJson = peer.InterestingCollections.Count > 0
                ? JsonSerializer.Serialize(peer.InterestingCollections)
                : ""
        };
    }

    public static RemotePeerConfiguration ToDomain(this SurrealRemotePeerRecord record)
    {
        var result = new RemotePeerConfiguration
        {
            NodeId = record.NodeId,
            Address = record.Address,
            Type = (PeerType)record.Type,
            IsEnabled = record.IsEnabled
        };
        // Empty/missing InterestsJson leaves the configuration's default
        // InterestingCollections untouched; malformed-but-null JSON maps to [].
        if (!string.IsNullOrEmpty(record.InterestsJson))
            result.InterestingCollections =
                JsonSerializer.Deserialize<List<string>>(record.InterestsJson) ?? [];
        return result;
    }

    public static SurrealPeerOplogConfirmationRecord ToSurrealRecord(this PeerOplogConfirmation confirmation)
    {
        return new SurrealPeerOplogConfirmationRecord
        {
            PeerNodeId = confirmation.PeerNodeId,
            SourceNodeId = confirmation.SourceNodeId,
            ConfirmedWall = confirmation.ConfirmedWall,
            ConfirmedLogic = confirmation.ConfirmedLogic,
            ConfirmedHash = confirmation.ConfirmedHash,
            // DateTimeOffset is persisted as Unix-epoch milliseconds.
            LastConfirmedUtcMs = confirmation.LastConfirmedUtc.ToUnixTimeMilliseconds(),
            IsActive = confirmation.IsActive
        };
    }

    public static PeerOplogConfirmation ToDomain(this SurrealPeerOplogConfirmationRecord record)
    {
        return new PeerOplogConfirmation
        {
            PeerNodeId = record.PeerNodeId,
            SourceNodeId = record.SourceNodeId,
            ConfirmedWall = record.ConfirmedWall,
            ConfirmedLogic = record.ConfirmedLogic,
            ConfirmedHash = record.ConfirmedHash,
            LastConfirmedUtc = DateTimeOffset.FromUnixTimeMilliseconds(record.LastConfirmedUtcMs),
            IsActive = record.IsActive
        };
    }

    public static SurrealSnapshotMetadataRecord ToSurrealRecord(this SnapshotMetadata metadata)
    {
        return new SurrealSnapshotMetadataRecord
        {
            NodeId = metadata.NodeId,
            TimestampPhysicalTime = metadata.TimestampPhysicalTime,
            TimestampLogicalCounter = metadata.TimestampLogicalCounter,
            Hash = metadata.Hash
        };
    }

    public static SnapshotMetadata ToDomain(this SurrealSnapshotMetadataRecord record)
    {
        return new SnapshotMetadata
        {
            NodeId = record.NodeId,
            TimestampPhysicalTime = record.TimestampPhysicalTime,
            TimestampLogicalCounter = record.TimestampLogicalCounter,
            Hash = record.Hash
        };
    }
}

View File

@@ -12,7 +12,7 @@
<Authors>MrDevRobot</Authors>
<Description>Persistence provider for CBDDC.</Description>
<PackageLicenseExpression>MIT</PackageLicenseExpression>
<PackageTags>p2p;database;sqlite;persistence;storage;wal</PackageTags>
<PackageTags>p2p;database;surrealdb;rocksdb;persistence;storage;wal</PackageTags>
<PackageProjectUrl>https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net</PackageProjectUrl>
<RepositoryUrl>https://github.com/CBDDC/ZB.MOM.WW.CBDDC.Net</RepositoryUrl>
<RepositoryType>git</RepositoryType>
@@ -20,21 +20,18 @@
</PropertyGroup>
<ItemGroup>
<PackageReference Include="BLite" Version="1.3.1"/>
<PackageReference Include="BLite.SourceGenerators" Version="1.3.1">
<PrivateAssets>all</PrivateAssets>
<IncludeAssets>runtime; build; native; contentfiles; analyzers; buildtransitive</IncludeAssets>
</PackageReference>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="8.0.0"/>
<PackageReference Include="Microsoft.Extensions.DependencyInjection.Abstractions" Version="9.0.4" />
<PackageReference Include="Microsoft.Extensions.Logging.Abstractions" Version="9.0.4" />
<PackageReference Include="SurrealDb.Embedded.RocksDb" Version="0.9.0" />
<PackageReference Include="SurrealDb.Net" Version="0.9.0" />
</ItemGroup>
<ItemGroup>
<None Include="README.md" Pack="true" PackagePath="\"/>
<None Include="README.md" Pack="true" PackagePath="\" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj"/>
<ProjectReference Include="..\ZB.MOM.WW.CBDDC.Core\ZB.MOM.WW.CBDDC.Core.csproj" />
</ItemGroup>
</Project>