Replace BLite with Surreal embedded persistence
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m21s

This commit is contained in:
Joseph Doherty
2026-02-22 05:21:53 -05:00
parent 7ebc2cb567
commit 9c2a77dc3c
56 changed files with 6613 additions and 3177 deletions

View File

@@ -1,47 +1,54 @@
using System.Text.Json;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations.
/// </summary>
public class BLiteStoreExportImportTests : IDisposable
{
/// <summary>
/// Tests for Surreal persistence stores: Export, Import, Merge, Drop operations.
/// </summary>
public class SurrealStoreExportImportTests : IDisposable
{
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLiteSnapshotMetadataStore<SampleDbContext> _snapshotMetadataStore;
private readonly SurrealOplogStore _oplogStore;
private readonly SurrealPeerConfigurationStore _peerConfigStore;
private readonly SurrealSnapshotMetadataStore _snapshotMetadataStore;
private readonly string _testDbPath;
/// <summary>
/// Initializes a new instance of the <see cref="BLiteStoreExportImportTests" /> class.
/// </summary>
public BLiteStoreExportImportTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.blite");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
NullLogger<SampleDocumentStore>.Instance);
_snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
_oplogStore = new BLiteOplogStore<SampleDbContext>(
_context, _documentStore, new LastWriteWinsConflictResolver(),
vectorClock,
_snapshotMetadataStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
/// <summary>
/// Initializes a new instance of the <see cref="SurrealStoreExportImportTests" /> class.
/// </summary>
public SurrealStoreExportImportTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
logger: NullLogger<SampleDocumentStore>.Instance);
_snapshotMetadataStore = new SurrealSnapshotMetadataStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
_oplogStore = new SurrealOplogStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
_documentStore,
new LastWriteWinsConflictResolver(),
vectorClock,
_snapshotMetadataStore,
NullLogger<SurrealOplogStore>.Instance);
_peerConfigStore = new SurrealPeerConfigurationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
}
/// <summary>
@@ -52,13 +59,13 @@ public class BLiteStoreExportImportTests : IDisposable
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
}
catch
{
if (Directory.Exists(_testDbPath))
try
{
Directory.Delete(_testDbPath, true);
}
catch
{
}
}
@@ -506,4 +513,4 @@ public class BLiteStoreExportImportTests : IDisposable
}
#endregion
}
}

View File

@@ -1,14 +1,14 @@
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class PeerOplogConfirmationStoreTests : IDisposable
{
private readonly SampleDbContext _context;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _store;
private readonly SurrealPeerOplogConfirmationStore _store;
private readonly string _testDbPath;
/// <summary>
@@ -16,21 +16,22 @@ public class PeerOplogConfirmationStoreTests : IDisposable
/// </summary>
public PeerOplogConfirmationStoreTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.blite");
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_testDbPath);
_store = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
_store = new SurrealPeerOplogConfirmationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
}
/// <inheritdoc />
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_testDbPath))
if (Directory.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
Directory.Delete(_testDbPath, true);
}
catch
{
@@ -109,4 +110,4 @@ public class PeerOplogConfirmationStoreTests : IDisposable
peerARows.ShouldNotBeEmpty();
peerARows.All(x => !x.IsActive).ShouldBeTrue();
}
}
}

View File

@@ -10,7 +10,7 @@ public class SampleDbContextTests : IDisposable
/// </summary>
public SampleDbContextTests()
{
_dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.db");
_dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_dbPath);
}
@@ -20,10 +20,10 @@ public class SampleDbContextTests : IDisposable
public void Dispose()
{
_context?.Dispose();
if (File.Exists(_dbPath))
if (Directory.Exists(_dbPath))
try
{
File.Delete(_dbPath);
Directory.Delete(_dbPath, true);
}
catch
{
@@ -38,9 +38,9 @@ public class SampleDbContextTests : IDisposable
{
// Verifica che le collezioni siano state inizializzate
_context.ShouldNotBeNull();
_context.Users.ShouldNotBeNull("Users collection should be initialized by BLite");
_context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by BLite");
_context.OplogEntries.ShouldNotBeNull("OplogEntries collection should be initialized by BLite");
_context.Users.ShouldNotBeNull("Users collection should be initialized by Surreal context");
_context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by Surreal context");
_context.OplogEntries.ShouldNotBeNull("OplogEntries view should be initialized by Surreal context");
}
/// <summary>
@@ -220,4 +220,4 @@ public class SampleDbContextTests : IDisposable
adults.Select(u => u.Name).ShouldContain("Adult");
adults.Select(u => u.Name).ShouldContain("Senior");
}
}
}

View File

@@ -2,21 +2,21 @@ using System.Text.Json;
using System.Text.Json.Nodes;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class SnapshotStoreTests : IDisposable
{
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _peerConfirmationStore;
private readonly IPeerNodeConfigurationProvider _configProvider;
private readonly SampleDbContext _context;
private readonly SampleDocumentStore _documentStore;
private readonly SurrealOplogStore _oplogStore;
private readonly SurrealPeerConfigurationStore _peerConfigStore;
private readonly SurrealPeerOplogConfirmationStore _peerConfirmationStore;
private readonly SnapshotStore _snapshotStore;
private readonly string _testDbPath;
@@ -25,29 +25,33 @@ public class SnapshotStoreTests : IDisposable
/// </summary>
public SnapshotStoreTests()
{
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.blite");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
NullLogger<SampleDocumentStore>.Instance);
var snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
_context,
NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
_oplogStore = new BLiteOplogStore<SampleDbContext>(
_context,
_documentStore,
new LastWriteWinsConflictResolver(),
vectorClock,
snapshotMetadataStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
_peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
_peerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
_context,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
_testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.rocksdb");
_context = new SampleDbContext(_testDbPath);
_configProvider = CreateConfigProvider("test-node");
var vectorClock = new VectorClockService();
_documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock,
logger: NullLogger<SampleDocumentStore>.Instance);
var snapshotMetadataStore = new SurrealSnapshotMetadataStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
_oplogStore = new SurrealOplogStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
_documentStore,
new LastWriteWinsConflictResolver(),
vectorClock,
snapshotMetadataStore,
NullLogger<SurrealOplogStore>.Instance);
_peerConfigStore = new SurrealPeerConfigurationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
_peerConfirmationStore = new SurrealPeerOplogConfirmationStore(
_context.SurrealEmbeddedClient,
_context.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
_snapshotStore = new SnapshotStore(
_documentStore,
@@ -66,13 +70,13 @@ public class SnapshotStoreTests : IDisposable
_documentStore?.Dispose();
_context?.Dispose();
if (File.Exists(_testDbPath))
try
{
File.Delete(_testDbPath);
}
catch
{
if (Directory.Exists(_testDbPath))
try
{
Directory.Delete(_testDbPath, true);
}
catch
{
}
}
@@ -170,26 +174,34 @@ public class SnapshotStoreTests : IDisposable
snapshotStream.Position = 0;
// Create a new context/stores (simulating a different node)
string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite");
string newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.rocksdb");
try
{
using var newContext = new SampleDbContext(newDbPath);
var newConfigProvider = CreateConfigProvider("test-new-node");
var newVectorClock = new VectorClockService();
var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock,
NullLogger<SampleDocumentStore>.Instance);
var newSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
newContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var newOplogStore = new BLiteOplogStore<SampleDbContext>(
newContext, newDocStore, new LastWriteWinsConflictResolver(),
newVectorClock,
newSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var newPeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
newContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
newContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
var newConfigProvider = CreateConfigProvider("test-new-node");
var newVectorClock = new VectorClockService();
var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock,
logger: NullLogger<SampleDocumentStore>.Instance);
var newSnapshotMetaStore = new SurrealSnapshotMetadataStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
var newOplogStore = new SurrealOplogStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
newDocStore,
new LastWriteWinsConflictResolver(),
newVectorClock,
newSnapshotMetaStore,
NullLogger<SurrealOplogStore>.Instance);
var newPeerStore = new SurrealPeerConfigurationStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
var newPeerConfirmationStore = new SurrealPeerOplogConfirmationStore(
newContext.SurrealEmbeddedClient,
newContext.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
var newSnapshotStore = new SnapshotStore(
newDocStore,
@@ -218,14 +230,14 @@ public class SnapshotStoreTests : IDisposable
}
finally
{
if (File.Exists(newDbPath))
try
{
File.Delete(newDbPath);
}
catch
{
}
if (Directory.Exists(newDbPath))
try
{
Directory.Delete(newDbPath, true);
}
catch
{
}
}
}
@@ -250,7 +262,7 @@ public class SnapshotStoreTests : IDisposable
await _context.SaveChangesAsync();
// Create snapshot with different data
string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite");
string sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.rocksdb");
MemoryStream snapshotStream;
try
@@ -259,22 +271,30 @@ public class SnapshotStoreTests : IDisposable
await sourceContext.Users.InsertAsync(new User { Id = "new-user", Name = "New User", Age = 25 });
await sourceContext.SaveChangesAsync();
var sourceConfigProvider = CreateConfigProvider("test-source-node");
var sourceVectorClock = new VectorClockService();
var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock,
NullLogger<SampleDocumentStore>.Instance);
var sourceSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
sourceContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
var sourceOplogStore = new BLiteOplogStore<SampleDbContext>(
sourceContext, sourceDocStore, new LastWriteWinsConflictResolver(),
sourceVectorClock,
sourceSnapshotMetaStore,
NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
var sourcePeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
sourceContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
sourceContext,
NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
var sourceConfigProvider = CreateConfigProvider("test-source-node");
var sourceVectorClock = new VectorClockService();
var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock,
logger: NullLogger<SampleDocumentStore>.Instance);
var sourceSnapshotMetaStore = new SurrealSnapshotMetadataStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
NullLogger<SurrealSnapshotMetadataStore>.Instance);
var sourceOplogStore = new SurrealOplogStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
sourceDocStore,
new LastWriteWinsConflictResolver(),
sourceVectorClock,
sourceSnapshotMetaStore,
NullLogger<SurrealOplogStore>.Instance);
var sourcePeerStore = new SurrealPeerConfigurationStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
NullLogger<SurrealPeerConfigurationStore>.Instance);
var sourcePeerConfirmationStore = new SurrealPeerOplogConfirmationStore(
sourceContext.SurrealEmbeddedClient,
sourceContext.SchemaInitializer,
NullLogger<SurrealPeerOplogConfirmationStore>.Instance);
await sourcePeerConfirmationStore.UpdateConfirmationAsync(
"peer-merge",
"source-a",
@@ -300,13 +320,13 @@ public class SnapshotStoreTests : IDisposable
}
finally
{
if (File.Exists(sourceDbPath))
try
{
File.Delete(sourceDbPath);
}
catch
{
if (Directory.Exists(sourceDbPath))
try
{
Directory.Delete(sourceDbPath, true);
}
catch
{
}
}
@@ -447,4 +467,4 @@ public class SnapshotStoreTests : IDisposable
});
return configProvider;
}
}
}

View File

@@ -0,0 +1,580 @@
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Durability tests for the Surreal CDC (change-data-capture) pipeline:
/// checkpoint persistence across process restarts, catch-up resume from a
/// persisted checkpoint, idempotent application of duplicate remote batches,
/// and tombstone metadata after local deletes.
/// Placed in the "SurrealCdcDurability" collection (parallelization disabled —
/// see <see cref="SurrealCdcDurabilityCollection"/>) because each test opens an
/// on-disk RocksDB-backed database and reopens the same path across harnesses.
/// </summary>
[Collection("SurrealCdcDurability")]
public class SurrealCdcDurabilityTests
{
/// <summary>
/// Verifies that the CDC checkpoint tracks the latest local oplog entry for the
/// default consumer, that a second consumer keeps an independent checkpoint, and
/// that both checkpoints survive a harness restart on the same database path.
/// </summary>
[Fact]
public async Task CheckpointPersistence_ShouldTrackLatestLocalChange_AndPersistPerConsumer()
{
string dbPath = CreateTemporaryDatabasePath();
const string nodeId = "node-checkpoint";
const string defaultConsumer = "consumer-default";
const string secondaryConsumer = "consumer-secondary";
try
{
// Captured inside the first harness scope; asserted after restart.
HlcTimestamp expectedTimestamp = default;
string expectedHash = "";
DateTimeOffset previousUpdatedUtc = DateTimeOffset.MinValue;
await using (var harness = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, defaultConsumer))
{
var user = CreateUser("checkpoint-user", "Alice", 30, "Austin");
await harness.Context.Users.InsertAsync(user);
await harness.Context.SaveChangesAsync();
await harness.PollAsync();
user.Age = 31;
user.Address = new Address { City = "Dallas" };
await harness.Context.Users.UpdateAsync(user);
await harness.Context.SaveChangesAsync();
await harness.PollAsync();
// CDC capture is asynchronous relative to SaveChanges; wait until both
// the insert and the update have produced oplog entries.
await WaitForConditionAsync(
async () => (await harness.GetEntriesByKeyAsync("Users", "checkpoint-user")).Count >= 2,
"Timed out waiting for checkpoint-user oplog entries.");
var entries = await harness.GetEntriesByKeyAsync("Users", "checkpoint-user");
entries.Count.ShouldBe(2);
// Entries are ordered by HLC timestamp; the last one is the update.
expectedTimestamp = entries[^1].Timestamp;
expectedHash = entries[^1].Hash;
var checkpoint = await harness.CheckpointPersistence.GetCheckpointAsync();
checkpoint.ShouldNotBeNull();
checkpoint!.Timestamp.ShouldBe(expectedTimestamp);
checkpoint.LastHash.ShouldBe(expectedHash);
previousUpdatedUtc = checkpoint.UpdatedUtc;
// Pin a second consumer to the FIRST entry to prove per-consumer isolation.
await harness.CheckpointPersistence.UpsertCheckpointAsync(
entries[0].Timestamp,
entries[0].Hash,
secondaryConsumer);
var secondary = await harness.CheckpointPersistence.GetCheckpointAsync(secondaryConsumer);
secondary.ShouldNotBeNull();
secondary!.Timestamp.ShouldBe(entries[0].Timestamp);
secondary.LastHash.ShouldBe(entries[0].Hash);
}
// Reopen the same database path: both checkpoints must be restored intact.
await using (var restarted = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, defaultConsumer))
{
var restoredDefault = await restarted.CheckpointPersistence.GetCheckpointAsync();
restoredDefault.ShouldNotBeNull();
restoredDefault!.Timestamp.ShouldBe(expectedTimestamp);
restoredDefault.LastHash.ShouldBe(expectedHash);
restoredDefault.UpdatedUtc.ShouldBe(previousUpdatedUtc);
var restoredSecondary = await restarted.CheckpointPersistence.GetCheckpointAsync(secondaryConsumer);
restoredSecondary.ShouldNotBeNull();
restoredSecondary!.LastHash.ShouldNotBe(restoredDefault.LastHash);
}
}
finally
{
await DeleteDirectoryWithRetriesAsync(dbPath);
}
}
/// <summary>
/// Verifies that after a restart, catch-up starts from the persisted checkpoint
/// (only entries strictly after it are returned), and that advancing the
/// checkpoint past the last entry leaves nothing to catch up on a third open.
/// </summary>
[Fact]
public async Task RestartRecovery_ShouldResumeCatchUpFromPersistedCheckpoint_InRocksDb()
{
string dbPath = CreateTemporaryDatabasePath();
const string nodeId = "node-resume";
const string consumerId = "consumer-resume";
HlcTimestamp resumeTimestamp = default;
string resumeHash = "";
string expectedFinalHash = "";
try
{
// First session: produce two entries, then rewind the checkpoint to the first.
await using (var initial = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId))
{
await initial.Context.Users.InsertAsync(CreateUser("resume-1", "User One", 18, "Rome"));
await initial.Context.SaveChangesAsync();
await initial.PollAsync();
await initial.Context.Users.InsertAsync(CreateUser("resume-2", "User Two", 19, "Milan"));
await initial.Context.SaveChangesAsync();
await initial.PollAsync();
await WaitForConditionAsync(
async () => (await initial.GetEntriesByCollectionAsync("Users")).Count >= 2,
"Timed out waiting for resume oplog entries.");
var entries = await initial.GetEntriesByCollectionAsync("Users");
entries.Count.ShouldBe(2);
resumeTimestamp = entries[0].Timestamp;
resumeHash = entries[0].Hash;
expectedFinalHash = entries[1].Hash;
// Simulate a consumer that only processed the first entry before shutdown.
await initial.CheckpointPersistence.UpsertCheckpointAsync(resumeTimestamp, resumeHash);
}
// Second session: catch-up after the checkpoint must yield exactly the second entry.
await using (var restarted = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId))
{
var checkpoint = await restarted.CheckpointPersistence.GetCheckpointAsync();
checkpoint.ShouldNotBeNull();
checkpoint!.Timestamp.ShouldBe(resumeTimestamp);
checkpoint.LastHash.ShouldBe(resumeHash);
var catchUp = (await restarted.OplogStore.GetOplogAfterAsync(checkpoint.Timestamp))
.OrderBy(e => e.Timestamp.PhysicalTime)
.ThenBy(e => e.Timestamp.LogicalCounter)
.ToList();
catchUp.Count.ShouldBe(1);
catchUp[0].Hash.ShouldBe(expectedFinalHash);
await restarted.CheckpointPersistence.AdvanceCheckpointAsync(catchUp[0]);
}
// Third session: checkpoint is at the tail, so no entries remain after it.
await using (var recovered = await CdcTestHarness.OpenWithRetriesAsync(dbPath, nodeId, consumerId))
{
var finalCheckpoint = await recovered.CheckpointPersistence.GetCheckpointAsync();
finalCheckpoint.ShouldNotBeNull();
finalCheckpoint!.LastHash.ShouldBe(expectedFinalHash);
var remaining = await recovered.OplogStore.GetOplogAfterAsync(finalCheckpoint.Timestamp);
remaining.ShouldBeEmpty();
}
}
finally
{
await DeleteDirectoryWithRetriesAsync(dbPath);
}
}
/// <summary>
/// Verifies that applying the same remote-delete batch multiple times (and with
/// the entry duplicated inside the batch) is idempotent: the document is deleted
/// once, the remote entry is stored exactly once, and no loopback delete entry
/// is generated under the local node id.
/// </summary>
[Fact]
public async Task RemoteApply_ShouldBeIdempotentAcrossDuplicateWindow_WithoutLoopbackEntries()
{
string dbPath = CreateTemporaryDatabasePath();
const string localNodeId = "node-local";
const string remoteNodeId = "node-remote";
try
{
await using var harness = await CdcTestHarness.OpenWithRetriesAsync(
dbPath,
localNodeId,
"consumer-loopback");
await harness.Context.Users.InsertAsync(CreateUser("loopback-user", "Loopback", 40, "Boston"));
await harness.Context.SaveChangesAsync();
await harness.PollAsync();
await WaitForConditionAsync(
async () => (await harness.GetEntriesByKeyAsync("Users", "loopback-user")).Count >= 1,
"Timed out waiting for loopback-user insert oplog entry.");
var localEntries = await harness.GetEntriesByKeyAsync("Users", "loopback-user");
localEntries.Count.ShouldBe(1);
localEntries[0].Operation.ShouldBe(OperationType.Put);
localEntries[0].Timestamp.NodeId.ShouldBe(localNodeId);
// Craft a remote delete strictly AFTER the local insert (physical time +10)
// so last-write-wins resolution applies it; previous-hash chains to the insert.
var remoteDelete = new OplogEntry(
"Users",
"loopback-user",
OperationType.Delete,
null,
new HlcTimestamp(localEntries[0].Timestamp.PhysicalTime + 10, 0, remoteNodeId),
localEntries[0].Hash);
// Duplicate inside the batch AND apply the batch twice — four deliveries total.
var duplicateWindow = new[] { remoteDelete, remoteDelete };
await harness.OplogStore.ApplyBatchAsync(duplicateWindow);
await harness.OplogStore.ApplyBatchAsync(duplicateWindow);
harness.Context.Users.FindById("loopback-user").ShouldBeNull();
var allEntries = await harness.GetEntriesByKeyAsync("Users", "loopback-user");
// Exactly one stored copy of the remote delete, and no local-node delete
// (i.e. the remote apply did not loop back through local CDC capture).
allEntries.Count(e => e.Hash == remoteDelete.Hash).ShouldBe(1);
allEntries.Count(e => e.Operation == OperationType.Delete && e.Timestamp.NodeId == localNodeId)
.ShouldBe(0);
allEntries.Count(e => e.Operation == OperationType.Delete && e.Timestamp.NodeId == remoteNodeId)
.ShouldBe(1);
}
finally
{
await DeleteDirectoryWithRetriesAsync(dbPath);
}
}
/// <summary>
/// Verifies that a local delete produces a tombstone in the document metadata
/// store (IsDeleted, UpdatedAt matching the delete entry's timestamp) and that
/// the CDC checkpoint advances to the delete entry.
/// </summary>
[Fact]
public async Task LocalDelete_ShouldPersistTombstoneMetadata_AndAdvanceCheckpoint()
{
string dbPath = CreateTemporaryDatabasePath();
const string nodeId = "node-tombstone";
try
{
await using var harness = await CdcTestHarness.OpenWithRetriesAsync(
dbPath,
nodeId,
"consumer-tombstone");
await harness.Context.Users.InsertAsync(CreateUser("tombstone-user", "Before Delete", 28, "Turin"));
await harness.Context.SaveChangesAsync();
await harness.PollAsync();
await harness.Context.Users.DeleteAsync("tombstone-user");
await harness.Context.SaveChangesAsync();
await harness.PollAsync();
harness.Context.Users.FindById("tombstone-user").ShouldBeNull();
// Expect two entries: the insert and the delete.
await WaitForConditionAsync(
async () => (await harness.GetEntriesByKeyAsync("Users", "tombstone-user")).Count >= 2,
"Timed out waiting for tombstone-user oplog entries.");
var entries = await harness.GetEntriesByKeyAsync("Users", "tombstone-user");
entries.Count.ShouldBe(2);
var deleteEntry = entries.Last(e => e.Operation == OperationType.Delete);
var metadata = await harness.MetadataStore.GetMetadataAsync("Users", "tombstone-user");
metadata.ShouldNotBeNull();
metadata!.IsDeleted.ShouldBeTrue();
metadata.UpdatedAt.ShouldBe(deleteEntry.Timestamp);
var checkpoint = await harness.CheckpointPersistence.GetCheckpointAsync();
checkpoint.ShouldNotBeNull();
checkpoint!.LastHash.ShouldBe(deleteEntry.Hash);
checkpoint.Timestamp.ShouldBe(deleteEntry.Timestamp);
}
finally
{
await DeleteDirectoryWithRetriesAsync(dbPath);
}
}
/// <summary>Builds a <see cref="User"/> test fixture with a nested address.</summary>
private static User CreateUser(string id, string name, int age, string city)
{
return new User
{
Id = id,
Name = name,
Age = age,
Address = new Address { City = city }
};
}
/// <summary>
/// Returns a unique temp-directory path with a ".rocksdb" suffix; the embedded
/// engine creates a directory (not a single file) at this path.
/// </summary>
private static string CreateTemporaryDatabasePath()
{
return Path.Combine(Path.GetTempPath(), $"cbddc-cdc-{Guid.NewGuid():N}.rocksdb");
}
/// <summary>
/// Deletes the database directory, retrying up to 5 times (50 ms apart) because
/// RocksDB may still hold file handles briefly after dispose. The last attempt
/// is allowed to throw so genuine failures are not silently ignored forever.
/// </summary>
private static async Task DeleteDirectoryWithRetriesAsync(string path)
{
for (var attempt = 0; attempt < 5; attempt++)
try
{
if (Directory.Exists(path)) Directory.Delete(path, true);
return;
}
catch when (attempt < 4)
{
await Task.Delay(50);
}
}
/// <summary>
/// Polls <paramref name="predicate"/> every <paramref name="pollMs"/> ms until it
/// returns true or <paramref name="timeoutMs"/> elapses.
/// </summary>
/// <exception cref="TimeoutException">Thrown with <paramref name="failureMessage"/> on timeout.</exception>
private static async Task WaitForConditionAsync(
Func<Task<bool>> predicate,
string failureMessage,
int timeoutMs = 6000,
int pollMs = 50)
{
DateTimeOffset deadline = DateTimeOffset.UtcNow.AddMilliseconds(timeoutMs);
while (DateTimeOffset.UtcNow < deadline)
{
if (await predicate()) return;
await Task.Delay(pollMs);
}
throw new TimeoutException(failureMessage);
}
}
[CollectionDefinition("SurrealCdcDurability", DisableParallelization = true)]
public sealed class SurrealCdcDurabilityCollection;
/// <summary>
/// Test harness that wires up a full Surreal CDC stack (context, document store,
/// oplog store, metadata store, checkpoint persistence) against a single embedded
/// database path, with retry logic around RocksDB lock contention when the same
/// path is reopened across simulated restarts.
/// </summary>
internal sealed class CdcTestHarness : IAsyncDisposable
{
private readonly VectorClockService _vectorClock;
private readonly CBDDCSurrealEmbeddedOptions _options;
/// <summary>
/// Opens the database at <paramref name="databasePath"/> and constructs all stores.
/// CDC is enabled with the given <paramref name="consumerId"/> and a fixed
/// checkpoint table name; peer configuration is mocked via NSubstitute.
/// May throw if the RocksDB lock is still held — use
/// <see cref="OpenWithRetriesAsync"/> instead of calling this directly.
/// </summary>
private CdcTestHarness(string databasePath, string nodeId, string consumerId)
{
_options = new CBDDCSurrealEmbeddedOptions
{
Cdc = new CBDDCSurrealCdcOptions
{
Enabled = true,
ConsumerId = consumerId,
CheckpointTable = "cbddc_cdc_checkpoint"
}
};
Context = new SampleDbContext(databasePath);
_vectorClock = new VectorClockService();
var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
{
NodeId = nodeId,
AuthToken = "test-token",
TcpPort = 0
});
CheckpointPersistence = new SurrealCdcCheckpointPersistence(
Context.SurrealEmbeddedClient,
Context.SchemaInitializer,
_options);
DocumentStore = new CheckpointedSampleDocumentStore(
Context,
configProvider,
_vectorClock,
CheckpointPersistence,
_options,
NullLogger<CheckpointedSampleDocumentStore>.Instance);
// Oplog store shares the document store; no snapshot metadata store (null).
OplogStore = new SurrealOplogStore(
Context.SurrealEmbeddedClient,
Context.SchemaInitializer,
DocumentStore,
new LastWriteWinsConflictResolver(),
_vectorClock,
null,
NullLogger<SurrealOplogStore>.Instance);
MetadataStore = new SurrealDocumentMetadataStore(
Context.SurrealEmbeddedClient,
Context.SchemaInitializer,
NullLogger<SurrealDocumentMetadataStore>.Instance);
}
// Sample database context bound to the harness's database path.
public SampleDbContext Context { get; }
// Document store under test, extended with manual CDC polling.
public CheckpointedSampleDocumentStore DocumentStore { get; }
// Oplog store used to export entries and apply remote batches.
public SurrealOplogStore OplogStore { get; }
// Document metadata store used to inspect tombstones.
public SurrealDocumentMetadataStore MetadataStore { get; }
// Per-consumer CDC checkpoint persistence under test.
public ISurrealCdcCheckpointPersistence CheckpointPersistence { get; }
/// <summary>Runs one CDC polling pass so pending local changes are captured.</summary>
public async Task PollAsync()
{
await DocumentStore.PollCdcOnceAsync();
}
/// <summary>
/// Constructs a harness, retrying up to 8 times (75 ms apart) while the previous
/// process/harness still holds the RocksDB file lock on the same path.
/// </summary>
/// <exception cref="InvalidOperationException">Thrown when all attempts fail.</exception>
public static async Task<CdcTestHarness> OpenWithRetriesAsync(
string databasePath,
string nodeId,
string consumerId)
{
for (var attempt = 0; attempt < 8; attempt++)
try
{
return new CdcTestHarness(databasePath, nodeId, consumerId);
}
catch (Exception ex) when (IsLockContention(ex) && attempt < 7)
{
await Task.Delay(75);
}
throw new InvalidOperationException("Unable to acquire RocksDB lock for test harness.");
}
/// <summary>
/// Exports all oplog entries for <paramref name="collection"/>, ordered by HLC
/// timestamp (physical time, then logical counter).
/// </summary>
public async Task<List<OplogEntry>> GetEntriesByCollectionAsync(string collection)
{
return (await OplogStore.ExportAsync())
.Where(e => string.Equals(e.Collection, collection, StringComparison.Ordinal))
.OrderBy(e => e.Timestamp.PhysicalTime)
.ThenBy(e => e.Timestamp.LogicalCounter)
.ToList();
}
/// <summary>
/// Exports all oplog entries for a single document key, ordered by HLC timestamp.
/// </summary>
public async Task<List<OplogEntry>> GetEntriesByKeyAsync(string collection, string key)
{
return (await OplogStore.ExportAsync())
.Where(e => string.Equals(e.Collection, collection, StringComparison.Ordinal) &&
string.Equals(e.Key, key, StringComparison.Ordinal))
.OrderBy(e => e.Timestamp.PhysicalTime)
.ThenBy(e => e.Timestamp.LogicalCounter)
.ToList();
}
/// <inheritdoc />
public async ValueTask DisposeAsync()
{
DocumentStore.Dispose();
Context.Dispose();
// Brief grace period so RocksDB releases its file lock before the same
// path is reopened by the next harness.
await Task.Delay(75);
}
/// <summary>
/// Heuristic lock-contention check: matches the "No locks available" text that
/// RocksDB surfaces (possibly as an inner exception) when the DB lock is held.
/// </summary>
private static bool IsLockContention(Exception exception)
{
return exception.ToString().Contains("No locks available", StringComparison.OrdinalIgnoreCase);
}
}
internal sealed class CheckpointedSampleDocumentStore : SurrealDocumentStore<SampleDbContext>
{
private const string UsersCollection = "Users";
private const string TodoListsCollection = "TodoLists";
public CheckpointedSampleDocumentStore(
SampleDbContext context,
IPeerNodeConfigurationProvider configProvider,
IVectorClockService vectorClockService,
ISurrealCdcCheckpointPersistence checkpointPersistence,
CBDDCSurrealEmbeddedOptions? surrealOptions = null,
ILogger<CheckpointedSampleDocumentStore>? logger = null)
: base(
context,
context.SurrealEmbeddedClient,
context.SchemaInitializer,
configProvider,
vectorClockService,
new LastWriteWinsConflictResolver(),
checkpointPersistence,
BuildPollingOptions(surrealOptions),
logger)
{
WatchCollection(UsersCollection, context.Users, u => u.Id, subscribeForInMemoryEvents: false);
WatchCollection(TodoListsCollection, context.TodoLists, t => t.Id, subscribeForInMemoryEvents: false);
}
protected override async Task ApplyContentToEntityAsync(
string collection,
string key,
JsonElement content,
CancellationToken cancellationToken)
{
await UpsertEntityAsync(collection, key, content, cancellationToken);
}
protected override async Task ApplyContentToEntitiesBatchAsync(
IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
CancellationToken cancellationToken)
{
foreach ((string collection, string key, var content) in documents)
await UpsertEntityAsync(collection, key, content, cancellationToken);
}
protected override async Task<JsonElement?> GetEntityAsJsonAsync(
string collection,
string key,
CancellationToken cancellationToken)
{
return collection switch
{
UsersCollection => SerializeEntity(await _context.Users.FindByIdAsync(key, cancellationToken)),
TodoListsCollection => SerializeEntity(await _context.TodoLists.FindByIdAsync(key, cancellationToken)),
_ => null
};
}
protected override async Task RemoveEntityAsync(
string collection,
string key,
CancellationToken cancellationToken)
{
await DeleteEntityAsync(collection, key, cancellationToken);
}
protected override async Task RemoveEntitiesBatchAsync(
IEnumerable<(string Collection, string Key)> documents,
CancellationToken cancellationToken)
{
foreach ((string collection, string key) in documents)
await DeleteEntityAsync(collection, key, cancellationToken);
}
/// <summary>
/// Materializes every entity in the given collection as (key, JSON) pairs;
/// unknown collections yield an empty sequence.
/// </summary>
protected override async Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
    string collection,
    CancellationToken cancellationToken)
{
    if (collection == UsersCollection)
    {
        var users = await _context.Users.FindAllAsync(cancellationToken);
        // Entities returned by FindAllAsync are non-null, so SerializeEntity always yields a value here.
        return users.Select(u => (u.Id, SerializeEntity(u)!.Value)).ToList();
    }

    if (collection == TodoListsCollection)
    {
        var todoLists = await _context.TodoLists.FindAllAsync(cancellationToken);
        return todoLists.Select(t => (t.Id, SerializeEntity(t)!.Value)).ToList();
    }

    return [];
}
/// <summary>
/// Deserializes the replicated JSON payload into the matching entity type and
/// inserts it when the key is new, or updates the existing row otherwise.
/// </summary>
/// <exception cref="InvalidOperationException">The payload could not be deserialized.</exception>
/// <exception cref="NotSupportedException">The collection is not one of the synced collections.</exception>
private async Task UpsertEntityAsync(
    string collection,
    string key,
    JsonElement content,
    CancellationToken cancellationToken)
{
    if (collection == UsersCollection)
    {
        User user = content.Deserialize<User>() ??
                    throw new InvalidOperationException("Failed to deserialize user.");
        // The key from the oplog is authoritative; never trust the Id embedded in the payload.
        user.Id = key;
        bool userExists = await _context.Users.FindByIdAsync(key, cancellationToken) != null;
        if (userExists)
            await _context.Users.UpdateAsync(user, cancellationToken);
        else
            await _context.Users.InsertAsync(user, cancellationToken);
    }
    else if (collection == TodoListsCollection)
    {
        TodoList todo = content.Deserialize<TodoList>() ??
                        throw new InvalidOperationException("Failed to deserialize todo list.");
        todo.Id = key;
        bool todoExists = await _context.TodoLists.FindByIdAsync(key, cancellationToken) != null;
        if (todoExists)
            await _context.TodoLists.UpdateAsync(todo, cancellationToken);
        else
            await _context.TodoLists.InsertAsync(todo, cancellationToken);
    }
    else
    {
        throw new NotSupportedException($"Collection '{collection}' is not supported for sync.");
    }
}
/// <summary>
/// Deletes the local entity backing a replicated document.
/// </summary>
/// <remarks>
/// NOTE(review): unlike <c>UpsertEntityAsync</c>, an unknown collection is silently
/// ignored here rather than throwing — presumably deletes for untracked collections
/// are intended to be harmless no-ops; confirm that asymmetry is deliberate.
/// </remarks>
private async Task DeleteEntityAsync(string collection, string key, CancellationToken cancellationToken)
{
    if (collection == UsersCollection)
    {
        await _context.Users.DeleteAsync(key, cancellationToken);
    }
    else if (collection == TodoListsCollection)
    {
        await _context.TodoLists.DeleteAsync(key, cancellationToken);
    }
}
/// <summary>
/// Serializes an entity to a <see cref="JsonElement"/>, or returns <c>null</c>
/// when the entity itself is <c>null</c>.
/// </summary>
private static JsonElement? SerializeEntity<T>(T? entity) where T : class
    => entity is null ? null : JsonSerializer.SerializeToElement(entity);
/// <summary>
/// Translates the embedded-client CDC settings into the polling options the base
/// store consumes; <c>null</c> input means "use the store's built-in defaults".
/// </summary>
private static SurrealCdcPollingOptions? BuildPollingOptions(CBDDCSurrealEmbeddedOptions? options)
    => options is null
        ? null
        : new SurrealCdcPollingOptions
        {
            Enabled = options.Cdc.Enabled,
            PollInterval = options.Cdc.PollingInterval,
            BatchSize = options.Cdc.BatchSize,
            EnableLiveSelectAccelerator = options.Cdc.EnableLiveSelectAccelerator,
            LiveSelectReconnectDelay = options.Cdc.LiveSelectReconnectDelay
        };
}

View File

@@ -0,0 +1,219 @@
using System.Text.Json;
using System.Reflection;
using Microsoft.Extensions.Logging.Abstractions;
using SurrealDb.Net;
using SurrealDb.Net.Models.Response;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Covers the remaining cells of the Surreal CDC behavior matrix: retention-boundary
/// classification, single local-write oplog emission, and checkpoint safety when the
/// atomic write fails.
/// </summary>
public class SurrealCdcMatrixCompletionTests
{
    /// <summary>
    /// Drives the private static retention-boundary classifier (reached via reflection on
    /// a closed <see cref="SurrealDocumentStore{T}"/>) with representative error messages.
    /// </summary>
    [Theory]
    [InlineData("versionstamp is outside the configured retention window", true)]
    [InlineData("change feed history since cursor is unavailable", true)]
    [InlineData("socket closed unexpectedly", false)]
    public void RetentionBoundaryClassifier_ShouldDetectExpectedPatterns(string message, bool expected)
    {
        // The method lives on the open generic; close it over object just to get a MethodInfo.
        var closedType = typeof(SurrealDocumentStore<>).MakeGenericType(typeof(object));
        var classifier = closedType.GetMethod(
            "IsLikelyChangefeedRetentionBoundary",
            BindingFlags.NonPublic | BindingFlags.Static);
        classifier.ShouldNotBeNull();
        bool actual = (bool)classifier!.Invoke(null, [new InvalidOperationException(message)])!;
        actual.ShouldBe(expected);
    }

    /// <summary>
    /// A single local insert, once polled through CDC, must yield exactly one Put oplog
    /// entry stamped with the writing node's id — no duplicates from re-polling.
    /// </summary>
    [Fact]
    public async Task LocalWrite_ShouldEmitExactlyOneOplogEntry()
    {
        string dbPath = Path.Combine(Path.GetTempPath(), $"cbddc-cdc-matrix-{Guid.NewGuid():N}.rocksdb");
        try
        {
            await using var harness = await CdcTestHarness.OpenWithRetriesAsync(dbPath, "node-single-write", "consumer-single");
            await harness.Context.Users.InsertAsync(new User
            {
                Id = "single-write-user",
                Name = "Single Write",
                Age = 25,
                Address = new Address { City = "Bologna" }
            });
            await harness.Context.SaveChangesAsync();
            await harness.PollAsync();
            // CDC delivery is asynchronous; wait for the entry to appear before asserting on it.
            await WaitForConditionAsync(
                async () => (await harness.GetEntriesByKeyAsync("Users", "single-write-user")).Count == 1,
                "Timed out waiting for exactly one local oplog entry.");
            var entries = await harness.GetEntriesByKeyAsync("Users", "single-write-user");
            entries.Count.ShouldBe(1);
            entries[0].Operation.ShouldBe(OperationType.Put);
            entries[0].Timestamp.NodeId.ShouldBe("node-single-write");
        }
        finally
        {
            await DeleteDirectoryWithRetriesAsync(dbPath);
        }
    }

    /// <summary>
    /// When the underlying Surreal RawQuery fails, the local-change path must surface the
    /// exception and make no calls at all to the checkpoint persistence.
    /// </summary>
    [Fact]
    public async Task Checkpoint_ShouldNotAdvance_WhenAtomicWriteFails()
    {
        // Rig the raw query to always fail so the atomic oplog write cannot succeed.
        var surrealClient = Substitute.For<ISurrealDbClient>();
        surrealClient.RawQuery(
            Arg.Any<string>(),
            Arg.Any<IReadOnlyDictionary<string, object?>>(),
            Arg.Any<CancellationToken>())
            .Returns(Task.FromException<SurrealDbResponse>(new InvalidOperationException("forced atomic write failure")));
        var embeddedClient = Substitute.For<ICBDDCSurrealEmbeddedClient>();
        embeddedClient.Client.Returns(surrealClient);
        var schemaInitializer = Substitute.For<ICBDDCSurrealSchemaInitializer>();
        schemaInitializer.EnsureInitializedAsync(Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
        var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
        configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = "node-failure",
            TcpPort = 0,
            AuthToken = "test-token"
        });
        var checkpointPersistence = Substitute.For<ISurrealCdcCheckpointPersistence>();
        var vectorClock = Substitute.For<IVectorClockService>();
        vectorClock.GetLastHash(Arg.Any<string>()).Returns("seed-hash");
        var store = new FailureInjectedDocumentStore(
            embeddedClient,
            schemaInitializer,
            configProvider,
            vectorClock,
            checkpointPersistence);
        var payload = JsonSerializer.SerializeToElement(new { Id = "failure-user", Value = "x" });
        await Should.ThrowAsync<InvalidOperationException>(
            () => store.TriggerLocalChangeAsync("Users", "failure-user", OperationType.Put, payload));
        // The checkpoint store must remain untouched — a failed write must never advance it.
        checkpointPersistence.ReceivedCalls().ShouldBeEmpty();
    }

    /// <summary>
    /// Polls <paramref name="predicate"/> every <paramref name="pollMs"/> ms until it returns
    /// true or <paramref name="timeoutMs"/> elapses, then throws with <paramref name="failureMessage"/>.
    /// </summary>
    private static async Task WaitForConditionAsync(
        Func<Task<bool>> predicate,
        string failureMessage,
        int timeoutMs = 6000,
        int pollMs = 50)
    {
        DateTimeOffset deadline = DateTimeOffset.UtcNow.AddMilliseconds(timeoutMs);
        while (DateTimeOffset.UtcNow < deadline)
        {
            if (await predicate()) return;
            await Task.Delay(pollMs);
        }
        throw new TimeoutException(failureMessage);
    }

    /// <summary>
    /// Deletes the RocksDB directory, retrying briefly — the embedded engine may still hold
    /// file handles for a moment after disposal. The final attempt is allowed to throw.
    /// </summary>
    private static async Task DeleteDirectoryWithRetriesAsync(string path)
    {
        for (var attempt = 0; attempt < 5; attempt++)
            try
            {
                if (Directory.Exists(path)) Directory.Delete(path, true);
                return;
            }
            catch when (attempt < 4)
            {
                await Task.Delay(50);
            }
    }
}
/// <summary>
/// Minimal <see cref="SurrealDocumentStore{T}"/> stub used by the checkpoint-failure test:
/// CDC polling is disabled, every entity hook is a no-op, and the protected local-change
/// path is exposed publicly so the test can invoke it directly.
/// </summary>
internal sealed class FailureInjectedDocumentStore : SurrealDocumentStore<object>
{
    public FailureInjectedDocumentStore(
        ICBDDCSurrealEmbeddedClient surrealEmbeddedClient,
        ICBDDCSurrealSchemaInitializer schemaInitializer,
        IPeerNodeConfigurationProvider configProvider,
        IVectorClockService vectorClockService,
        ISurrealCdcCheckpointPersistence checkpointPersistence)
        : base(
            new object(),
            surrealEmbeddedClient,
            schemaInitializer,
            configProvider,
            vectorClockService,
            new LastWriteWinsConflictResolver(),
            checkpointPersistence,
            new SurrealCdcPollingOptions { Enabled = false },
            NullLogger<FailureInjectedDocumentStore>.Instance)
    {
    }

    /// <summary>
    /// Public shim over the protected <c>OnLocalChangeDetectedAsync</c> hook, with no
    /// pending cursor checkpoint.
    /// </summary>
    public Task TriggerLocalChangeAsync(
        string collection,
        string key,
        OperationType operationType,
        JsonElement? content,
        CancellationToken cancellationToken = default)
        => OnLocalChangeDetectedAsync(
            collection,
            key,
            operationType,
            content,
            pendingCursorCheckpoint: null,
            cancellationToken);

    // All entity hooks below are inert: the test only cares about oplog/checkpoint behavior.

    protected override Task ApplyContentToEntityAsync(
        string collection,
        string key,
        JsonElement content,
        CancellationToken cancellationToken)
        => Task.CompletedTask;

    protected override Task ApplyContentToEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key, JsonElement Content)> documents,
        CancellationToken cancellationToken)
        => Task.CompletedTask;

    protected override Task<JsonElement?> GetEntityAsJsonAsync(
        string collection,
        string key,
        CancellationToken cancellationToken)
        => Task.FromResult<JsonElement?>(null);

    protected override Task RemoveEntityAsync(string collection, string key, CancellationToken cancellationToken)
        => Task.CompletedTask;

    protected override Task RemoveEntitiesBatchAsync(
        IEnumerable<(string Collection, string Key)> documents,
        CancellationToken cancellationToken)
        => Task.CompletedTask;

    protected override Task<IEnumerable<(string Key, JsonElement Content)>> GetAllEntitiesAsJsonAsync(
        string collection,
        CancellationToken cancellationToken)
        => Task.FromResult<IEnumerable<(string Key, JsonElement Content)>>([]);
}

View File

@@ -0,0 +1,434 @@
using System.Text.Json;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Contract tests for <see cref="SurrealOplogStore"/>: append, hash-chain range queries,
/// timestamp-ordered reads, idempotent merge, persisted last-hash lookup, and drop.
/// </summary>
public class SurrealOplogStoreContractTests
{
    [Fact]
    public async Task OplogStore_AppendQueryMergeDrop_AndLastHash_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateOplogStore();
        // entry1 -> entry2 -> entry3 form node-a's hash chain; otherNode starts node-b's own chain.
        var entry1 = CreateOplogEntry("Users", "u1", "node-a", 100, 0, "");
        var entry2 = CreateOplogEntry("Users", "u2", "node-a", 110, 0, entry1.Hash);
        var entry3 = CreateOplogEntry("Users", "u3", "node-a", 120, 1, entry2.Hash);
        var otherNode = CreateOplogEntry("Users", "u4", "node-b", 115, 0, "");
        await store.AppendOplogEntryAsync(entry1);
        await store.AppendOplogEntryAsync(entry2);
        await store.AppendOplogEntryAsync(entry3);
        await store.AppendOplogEntryAsync(otherNode);
        // Chain range is exclusive of the "from" hash, inclusive of the "to" hash.
        var chainRange = (await store.GetChainRangeAsync(entry1.Hash, entry3.Hash)).ToList();
        chainRange.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, entry3.Hash });
        // "After" is exclusive of the cursor timestamp and ordered by HLC across all nodes.
        var after = (await store.GetOplogAfterAsync(new HlcTimestamp(100, 0, "node-a"))).ToList();
        after.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, otherNode.Hash, entry3.Hash });
        // Merging a duplicate (entry2) plus a new entry must not create duplicates.
        var mergedEntry = CreateOplogEntry("Users", "u5", "node-a", 130, 0, entry3.Hash);
        await store.MergeAsync(new[] { entry2, mergedEntry });
        var exported = (await store.ExportAsync()).ToList();
        exported.Count.ShouldBe(5);
        exported.Count(x => x.Hash == entry2.Hash).ShouldBe(1);
        // The in-memory last-hash cache still reflects the last *appended* entry (entry3)...
        var cachedLastNodeAHash = await store.GetLastEntryHashAsync("node-a");
        cachedLastNodeAHash.ShouldBe(entry3.Hash);
        // ...while a freshly-constructed store reads the persisted chain tip after the merge.
        var rehydratedStore = harness.CreateOplogStore();
        var persistedLastNodeAHash = await rehydratedStore.GetLastEntryHashAsync("node-a");
        persistedLastNodeAHash.ShouldBe(mergedEntry.Hash);
        await store.DropAsync();
        (await store.ExportAsync()).ShouldBeEmpty();
    }

    /// <summary>
    /// Builds a Put oplog entry whose payload is just the key, with an explicit HLC
    /// timestamp and predecessor hash so chains can be constructed by hand.
    /// </summary>
    private static OplogEntry CreateOplogEntry(
        string collection,
        string key,
        string nodeId,
        long wall,
        int logic,
        string previousHash)
    {
        return new OplogEntry(
            collection,
            key,
            OperationType.Put,
            JsonSerializer.SerializeToElement(new { key }),
            new HlcTimestamp(wall, logic, nodeId),
            previousHash);
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealDocumentMetadataStore"/>: upsert, tombstoning,
/// HLC-ordered "after" queries, and newest-wins merge semantics.
/// </summary>
public class SurrealDocumentMetadataStoreContractTests
{
    [Fact]
    public async Task DocumentMetadataStore_UpsertMarkDeletedGetAfterAndMergeNewer_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateDocumentMetadataStore();
        await store.UpsertMetadataAsync(new DocumentMetadata("Users", "doc-1", new HlcTimestamp(100, 0, "node-a")));
        await store.UpsertMetadataAsync(new DocumentMetadata("Users", "doc-2", new HlcTimestamp(105, 0, "node-a")));
        // Tombstoning keeps the row but flags it deleted and advances its HLC.
        await store.MarkDeletedAsync("Users", "doc-1", new HlcTimestamp(110, 1, "node-a"));
        var doc1 = await store.GetMetadataAsync("Users", "doc-1");
        doc1.ShouldNotBeNull();
        doc1.IsDeleted.ShouldBeTrue();
        doc1.UpdatedAt.ShouldBe(new HlcTimestamp(110, 1, "node-a"));
        // "After" returns rows in HLC order: doc-2 (105,0) precedes the re-stamped doc-1 (110,1).
        var after = (await store.GetMetadataAfterAsync(new HlcTimestamp(100, 0, "node-a"), new[] { "Users" })).ToList();
        after.Select(x => x.Key).ToList().ShouldBe(new[] { "doc-2", "doc-1" });
        // Merge is newest-wins per key: (109,0) loses to the stored (110,1); (120,0) wins;
        // unknown keys (doc-3) are inserted.
        await store.MergeAsync(new[]
        {
            new DocumentMetadata("Users", "doc-1", new HlcTimestamp(109, 0, "node-a"), true),
            new DocumentMetadata("Users", "doc-1", new HlcTimestamp(120, 0, "node-a"), false),
            new DocumentMetadata("Users", "doc-3", new HlcTimestamp(130, 0, "node-b"), false)
        });
        var mergedDoc1 = await store.GetMetadataAsync("Users", "doc-1");
        mergedDoc1.ShouldNotBeNull();
        mergedDoc1.UpdatedAt.ShouldBe(new HlcTimestamp(120, 0, "node-a"));
        mergedDoc1.IsDeleted.ShouldBeFalse();
        var exported = (await store.ExportAsync()).ToList();
        exported.Count.ShouldBe(3);
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealPeerConfigurationStore"/>: save/get/remove of
/// remote peers and merge semantics where locally-saved peers take precedence.
/// </summary>
public class SurrealPeerConfigurationStoreContractTests
{
    [Fact]
    public async Task PeerConfigurationStore_SaveGetRemoveAndMerge_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerConfigurationStore();
        await store.SaveRemotePeerAsync(CreatePeer("peer-1", "10.0.0.1:5000", true));
        var peer1 = await store.GetRemotePeerAsync("peer-1", CancellationToken.None);
        peer1.ShouldNotBeNull();
        peer1.Address.ShouldBe("10.0.0.1:5000");
        // Re-saving the same node id overwrites the prior configuration.
        await store.SaveRemotePeerAsync(CreatePeer("peer-1", "10.0.0.1:6000", false));
        // Merge must NOT overwrite an existing peer (peer-1 keeps :6000 / disabled) but
        // must insert unknown peers (peer-2).
        await store.MergeAsync(new[]
        {
            CreatePeer("peer-1", "10.0.0.1:7000", true),
            CreatePeer("peer-2", "10.0.0.2:5000", true)
        });
        var afterMergePeer1 = await store.GetRemotePeerAsync("peer-1", CancellationToken.None);
        var afterMergePeer2 = await store.GetRemotePeerAsync("peer-2", CancellationToken.None);
        afterMergePeer1.ShouldNotBeNull();
        afterMergePeer1.Address.ShouldBe("10.0.0.1:6000");
        afterMergePeer1.IsEnabled.ShouldBeFalse();
        afterMergePeer2.ShouldNotBeNull();
        afterMergePeer2.Address.ShouldBe("10.0.0.2:5000");
        await store.RemoveRemotePeerAsync("peer-1");
        var removedPeer = await store.GetRemotePeerAsync("peer-1", CancellationToken.None);
        removedPeer.ShouldBeNull();
        var peers = (await store.GetRemotePeersAsync()).ToList();
        peers.Count.ShouldBe(1);
        peers[0].NodeId.ShouldBe("peer-2");
    }

    /// <summary>
    /// Builds a static-remote peer configuration interested in the Users collection.
    /// </summary>
    private static RemotePeerConfiguration CreatePeer(string nodeId, string address, bool enabled)
    {
        return new RemotePeerConfiguration
        {
            NodeId = nodeId,
            Address = address,
            Type = PeerType.StaticRemote,
            IsEnabled = enabled,
            InterestingCollections = new List<string> { "Users" }
        };
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealPeerOplogConfirmationStore"/>: idempotent peer
/// registration, monotonic confirmation updates, soft-deactivation, and merge semantics.
/// </summary>
public class SurrealPeerOplogConfirmationStoreContractTests
{
    [Fact]
    public async Task PeerOplogConfirmationStore_EnsureUpdateAndDeactivate_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerOplogConfirmationStore();
        // Double registration is idempotent: exactly one "__peer_registration__" sentinel row.
        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        // Confirmations only move forward: the older (90,0) update is discarded, while an
        // equal-timestamp update (100,1) may replace the hash ("hash-1" -> "hash-2").
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(90, 0, "source-1"), "hash-old");
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-2");
        var peerConfirmations = (await store.GetConfirmationsForPeerAsync("peer-a")).ToList();
        peerConfirmations.Count.ShouldBe(1);
        peerConfirmations[0].ConfirmedWall.ShouldBe(100);
        peerConfirmations[0].ConfirmedLogic.ShouldBe(1);
        peerConfirmations[0].ConfirmedHash.ShouldBe("hash-2");
        var all = (await store.ExportAsync()).Where(x => x.PeerNodeId == "peer-a").ToList();
        all.Count(x => x.SourceNodeId == "__peer_registration__").ShouldBe(1);
        // Removing tracking is a soft delete: rows remain but are all flagged inactive.
        await store.RemovePeerTrackingAsync("peer-a");
        var activePeers = (await store.GetActiveTrackedPeersAsync()).ToList();
        activePeers.ShouldNotContain("peer-a");
        var afterDeactivate = (await store.ExportAsync()).Where(x => x.PeerNodeId == "peer-a").ToList();
        afterDeactivate.All(x => x.IsActive == false).ShouldBeTrue();
    }

    [Fact]
    public async Task PeerOplogConfirmationStore_Merge_UsesNewerAndActiveStateSemantics()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreatePeerOplogConfirmationStore();
        await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");
        var existing = (await store.ExportAsync())
            .Single(x => x.PeerNodeId == "peer-a" && x.SourceNodeId == "source-1");
        // Merge: per (peer, source), the record with the newer LastConfirmedUtc wins —
        // including its IsActive flag — while the stale record is dropped; unknown
        // (peer, source) pairs are inserted as-is.
        await store.MergeAsync(new[]
        {
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-1",
                ConfirmedWall = 90,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-old",
                LastConfirmedUtc = existing.LastConfirmedUtc.AddMinutes(-5),
                IsActive = true
            },
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-1",
                ConfirmedWall = 130,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-2",
                LastConfirmedUtc = existing.LastConfirmedUtc.AddMinutes(5),
                IsActive = false
            },
            new PeerOplogConfirmation
            {
                PeerNodeId = "peer-a",
                SourceNodeId = "source-2",
                ConfirmedWall = 50,
                ConfirmedLogic = 0,
                ConfirmedHash = "hash-3",
                LastConfirmedUtc = existing.LastConfirmedUtc.AddMinutes(5),
                IsActive = true
            }
        });
        var all = (await store.ExportAsync())
            .Where(x => x.PeerNodeId == "peer-a" && x.SourceNodeId != "__peer_registration__")
            .OrderBy(x => x.SourceNodeId)
            .ToList();
        all.Count.ShouldBe(2);
        var source1 = all.Single(x => x.SourceNodeId == "source-1");
        source1.ConfirmedWall.ShouldBe(130);
        source1.ConfirmedLogic.ShouldBe(0);
        source1.ConfirmedHash.ShouldBe("hash-2");
        source1.IsActive.ShouldBeFalse();
        var source2 = all.Single(x => x.SourceNodeId == "source-2");
        source2.ConfirmedWall.ShouldBe(50);
        source2.ConfirmedHash.ShouldBe("hash-3");
        source2.IsActive.ShouldBeTrue();
    }
}
/// <summary>
/// Contract tests for <see cref="SurrealSnapshotMetadataStore"/>: insert, update,
/// newest-wins merge keyed on the (physical, logical) HLC pair, and hash lookup.
/// </summary>
public class SurrealSnapshotMetadataStoreContractTests
{
    [Fact]
    public async Task SnapshotMetadataStore_InsertUpdateMergeAndHashLookup_Works()
    {
        await using var harness = new SurrealTestHarness();
        var store = harness.CreateSnapshotMetadataStore();
        await store.InsertSnapshotMetadataAsync(new SnapshotMetadata
        {
            NodeId = "node-a",
            TimestampPhysicalTime = 100,
            TimestampLogicalCounter = 0,
            Hash = "hash-1"
        });
        var initialHash = await store.GetSnapshotHashAsync("node-a");
        initialHash.ShouldBe("hash-1");
        await store.UpdateSnapshotMetadataAsync(new SnapshotMetadata
        {
            NodeId = "node-a",
            TimestampPhysicalTime = 120,
            TimestampLogicalCounter = 1,
            Hash = "hash-2"
        }, CancellationToken.None);
        var updatedHash = await store.GetSnapshotHashAsync("node-a");
        updatedHash.ShouldBe("hash-2");
        // Merge is newest-wins per node by (physical, logical): (119,9) loses to the stored
        // (120,1) despite the larger logical counter; (130,0) wins; node-b is inserted.
        await store.MergeAsync(new[]
        {
            new SnapshotMetadata
            {
                NodeId = "node-a",
                TimestampPhysicalTime = 119,
                TimestampLogicalCounter = 9,
                Hash = "hash-old"
            },
            new SnapshotMetadata
            {
                NodeId = "node-a",
                TimestampPhysicalTime = 130,
                TimestampLogicalCounter = 0,
                Hash = "hash-3"
            },
            new SnapshotMetadata
            {
                NodeId = "node-b",
                TimestampPhysicalTime = 140,
                TimestampLogicalCounter = 0,
                Hash = "hash-b"
            }
        });
        var finalNodeA = await store.GetSnapshotMetadataAsync("node-a");
        finalNodeA.ShouldNotBeNull();
        finalNodeA.Hash.ShouldBe("hash-3");
        finalNodeA.TimestampPhysicalTime.ShouldBe(130);
        var all = (await store.GetAllSnapshotMetadataAsync()).OrderBy(x => x.NodeId).ToList();
        all.Count.ShouldBe(2);
        all[0].NodeId.ShouldBe("node-a");
        all[1].NodeId.ShouldBe("node-b");
    }
}
/// <summary>
/// Provisions an isolated embedded Surreal (RocksDB) database under a unique temp
/// directory, with a unique namespace/database per instance, and hands out store
/// instances bound to it. The temp directory is removed on dispose.
/// </summary>
internal sealed class SurrealTestHarness : IAsyncDisposable
{
    private readonly CBDDCSurrealEmbeddedClient _client;
    private readonly string _rootPath;
    private readonly ICBDDCSurrealSchemaInitializer _schemaInitializer;

    public SurrealTestHarness()
    {
        var uniqueId = Guid.NewGuid().ToString("N");
        _rootPath = Path.Combine(Path.GetTempPath(), "cbddc-surreal-tests", uniqueId);

        // Unique namespace/database names keep parallel test runs from colliding.
        var options = new CBDDCSurrealEmbeddedOptions
        {
            Endpoint = "rocksdb://local",
            DatabasePath = Path.Combine(_rootPath, "rocksdb"),
            Namespace = $"cbddc_tests_{uniqueId}",
            Database = $"main_{uniqueId}"
        };

        _client = new CBDDCSurrealEmbeddedClient(options, NullLogger<CBDDCSurrealEmbeddedClient>.Instance);
        _schemaInitializer = new TestSurrealSchemaInitializer(_client);
    }

    public SurrealDocumentMetadataStore CreateDocumentMetadataStore()
        => new(_client, _schemaInitializer, NullLogger<SurrealDocumentMetadataStore>.Instance);

    public SurrealOplogStore CreateOplogStore()
        => new(
            _client,
            _schemaInitializer,
            Substitute.For<IDocumentStore>(),
            new LastWriteWinsConflictResolver(),
            new VectorClockService(),
            null,
            NullLogger<SurrealOplogStore>.Instance);

    public SurrealPeerConfigurationStore CreatePeerConfigurationStore()
        => new(_client, _schemaInitializer, NullLogger<SurrealPeerConfigurationStore>.Instance);

    public SurrealPeerOplogConfirmationStore CreatePeerOplogConfirmationStore()
        => new(_client, _schemaInitializer, NullLogger<SurrealPeerOplogConfirmationStore>.Instance);

    public SurrealSnapshotMetadataStore CreateSnapshotMetadataStore()
        => new(_client, _schemaInitializer, NullLogger<SurrealSnapshotMetadataStore>.Instance);

    public async ValueTask DisposeAsync()
    {
        await _client.DisposeAsync();
        await DeleteDirectoryWithRetriesAsync(_rootPath);
    }

    /// <summary>
    /// Deletes the harness directory with short retries — the embedded engine may hold
    /// file handles briefly after disposal. The final attempt is allowed to throw.
    /// </summary>
    private static async Task DeleteDirectoryWithRetriesAsync(string path)
    {
        for (var attempt = 0; attempt < 5; attempt++)
        {
            try
            {
                if (Directory.Exists(path))
                {
                    Directory.Delete(path, true);
                }

                return;
            }
            catch when (attempt < 4)
            {
                await Task.Delay(50);
            }
        }
    }
}
/// <summary>
/// Schema initializer that forwards to the embedded client's <c>InitializeAsync</c>
/// exactly once, no matter how many stores ask for initialization.
/// </summary>
internal sealed class TestSurrealSchemaInitializer : ICBDDCSurrealSchemaInitializer
{
    private readonly ICBDDCSurrealEmbeddedClient _client;

    // 0 = not yet initialized, 1 = initialization already claimed.
    private int _initialized;

    public TestSurrealSchemaInitializer(ICBDDCSurrealEmbeddedClient client)
        => _client = client;

    public async Task EnsureInitializedAsync(CancellationToken cancellationToken = default)
    {
        // First caller wins the atomic flag flip and runs initialization;
        // every later caller sees 1 and returns immediately.
        if (Interlocked.CompareExchange(ref _initialized, 1, 0) != 0)
        {
            return;
        }

        await _client.InitializeAsync(cancellationToken);
    }
}