Add LMDB oplog migration path with dual-write cutover support
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m16s

Introduce LMDB oplog store, migration flags, telemetry/backfill tooling, and parity tests to enable staged Surreal-to-LMDB rollout with rollback coverage.
This commit is contained in:
Joseph Doherty
2026-02-22 17:44:57 -05:00
parent 3b9ff69adc
commit cce24fa8f3
16 changed files with 3601 additions and 6 deletions

View File

@@ -10,6 +10,7 @@ using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Persistence.Lmdb;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.E2E.Tests;
@@ -240,6 +241,92 @@ public class ClusterCrudSyncE2ETests
}, 60, "Node B did not catch up missed reconnect mutations.", () => BuildDiagnostics(nodeA, nodeB));
}
/// <summary>
/// Verifies reconnect catch-up still works when reads are cut over to LMDB with dual-write enabled.
/// </summary>
[Fact]
public async Task PeerReconnect_ShouldCatchUpMissedChanges_WithLmdbPreferredReads()
{
// Arrange: two nodes in one cluster, each configured with the other as a static known peer.
var clusterToken = Guid.NewGuid().ToString("N");
int nodeAPort = GetAvailableTcpPort();
int nodeBPort = GetAvailableTcpPort();
// Re-roll until the ports differ (GetAvailableTcpPort may hand out the same port twice).
while (nodeBPort == nodeAPort) nodeBPort = GetAvailableTcpPort();
// Both nodes run the full migration posture: LMDB oplog + dual-write + LMDB-preferred reads.
await using var nodeA = TestPeerNode.Create(
"node-a",
nodeAPort,
clusterToken,
[
new KnownPeerConfiguration
{
NodeId = "node-b",
Host = "127.0.0.1",
Port = nodeBPort
}
],
useLmdbOplog: true,
dualWriteOplog: true,
preferLmdbReads: true);
await using var nodeB = TestPeerNode.Create(
"node-b",
nodeBPort,
clusterToken,
[
new KnownPeerConfiguration
{
NodeId = "node-a",
Host = "127.0.0.1",
Port = nodeAPort
}
],
useLmdbOplog: true,
dualWriteOplog: true,
preferLmdbReads: true);
await nodeA.StartAsync();
await nodeB.StartAsync();
// Take node B offline so it misses the mutations applied below.
await nodeB.StopAsync();
const string userId = "reconnect-lmdb-user";
// Three sequential upserts to the same user while node B is down; after
// catch-up, only the final state should be observable on node B.
await nodeA.UpsertUserAsync(new User
{
Id = userId,
Name = "Offline Create",
Age = 20,
Address = new Address { City = "Rome" }
});
await nodeA.UpsertUserAsync(new User
{
Id = userId,
Name = "Offline Update",
Age = 21,
Address = new Address { City = "Milan" }
});
await nodeA.UpsertUserAsync(new User
{
Id = userId,
Name = "Offline Final",
Age = 22,
Address = new Address { City = "Turin" }
});
// Reconnect: catch-up should replay the missed oplog entries to node B.
await nodeB.StartAsync();
// Assert: node B eventually converges to the final mutation (60 is the retry
// budget of AssertEventuallyAsync; diagnostics are captured on failure).
await AssertEventuallyAsync(() =>
{
var replicated = nodeB.ReadUser(userId);
return replicated is not null &&
replicated.Name == "Offline Final" &&
replicated.Age == 22 &&
replicated.Address?.City == "Turin";
}, 60, "Node B did not catch up missed reconnect mutations with LMDB preferred reads.",
() => BuildDiagnostics(nodeA, nodeB));
}
/// <summary>
/// Verifies a burst of rapid multi-node mutations converges to a deterministic final state.
/// </summary>
@@ -572,6 +659,9 @@ public class ClusterCrudSyncE2ETests
/// <param name="workDirOverride">An optional working directory override for test artifacts.</param>
/// <param name="preserveWorkDirOnDispose">A value indicating whether to preserve the working directory on dispose.</param>
/// <param name="useFaultInjectedCheckpointStore">A value indicating whether to inject a checkpoint persistence that fails once.</param>
/// <param name="useLmdbOplog">A value indicating whether to enable the LMDB oplog migration path.</param>
/// <param name="dualWriteOplog">A value indicating whether oplog writes should be mirrored to Surreal + LMDB.</param>
/// <param name="preferLmdbReads">A value indicating whether reads should prefer LMDB.</param>
/// <returns>A configured <see cref="TestPeerNode" /> instance.</returns>
public static TestPeerNode Create(
string nodeId,
@@ -580,7 +670,10 @@ public class ClusterCrudSyncE2ETests
IReadOnlyList<KnownPeerConfiguration> knownPeers,
string? workDirOverride = null,
bool preserveWorkDirOnDispose = false,
bool useFaultInjectedCheckpointStore = false)
bool useFaultInjectedCheckpointStore = false,
bool useLmdbOplog = false,
bool dualWriteOplog = true,
bool preferLmdbReads = false)
{
string workDir = workDirOverride ?? Path.Combine(Path.GetTempPath(), $"cbddc-e2e-{nodeId}-{Guid.NewGuid():N}");
Directory.CreateDirectory(workDir);
@@ -620,13 +713,47 @@ public class ClusterCrudSyncE2ETests
if (useFaultInjectedCheckpointStore)
{
services.AddSingleton<ISurrealCdcCheckpointPersistence, CrashAfterFirstAdvanceCheckpointPersistence>();
coreBuilder.AddCBDDCSurrealEmbedded<FaultInjectedSampleDocumentStore>(surrealOptionsFactory)
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
var registration = coreBuilder.AddCBDDCSurrealEmbedded<FaultInjectedSampleDocumentStore>(surrealOptionsFactory);
if (useLmdbOplog)
registration.AddCBDDCLmdbOplog(
_ => new LmdbOplogOptions
{
EnvironmentPath = Path.Combine(workDir, "oplog-lmdb"),
MapSizeBytes = 128L * 1024 * 1024,
MaxDatabases = 16,
PruneBatchSize = 256
},
flags =>
{
flags.UseLmdbOplog = true;
flags.DualWriteOplog = dualWriteOplog;
flags.PreferLmdbReads = preferLmdbReads;
flags.ReconciliationInterval = TimeSpan.Zero;
});
registration.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
}
else
{
coreBuilder.AddCBDDCSurrealEmbedded<SampleDocumentStore>(surrealOptionsFactory)
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
var registration = coreBuilder.AddCBDDCSurrealEmbedded<SampleDocumentStore>(surrealOptionsFactory);
if (useLmdbOplog)
registration.AddCBDDCLmdbOplog(
_ => new LmdbOplogOptions
{
EnvironmentPath = Path.Combine(workDir, "oplog-lmdb"),
MapSizeBytes = 128L * 1024 * 1024,
MaxDatabases = 16,
PruneBatchSize = 256
},
flags =>
{
flags.UseLmdbOplog = true;
flags.DualWriteOplog = dualWriteOplog;
flags.PreferLmdbReads = preferLmdbReads;
flags.ReconciliationInterval = TimeSpan.Zero;
});
registration.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
}
// Deterministic tests: sync uses explicit known peers, so disable UDP discovery.

View File

@@ -0,0 +1,237 @@
using System.Text.Json;
using Microsoft.Extensions.Logging.Abstractions;
using NSubstitute;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Lmdb;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Migration-path tests for the Surreal-to-LMDB oplog cutover. Covers the
/// <see cref="FeatureFlagOplogStore"/> routing behaviors (dual-write, LMDB-preferred
/// reads with reconciliation, shadow validation, rollback-to-Surreal) and the
/// offline <see cref="LmdbOplogBackfillTool"/> with its validation report.
/// </summary>
public class LmdbOplogMigrationTests
{
[Fact]
public async Task FeatureFlags_DualWrite_WritesToBothStores()
{
await using var surrealHarness = new SurrealTestHarness();
var surrealStore = surrealHarness.CreateOplogStore();
using var lmdbStore = CreateLmdbStore();
// Dual-write enabled; reads still served from Surreal.
var flags = new LmdbOplogFeatureFlags
{
UseLmdbOplog = true,
DualWriteOplog = true,
PreferLmdbReads = false
};
var store = new FeatureFlagOplogStore(
surrealStore,
lmdbStore,
flags,
logger: NullLogger<FeatureFlagOplogStore>.Instance);
var entry = CreateEntry("Users", "dual-write", "node-a", 100, 0, "");
await store.AppendOplogEntryAsync(entry);
// A single append through the flag store must land in both backing stores.
(await surrealStore.GetEntryByHashAsync(entry.Hash)).ShouldNotBeNull();
(await lmdbStore.GetEntryByHashAsync(entry.Hash)).ShouldNotBeNull();
}
[Fact]
public async Task FeatureFlags_PreferLmdbReads_ReconcilesFromSurrealWhenLmdbMissingEntries()
{
await using var surrealHarness = new SurrealTestHarness();
var surrealStore = surrealHarness.CreateOplogStore();
using var lmdbStore = CreateLmdbStore();
// LMDB-preferred reads; zero interval makes reconciliation eligible immediately.
var flags = new LmdbOplogFeatureFlags
{
UseLmdbOplog = true,
DualWriteOplog = false,
PreferLmdbReads = true,
ReconciliationInterval = TimeSpan.Zero
};
var entry = CreateEntry("Users", "reconcile-1", "node-a", 200, 0, "");
// Simulate crash window where only Surreal persisted before LMDB migration store starts.
await surrealStore.AppendOplogEntryAsync(entry);
(await lmdbStore.GetEntryByHashAsync(entry.Hash)).ShouldBeNull();
var store = new FeatureFlagOplogStore(
surrealStore,
lmdbStore,
flags,
logger: NullLogger<FeatureFlagOplogStore>.Instance);
// The read must still resolve despite the gap in LMDB.
OplogEntry? resolved = await store.GetEntryByHashAsync(entry.Hash);
resolved.ShouldNotBeNull();
resolved.Hash.ShouldBe(entry.Hash);
// Reconciliation should have backfilled LMDB.
(await lmdbStore.GetEntryByHashAsync(entry.Hash)).ShouldNotBeNull();
OplogMigrationTelemetrySnapshot telemetry = store.GetTelemetrySnapshot();
telemetry.ReconciliationRuns.ShouldBeGreaterThanOrEqualTo(1);
telemetry.ReconciledEntries.ShouldBeGreaterThanOrEqualTo(1);
}
[Fact]
public async Task FeatureFlags_ShadowValidation_RecordsMismatchTelemetry()
{
await using var surrealHarness = new SurrealTestHarness();
var surrealStore = surrealHarness.CreateOplogStore();
using var lmdbStore = CreateLmdbStore();
var telemetry = new OplogMigrationTelemetry();
// Shadow validation checks each served read against the other store.
var flags = new LmdbOplogFeatureFlags
{
UseLmdbOplog = true,
DualWriteOplog = true,
PreferLmdbReads = false,
EnableReadShadowValidation = true
};
var store = new FeatureFlagOplogStore(
surrealStore,
lmdbStore,
flags,
telemetry,
NullLogger<FeatureFlagOplogStore>.Instance);
var entry = CreateEntry("Users", "shadow-mismatch-1", "node-a", 210, 0, "");
// Written to Surreal only, so the LMDB shadow lookup is guaranteed to miss.
await surrealStore.AppendOplogEntryAsync(entry);
OplogEntry? resolved = await store.GetEntryByHashAsync(entry.Hash);
resolved.ShouldNotBeNull();
// Exactly one comparison happened and it must be recorded as a mismatch.
OplogMigrationTelemetrySnapshot snapshot = store.GetTelemetrySnapshot();
snapshot.ShadowComparisons.ShouldBe(1);
snapshot.ShadowMismatches.ShouldBe(1);
}
[Fact]
public async Task FeatureFlags_RollbackToSurreal_UsesSurrealForWritesAndReads()
{
await using var surrealHarness = new SurrealTestHarness();
var surrealStore = surrealHarness.CreateOplogStore();
using var lmdbStore = CreateLmdbStore();
// Rollback posture: LMDB path present but neither dual-write nor preferred reads.
var flags = new LmdbOplogFeatureFlags
{
UseLmdbOplog = true,
DualWriteOplog = false,
PreferLmdbReads = false
};
var store = new FeatureFlagOplogStore(
surrealStore,
lmdbStore,
flags,
logger: NullLogger<FeatureFlagOplogStore>.Instance);
var entry = CreateEntry("Users", "rollback-1", "node-a", 220, 0, "");
await store.AppendOplogEntryAsync(entry);
// Writes reach Surreal only; LMDB stays untouched.
(await surrealStore.GetEntryByHashAsync(entry.Hash)).ShouldNotBeNull();
(await lmdbStore.GetEntryByHashAsync(entry.Hash)).ShouldBeNull();
// Reads route back through Surreal as well.
OplogEntry? routedRead = await store.GetEntryByHashAsync(entry.Hash);
routedRead.ShouldNotBeNull();
routedRead.Hash.ShouldBe(entry.Hash);
}
[Fact]
public async Task BackfillTool_BackfillAndValidate_ReportsSuccess()
{
await using var surrealHarness = new SurrealTestHarness();
var surrealStore = surrealHarness.CreateOplogStore();
using var lmdbStore = CreateLmdbStore();
var tool = new LmdbOplogBackfillTool(surrealStore, lmdbStore, NullLogger<LmdbOplogBackfillTool>.Instance);
// Two independent hash chains (node-a and node-b), two entries each.
var first = CreateEntry("Users", "backfill-1", "node-a", 300, 0, "");
var second = CreateEntry("Users", "backfill-2", "node-a", 301, 0, first.Hash);
var third = CreateEntry("Users", "backfill-3", "node-b", 302, 0, "");
var fourth = CreateEntry("Users", "backfill-4", "node-b", 303, 0, third.Hash);
await surrealStore.AppendOplogEntryAsync(first);
await surrealStore.AppendOplogEntryAsync(second);
await surrealStore.AppendOplogEntryAsync(third);
await surrealStore.AppendOplogEntryAsync(fourth);
LmdbOplogBackfillReport report = await tool.BackfillAsync(DatasetId.Primary);
// Every validation dimension of the report must pass for a clean backfill.
report.IsSuccess.ShouldBeTrue();
report.CountsMatch.ShouldBeTrue();
report.CountsPerNodeMatch.ShouldBeTrue();
report.LatestHashPerNodeMatch.ShouldBeTrue();
report.HashSpotChecksPassed.ShouldBeTrue();
report.ChainSpotChecksPassed.ShouldBeTrue();
report.SourceCount.ShouldBe(4);
report.DestinationCount.ShouldBe(4);
}
[Fact]
public async Task BackfillTool_BackfillAndValidate_WorksPerDataset()
{
await using var surrealHarness = new SurrealTestHarness();
var surrealStore = surrealHarness.CreateOplogStore();
using var lmdbStore = CreateLmdbStore();
var tool = new LmdbOplogBackfillTool(surrealStore, lmdbStore, NullLogger<LmdbOplogBackfillTool>.Instance);
var logsEntryA = CreateEntry("Logs", "log-1", "node-a", 400, 0, "");
var logsEntryB = CreateEntry("Logs", "log-2", "node-a", 401, 0, logsEntryA.Hash);
var primaryEntry = CreateEntry("Users", "primary-1", "node-a", 500, 0, "");
await surrealStore.AppendOplogEntryAsync(logsEntryA, DatasetId.Logs);
await surrealStore.AppendOplogEntryAsync(logsEntryB, DatasetId.Logs);
await surrealStore.AppendOplogEntryAsync(primaryEntry, DatasetId.Primary);
// Backfilling the Logs dataset must not copy Primary entries across.
LmdbOplogBackfillReport logsReport = await tool.BackfillAsync(DatasetId.Logs);
logsReport.IsSuccess.ShouldBeTrue();
logsReport.SourceCount.ShouldBe(2);
logsReport.DestinationCount.ShouldBe(2);
(await lmdbStore.GetEntryByHashAsync(logsEntryA.Hash, DatasetId.Logs)).ShouldNotBeNull();
(await lmdbStore.GetEntryByHashAsync(logsEntryB.Hash, DatasetId.Logs)).ShouldNotBeNull();
(await lmdbStore.GetEntryByHashAsync(primaryEntry.Hash, DatasetId.Primary)).ShouldBeNull();
}
// Creates a throwaway on-disk LMDB-backed store under the system temp directory.
// NOTE(review): the directory is created but never deleted by this class — confirm
// whether leaking per-run temp dirs is acceptable or cleanup should be added.
private static LmdbOplogStore CreateLmdbStore()
{
string rootPath = Path.Combine(Path.GetTempPath(), "cbddc-lmdb-migration", Guid.NewGuid().ToString("N"));
Directory.CreateDirectory(rootPath);
return new LmdbOplogStore(
Substitute.For<IDocumentStore>(),
new LastWriteWinsConflictResolver(),
new VectorClockService(),
new LmdbOplogOptions
{
EnvironmentPath = rootPath,
MapSizeBytes = 64L * 1024 * 1024,
MaxDatabases = 16
},
null,
NullLogger<LmdbOplogStore>.Instance);
}
// Builds a minimal oplog entry whose JSON payload is just { "key": key }.
private static OplogEntry CreateEntry(
string collection,
string key,
string nodeId,
long wall,
int logic,
string previousHash)
{
return new OplogEntry(
collection,
key,
OperationType.Put,
JsonSerializer.SerializeToElement(new { key }),
new HlcTimestamp(wall, logic, nodeId),
previousHash);
}
}

View File

@@ -0,0 +1,267 @@
using System.Diagnostics;
using Microsoft.Extensions.Logging.Abstractions;
using NSubstitute;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Lmdb;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Runs the shared oplog-store contract suite against the Surreal-backed store,
/// guaranteeing behavioral parity with the LMDB implementation during migration.
/// </summary>
public class SurrealOplogStoreContractParityTests : OplogStoreContractTestBase
{
    /// <summary>Supplies a Surreal-backed harness to the shared contract tests.</summary>
    protected override Task<IOplogStoreContractHarness> CreateHarnessAsync()
        => Task.FromResult<IOplogStoreContractHarness>(new SurrealOplogStoreContractHarness());
}
/// <summary>
/// Runs the shared oplog contract suite against the LMDB store and adds
/// LMDB-specific coverage: secondary-index consistency, prune cutoff semantics,
/// per-node head maintenance, restart durability, append dedupe, and a
/// generous-budget prune smoke test.
/// </summary>
public class LmdbOplogStoreContractTests : OplogStoreContractTestBase
{
protected override Task<IOplogStoreContractHarness> CreateHarnessAsync()
{
return Task.FromResult<IOplogStoreContractHarness>(new LmdbOplogStoreContractHarness());
}
[Fact]
public async Task Lmdb_IndexConsistency_InsertPopulatesAndPruneRemovesIndexes()
{
await using var harness = new LmdbOplogStoreContractHarness();
// Concrete store type is needed for the index-diagnostics API below.
var store = (LmdbOplogStore)harness.Store;
var entry1 = CreateOplogEntry("Users", "i1", "node-a", 100, 0, "");
var entry2 = CreateOplogEntry("Users", "i2", "node-a", 101, 0, entry1.Hash);
await store.AppendOplogEntryAsync(entry1);
await store.AppendOplogEntryAsync(entry2);
// Two chained entries from one node: per-entry indexes hold 2 rows each;
// the prev-to-hash link and the per-node head hold exactly 1 each.
LmdbOplogIndexDiagnostics before = await store.GetIndexDiagnosticsAsync(DatasetId.Primary);
before.OplogByHashCount.ShouldBe(2);
before.OplogByHlcCount.ShouldBe(2);
before.OplogByNodeHlcCount.ShouldBe(2);
before.OplogPrevToHashCount.ShouldBe(1);
before.OplogNodeHeadCount.ShouldBe(1);
// Pruning at the newest timestamp must clear every index, not just the entries.
await store.PruneOplogAsync(new HlcTimestamp(101, 0, "node-a"));
LmdbOplogIndexDiagnostics after = await store.GetIndexDiagnosticsAsync(DatasetId.Primary);
after.OplogByHashCount.ShouldBe(0);
after.OplogByHlcCount.ShouldBe(0);
after.OplogByNodeHlcCount.ShouldBe(0);
after.OplogPrevToHashCount.ShouldBe(0);
after.OplogNodeHeadCount.ShouldBe(0);
}
[Fact]
public async Task Lmdb_Prune_RemovesAtOrBeforeCutoff_AndKeepsNewerInterleavedEntries()
{
await using var harness = new LmdbOplogStoreContractHarness();
IOplogStore store = harness.Store;
var nodeAOld = CreateOplogEntry("Users", "a-old", "node-a", 100, 0, "");
var nodeBKeep = CreateOplogEntry("Users", "b-keep", "node-b", 105, 0, "");
var nodeANew = CreateOplogEntry("Users", "a-new", "node-a", 110, 0, nodeAOld.Hash);
var lateOld = CreateOplogEntry("Users", "late-old", "node-c", 90, 0, "");
await store.AppendOplogEntryAsync(nodeAOld);
await store.AppendOplogEntryAsync(nodeBKeep);
await store.AppendOplogEntryAsync(nodeANew);
// lateOld is appended last but has the oldest HLC: prune must go by timestamp,
// not insertion order.
await store.AppendOplogEntryAsync(lateOld);
// Cutoff (100, 0): entries at-or-before it are removed; strictly newer ones stay.
await store.PruneOplogAsync(new HlcTimestamp(100, 0, "node-a"));
var remaining = (await store.ExportAsync()).Select(e => e.Hash).ToHashSet(StringComparer.Ordinal);
remaining.Contains(nodeAOld.Hash).ShouldBeFalse();
remaining.Contains(lateOld.Hash).ShouldBeFalse();
remaining.Contains(nodeBKeep.Hash).ShouldBeTrue();
remaining.Contains(nodeANew.Hash).ShouldBeTrue();
}
[Fact]
public async Task Lmdb_NodeHead_AdvancesAndRecomputesAcrossPrune()
{
await using var harness = new LmdbOplogStoreContractHarness();
IOplogStore store = harness.Store;
var older = CreateOplogEntry("Users", "n1", "node-a", 100, 0, "");
var newer = CreateOplogEntry("Users", "n2", "node-a", 120, 0, older.Hash);
await store.AppendOplogEntryAsync(older);
await store.AppendOplogEntryAsync(newer);
(await store.GetLastEntryHashAsync("node-a")).ShouldBe(newer.Hash);
// Pruning only the older entry must leave the head pointing at the survivor.
await store.PruneOplogAsync(new HlcTimestamp(110, 0, "node-a"));
(await store.GetLastEntryHashAsync("node-a")).ShouldBe(newer.Hash);
// Pruning everything must clear the head entirely.
await store.PruneOplogAsync(new HlcTimestamp(130, 0, "node-a"));
(await store.GetLastEntryHashAsync("node-a")).ShouldBeNull();
}
[Fact]
public async Task Lmdb_RestartDurability_PreservesHeadAndScans()
{
await using var harness = new LmdbOplogStoreContractHarness();
IOplogStore store = harness.Store;
var entry1 = CreateOplogEntry("Users", "r1", "node-a", 100, 0, "");
var entry2 = CreateOplogEntry("Users", "r2", "node-a", 101, 0, entry1.Hash);
await store.AppendOplogEntryAsync(entry1);
await store.AppendOplogEntryAsync(entry2);
// Reopen simulates a process restart over the same LMDB environment path.
IOplogStore reopened = harness.ReopenStore();
(await reopened.GetLastEntryHashAsync("node-a")).ShouldBe(entry2.Hash);
var after = (await reopened.GetOplogAfterAsync(new HlcTimestamp(0, 0, string.Empty))).ToList();
after.Count.ShouldBe(2);
after[0].Hash.ShouldBe(entry1.Hash);
after[1].Hash.ShouldBe(entry2.Hash);
}
[Fact]
public async Task Lmdb_Dedupe_DuplicateHashAppendIsIdempotent()
{
await using var harness = new LmdbOplogStoreContractHarness();
IOplogStore store = harness.Store;
var entry = CreateOplogEntry("Users", "d1", "node-a", 100, 0, "");
// Appending the identical entry twice must persist it exactly once.
await store.AppendOplogEntryAsync(entry);
await store.AppendOplogEntryAsync(entry);
var exported = (await store.ExportAsync()).ToList();
exported.Count.ShouldBe(1);
exported[0].Hash.ShouldBe(entry.Hash);
}
[Fact]
public async Task Lmdb_PrunePerformanceSmoke_LargeSyntheticWindow_CompletesWithinGenerousBudget()
{
await using var harness = new LmdbOplogStoreContractHarness();
IOplogStore store = harness.Store;
// Build a 5000-entry single-node chain, then prune it all in one call.
string previousHash = string.Empty;
for (var i = 0; i < 5000; i++)
{
var entry = CreateOplogEntry("Users", $"p-{i:D5}", "node-a", 1_000 + i, 0, previousHash);
await store.AppendOplogEntryAsync(entry);
previousHash = entry.Hash;
}
var sw = Stopwatch.StartNew();
await store.PruneOplogAsync(new HlcTimestamp(6_000, 0, "node-a"));
sw.Stop();
// Deliberately generous budget: this is a smoke test guarding against
// pathological (e.g. quadratic) prune behavior, not a benchmark.
sw.ElapsedMilliseconds.ShouldBeLessThan(30_000L);
(await store.ExportAsync()).ShouldBeEmpty();
}
}
/// <summary>
/// Contract-test harness exposing a Surreal-backed <see cref="IOplogStore"/>.
/// Reopening creates a fresh store over the same underlying Surreal harness,
/// which stands in for a process restart against durable state.
/// </summary>
internal sealed class SurrealOplogStoreContractHarness : IOplogStoreContractHarness
{
    private readonly SurrealTestHarness _harness = new();

    public SurrealOplogStoreContractHarness() => Store = _harness.CreateOplogStore();

    public IOplogStore Store { get; private set; }

    /// <summary>Recreates the store over the same Surreal backing and returns it.</summary>
    public IOplogStore ReopenStore() => Store = _harness.CreateOplogStore();

    // Dataset-aware append/export are not on IOplogStore, so route through the concrete type.
    public Task AppendOplogEntryAsync(OplogEntry entry, string datasetId, CancellationToken cancellationToken = default)
        => ((SurrealOplogStore)Store).AppendOplogEntryAsync(entry, datasetId, cancellationToken);

    public Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default)
        => ((SurrealOplogStore)Store).ExportAsync(datasetId, cancellationToken);

    public ValueTask DisposeAsync() => _harness.DisposeAsync();
}
/// <summary>
/// Contract-test harness backed by a real on-disk LMDB environment under a unique
/// temp directory. <see cref="ReopenStore"/> disposes and recreates the store over
/// the same path to exercise restart durability; disposal removes the temp
/// directory on a best-effort basis.
/// </summary>
internal sealed class LmdbOplogStoreContractHarness : IOplogStoreContractHarness
{
    private readonly string _rootPath;
    private LmdbOplogStore? _store;

    public LmdbOplogStoreContractHarness()
    {
        _rootPath = Path.Combine(Path.GetTempPath(), "cbddc-lmdb-tests", Guid.NewGuid().ToString("N"));
        Directory.CreateDirectory(_rootPath);
        _store = CreateStore();
    }

    public IOplogStore Store => _store ?? throw new ObjectDisposedException(nameof(LmdbOplogStoreContractHarness));

    /// <summary>Disposes the current store and opens a new one over the same LMDB path (simulated restart).</summary>
    public IOplogStore ReopenStore()
    {
        _store?.Dispose();
        _store = CreateStore();
        return _store;
    }

    // Dataset-aware append/export are not on IOplogStore, so route through the concrete type.
    public Task AppendOplogEntryAsync(OplogEntry entry, string datasetId, CancellationToken cancellationToken = default)
    {
        return (_store ?? throw new ObjectDisposedException(nameof(LmdbOplogStoreContractHarness)))
            .AppendOplogEntryAsync(entry, datasetId, cancellationToken);
    }

    public Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default)
    {
        return (_store ?? throw new ObjectDisposedException(nameof(LmdbOplogStoreContractHarness)))
            .ExportAsync(datasetId, cancellationToken);
    }

    public async ValueTask DisposeAsync()
    {
        _store?.Dispose();
        _store = null;

        // Best-effort temp-directory cleanup. File handles can linger briefly after
        // disposing the store (notably on Windows), so retry with a short delay.
        // BUGFIX: previously the final failed attempt rethrew out of DisposeAsync;
        // never throw from dispose — a leaked temp dir is harmless, while a throwing
        // DisposeAsync masks the real test outcome.
        for (var attempt = 0; attempt < 5; attempt++)
        {
            try
            {
                if (Directory.Exists(_rootPath))
                    Directory.Delete(_rootPath, true);
                break;
            }
            catch
            {
                if (attempt < 4)
                    await Task.Delay(50);
            }
        }
    }

    /// <summary>Opens an LMDB-backed store over this harness's dedicated directory.</summary>
    private LmdbOplogStore CreateStore()
    {
        string lmdbPath = Path.Combine(_rootPath, "lmdb-oplog");
        Directory.CreateDirectory(lmdbPath);
        return new LmdbOplogStore(
            Substitute.For<IDocumentStore>(),
            new LastWriteWinsConflictResolver(),
            new VectorClockService(),
            new LmdbOplogOptions
            {
                EnvironmentPath = lmdbPath,
                MapSizeBytes = 64L * 1024 * 1024,
                MaxDatabases = 16,
                PruneBatchSize = 128
            },
            null,
            NullLogger<LmdbOplogStore>.Instance);
    }
}

View File

@@ -0,0 +1,121 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Contract tests that every <see cref="IOplogStore"/> implementation must satisfy.
/// Concrete fixtures (Surreal, LMDB) supply an <see cref="IOplogStoreContractHarness"/>
/// so the same behavioral expectations are enforced across both migration backends.
/// </summary>
public abstract class OplogStoreContractTestBase
{
    [Fact]
    public async Task OplogStore_AppendQueryMergeDrop_AndLastHash_Works()
    {
        await using var harness = await CreateHarnessAsync();
        IOplogStore store = harness.Store;

        // A three-entry chain on node-a plus one interleaved entry from node-b.
        var entry1 = CreateOplogEntry("Users", "u1", "node-a", 100, 0, "");
        var entry2 = CreateOplogEntry("Users", "u2", "node-a", 110, 0, entry1.Hash);
        var entry3 = CreateOplogEntry("Users", "u3", "node-a", 120, 1, entry2.Hash);
        var otherNode = CreateOplogEntry("Users", "u4", "node-b", 115, 0, "");
        await store.AppendOplogEntryAsync(entry1);
        await store.AppendOplogEntryAsync(entry2);
        await store.AppendOplogEntryAsync(entry3);
        await store.AppendOplogEntryAsync(otherNode);

        // Chain range: exclusive of the start hash, inclusive of the end hash.
        var chainRange = (await store.GetChainRangeAsync(entry1.Hash, entry3.Hash)).ToList();
        chainRange.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, entry3.Hash });

        // "After" queries are HLC-ordered across nodes and exclude the given timestamp.
        var after = (await store.GetOplogAfterAsync(new HlcTimestamp(100, 0, "node-a"))).ToList();
        after.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, otherNode.Hash, entry3.Hash });

        // Merge must dedupe the already-present entry2 and add only mergedEntry.
        var mergedEntry = CreateOplogEntry("Users", "u5", "node-a", 130, 0, entry3.Hash);
        await store.MergeAsync(new[] { entry2, mergedEntry });
        var exported = (await store.ExportAsync()).ToList();
        exported.Count.ShouldBe(5);
        exported.Count(x => x.Hash == entry2.Hash).ShouldBe(1);

        // NOTE(review): after the merge, the live store still answers with entry3
        // (the last *appended* hash); only a reopened store reports mergedEntry.
        // These asserts pin current cached-vs-persisted behavior — confirm intended.
        string? cachedLastNodeAHash = await store.GetLastEntryHashAsync("node-a");
        cachedLastNodeAHash.ShouldBe(entry3.Hash);
        IOplogStore rehydratedStore = harness.ReopenStore();
        string? persistedLastNodeAHash = await rehydratedStore.GetLastEntryHashAsync("node-a");
        persistedLastNodeAHash.ShouldBe(mergedEntry.Hash);

        // Drop clears the whole oplog.
        await rehydratedStore.DropAsync();
        (await rehydratedStore.ExportAsync()).ShouldBeEmpty();
    }

    [Fact]
    public async Task OplogStore_DatasetIsolation_Works()
    {
        await using var harness = await CreateHarnessAsync();

        // Same node and collection, different datasets: entries must not leak across.
        // (Fix: removed an unused `harness.Store` local — this test only uses the
        // harness's dataset-aware append/export.)
        var primaryEntry = CreateOplogEntry("Users", "p1", "node-a", 100, 0, "");
        var logsEntry = CreateOplogEntry("Users", "l1", "node-a", 100, 1, "");
        await harness.AppendOplogEntryAsync(primaryEntry, DatasetId.Primary);
        await harness.AppendOplogEntryAsync(logsEntry, DatasetId.Logs);

        var primary = (await harness.ExportAsync(DatasetId.Primary)).ToList();
        var logs = (await harness.ExportAsync(DatasetId.Logs)).ToList();
        primary.Count.ShouldBe(1);
        primary[0].DatasetId.ShouldBe(DatasetId.Primary);
        logs.Count.ShouldBe(1);
        logs[0].DatasetId.ShouldBe(DatasetId.Logs);
    }

    [Fact]
    public async Task OplogStore_GetChainRangeAsync_ReturnsOrderedLinkedRange()
    {
        await using var harness = await CreateHarnessAsync();
        IOplogStore store = harness.Store;

        var entry1 = CreateOplogEntry("Users", "k1", "node1", 1000, 0, "");
        var entry2 = CreateOplogEntry("Users", "k2", "node1", 2000, 0, entry1.Hash);
        var entry3 = CreateOplogEntry("Users", "k3", "node1", 3000, 0, entry2.Hash);
        await store.AppendOplogEntryAsync(entry1);
        await store.AppendOplogEntryAsync(entry2);
        await store.AppendOplogEntryAsync(entry3);

        // Exclusive start, inclusive end, ordered by the previous-hash links.
        var range = (await store.GetChainRangeAsync(entry1.Hash, entry3.Hash)).ToList();
        range.Count.ShouldBe(2);
        range[0].Hash.ShouldBe(entry2.Hash);
        range[1].Hash.ShouldBe(entry3.Hash);
    }

    /// <summary>Creates the store-specific harness used by this fixture's tests.</summary>
    protected abstract Task<IOplogStoreContractHarness> CreateHarnessAsync();

    /// <summary>
    /// Builds a minimal <see cref="OplogEntry"/> whose JSON payload is <c>{ "key": key }</c>.
    /// </summary>
    protected static OplogEntry CreateOplogEntry(
        string collection,
        string key,
        string nodeId,
        long wall,
        int logic,
        string previousHash)
    {
        return new OplogEntry(
            collection,
            key,
            OperationType.Put,
            JsonSerializer.SerializeToElement(new { key }),
            new HlcTimestamp(wall, logic, nodeId),
            previousHash);
    }
}
/// <summary>
/// Abstraction over a concrete oplog store used by the shared contract tests,
/// providing restart simulation and dataset-aware append/export.
/// </summary>
public interface IOplogStoreContractHarness : IAsyncDisposable
{
/// <summary>Gets the store currently under test.</summary>
IOplogStore Store { get; }
/// <summary>Recreates the store over the same durable backing, simulating a process restart.</summary>
IOplogStore ReopenStore();
/// <summary>Appends an entry to a specific dataset (routed to the store's dataset-aware overload).</summary>
Task AppendOplogEntryAsync(OplogEntry entry, string datasetId, CancellationToken cancellationToken = default);
/// <summary>Exports all entries from a specific dataset.</summary>
Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default);
}