using System.Text.Json;
using Microsoft.Extensions.Logging.Abstractions;
using NSubstitute;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence;
using ZB.MOM.WW.CBDDC.Persistence.Lmdb;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;

namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;

/// <summary>
/// Exercises the Surreal-to-LMDB oplog migration path: feature-flag routing
/// (dual write, LMDB-preferred reads with reconciliation, shadow validation,
/// rollback to Surreal) plus the one-shot <see cref="LmdbOplogBackfillTool"/>.
/// </summary>
public class LmdbOplogMigrationTests
{
    [Fact]
    public async Task FeatureFlags_DualWrite_WritesToBothStores()
    {
        await using var harness = new SurrealTestHarness();
        var surreal = harness.CreateOplogStore();
        using var lmdb = CreateLmdbStore();

        var flags = new LmdbOplogFeatureFlags
        {
            UseLmdbOplog = true,
            DualWriteOplog = true,
            PreferLmdbReads = false
        };

        var flagStore = new FeatureFlagOplogStore(
            surreal,
            lmdb,
            flags,
            logger: NullLogger.Instance);

        var written = CreateEntry("Users", "dual-write", "node-a", 100, 0, "");
        await flagStore.AppendOplogEntryAsync(written);

        // With dual write enabled, one append must land in both backends.
        (await surreal.GetEntryByHashAsync(written.Hash)).ShouldNotBeNull();
        (await lmdb.GetEntryByHashAsync(written.Hash)).ShouldNotBeNull();
    }

    [Fact]
    public async Task FeatureFlags_PreferLmdbReads_ReconcilesFromSurrealWhenLmdbMissingEntries()
    {
        await using var harness = new SurrealTestHarness();
        var surreal = harness.CreateOplogStore();
        using var lmdb = CreateLmdbStore();

        var flags = new LmdbOplogFeatureFlags
        {
            UseLmdbOplog = true,
            DualWriteOplog = false,
            PreferLmdbReads = true,
            ReconciliationInterval = TimeSpan.Zero
        };

        var missing = CreateEntry("Users", "reconcile-1", "node-a", 200, 0, "");

        // Simulate a crash window: the entry reached Surreal before the
        // LMDB-backed migration store ever came up, so LMDB has no copy yet.
        await surreal.AppendOplogEntryAsync(missing);
        (await lmdb.GetEntryByHashAsync(missing.Hash)).ShouldBeNull();

        var flagStore = new FeatureFlagOplogStore(
            surreal,
            lmdb,
            flags,
            logger: NullLogger.Instance);

        // The read should still resolve (falling back to Surreal) ...
        OplogEntry? resolved = await flagStore.GetEntryByHashAsync(missing.Hash);
        resolved.ShouldNotBeNull();
        resolved.Hash.ShouldBe(missing.Hash);

        // ... and reconciliation should have backfilled LMDB as a side effect.
        (await lmdb.GetEntryByHashAsync(missing.Hash)).ShouldNotBeNull();

        OplogMigrationTelemetrySnapshot telemetry = flagStore.GetTelemetrySnapshot();
        telemetry.ReconciliationRuns.ShouldBeGreaterThanOrEqualTo(1);
        telemetry.ReconciledEntries.ShouldBeGreaterThanOrEqualTo(1);
    }

    [Fact]
    public async Task FeatureFlags_ShadowValidation_RecordsMismatchTelemetry()
    {
        await using var harness = new SurrealTestHarness();
        var surreal = harness.CreateOplogStore();
        using var lmdb = CreateLmdbStore();

        var telemetrySink = new OplogMigrationTelemetry();
        var flags = new LmdbOplogFeatureFlags
        {
            UseLmdbOplog = true,
            DualWriteOplog = true,
            PreferLmdbReads = false,
            EnableReadShadowValidation = true
        };

        var flagStore = new FeatureFlagOplogStore(
            surreal,
            lmdb,
            flags,
            telemetrySink,
            NullLogger.Instance);

        // Write directly to Surreal, bypassing the flag store's dual write,
        // so the shadow read against LMDB is guaranteed to disagree.
        var divergent = CreateEntry("Users", "shadow-mismatch-1", "node-a", 210, 0, "");
        await surreal.AppendOplogEntryAsync(divergent);

        OplogEntry? resolved = await flagStore.GetEntryByHashAsync(divergent.Hash);
        resolved.ShouldNotBeNull();

        OplogMigrationTelemetrySnapshot snapshot = flagStore.GetTelemetrySnapshot();
        snapshot.ShadowComparisons.ShouldBe(1);
        snapshot.ShadowMismatches.ShouldBe(1);
    }

    [Fact]
    public async Task FeatureFlags_RollbackToSurreal_UsesSurrealForWritesAndReads()
    {
        await using var harness = new SurrealTestHarness();
        var surreal = harness.CreateOplogStore();
        using var lmdb = CreateLmdbStore();

        // NOTE(review): UseLmdbOplog stays true here; "rollback" is expressed
        // purely via DualWriteOplog/PreferLmdbReads — confirm this matches the
        // intended flag semantics in FeatureFlagOplogStore.
        var flags = new LmdbOplogFeatureFlags
        {
            UseLmdbOplog = true,
            DualWriteOplog = false,
            PreferLmdbReads = false
        };

        var flagStore = new FeatureFlagOplogStore(
            surreal,
            lmdb,
            flags,
            logger: NullLogger.Instance);

        var written = CreateEntry("Users", "rollback-1", "node-a", 220, 0, "");
        await flagStore.AppendOplogEntryAsync(written);

        // Writes must reach Surreal only.
        (await surreal.GetEntryByHashAsync(written.Hash)).ShouldNotBeNull();
        (await lmdb.GetEntryByHashAsync(written.Hash)).ShouldBeNull();

        // Reads routed through the flag store must still resolve from Surreal.
        OplogEntry? routedRead = await flagStore.GetEntryByHashAsync(written.Hash);
        routedRead.ShouldNotBeNull();
        routedRead.Hash.ShouldBe(written.Hash);
    }

    [Fact]
    public async Task BackfillTool_BackfillAndValidate_ReportsSuccess()
    {
        await using var harness = new SurrealTestHarness();
        var surreal = harness.CreateOplogStore();
        using var lmdb = CreateLmdbStore();

        var tool = new LmdbOplogBackfillTool(surreal, lmdb, NullLogger.Instance);

        // Two independent per-node hash chains (node-a and node-b).
        var first = CreateEntry("Users", "backfill-1", "node-a", 300, 0, "");
        var second = CreateEntry("Users", "backfill-2", "node-a", 301, 0, first.Hash);
        var third = CreateEntry("Users", "backfill-3", "node-b", 302, 0, "");
        var fourth = CreateEntry("Users", "backfill-4", "node-b", 303, 0, third.Hash);

        await surreal.AppendOplogEntryAsync(first);
        await surreal.AppendOplogEntryAsync(second);
        await surreal.AppendOplogEntryAsync(third);
        await surreal.AppendOplogEntryAsync(fourth);

        LmdbOplogBackfillReport report = await tool.BackfillAsync(DatasetId.Primary);

        report.IsSuccess.ShouldBeTrue();
        report.CountsMatch.ShouldBeTrue();
        report.CountsPerNodeMatch.ShouldBeTrue();
        report.LatestHashPerNodeMatch.ShouldBeTrue();
        report.HashSpotChecksPassed.ShouldBeTrue();
        report.ChainSpotChecksPassed.ShouldBeTrue();
        report.SourceCount.ShouldBe(4);
        report.DestinationCount.ShouldBe(4);
    }

    [Fact]
    public async Task BackfillTool_BackfillAndValidate_WorksPerDataset()
    {
        await using var harness = new SurrealTestHarness();
        var surreal = harness.CreateOplogStore();
        using var lmdb = CreateLmdbStore();

        var tool = new LmdbOplogBackfillTool(surreal, lmdb, NullLogger.Instance);

        var logsEntryA = CreateEntry("Logs", "log-1", "node-a", 400, 0, "");
        var logsEntryB = CreateEntry("Logs", "log-2", "node-a", 401, 0, logsEntryA.Hash);
        var primaryEntry = CreateEntry("Users", "primary-1", "node-a", 500, 0, "");

        await surreal.AppendOplogEntryAsync(logsEntryA, DatasetId.Logs);
        await surreal.AppendOplogEntryAsync(logsEntryB, DatasetId.Logs);
        await surreal.AppendOplogEntryAsync(primaryEntry, DatasetId.Primary);

        // Backfilling Logs must copy exactly the two Logs entries ...
        LmdbOplogBackfillReport logsReport = await tool.BackfillAsync(DatasetId.Logs);
        logsReport.IsSuccess.ShouldBeTrue();
        logsReport.SourceCount.ShouldBe(2);
        logsReport.DestinationCount.ShouldBe(2);

        (await lmdb.GetEntryByHashAsync(logsEntryA.Hash, DatasetId.Logs)).ShouldNotBeNull();
        (await lmdb.GetEntryByHashAsync(logsEntryB.Hash, DatasetId.Logs)).ShouldNotBeNull();

        // ... and must leave the Primary dataset untouched in LMDB.
        (await lmdb.GetEntryByHashAsync(primaryEntry.Hash, DatasetId.Primary)).ShouldBeNull();
    }

    /// <summary>
    /// Creates an LMDB oplog store rooted in a unique temp directory so tests
    /// never share on-disk state.
    /// </summary>
    private static LmdbOplogStore CreateLmdbStore()
    {
        string rootPath = Path.Combine(
            Path.GetTempPath(),
            "cbddc-lmdb-migration",
            Guid.NewGuid().ToString("N"));
        Directory.CreateDirectory(rootPath);

        // NOTE(review): Substitute.For() carries no type argument — this looks
        // like a generic argument lost in formatting (Substitute.For<I...>());
        // confirm the intended interface and restore it.
        return new LmdbOplogStore(
            Substitute.For(),
            new LastWriteWinsConflictResolver(),
            new VectorClockService(),
            new LmdbOplogOptions
            {
                EnvironmentPath = rootPath,
                MapSizeBytes = 64L * 1024 * 1024,
                MaxDatabases = 16
            },
            null,
            NullLogger.Instance);
    }

    /// <summary>
    /// Builds a Put oplog entry whose JSON payload echoes its key and whose
    /// HLC timestamp is assembled from the given wall/logic/node components.
    /// </summary>
    private static OplogEntry CreateEntry(
        string collection,
        string key,
        string nodeId,
        long wall,
        int logic,
        string previousHash) =>
        new OplogEntry(
            collection,
            key,
            OperationType.Put,
            JsonSerializer.SerializeToElement(new { key }),
            new HlcTimestamp(wall, logic, nodeId),
            previousHash);
}