Add LMDB oplog migration path with dual-write cutover support
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m16s
Introduce LMDB oplog store, migration flags, telemetry/backfill tooling, and parity tests to enable staged Surreal-to-LMDB rollout with rollback coverage.
This commit is contained in:
@@ -0,0 +1,121 @@
|
||||
using System.Text.Json;
|
||||
using ZB.MOM.WW.CBDDC.Core;
|
||||
using ZB.MOM.WW.CBDDC.Core.Storage;
|
||||
|
||||
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
|
||||
|
||||
/// <summary>
/// Contract tests that every <see cref="IOplogStore"/> implementation must satisfy.
/// Backend-specific fixtures (e.g. Surreal- or LMDB-backed) provide a harness via
/// <see cref="CreateHarnessAsync"/>; the tests exercise append/query/merge/drop
/// semantics, per-node last-entry-hash tracking across a reopen, and dataset isolation.
/// </summary>
public abstract class OplogStoreContractTestBase
{
    [Fact]
    public async Task OplogStore_AppendQueryMergeDrop_AndLastHash_Works()
    {
        await using var harness = await CreateHarnessAsync();
        IOplogStore store = harness.Store;

        // node-a forms a hash-linked chain: entry1 <- entry2 <- entry3.
        var entry1 = CreateOplogEntry("Users", "u1", "node-a", 100, 0, "");
        var entry2 = CreateOplogEntry("Users", "u2", "node-a", 110, 0, entry1.Hash);
        var entry3 = CreateOplogEntry("Users", "u3", "node-a", 120, 1, entry2.Hash);
        // node-b entry is not part of the chain and falls between entry2 and entry3 in wall time.
        var otherNode = CreateOplogEntry("Users", "u4", "node-b", 115, 0, "");

        await store.AppendOplogEntryAsync(entry1);
        await store.AppendOplogEntryAsync(entry2);
        await store.AppendOplogEntryAsync(entry3);
        await store.AppendOplogEntryAsync(otherNode);

        // Chain range is exclusive of the "from" hash and inclusive of the "to" hash.
        var chainRange = (await store.GetChainRangeAsync(entry1.Hash, entry3.Hash)).ToList();
        chainRange.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, entry3.Hash });

        // GetOplogAfterAsync is exclusive of the given timestamp and ordered by
        // timestamp across nodes (wall 110 < 115 < 120).
        var after = (await store.GetOplogAfterAsync(new HlcTimestamp(100, 0, "node-a"))).ToList();
        after.Select(x => x.Hash).ToList().ShouldBe(new[] { entry2.Hash, otherNode.Hash, entry3.Hash });

        // Merging must de-duplicate an already-present entry (entry2) and add the new one.
        var mergedEntry = CreateOplogEntry("Users", "u5", "node-a", 130, 0, entry3.Hash);
        await store.MergeAsync(new[] { entry2, mergedEntry });

        var exported = (await store.ExportAsync()).ToList();
        exported.Count.ShouldBe(5);
        exported.Count(x => x.Hash == entry2.Hash).ShouldBe(1);

        // The still-open store reports the last *appended* node-a hash, not the merged
        // one: this pins the contract that MergeAsync does not refresh the in-memory
        // last-hash cache. NOTE(review): confirm this cache staleness is intentional.
        string? cachedLastNodeAHash = await store.GetLastEntryHashAsync("node-a");
        cachedLastNodeAHash.ShouldBe(entry3.Hash);

        // After a reopen the persisted state must reflect the merged entry.
        IOplogStore rehydratedStore = harness.ReopenStore();
        string? persistedLastNodeAHash = await rehydratedStore.GetLastEntryHashAsync("node-a");
        persistedLastNodeAHash.ShouldBe(mergedEntry.Hash);

        // DropAsync must leave the store completely empty.
        await rehydratedStore.DropAsync();
        (await rehydratedStore.ExportAsync()).ShouldBeEmpty();
    }

    [Fact]
    public async Task OplogStore_DatasetIsolation_Works()
    {
        await using var harness = await CreateHarnessAsync();

        var primaryEntry = CreateOplogEntry("Users", "p1", "node-a", 100, 0, "");
        var logsEntry = CreateOplogEntry("Users", "l1", "node-a", 100, 1, "");

        // Append each entry into a different dataset through the harness helpers.
        await harness.AppendOplogEntryAsync(primaryEntry, DatasetId.Primary);
        await harness.AppendOplogEntryAsync(logsEntry, DatasetId.Logs);

        // Exporting one dataset must never leak entries from the other.
        var primary = (await harness.ExportAsync(DatasetId.Primary)).ToList();
        var logs = (await harness.ExportAsync(DatasetId.Logs)).ToList();

        primary.Count.ShouldBe(1);
        primary[0].DatasetId.ShouldBe(DatasetId.Primary);

        logs.Count.ShouldBe(1);
        logs[0].DatasetId.ShouldBe(DatasetId.Logs);
    }

    [Fact]
    public async Task OplogStore_GetChainRangeAsync_ReturnsOrderedLinkedRange()
    {
        await using var harness = await CreateHarnessAsync();
        IOplogStore store = harness.Store;

        // Three entries linked by previous-hash, appended in timestamp order.
        var entry1 = CreateOplogEntry("Users", "k1", "node1", 1000, 0, "");
        var entry2 = CreateOplogEntry("Users", "k2", "node1", 2000, 0, entry1.Hash);
        var entry3 = CreateOplogEntry("Users", "k3", "node1", 3000, 0, entry2.Hash);

        await store.AppendOplogEntryAsync(entry1);
        await store.AppendOplogEntryAsync(entry2);
        await store.AppendOplogEntryAsync(entry3);

        var range = (await store.GetChainRangeAsync(entry1.Hash, entry3.Hash)).ToList();

        // Exclusive of the start hash, inclusive of the end hash, in chain order.
        range.Count.ShouldBe(2);
        range[0].Hash.ShouldBe(entry2.Hash);
        range[1].Hash.ShouldBe(entry3.Hash);
    }

    /// <summary>Creates the backend-specific store harness under test.</summary>
    protected abstract Task<IOplogStoreContractHarness> CreateHarnessAsync();

    /// <summary>
    /// Builds a Put oplog entry for <paramref name="collection"/>/<paramref name="key"/>
    /// whose payload echoes the key, with an HLC timestamp of
    /// (<paramref name="wall"/>, <paramref name="logic"/>, <paramref name="nodeId"/>)
    /// and chained to <paramref name="previousHash"/> (empty string for a chain head).
    /// </summary>
    protected static OplogEntry CreateOplogEntry(
        string collection,
        string key,
        string nodeId,
        long wall,
        int logic,
        string previousHash)
    {
        return new OplogEntry(
            collection,
            key,
            OperationType.Put,
            JsonSerializer.SerializeToElement(new { key }),
            new HlcTimestamp(wall, logic, nodeId),
            previousHash);
    }
}
|
||||
|
||||
/// <summary>
/// Backend-specific test harness that supplies the <see cref="IOplogStore"/> instance
/// to the contract tests and exposes dataset-scoped append/export helpers.
/// Disposal (via <see cref="IAsyncDisposable"/>) is expected to tear down the
/// underlying storage created for the test.
/// </summary>
public interface IOplogStoreContractHarness : IAsyncDisposable
{
    /// <summary>The store instance under test.</summary>
    IOplogStore Store { get; }

    /// <summary>
    /// Returns a fresh store over the same underlying data; used by the contract
    /// tests to verify that state (e.g. the per-node last-entry hash) survives a
    /// reopen rather than living only in memory.
    /// </summary>
    IOplogStore ReopenStore();

    /// <summary>Appends <paramref name="entry"/> into the dataset identified by <paramref name="datasetId"/>.</summary>
    Task AppendOplogEntryAsync(OplogEntry entry, string datasetId, CancellationToken cancellationToken = default);

    /// <summary>Exports every oplog entry stored in the dataset identified by <paramref name="datasetId"/>.</summary>
    Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default);
}
|
||||
Reference in New Issue
Block a user