Add XML docs required by CommentChecker fixes
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m13s

This commit is contained in:
Joseph Doherty
2026-02-23 04:39:25 -05:00
parent cce24fa8f3
commit 6c4714f666
15 changed files with 444 additions and 15 deletions

View File

@@ -24,6 +24,11 @@ public sealed class FeatureFlagOplogStore : IOplogStore
/// <summary>
/// Initializes a new instance of the <see cref="FeatureFlagOplogStore" /> class.
/// </summary>
/// <param name="surreal">The Surreal-backed oplog store.</param>
/// <param name="lmdb">The LMDB-backed oplog store.</param>
/// <param name="flags">Feature flags controlling migration and routing behavior.</param>
/// <param name="telemetry">Optional telemetry collector for migration metrics.</param>
/// <param name="logger">Optional logger for routing and fallback diagnostics.</param>
public FeatureFlagOplogStore(
SurrealOplogStore surreal,
LmdbOplogStore lmdb,

View File

@@ -18,6 +18,9 @@ public sealed class LmdbOplogBackfillTool
/// <summary>
/// Initializes a new instance of the <see cref="LmdbOplogBackfillTool" /> class.
/// </summary>
/// <param name="source">The Surreal oplog source.</param>
/// <param name="destination">The LMDB oplog destination.</param>
/// <param name="logger">Optional logger instance.</param>
public LmdbOplogBackfillTool(
SurrealOplogStore source,
LmdbOplogStore destination,
@@ -31,6 +34,8 @@ public sealed class LmdbOplogBackfillTool
/// <summary>
/// Backfills one dataset from Surreal to LMDB and validates parity.
/// </summary>
/// <param name="datasetId">Dataset identifier to migrate.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async Task<LmdbOplogBackfillReport> BackfillAsync(
string datasetId,
CancellationToken cancellationToken = default)
@@ -61,6 +66,8 @@ public sealed class LmdbOplogBackfillTool
/// <summary>
/// Validates parity only without running a backfill merge.
/// </summary>
/// <param name="datasetId">Dataset identifier to validate.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async Task<LmdbOplogBackfillReport> ValidateParityAsync(
string datasetId,
CancellationToken cancellationToken = default)
@@ -73,6 +80,8 @@ public sealed class LmdbOplogBackfillTool
/// <summary>
/// Backfills and throws when parity validation fails.
/// </summary>
/// <param name="datasetId">Dataset identifier to backfill.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async Task<LmdbOplogBackfillReport> BackfillOrThrowAsync(
string datasetId,
CancellationToken cancellationToken = default)

View File

@@ -35,6 +35,12 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
/// <summary>
/// Initializes a new instance of the <see cref="LmdbOplogStore" /> class.
/// </summary>
/// <param name="documentStore">Document store used for snapshot metadata persistence.</param>
/// <param name="conflictResolver">Conflict resolver used for resolving oplog merges.</param>
/// <param name="vectorClockService">Vector-clock service used for dataset ordering.</param>
/// <param name="options">Configuration options for LMDB storage.</param>
/// <param name="snapshotMetadataStore">Optional snapshot metadata store.</param>
/// <param name="logger">Optional logger for LMDB diagnostics.</param>
public LmdbOplogStore(
IDocumentStore documentStore,
IConflictResolver conflictResolver,
@@ -342,6 +348,8 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
/// <summary>
/// Drops all oplog data for the specified dataset.
/// </summary>
/// <param name="datasetId">The dataset identifier.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async Task DropAsync(string datasetId, CancellationToken cancellationToken = default)
{
string normalizedDatasetId = NormalizeDatasetId(datasetId);
@@ -373,6 +381,8 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
/// <summary>
/// Exports all oplog entries for a dataset.
/// </summary>
/// <param name="datasetId">The dataset identifier.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default)
{
cancellationToken.ThrowIfCancellationRequested();
@@ -408,6 +418,9 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
/// <summary>
/// Imports oplog entries for a dataset (upsert semantics).
/// </summary>
/// <param name="items">Entries to import.</param>
/// <param name="datasetId">The dataset identifier.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public Task ImportAsync(
IEnumerable<OplogEntry> items,
string datasetId,
@@ -425,6 +438,9 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
/// <summary>
/// Merges oplog entries into a dataset (dedupe by hash).
/// </summary>
/// <param name="items">Entries to merge.</param>
/// <param name="datasetId">The dataset identifier.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public Task MergeAsync(
IEnumerable<OplogEntry> items,
string datasetId,
@@ -626,6 +642,8 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
/// <summary>
/// Returns index-level diagnostics for a dataset, useful in contract/unit tests.
/// </summary>
/// <param name="datasetId">The dataset identifier.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public Task<LmdbOplogIndexDiagnostics> GetIndexDiagnosticsAsync(
string datasetId,
CancellationToken cancellationToken = default)
@@ -1360,15 +1378,54 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
private sealed class OplogEntryDto
{
// NOTE(review): property declaration order may determine JSON member order for
// serialized entries (and any hash computed over that JSON) — confirm before reordering.
/// <summary>
/// Dataset identifier for the serialized oplog entry.
/// </summary>
public string DatasetId { get; set; } = global::ZB.MOM.WW.CBDDC.Core.DatasetId.Primary;
/// <summary>
/// Entry collection name.
/// </summary>
public string Collection { get; set; } = string.Empty;
/// <summary>
/// Entry key within the collection.
/// </summary>
public string Key { get; set; } = string.Empty;
/// <summary>
/// Operation performed for this entry.
/// </summary>
public OperationType Operation { get; set; }
/// <summary>
/// Serialized payload for the entry; <see langword="null" /> when the operation carries no document body.
/// </summary>
public JsonElement? Payload { get; set; }
/// <summary>
/// Physical time component of the HLC timestamp.
/// </summary>
public long PhysicalTime { get; set; }
/// <summary>
/// Logical counter component of the HLC timestamp.
/// </summary>
public int LogicalCounter { get; set; }
/// <summary>
/// Logical-clock node identifier.
/// </summary>
public string NodeId { get; set; } = string.Empty;
/// <summary>
/// Previous hash in the oplog chain.
/// </summary>
public string PreviousHash { get; set; } = string.Empty;
/// <summary>
/// Entry hash.
/// </summary>
public string Hash { get; set; } = string.Empty;
}
}
@@ -1376,10 +1433,60 @@ public sealed class LmdbOplogStore : OplogStore, IDisposable
/// <summary>
/// Dataset-scoped LMDB oplog index counts.
/// </summary>
public readonly record struct LmdbOplogIndexDiagnostics(
string DatasetId,
long OplogByHashCount,
long OplogByHlcCount,
long OplogByNodeHlcCount,
long OplogPrevToHashCount,
long OplogNodeHeadCount);
/// <summary>
/// Immutable, dataset-scoped snapshot of LMDB oplog index counts.
/// </summary>
public readonly record struct LmdbOplogIndexDiagnostics
{
    /// <summary>
    /// Creates a diagnostics snapshot for a single dataset.
    /// </summary>
    /// <param name="datasetId">Dataset identifier the counts belong to.</param>
    /// <param name="oplogByHashCount">Number of rows in the hash index.</param>
    /// <param name="oplogByHlcCount">Number of rows in the HLC index.</param>
    /// <param name="oplogByNodeHlcCount">Number of rows in the per-node HLC index.</param>
    /// <param name="oplogPrevToHashCount">Number of rows in the previous-hash index.</param>
    /// <param name="oplogNodeHeadCount">Number of node-head rows.</param>
    public LmdbOplogIndexDiagnostics(
        string datasetId,
        long oplogByHashCount,
        long oplogByHlcCount,
        long oplogByNodeHlcCount,
        long oplogPrevToHashCount,
        long oplogNodeHeadCount)
    {
        DatasetId = datasetId;
        OplogByHashCount = oplogByHashCount;
        OplogByHlcCount = oplogByHlcCount;
        OplogByNodeHlcCount = oplogByNodeHlcCount;
        OplogPrevToHashCount = oplogPrevToHashCount;
        OplogNodeHeadCount = oplogNodeHeadCount;
    }

    /// <summary>
    /// Gets the dataset identifier for these diagnostics.
    /// </summary>
    public string DatasetId { get; init; }

    /// <summary>
    /// Gets the row count of the hash index.
    /// </summary>
    public long OplogByHashCount { get; init; }

    /// <summary>
    /// Gets the row count of the HLC index.
    /// </summary>
    public long OplogByHlcCount { get; init; }

    /// <summary>
    /// Gets the row count of the per-node HLC index.
    /// </summary>
    public long OplogByNodeHlcCount { get; init; }

    /// <summary>
    /// Gets the row count of the previous-hash index.
    /// </summary>
    public long OplogPrevToHashCount { get; init; }

    /// <summary>
    /// Gets the row count of the node head index.
    /// </summary>
    public long OplogNodeHeadCount { get; init; }
}

View File

@@ -25,6 +25,7 @@ public sealed class OplogMigrationTelemetry
/// <summary>
/// Records the outcome of one shadow comparison.
/// </summary>
/// <param name="isMatch"><see langword="true" /> when source and LMDB entries matched.</param>
public void RecordShadowComparison(bool isMatch)
{
Interlocked.Increment(ref _shadowComparisons);
@@ -42,6 +43,8 @@ public sealed class OplogMigrationTelemetry
/// <summary>
/// Records one reconciliation/backfill run for a dataset.
/// </summary>
/// <param name="datasetId">Dataset identifier for this reconciliation.</param>
/// <param name="entriesMerged">Count of entries merged into LMDB.</param>
public void RecordReconciliation(string datasetId, int entriesMerged)
{
string normalizedDatasetId = DatasetId.Normalize(datasetId);

View File

@@ -5,6 +5,9 @@ namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class DatasetAwareModelTests
{
/// <summary>
/// Verifies <see cref="DocumentMetadata" /> defaults to the primary dataset.
/// </summary>
[Fact]
public void DocumentMetadata_ShouldDefaultDatasetId_ToPrimary()
{
@@ -13,6 +16,9 @@ public class DatasetAwareModelTests
metadata.DatasetId.ShouldBe(DatasetId.Primary);
}
/// <summary>
/// Verifies dataset identifiers survive JSON round-trips for <see cref="DocumentMetadata" />.
/// </summary>
[Fact]
public void DocumentMetadata_SerializationRoundTrip_ShouldPreserveDatasetId()
{
@@ -25,6 +31,9 @@ public class DatasetAwareModelTests
restored.DatasetId.ShouldBe("logs");
}
/// <summary>
/// Verifies <see cref="SnapshotMetadata" /> defaults to the primary dataset.
/// </summary>
[Fact]
public void SnapshotMetadata_ShouldDefaultDatasetId_ToPrimary()
{
@@ -33,6 +42,9 @@ public class DatasetAwareModelTests
metadata.DatasetId.ShouldBe(DatasetId.Primary);
}
/// <summary>
/// Verifies <see cref="PeerOplogConfirmation" /> defaults to the primary dataset.
/// </summary>
[Fact]
public void PeerOplogConfirmation_ShouldDefaultDatasetId_ToPrimary()
{

View File

@@ -6,6 +6,9 @@ namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class MultiDatasetRegistrationTests
{
/// <summary>
/// Verifies CBDDC network registration replaces the default orchestrator with multi-dataset wiring.
/// </summary>
[Fact]
public void AddCBDDCMultiDataset_ShouldRegisterCoordinatorAndReplaceSyncOrchestrator()
{
@@ -28,12 +31,18 @@ public class MultiDatasetRegistrationTests
private sealed class TestPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider
{
/// <summary>
/// Raised when the peer configuration changes.
/// </summary>
public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged
{
add { }
remove { }
}
/// <summary>
/// Returns the current peer configuration.
/// </summary>
public Task<PeerNodeConfiguration> GetConfiguration()
{
return Task.FromResult(new PeerNodeConfiguration

View File

@@ -7,6 +7,9 @@ namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class MultiDatasetSyncOrchestratorTests
{
/// <summary>
/// Verifies multi-dataset sync is disabled, only the primary context is created.
/// </summary>
[Fact]
public void Constructor_WhenMultiDatasetDisabled_ShouldOnlyCreatePrimaryContext()
{
@@ -27,6 +30,9 @@ public class MultiDatasetSyncOrchestratorTests
datasetIds[0].ShouldBe(DatasetId.Primary);
}
/// <summary>
/// Verifies that failures in one orchestrator do not prevent remaining contexts from starting and stopping.
/// </summary>
[Fact]
public async Task StartStop_WhenOneDatasetThrows_ShouldContinueOtherDatasets()
{
@@ -82,9 +88,19 @@ public class MultiDatasetSyncOrchestratorTests
private sealed class TrackingSyncOrchestrator(Exception? startException = null, Exception? stopException = null)
: ISyncOrchestrator
{
/// <summary>
/// Number of times <see cref="Start" /> has been called.
/// </summary>
public int StartCalls { get; private set; }
/// <summary>
/// Number of times <see cref="Stop" /> has been called.
/// </summary>
public int StopCalls { get; private set; }
/// <summary>
/// Starts the orchestrator.
/// </summary>
public Task Start()
{
StartCalls++;
@@ -92,6 +108,9 @@ public class MultiDatasetSyncOrchestratorTests
return Task.CompletedTask;
}
/// <summary>
/// Stops the orchestrator.
/// </summary>
public Task Stop()
{
StopCalls++;

View File

@@ -12,6 +12,9 @@ namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class LmdbOplogMigrationTests
{
/// <summary>
/// Verifies dual-write mode writes each entry to both Surreal and LMDB stores.
/// </summary>
[Fact]
public async Task FeatureFlags_DualWrite_WritesToBothStores()
{
@@ -39,6 +42,9 @@ public class LmdbOplogMigrationTests
(await lmdbStore.GetEntryByHashAsync(entry.Hash)).ShouldNotBeNull();
}
/// <summary>
/// Verifies preferred LMDB reads reconcile missing LMDB data from Surreal.
/// </summary>
[Fact]
public async Task FeatureFlags_PreferLmdbReads_ReconcilesFromSurrealWhenLmdbMissingEntries()
{
@@ -78,6 +84,9 @@ public class LmdbOplogMigrationTests
telemetry.ReconciledEntries.ShouldBeGreaterThanOrEqualTo(1);
}
/// <summary>
/// Verifies shadow validation records mismatches when LMDB and Surreal diverge.
/// </summary>
[Fact]
public async Task FeatureFlags_ShadowValidation_RecordsMismatchTelemetry()
{
@@ -112,6 +121,9 @@ public class LmdbOplogMigrationTests
snapshot.ShadowMismatches.ShouldBe(1);
}
/// <summary>
/// Verifies rollback to Surreal when dual-write is disabled uses Surreal for all writes and reads.
/// </summary>
[Fact]
public async Task FeatureFlags_RollbackToSurreal_UsesSurrealForWritesAndReads()
{
@@ -143,6 +155,9 @@ public class LmdbOplogMigrationTests
routedRead.Hash.ShouldBe(entry.Hash);
}
/// <summary>
/// Verifies backfill succeeds and records matching dataset counts.
/// </summary>
[Fact]
public async Task BackfillTool_BackfillAndValidate_ReportsSuccess()
{
@@ -173,6 +188,9 @@ public class LmdbOplogMigrationTests
report.DestinationCount.ShouldBe(4);
}
/// <summary>
/// Verifies backfill can target non-primary datasets successfully.
/// </summary>
[Fact]
public async Task BackfillTool_BackfillAndValidate_WorksPerDataset()
{

View File

@@ -12,6 +12,7 @@ namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class SurrealOplogStoreContractParityTests : OplogStoreContractTestBase
{
/// <inheritdoc />
protected override Task<IOplogStoreContractHarness> CreateHarnessAsync()
{
return Task.FromResult<IOplogStoreContractHarness>(new SurrealOplogStoreContractHarness());
@@ -20,11 +21,15 @@ public class SurrealOplogStoreContractParityTests : OplogStoreContractTestBase
public class LmdbOplogStoreContractTests : OplogStoreContractTestBase
{
/// <inheritdoc />
protected override Task<IOplogStoreContractHarness> CreateHarnessAsync()
{
return Task.FromResult<IOplogStoreContractHarness>(new LmdbOplogStoreContractHarness());
}
/// <summary>
/// Verifies prune operations clear index tables as expected.
/// </summary>
[Fact]
public async Task Lmdb_IndexConsistency_InsertPopulatesAndPruneRemovesIndexes()
{
@@ -54,6 +59,9 @@ public class LmdbOplogStoreContractTests : OplogStoreContractTestBase
after.OplogNodeHeadCount.ShouldBe(0);
}
/// <summary>
/// Verifies prune retains newer entries while removing qualifying stale records.
/// </summary>
[Fact]
public async Task Lmdb_Prune_RemovesAtOrBeforeCutoff_AndKeepsNewerInterleavedEntries()
{
@@ -79,6 +87,9 @@ public class LmdbOplogStoreContractTests : OplogStoreContractTestBase
remaining.Contains(nodeANew.Hash).ShouldBeTrue();
}
/// <summary>
/// Verifies node head values recompute correctly after prune operations.
/// </summary>
[Fact]
public async Task Lmdb_NodeHead_AdvancesAndRecomputesAcrossPrune()
{
@@ -100,6 +111,9 @@ public class LmdbOplogStoreContractTests : OplogStoreContractTestBase
(await store.GetLastEntryHashAsync("node-a")).ShouldBeNull();
}
/// <summary>
/// Verifies durable persistence preserves node head after reopen.
/// </summary>
[Fact]
public async Task Lmdb_RestartDurability_PreservesHeadAndScans()
{
@@ -121,6 +135,9 @@ public class LmdbOplogStoreContractTests : OplogStoreContractTestBase
after[1].Hash.ShouldBe(entry2.Hash);
}
/// <summary>
/// Verifies appending duplicate entries remains idempotent.
/// </summary>
[Fact]
public async Task Lmdb_Dedupe_DuplicateHashAppendIsIdempotent()
{
@@ -137,6 +154,9 @@ public class LmdbOplogStoreContractTests : OplogStoreContractTestBase
exported[0].Hash.ShouldBe(entry.Hash);
}
/// <summary>
/// Verifies prune performance remains bounded under large synthetic datasets.
/// </summary>
[Fact]
public async Task Lmdb_PrunePerformanceSmoke_LargeSyntheticWindow_CompletesWithinGenerousBudget()
{
@@ -164,30 +184,53 @@ internal sealed class SurrealOplogStoreContractHarness : IOplogStoreContractHarn
{
private readonly SurrealTestHarness _harness;
/// <summary>
/// Initializes a new surrogate Surreal contract harness.
/// </summary>
public SurrealOplogStoreContractHarness()
{
_harness = new SurrealTestHarness();
Store = _harness.CreateOplogStore();
}
/// <summary>
/// Gets the active store instance.
/// </summary>
public IOplogStore Store { get; private set; }
/// <summary>
/// Reopens the Surreal store and returns a fresh harness handle.
/// </summary>
public IOplogStore ReopenStore()
{
Store = _harness.CreateOplogStore();
return Store;
}
/// <summary>
/// Appends an entry into the Surreal store for a dataset.
/// </summary>
/// <param name="entry">The oplog entry to append.</param>
/// <param name="datasetId">The dataset identifier for the append operation.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public Task AppendOplogEntryAsync(OplogEntry entry, string datasetId, CancellationToken cancellationToken = default)
{
return ((SurrealOplogStore)Store).AppendOplogEntryAsync(entry, datasetId, cancellationToken);
}
/// <summary>
/// Exports all entries for a dataset from the Surreal store.
/// </summary>
/// <param name="datasetId">The dataset identifier to export.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default)
{
return ((SurrealOplogStore)Store).ExportAsync(datasetId, cancellationToken);
}
/// <summary>
/// Disposes Surreal harness resources.
/// </summary>
public ValueTask DisposeAsync()
{
return _harness.DisposeAsync();
@@ -199,6 +242,9 @@ internal sealed class LmdbOplogStoreContractHarness : IOplogStoreContractHarness
private readonly string _rootPath;
private LmdbOplogStore? _store;
/// <summary>
/// Initializes a new LMDB contract harness and backing store.
/// </summary>
public LmdbOplogStoreContractHarness()
{
_rootPath = Path.Combine(Path.GetTempPath(), "cbddc-lmdb-tests", Guid.NewGuid().ToString("N"));
@@ -206,8 +252,14 @@ internal sealed class LmdbOplogStoreContractHarness : IOplogStoreContractHarness
_store = CreateStore();
}
/// <summary>
/// Gets the active LMDB store.
/// </summary>
public IOplogStore Store => _store ?? throw new ObjectDisposedException(nameof(LmdbOplogStoreContractHarness));
/// <summary>
/// Recreates the LMDB store instance and returns it.
/// </summary>
public IOplogStore ReopenStore()
{
_store?.Dispose();
@@ -215,18 +267,32 @@ internal sealed class LmdbOplogStoreContractHarness : IOplogStoreContractHarness
return _store;
}
/// <summary>
/// Appends an entry into the LMDB store for a dataset.
/// </summary>
/// <param name="entry">The oplog entry to append.</param>
/// <param name="datasetId">The dataset identifier for the append operation.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public Task AppendOplogEntryAsync(OplogEntry entry, string datasetId, CancellationToken cancellationToken = default)
{
return (_store ?? throw new ObjectDisposedException(nameof(LmdbOplogStoreContractHarness)))
.AppendOplogEntryAsync(entry, datasetId, cancellationToken);
}
/// <summary>
/// Exports all entries for a dataset from the LMDB store.
/// </summary>
/// <param name="datasetId">The dataset identifier to export.</param>
/// <param name="cancellationToken">The cancellation token.</param>
public Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default)
{
return (_store ?? throw new ObjectDisposedException(nameof(LmdbOplogStoreContractHarness)))
.ExportAsync(datasetId, cancellationToken);
}
/// <summary>
/// Disposes LMDB harness resources.
/// </summary>
public async ValueTask DisposeAsync()
{
_store?.Dispose();

View File

@@ -6,6 +6,9 @@ namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public class MultiDatasetConfigParsingTests
{
/// <summary>
/// Verifies multi-dataset section binds runtime options from JSON.
/// </summary>
[Fact]
public void MultiDatasetSection_ShouldBindRuntimeOptions()
{

View File

@@ -6,6 +6,9 @@ namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
public abstract class OplogStoreContractTestBase
{
/// <summary>
/// Verifies append, merge, and drop behavior across query, chain, and restart scenarios.
/// </summary>
[Fact]
public async Task OplogStore_AppendQueryMergeDrop_AndLastHash_Works()
{
@@ -46,6 +49,9 @@ public abstract class OplogStoreContractTestBase
(await rehydratedStore.ExportAsync()).ShouldBeEmpty();
}
/// <summary>
/// Verifies dataset isolation between primary and secondary stores.
/// </summary>
[Fact]
public async Task OplogStore_DatasetIsolation_Works()
{
@@ -68,6 +74,9 @@ public abstract class OplogStoreContractTestBase
logs[0].DatasetId.ShouldBe(DatasetId.Logs);
}
/// <summary>
/// Verifies chain range queries return ordered linked entries.
/// </summary>
[Fact]
public async Task OplogStore_GetChainRangeAsync_ReturnsOrderedLinkedRange()
{
@@ -89,8 +98,20 @@ public abstract class OplogStoreContractTestBase
range[1].Hash.ShouldBe(entry3.Hash);
}
/// <summary>
/// Creates the contract harness for this test class.
/// </summary>
protected abstract Task<IOplogStoreContractHarness> CreateHarnessAsync();
/// <summary>
/// Creates a reusable oplog entry with deterministic timestamps.
/// </summary>
/// <param name="collection">The collection name.</param>
/// <param name="key">The entry key.</param>
/// <param name="nodeId">The node identifier generating the entry.</param>
/// <param name="wall">The wall-clock component of the HLC timestamp.</param>
/// <param name="logic">The logical clock component of the HLC timestamp.</param>
/// <param name="previousHash">The previous entry hash.</param>
protected static OplogEntry CreateOplogEntry(
string collection,
string key,
@@ -111,11 +132,28 @@ public abstract class OplogStoreContractTestBase
public interface IOplogStoreContractHarness : IAsyncDisposable
{
/// <summary>
/// Gets the active contract store.
/// </summary>
IOplogStore Store { get; }
/// <summary>
/// Reopens the harness storage.
/// </summary>
IOplogStore ReopenStore();
/// <summary>
/// Appends an entry for the specified dataset.
/// </summary>
/// <param name="entry">The oplog entry to append.</param>
/// <param name="datasetId">The dataset identifier.</param>
/// <param name="cancellationToken">The cancellation token.</param>
Task AppendOplogEntryAsync(OplogEntry entry, string datasetId, CancellationToken cancellationToken = default);
/// <summary>
/// Exports entries for the specified dataset.
/// </summary>
/// <param name="datasetId">The dataset identifier.</param>
/// <param name="cancellationToken">The cancellation token.</param>
Task<IEnumerable<OplogEntry>> ExportAsync(string datasetId, CancellationToken cancellationToken = default);
}

View File

@@ -3,6 +3,7 @@ using Microsoft.Extensions.Logging;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Network;
using ZB.MOM.WW.CBDDC.Network.Security;
using ZB.MOM.WW.CBDDC.Persistence.Lmdb;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
using ZB.MOM.WW.CBDDC.Sample.Console;
@@ -15,6 +16,18 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
private readonly string _workDir;
private bool _started;
/// <summary>
/// Gets the active EF/Core context for generated users.
/// </summary>
public SampleDbContext Context { get; }
/// <summary>
/// Creates and initializes a benchmark peer node.
/// </summary>
/// <param name="serviceProvider">Service provider containing node dependencies.</param>
/// <param name="node">Benchmark node abstraction.</param>
/// <param name="context">Live data context used by the benchmark.</param>
/// <param name="workDir">Temporary working directory for this node.</param>
private BenchmarkPeerNode(
ServiceProvider serviceProvider,
ICBDDCNode node,
@@ -27,8 +40,13 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
_workDir = workDir;
}
public SampleDbContext Context { get; }
/// <summary>
/// Creates and starts a benchmark peer node from configuration.
/// </summary>
/// <param name="nodeId">Unique peer identifier.</param>
/// <param name="tcpPort">Local TCP port for the node.</param>
/// <param name="authToken">Authentication token shared across peers.</param>
/// <param name="knownPeers">Known peers to connect to at startup.</param>
public static BenchmarkPeerNode Create(
string nodeId,
int tcpPort,
@@ -58,7 +76,13 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
services.AddSingleton<ICBDDCSurrealSchemaInitializer, SampleSurrealSchemaInitializer>();
services.AddSingleton<SampleDbContext>();
services.AddCBDDCCore()
bool useLmdb = GetBoolEnv("CBDDC_BENCH_USE_LMDB", defaultValue: true);
bool dualWrite = GetBoolEnv("CBDDC_BENCH_DUAL_WRITE", defaultValue: true);
bool preferLmdbReads = GetBoolEnv("CBDDC_BENCH_PREFER_LMDB_READS", defaultValue: true);
bool enableShadowValidation = GetBoolEnv("CBDDC_BENCH_SHADOW_READ_VALIDATE", defaultValue: false);
int reconcileIntervalMs = GetIntEnv("CBDDC_BENCH_RECONCILE_INTERVAL_MS", defaultValue: 0);
var registration = services.AddCBDDCCore()
.AddCBDDCSurrealEmbedded<SampleDocumentStore>(_ => new CBDDCSurrealEmbeddedOptions
{
Endpoint = "rocksdb://local",
@@ -72,8 +96,27 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
PollingInterval = TimeSpan.FromMilliseconds(50),
EnableLiveSelectAccelerator = true
}
})
.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
});
if (useLmdb)
registration.AddCBDDCLmdbOplog(
_ => new LmdbOplogOptions
{
EnvironmentPath = Path.Combine(workDir, "oplog-lmdb"),
MapSizeBytes = 256L * 1024 * 1024,
MaxDatabases = 16,
PruneBatchSize = 512
},
flags =>
{
flags.UseLmdbOplog = true;
flags.DualWriteOplog = dualWrite;
flags.PreferLmdbReads = preferLmdbReads;
flags.EnableReadShadowValidation = enableShadowValidation;
flags.ReconciliationInterval = TimeSpan.FromMilliseconds(Math.Max(0, reconcileIntervalMs));
});
registration.AddCBDDCNetwork<StaticPeerNodeConfigurationProvider>(false);
// Benchmark runs use explicit known peers; disable UDP discovery and handshake overhead.
services.AddSingleton<IDiscoveryService, PassiveDiscoveryService>();
@@ -86,6 +129,9 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
return new BenchmarkPeerNode(provider, node, context, workDir);
}
/// <summary>
/// Starts the node asynchronously.
/// </summary>
public async Task StartAsync()
{
if (_started) return;
@@ -93,6 +139,9 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
_started = true;
}
/// <summary>
/// Stops the node asynchronously.
/// </summary>
public async Task StopAsync()
{
if (!_started) return;
@@ -111,22 +160,37 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
_started = false;
}
/// <summary>
/// Inserts or updates a user record.
/// </summary>
/// <param name="user">User payload.</param>
public async Task UpsertUserAsync(User user)
{
await Context.Users.UpdateAsync(user);
await Context.SaveChangesAsync();
}
/// <summary>
/// Returns whether a user identifier exists.
/// </summary>
/// <param name="userId">Target user identifier.</param>
public bool ContainsUser(string userId)
{
return Context.Users.Find(u => u.Id == userId).Any();
}
/// <summary>
/// Counts user identifiers matching the provided prefix.
/// </summary>
/// <param name="prefix">User identifier prefix.</param>
public int CountUsersWithPrefix(string prefix)
{
return Context.Users.FindAll().Count(u => u.Id.StartsWith(prefix, StringComparison.Ordinal));
}
/// <summary>
/// Disposes the node and any unmanaged resources.
/// </summary>
public async ValueTask DisposeAsync()
{
try
@@ -156,18 +220,43 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
}
}
/// <summary>
/// Reads a boolean toggle from an environment variable.
/// </summary>
/// <param name="key">Environment variable name.</param>
/// <param name="defaultValue">Value returned when the variable is unset or not a valid boolean.</param>
private static bool GetBoolEnv(string key, bool defaultValue)
{
    string? raw = Environment.GetEnvironmentVariable(key);
    // bool.TryParse rejects null/whitespace, so a single TryParse covers both original guards.
    return bool.TryParse(raw, out bool parsed) ? parsed : defaultValue;
}
/// <summary>
/// Reads an integer setting from an environment variable.
/// </summary>
/// <param name="key">Environment variable name.</param>
/// <param name="defaultValue">Value returned when the variable is unset or not a valid integer.</param>
private static int GetIntEnv(string key, int defaultValue)
{
    string? raw = Environment.GetEnvironmentVariable(key);
    if (string.IsNullOrWhiteSpace(raw)) return defaultValue;
    // Parse with the invariant culture (CA1305): env-var values are machine-written,
    // so benchmark configuration must not vary with the host locale's number format.
    if (int.TryParse(raw, System.Globalization.NumberStyles.Integer, System.Globalization.CultureInfo.InvariantCulture, out int parsed)) return parsed;
    return defaultValue;
}
private sealed class PassiveDiscoveryService : IDiscoveryService
{
/// <summary>
/// Gets the current list of active peers.
/// </summary>
public IEnumerable<PeerNode> GetActivePeers()
{
return Array.Empty<PeerNode>();
}
/// <summary>
/// Starts discovery.
/// </summary>
public Task Start()
{
return Task.CompletedTask;
}
/// <summary>
/// Stops discovery.
/// </summary>
public Task Stop()
{
return Task.CompletedTask;

View File

@@ -15,6 +15,9 @@ public class E2EThroughputBenchmarks
private BenchmarkPeerNode _nodeB = null!;
private int _sequence;
/// <summary>
/// Sets up benchmark nodes and prepares the cluster.
/// </summary>
[GlobalSetup]
public async Task GlobalSetupAsync()
{
@@ -58,6 +61,9 @@ public class E2EThroughputBenchmarks
await Task.Delay(500);
}
/// <summary>
/// Handles benchmark teardown for the throughput test suite.
/// </summary>
[GlobalCleanup]
public Task GlobalCleanupAsync()
{
@@ -66,6 +72,9 @@ public class E2EThroughputBenchmarks
return Task.CompletedTask;
}
/// <summary>
/// Measures local write throughput against a single node.
/// </summary>
[Benchmark(Description = "Local write throughput", OperationsPerInvoke = BatchSize)]
public async Task LocalWriteThroughput()
{
@@ -74,6 +83,9 @@ public class E2EThroughputBenchmarks
await _nodeA.UpsertUserAsync(CreateUser(userId));
}
/// <summary>
/// Measures replicated write throughput across two nodes.
/// </summary>
[Benchmark(Description = "Cross-node replicated throughput", OperationsPerInvoke = BatchSize)]
public async Task ReplicatedWriteThroughput()
{

View File

@@ -10,18 +10,24 @@ namespace ZB.MOM.WW.CDBBC.E2E.Benchmark.Tests;
[SimpleJob(launchCount: 1, warmupCount: 0, iterationCount: 1)]
public class OfflineResyncThroughputBenchmarks
{
private const int BacklogOperationCount = 10_000;
private const int BacklogOperationCount = 100_000;
private BenchmarkPeerNode _onlineNode = null!;
private BenchmarkPeerNode _offlineNode = null!;
private int _runSequence;
private string _currentPrefix = string.Empty;
/// <summary>
/// Sets up benchmark resources for offline resync scenarios.
/// </summary>
[GlobalSetup]
public Task GlobalSetupAsync()
{
return Task.CompletedTask;
}
/// <summary>
/// Handles benchmark teardown for offline resync scenarios.
/// </summary>
[GlobalCleanup]
public Task GlobalCleanupAsync()
{
@@ -30,6 +36,9 @@ public class OfflineResyncThroughputBenchmarks
return Task.CompletedTask;
}
/// <summary>
/// Prepares write-only workload state for the 100K throughput benchmark.
/// </summary>
[IterationSetup(Target = nameof(OfflineBacklogWriteThroughput100k))]
public void SetupOfflineWriteThroughput()
{
@@ -37,12 +46,18 @@ public class OfflineResyncThroughputBenchmarks
InitializeIterationNodesAsync().GetAwaiter().GetResult();
}
[Benchmark(Description = "Offline backlog write throughput (10K ops)", OperationsPerInvoke = BacklogOperationCount)]
/// <summary>
/// Measures offline backlog write throughput for 100K operations.
/// </summary>
[Benchmark(Description = "Offline backlog write throughput (100K ops)", OperationsPerInvoke = BacklogOperationCount)]
public async Task OfflineBacklogWriteThroughput100k()
{
await WriteBatchAsync(_currentPrefix, BacklogOperationCount);
}
/// <summary>
/// Prepares nodes and backlog before the re-sync benchmark iteration.
/// </summary>
[IterationSetup(Target = nameof(OfflineNodeResyncDurationAfter100kBacklog))]
public void SetupOfflineResyncBenchmark()
{
@@ -51,7 +66,10 @@ public class OfflineResyncThroughputBenchmarks
WriteBatchAsync(_currentPrefix, BacklogOperationCount).GetAwaiter().GetResult();
}
[Benchmark(Description = "Offline node re-sync duration after 10K backlog")]
/// <summary>
/// Measures re-sync duration after processing a 100K-entry offline backlog.
/// </summary>
[Benchmark(Description = "Offline node re-sync duration after 100K backlog")]
public async Task OfflineNodeResyncDurationAfter100kBacklog()
{
await _offlineNode.StartAsync();

View File

@@ -57,6 +57,9 @@ public class SurrealLogStorageBenchmarks
private string _contextNumericKeyQueryValue = string.Empty;
private int _contextNumericValueQueryValue;
/// <summary>
/// Initializes the benchmark environment and seeds the dataset.
/// </summary>
[GlobalSetup]
public async Task GlobalSetupAsync()
{
@@ -97,6 +100,9 @@ public class SurrealLogStorageBenchmarks
$"RocksDB size: {sizeBytes / (1024d * 1024d):F2} MiB ({sizeBytes:N0} bytes). Path: {_databasePath}");
}
/// <summary>
/// Handles benchmark teardown for surreal log storage scenarios.
/// </summary>
[GlobalCleanup]
public Task GlobalCleanupAsync()
{
@@ -104,6 +110,9 @@ public class SurrealLogStorageBenchmarks
return Task.CompletedTask;
}
/// <summary>
/// Queries the latest log rows by context identifier.
/// </summary>
[Benchmark(Description = "Query by contextId (latest 200 rows)")]
public async Task QueryByContextIdAsync()
{
@@ -117,6 +126,9 @@ public class SurrealLogStorageBenchmarks
new Dictionary<string, object?> { ["contextId"] = _contextIdQueryValue });
}
/// <summary>
/// Queries the latest rows by logger and timestamp range.
/// </summary>
[Benchmark(Description = "Query by loggerName + timestamp range (latest 200 rows)")]
public async Task QueryByLoggerAndTimestampAsync()
{
@@ -137,6 +149,9 @@ public class SurrealLogStorageBenchmarks
});
}
/// <summary>
/// Queries rows by logger and context key/value pairs.
/// </summary>
[Benchmark(Description = "Query by loggerName + timestamp + arbitrary context string key/value")]
public async Task QueryByLoggerTimestampAndContextKeyAsync()
{
@@ -169,6 +184,9 @@ public class SurrealLogStorageBenchmarks
});
}
/// <summary>
/// Queries rows by logger and numeric context key/value pairs.
/// </summary>
[Benchmark(Description = "Query by loggerName + timestamp + arbitrary context number key/value")]
public async Task QueryByLoggerTimestampAndNumericContextKeyAsync()
{
@@ -201,6 +219,9 @@ public class SurrealLogStorageBenchmarks
});
}
/// <summary>
/// Reports RocksDB size for the seeded benchmark database.
/// </summary>
[Benchmark(Description = "RocksDB size (bytes)")]
public long GetDatabaseFileSizeBytes()
{