Implement in-process multi-dataset sync isolation across core, network, persistence, and tests
All checks were successful
NuGet Package Publish / nuget (push) Successful in 1m14s

This commit is contained in:
Joseph Doherty
2026-02-22 11:58:34 -05:00
parent c06b56172a
commit 8e97061ab8
60 changed files with 4519 additions and 559 deletions

View File

@@ -0,0 +1,43 @@
using System.Text.Json;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Core.Tests;
public class DatasetAwareModelTests
{
    /// <summary>
    /// Verifies that document metadata constructed without an explicit dataset is assigned to the primary dataset.
    /// </summary>
    [Fact]
    public void DocumentMetadata_ShouldDefaultDatasetId_ToPrimary()
    {
        var sut = new DocumentMetadata("Users", "42", new HlcTimestamp(100, 0, "node"));

        sut.DatasetId.ShouldBe(DatasetId.Primary);
    }

    /// <summary>
    /// Verifies that the dataset identifier survives a JSON serialization round trip.
    /// </summary>
    [Fact]
    public void DocumentMetadata_SerializationRoundTrip_ShouldPreserveDatasetId()
    {
        var source = new DocumentMetadata("Users", "42", new HlcTimestamp(100, 0, "node"), false, "logs");

        string serialized = JsonSerializer.Serialize(source);
        var roundTripped = JsonSerializer.Deserialize<DocumentMetadata>(serialized);

        roundTripped.ShouldNotBeNull();
        roundTripped.DatasetId.ShouldBe("logs");
    }

    /// <summary>
    /// Verifies that snapshot metadata defaults to the primary dataset.
    /// </summary>
    [Fact]
    public void SnapshotMetadata_ShouldDefaultDatasetId_ToPrimary()
    {
        var sut = new SnapshotMetadata();

        sut.DatasetId.ShouldBe(DatasetId.Primary);
    }

    /// <summary>
    /// Verifies that peer oplog confirmations default to the primary dataset.
    /// </summary>
    [Fact]
    public void PeerOplogConfirmation_ShouldDefaultDatasetId_ToPrimary()
    {
        var sut = new PeerOplogConfirmation();

        sut.DatasetId.ShouldBe(DatasetId.Primary);
    }
}

View File

@@ -63,11 +63,35 @@ public class OplogEntryTests
/// Verifies that an entry is valid when its stored hash matches computed content.
/// </summary>
[Fact]
public void IsValid_ShouldReturnTrue_WhenHashMatches()
{
var timestamp = new HlcTimestamp(100, 0, "node-1");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
entry.IsValid().ShouldBeTrue();
}
}
public void IsValid_ShouldReturnTrue_WhenHashMatches()
{
var timestamp = new HlcTimestamp(100, 0, "node-1");
var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");
entry.IsValid().ShouldBeTrue();
}
/// <summary>
/// Verifies that entries default to the primary dataset when dataset is omitted.
/// </summary>
[Fact]
public void Constructor_ShouldDefaultDatasetId_ToPrimary()
{
    // An entry created without a datasetId argument must land in the primary dataset.
    var timestamp = new HlcTimestamp(1, 0, "node");

    var entry = new OplogEntry("col", "key", OperationType.Put, null, timestamp, "prev");

    entry.DatasetId.ShouldBe(DatasetId.Primary);
}
/// <summary>
/// Verifies that hash computation includes dataset identity to prevent cross-dataset collisions.
/// </summary>
[Fact]
public void ComputeHash_ShouldDiffer_WhenDatasetDiffers()
{
    // Identical content and timestamp, differing only in dataset identity.
    var sharedTimestamp = new HlcTimestamp(100, 0, "node-1");

    var entryInPrimary = new OplogEntry("col", "key", OperationType.Put, null, sharedTimestamp, "prev", datasetId: "primary");
    var entryInLogs = new OplogEntry("col", "key", OperationType.Put, null, sharedTimestamp, "prev", datasetId: "logs");

    entryInPrimary.Hash.ShouldNotBe(entryInLogs.Hash);
}
}

View File

@@ -0,0 +1,47 @@
using Microsoft.Extensions.DependencyInjection;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
public class MultiDatasetRegistrationTests
{
    /// <summary>
    /// Verifies that multi-dataset registration adds the coordinator service and leaves the
    /// last <c>ISyncOrchestrator</c> registration as a factory-based one (i.e. the default is replaced).
    /// </summary>
    [Fact]
    public void AddCBDDCMultiDataset_ShouldRegisterCoordinatorAndReplaceSyncOrchestrator()
    {
        var collection = new ServiceCollection();
        collection.AddCBDDCNetwork<TestPeerNodeConfigurationProvider>(useHostedService: false);

        collection.AddCBDDCMultiDataset(options =>
        {
            options.EnableMultiDatasetSync = true;
            options.EnableDatasetPrimary = true;
            options.EnableDatasetLogs = true;
            options.EnableDatasetTimeseries = true;
        });

        bool hasCoordinator = collection.Any(d => d.ServiceType == typeof(IMultiDatasetSyncOrchestrator));
        hasCoordinator.ShouldBeTrue();

        var lastSyncRegistration = collection.Last(d => d.ServiceType == typeof(ISyncOrchestrator));
        lastSyncRegistration.ImplementationFactory.ShouldNotBeNull();
    }

    /// <summary>
    /// Minimal configuration provider returning a fixed node identity for registration tests.
    /// </summary>
    private sealed class TestPeerNodeConfigurationProvider : IPeerNodeConfigurationProvider
    {
        // The tests never raise configuration changes, so the event accessors are no-ops.
        public event PeerNodeConfigurationChangedEventHandler? ConfigurationChanged
        {
            add { }
            remove { }
        }

        public Task<PeerNodeConfiguration> GetConfiguration()
        {
            var configuration = new PeerNodeConfiguration
            {
                NodeId = "node-test",
                TcpPort = 9000,
                AuthToken = "auth"
            };
            return Task.FromResult(configuration);
        }
    }
}

View File

@@ -0,0 +1,102 @@
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
namespace ZB.MOM.WW.CBDDC.Network.Tests;
/// <summary>
/// Tests for the multi-dataset sync orchestrator, covering dataset-context creation and
/// per-dataset fault isolation during start/stop.
/// </summary>
public class MultiDatasetSyncOrchestratorTests
{
/// <summary>
/// Verifies that when multi-dataset sync is disabled, only the primary dataset context is
/// created even though a second dataset is configured and enabled.
/// </summary>
[Fact]
public void Constructor_WhenMultiDatasetDisabled_ShouldOnlyCreatePrimaryContext()
{
var sut = CreateSut(
[
new DatasetSyncOptions { DatasetId = DatasetId.Primary, Enabled = true },
new DatasetSyncOptions { DatasetId = DatasetId.Logs, Enabled = true }
],
new MultiDatasetRuntimeOptions
{
EnableMultiDatasetSync = false,
EnableDatasetPrimary = true,
EnableDatasetLogs = true
});
var datasetIds = sut.Contexts.Select(c => c.DatasetId).ToList();
datasetIds.Count.ShouldBe(1);
datasetIds[0].ShouldBe(DatasetId.Primary);
}
/// <summary>
/// Verifies that a failure in one dataset's orchestrator does not prevent the remaining
/// datasets from being started and stopped.
/// </summary>
[Fact]
public async Task StartStop_WhenOneDatasetThrows_ShouldContinueOtherDatasets()
{
// The "logs" orchestrator throws on Start; primary and timeseries must still be driven.
var orchestrators = new Dictionary<string, TrackingSyncOrchestrator>(StringComparer.Ordinal)
{
[DatasetId.Primary] = new TrackingSyncOrchestrator(),
[DatasetId.Logs] = new TrackingSyncOrchestrator(startException: new InvalidOperationException("boom")),
[DatasetId.Timeseries] = new TrackingSyncOrchestrator()
};
var sut = CreateSut(
[],
new MultiDatasetRuntimeOptions
{
EnableMultiDatasetSync = true,
EnableDatasetPrimary = true,
EnableDatasetLogs = true,
EnableDatasetTimeseries = true
},
options => orchestrators[DatasetId.Normalize(options.DatasetId)]);
await sut.Start();
await sut.Stop();
// Every dataset — including the one that threw — must have been started and stopped once.
orchestrators[DatasetId.Primary].StartCalls.ShouldBe(1);
orchestrators[DatasetId.Primary].StopCalls.ShouldBe(1);
orchestrators[DatasetId.Logs].StartCalls.ShouldBe(1);
orchestrators[DatasetId.Logs].StopCalls.ShouldBe(1);
orchestrators[DatasetId.Timeseries].StartCalls.ShouldBe(1);
orchestrators[DatasetId.Timeseries].StopCalls.ShouldBe(1);
}
/// <summary>
/// Builds the orchestrator under test with substituted collaborators.
/// </summary>
/// <param name="datasetOptions">Per-dataset sync options supplied to the orchestrator.</param>
/// <param name="runtimeOptions">Runtime flags controlling which datasets are enabled.</param>
/// <param name="orchestratorFactory">Optional factory overriding inner orchestrator creation.</param>
private static MultiDatasetSyncOrchestrator CreateSut(
IEnumerable<DatasetSyncOptions> datasetOptions,
MultiDatasetRuntimeOptions runtimeOptions,
Func<DatasetSyncOptions, ISyncOrchestrator>? orchestratorFactory = null)
{
return new MultiDatasetSyncOrchestrator(
Substitute.For<IDiscoveryService>(),
Substitute.For<IOplogStore>(),
Substitute.For<IDocumentStore>(),
Substitute.For<ISnapshotMetadataStore>(),
Substitute.For<ISnapshotService>(),
Substitute.For<IPeerNodeConfigurationProvider>(),
NullLoggerFactory.Instance,
datasetOptions,
runtimeOptions,
orchestratorFactory: orchestratorFactory);
}
/// <summary>
/// Test double that counts Start/Stop calls and can be configured to throw from either.
/// </summary>
private sealed class TrackingSyncOrchestrator(Exception? startException = null, Exception? stopException = null)
: ISyncOrchestrator
{
public int StartCalls { get; private set; }
public int StopCalls { get; private set; }
public Task Start()
{
// Count first so the attempt is observable even when the configured exception is thrown.
StartCalls++;
if (startException != null) throw startException;
return Task.CompletedTask;
}
public Task Stop()
{
StopCalls++;
if (stopException != null) throw stopException;
return Task.CompletedTask;
}
}
}

View File

@@ -1,4 +1,6 @@
using Microsoft.Extensions.Logging.Abstractions;
using Google.Protobuf;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Network.Proto;
using ZB.MOM.WW.CBDDC.Network.Protocol;
using ZB.MOM.WW.CBDDC.Network.Security;
@@ -145,6 +147,44 @@ public class ProtocolTests
decoded.NodeId.ShouldBe("fragmented");
}
/// <summary>
/// Verifies that dataset-aware protocol fields are serialized and parsed correctly.
/// </summary>
[Fact]
public void DatasetAwareMessages_ShouldRoundTripDatasetFields()
{
    var outbound = new PullChangesRequest
    {
        SinceWall = 10,
        SinceLogic = 1,
        SinceNode = "node-a",
        DatasetId = "logs"
    };

    byte[] wireBytes = outbound.ToByteArray();
    var inbound = PullChangesRequest.Parser.ParseFrom(wireBytes);

    inbound.DatasetId.ShouldBe("logs");
}
/// <summary>
/// Verifies that legacy messages with no dataset id default to the primary dataset.
/// </summary>
[Fact]
public void DatasetAwareMessages_WhenMissingDataset_ShouldDefaultToPrimary()
{
    // A pre-multi-dataset handshake carries no dataset id at all.
    var legacyHandshake = new HandshakeRequest
    {
        NodeId = "node-legacy",
        AuthToken = "token"
    };

    byte[] wireBytes = legacyHandshake.ToByteArray();
    var decoded = HandshakeRequest.Parser.ParseFrom(wireBytes);

    DatasetId.Normalize(decoded.DatasetId).ShouldBe(DatasetId.Primary);
}
// Helper Stream for fragmentation test
private class FragmentedMemoryStream : MemoryStream
{
@@ -169,4 +209,4 @@ public class ProtocolTests
return await base.ReadAsync(buffer, offset, toRead, cancellationToken);
}
}
}
}

View File

@@ -16,6 +16,8 @@ public class SnapshotReconnectRegressionTests
.Returns((SnapshotMetadata?)null);
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((string?)null);
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns((string?)null);
snapshotMetadataStore.GetAllSnapshotMetadataAsync(Arg.Any<CancellationToken>())
.Returns(Array.Empty<SnapshotMetadata>());
return snapshotMetadataStore;
@@ -30,6 +32,10 @@ public class SnapshotReconnectRegressionTests
.Returns(Task.CompletedTask);
snapshotService.MergeSnapshotAsync(Arg.Any<Stream>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.ReplaceDatabaseAsync(Arg.Any<Stream>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
snapshotService.MergeSnapshotAsync(Arg.Any<Stream>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return snapshotService;
}
@@ -69,8 +75,12 @@ public class SnapshotReconnectRegressionTests
var oplogStore = Substitute.For<IOplogStore>();
oplogStore.GetLastEntryHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(localHeadHash);
oplogStore.GetLastEntryHashAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(localHeadHash);
oplogStore.ApplyBatchAsync(Arg.Any<IEnumerable<OplogEntry>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
oplogStore.ApplyBatchAsync(Arg.Any<IEnumerable<OplogEntry>>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return oplogStore;
}
@@ -84,6 +94,8 @@ public class SnapshotReconnectRegressionTests
null);
client.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(_ => Task.FromException<List<OplogEntry>>(new SnapshotRequiredException()));
client.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(_ => Task.FromException<List<OplogEntry>>(new SnapshotRequiredException()));
return client;
}
@@ -109,19 +121,38 @@ public class SnapshotReconnectRegressionTests
store.EnsurePeerRegisteredAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<PeerType>(),
Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.EnsurePeerRegisteredAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<PeerType>(), Arg.Any<string>(),
Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.UpdateConfirmationAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<string>(),
Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.UpdateConfirmationAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<HlcTimestamp>(), Arg.Any<string>(),
Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.GetConfirmationsAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.GetConfirmationsAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
store.GetConfirmationsForPeerAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
store.GetConfirmationsForPeerAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
store.RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Task.CompletedTask);
store.RemovePeerTrackingAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.GetActiveTrackedPeersAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<string>());
store.GetActiveTrackedPeersAsync(Arg.Any<string>(), Arg.Any<CancellationToken>()).Returns(Array.Empty<string>());
store.ExportAsync(Arg.Any<CancellationToken>()).Returns(Array.Empty<PeerOplogConfirmation>());
store.ExportAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Array.Empty<PeerOplogConfirmation>());
store.ImportAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.ImportAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.MergeAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
store.MergeAsync(Arg.Any<IEnumerable<PeerOplogConfirmation>>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns(Task.CompletedTask);
return store;
}
@@ -136,6 +167,8 @@ public class SnapshotReconnectRegressionTests
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
@@ -165,7 +198,7 @@ public class SnapshotReconnectRegressionTests
// Assert
result.ShouldBe("Success");
await client.DidNotReceive()
.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
.GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
}
/// <summary>
@@ -179,6 +212,8 @@ public class SnapshotReconnectRegressionTests
var snapshotMetadataStore = CreateSnapshotMetadataStore();
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
snapshotMetadataStore.GetSnapshotHashAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>())
.Returns("snapshot-boundary-hash");
var snapshotService = CreateSnapshotService();
var orch = new TestableSyncOrchestrator(
@@ -208,7 +243,11 @@ public class SnapshotReconnectRegressionTests
await Should.ThrowAsync<SnapshotRequiredException>(async () =>
await orch.TestProcessInboundBatchAsync(client, "remote-node", entries, CancellationToken.None));
await client.Received(1).GetChainRangeAsync(Arg.Any<string>(), Arg.Any<string>(), Arg.Any<CancellationToken>());
await client.Received(1).GetChainRangeAsync(
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
// Subclass to expose private method
@@ -283,4 +322,4 @@ public class SnapshotReconnectRegressionTests
}
}
}
}
}

View File

@@ -39,18 +39,21 @@ public class SyncOrchestratorConfirmationTests
"peer-a",
"10.0.0.1:9000",
PeerType.LanDiscovered,
DatasetId.Primary,
Arg.Any<CancellationToken>());
await confirmationStore.Received(1).EnsurePeerRegisteredAsync(
"peer-b",
"10.0.0.2:9010",
PeerType.StaticRemote,
DatasetId.Primary,
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().EnsurePeerRegisteredAsync(
"local",
Arg.Any<string>(),
Arg.Any<PeerType>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
@@ -89,6 +92,7 @@ public class SyncOrchestratorConfirmationTests
"peer-new",
"10.0.0.25:9010",
PeerType.LanDiscovered,
DatasetId.Primary,
Arg.Any<CancellationToken>());
}
@@ -114,9 +118,9 @@ public class SyncOrchestratorConfirmationTests
remote.SetTimestamp("node-behind", new HlcTimestamp(299, 9, "node-behind"));
remote.SetTimestamp("node-remote-only", new HlcTimestamp(900, 0, "node-remote-only"));
oplogStore.GetLastEntryHashAsync("node-equal", Arg.Any<CancellationToken>())
oplogStore.GetLastEntryHashAsync("node-equal", DatasetId.Primary, Arg.Any<CancellationToken>())
.Returns("hash-equal");
oplogStore.GetLastEntryHashAsync("node-ahead", Arg.Any<CancellationToken>())
oplogStore.GetLastEntryHashAsync("node-ahead", DatasetId.Primary, Arg.Any<CancellationToken>())
.Returns((string?)null);
await orchestrator.AdvanceConfirmationsFromVectorClockAsync("peer-1", local, remote, CancellationToken.None);
@@ -126,6 +130,7 @@ public class SyncOrchestratorConfirmationTests
"node-equal",
new HlcTimestamp(100, 1, "node-equal"),
"hash-equal",
DatasetId.Primary,
Arg.Any<CancellationToken>());
await confirmationStore.Received(1).UpdateConfirmationAsync(
@@ -133,6 +138,7 @@ public class SyncOrchestratorConfirmationTests
"node-ahead",
new HlcTimestamp(200, 0, "node-ahead"),
string.Empty,
DatasetId.Primary,
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
@@ -140,6 +146,7 @@ public class SyncOrchestratorConfirmationTests
"node-behind",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
@@ -147,6 +154,7 @@ public class SyncOrchestratorConfirmationTests
"node-local-only",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
await confirmationStore.DidNotReceive().UpdateConfirmationAsync(
@@ -154,6 +162,7 @@ public class SyncOrchestratorConfirmationTests
"node-remote-only",
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
@@ -182,6 +191,7 @@ public class SyncOrchestratorConfirmationTests
"source-1",
new HlcTimestamp(120, 1, "source-1"),
"hash-120",
DatasetId.Primary,
Arg.Any<CancellationToken>());
}
@@ -206,6 +216,7 @@ public class SyncOrchestratorConfirmationTests
Arg.Any<string>(),
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
@@ -245,4 +256,4 @@ public class SyncOrchestratorConfirmationTests
string.Empty,
hash);
}
}
}

View File

@@ -138,7 +138,10 @@ public class SyncOrchestratorMaintenancePruningTests
await orchestrator.RunMaintenanceIfDueAsync(config, DateTime.UtcNow, CancellationToken.None);
await oplogStore.DidNotReceive().PruneOplogAsync(Arg.Any<HlcTimestamp>(), Arg.Any<CancellationToken>());
await oplogStore.DidNotReceive().PruneOplogAsync(
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
}
/// <summary>
@@ -187,6 +190,7 @@ public class SyncOrchestratorMaintenancePruningTests
timestamp.PhysicalTime == 100 &&
timestamp.LogicalCounter == 0 &&
string.Equals(timestamp.NodeId, "node-local", StringComparison.Ordinal)),
DatasetId.Primary,
Arg.Any<CancellationToken>());
}
@@ -228,7 +232,10 @@ public class SyncOrchestratorMaintenancePruningTests
var now = DateTime.UtcNow;
await orchestrator.RunMaintenanceIfDueAsync(config, now, CancellationToken.None);
await oplogStore.DidNotReceive().PruneOplogAsync(Arg.Any<HlcTimestamp>(), Arg.Any<CancellationToken>());
await oplogStore.DidNotReceive().PruneOplogAsync(
Arg.Any<HlcTimestamp>(),
Arg.Any<string>(),
Arg.Any<CancellationToken>());
await orchestrator.RunMaintenanceIfDueAsync(config, now.AddMinutes(2), CancellationToken.None);
@@ -237,6 +244,7 @@ public class SyncOrchestratorMaintenancePruningTests
timestamp.PhysicalTime == 100 &&
timestamp.LogicalCounter == 0 &&
string.Equals(timestamp.NodeId, "node-local", StringComparison.Ordinal)),
DatasetId.Primary,
Arg.Any<CancellationToken>());
}
@@ -286,4 +294,4 @@ public class SyncOrchestratorMaintenancePruningTests
IsActive = isActive
};
}
}
}

View File

@@ -0,0 +1,38 @@
using System.Text;
using Microsoft.Extensions.Configuration;
using ZB.MOM.WW.CBDDC.Network;
namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;
/// <summary>
/// Tests for binding the CBDDC:MultiDataset configuration section to runtime options.
/// </summary>
public class MultiDatasetConfigParsingTests
{
/// <summary>
/// Verifies that each Enable* flag in the MultiDataset section binds to the matching
/// property on the runtime options type.
/// </summary>
[Fact]
public void MultiDatasetSection_ShouldBindRuntimeOptions()
{
const string json = """
{
"CBDDC": {
"MultiDataset": {
"EnableMultiDatasetSync": true,
"EnableDatasetPrimary": true,
"EnableDatasetLogs": true,
"EnableDatasetTimeseries": false
}
}
}
""";
// Feed the literal through a stream so the standard JSON configuration provider parses it.
using var stream = new MemoryStream(Encoding.UTF8.GetBytes(json));
var config = new ConfigurationBuilder()
.AddJsonStream(stream)
.Build();
var options = config.GetSection("CBDDC:MultiDataset").Get<MultiDatasetRuntimeOptions>();
options.ShouldNotBeNull();
options.EnableMultiDatasetSync.ShouldBeTrue();
options.EnableDatasetPrimary.ShouldBeTrue();
options.EnableDatasetLogs.ShouldBeTrue();
options.EnableDatasetTimeseries.ShouldBeFalse();
}
}

View File

@@ -54,6 +54,68 @@ public class SurrealOplogStoreContractTests
(await store.ExportAsync()).ShouldBeEmpty();
}
/// <summary>
/// Verifies oplog reads and writes are isolated by dataset identifier.
/// </summary>
[Fact]
public async Task OplogStore_DatasetIsolation_Works()
{
    await using var harness = new SurrealTestHarness();
    var store = harness.CreateOplogStore();

    // One entry per dataset, distinguished by key and logical counter.
    var entryForPrimary = CreateOplogEntry("Users", "p1", "node-a", 100, 0, "");
    var entryForLogs = CreateOplogEntry("Users", "l1", "node-a", 100, 1, "");

    await store.AppendOplogEntryAsync(entryForPrimary, DatasetId.Primary);
    await store.AppendOplogEntryAsync(entryForLogs, DatasetId.Logs);

    var exportedPrimary = (await store.ExportAsync(DatasetId.Primary)).ToList();
    var exportedLogs = (await store.ExportAsync(DatasetId.Logs)).ToList();

    exportedPrimary.Count.ShouldBe(1);
    exportedPrimary[0].DatasetId.ShouldBe(DatasetId.Primary);
    exportedLogs.Count.ShouldBe(1);
    exportedLogs[0].DatasetId.ShouldBe(DatasetId.Logs);
}
/// <summary>
/// Verifies legacy oplog rows without dataset id are treated as primary dataset.
/// </summary>
[Fact]
public async Task OplogStore_LegacyRowsWithoutDatasetId_MapToPrimaryOnly()
{
await using var harness = new SurrealTestHarness();
var store = harness.CreateOplogStore();
await harness.SurrealEmbeddedClient.InitializeAsync();
// Write a raw row with no datasetId field, emulating data created before multi-dataset support.
await harness.SurrealEmbeddedClient.RawQueryAsync(
"""
UPSERT type::thing($table, $id) CONTENT {
collection: "Users",
key: "legacy",
operation: 0,
payloadJson: "{}",
timestampPhysicalTime: 10,
timestampLogicalCounter: 0,
timestampNodeId: "node-legacy",
hash: "legacy-hash",
previousHash: ""
};
""",
new Dictionary<string, object?>
{
["table"] = CBDDCSurrealSchemaNames.OplogEntriesTable,
["id"] = "legacy-hash"
});
// The legacy row must surface only under the primary dataset, never under any other.
var primary = (await store.GetOplogAfterAsync(new HlcTimestamp(0, 0, ""), DatasetId.Primary)).ToList();
var logs = (await store.GetOplogAfterAsync(new HlcTimestamp(0, 0, ""), DatasetId.Logs)).ToList();
primary.Any(entry => entry.Hash == "legacy-hash").ShouldBeTrue();
logs.Any(entry => entry.Hash == "legacy-hash").ShouldBeFalse();
}
private static OplogEntry CreateOplogEntry(
string collection,
string key,
@@ -110,6 +172,34 @@ public class SurrealDocumentMetadataStoreContractTests
var exported = (await store.ExportAsync()).ToList();
exported.Count.ShouldBe(3);
}
/// <summary>
/// Verifies document metadata records do not leak across datasets.
/// </summary>
[Fact]
public async Task DocumentMetadataStore_DatasetIsolation_Works()
{
    await using var harness = new SurrealTestHarness();
    var store = harness.CreateDocumentMetadataStore();

    // Same collection/key written into two datasets with distinct timestamps.
    await store.UpsertMetadataAsync(
        new DocumentMetadata("Users", "doc-shared", new HlcTimestamp(100, 0, "node-a"), false, DatasetId.Primary),
        DatasetId.Primary);
    await store.UpsertMetadataAsync(
        new DocumentMetadata("Users", "doc-shared", new HlcTimestamp(101, 0, "node-a"), false, DatasetId.Logs),
        DatasetId.Logs);

    var fromPrimary = await store.GetMetadataAsync("Users", "doc-shared", DatasetId.Primary);
    var fromLogs = await store.GetMetadataAsync("Users", "doc-shared", DatasetId.Logs);

    fromPrimary.ShouldNotBeNull();
    fromPrimary.DatasetId.ShouldBe(DatasetId.Primary);
    fromPrimary.UpdatedAt.ShouldBe(new HlcTimestamp(100, 0, "node-a"));
    fromLogs.ShouldNotBeNull();
    fromLogs.DatasetId.ShouldBe(DatasetId.Logs);
    fromLogs.UpdatedAt.ShouldBe(new HlcTimestamp(101, 0, "node-a"));
}
}
public class SurrealPeerConfigurationStoreContractTests
@@ -206,6 +296,42 @@ public class SurrealPeerOplogConfirmationStoreContractTests
afterDeactivate.All(x => x.IsActive == false).ShouldBeTrue();
}
/// <summary>
/// Verifies peer confirmations are isolated by dataset.
/// </summary>
[Fact]
public async Task PeerOplogConfirmationStore_DatasetIsolation_Works()
{
    await using var harness = new SurrealTestHarness();
    var store = harness.CreatePeerOplogConfirmationStore();

    // Register the same peer in two datasets, then record a different confirmed hash in each.
    await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote, DatasetId.Primary);
    await store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote, DatasetId.Logs);

    await store.UpdateConfirmationAsync(
        "peer-a",
        "source-1",
        new HlcTimestamp(100, 1, "source-1"),
        "hash-primary",
        DatasetId.Primary);
    await store.UpdateConfirmationAsync(
        "peer-a",
        "source-1",
        new HlcTimestamp(200, 1, "source-1"),
        "hash-logs",
        DatasetId.Logs);

    var confirmedPrimary = (await store.GetConfirmationsForPeerAsync("peer-a", DatasetId.Primary)).ToList();
    var confirmedLogs = (await store.GetConfirmationsForPeerAsync("peer-a", DatasetId.Logs)).ToList();

    confirmedPrimary.Count.ShouldBe(1);
    confirmedPrimary[0].ConfirmedHash.ShouldBe("hash-primary");
    confirmedPrimary[0].DatasetId.ShouldBe(DatasetId.Primary);
    confirmedLogs.Count.ShouldBe(1);
    confirmedLogs[0].ConfirmedHash.ShouldBe("hash-logs");
    confirmedLogs[0].DatasetId.ShouldBe(DatasetId.Logs);
}
/// <summary>
/// Verifies merge semantics prefer newer confirmations and preserve active-state transitions.
/// </summary>
@@ -343,6 +469,45 @@ public class SurrealSnapshotMetadataStoreContractTests
all[0].NodeId.ShouldBe("node-a");
all[1].NodeId.ShouldBe("node-b");
}
/// <summary>
/// Verifies snapshot metadata rows are isolated by dataset.
/// </summary>
[Fact]
public async Task SnapshotMetadataStore_DatasetIsolation_Works()
{
    await using var harness = new SurrealTestHarness();
    var store = harness.CreateSnapshotMetadataStore();

    // Same node id, two datasets, distinguishable by hash and physical time.
    var primaryMetadata = new SnapshotMetadata
    {
        DatasetId = DatasetId.Primary,
        NodeId = "node-a",
        TimestampPhysicalTime = 100,
        TimestampLogicalCounter = 0,
        Hash = "hash-primary"
    };
    var logsMetadata = new SnapshotMetadata
    {
        DatasetId = DatasetId.Logs,
        NodeId = "node-a",
        TimestampPhysicalTime = 200,
        TimestampLogicalCounter = 0,
        Hash = "hash-logs"
    };
    await store.InsertSnapshotMetadataAsync(primaryMetadata, DatasetId.Primary);
    await store.InsertSnapshotMetadataAsync(logsMetadata, DatasetId.Logs);

    var fromPrimary = await store.GetSnapshotMetadataAsync("node-a", DatasetId.Primary);
    var fromLogs = await store.GetSnapshotMetadataAsync("node-a", DatasetId.Logs);

    fromPrimary.ShouldNotBeNull();
    fromPrimary.Hash.ShouldBe("hash-primary");
    fromPrimary.DatasetId.ShouldBe(DatasetId.Primary);
    fromLogs.ShouldNotBeNull();
    fromLogs.Hash.ShouldBe("hash-logs");
    fromLogs.DatasetId.ShouldBe(DatasetId.Logs);
}
}
internal sealed class SurrealTestHarness : IAsyncDisposable
@@ -431,6 +596,11 @@ internal sealed class SurrealTestHarness : IAsyncDisposable
NullLogger<SurrealSnapshotMetadataStore>.Instance);
}
/// <summary>
/// Gets the embedded Surreal client used by this harness.
/// </summary>
public ICBDDCSurrealEmbeddedClient SurrealEmbeddedClient => _client;
/// <inheritdoc />
public async ValueTask DisposeAsync()
{

View File

@@ -113,12 +113,7 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
public async Task UpsertUserAsync(User user)
{
User? existing = Context.Users.Find(u => u.Id == user.Id).FirstOrDefault();
if (existing == null)
await Context.Users.InsertAsync(user);
else
await Context.Users.UpdateAsync(user);
await Context.Users.UpdateAsync(user);
await Context.SaveChangesAsync();
}
@@ -127,6 +122,11 @@ internal sealed class BenchmarkPeerNode : IAsyncDisposable
return Context.Users.Find(u => u.Id == userId).Any();
}
/// <summary>
/// Counts users whose identifier starts with the given prefix (ordinal comparison).
/// </summary>
public int CountUsersWithPrefix(string prefix) =>
    Context.Users.FindAll().Count(u => u.Id.StartsWith(prefix, StringComparison.Ordinal));
public async ValueTask DisposeAsync()
{
try

View File

@@ -0,0 +1,142 @@
using BenchmarkDotNet.Attributes;
using System.Net;
using System.Net.Sockets;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Sample.Console;
namespace ZB.MOM.WW.CDBBC.E2E.Benchmark.Tests;
/// <summary>
/// Benchmarks measuring write throughput while a peer is offline, and the time for that
/// peer to re-sync the accumulated backlog once it starts.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(launchCount: 1, warmupCount: 0, iterationCount: 1)]
public class OfflineResyncThroughputBenchmarks
{
// Number of upserts written per iteration as the offline backlog.
// NOTE(review): benchmark method names say "100k" but this constant is 10_000 — confirm intended scale.
private const int BacklogOperationCount = 10_000;
private BenchmarkPeerNode _onlineNode = null!;
private BenchmarkPeerNode _offlineNode = null!;
// Monotonically increasing run counter so each iteration writes under a unique key prefix.
private int _runSequence;
private string _currentPrefix = string.Empty;
[GlobalSetup]
public Task GlobalSetupAsync()
{
return Task.CompletedTask;
}
[GlobalCleanup]
public Task GlobalCleanupAsync()
{
// Avoid explicit node disposal in BenchmarkDotNet child processes due to a Surreal embedded callback race.
// Process teardown releases resources after benchmark completion.
return Task.CompletedTask;
}
/// <summary>
/// Prepares fresh nodes and a unique key prefix for the write-throughput benchmark.
/// BenchmarkDotNet iteration hooks are synchronous, hence the blocking wait on async init.
/// </summary>
[IterationSetup(Target = nameof(OfflineBacklogWriteThroughput100k))]
public void SetupOfflineWriteThroughput()
{
_currentPrefix = $"offline-write-{Interlocked.Increment(ref _runSequence):D6}";
InitializeIterationNodesAsync().GetAwaiter().GetResult();
}
[Benchmark(Description = "Offline backlog write throughput (10K ops)", OperationsPerInvoke = BacklogOperationCount)]
public async Task OfflineBacklogWriteThroughput100k()
{
await WriteBatchAsync(_currentPrefix, BacklogOperationCount);
}
/// <summary>
/// Prepares nodes and pre-writes the backlog so the benchmark body measures only re-sync time.
/// </summary>
[IterationSetup(Target = nameof(OfflineNodeResyncDurationAfter100kBacklog))]
public void SetupOfflineResyncBenchmark()
{
_currentPrefix = $"offline-resync-{Interlocked.Increment(ref _runSequence):D6}";
InitializeIterationNodesAsync().GetAwaiter().GetResult();
WriteBatchAsync(_currentPrefix, BacklogOperationCount).GetAwaiter().GetResult();
}
[Benchmark(Description = "Offline node re-sync duration after 10K backlog")]
public async Task OfflineNodeResyncDurationAfter100kBacklog()
{
// Starting the offline node triggers sync; completion is detected by polling replica row count.
await _offlineNode.StartAsync();
await WaitForReplicationAsync(_currentPrefix, BacklogOperationCount, TimeSpan.FromMinutes(3));
}
/// <summary>
/// Sequentially upserts <paramref name="count"/> users keyed under <paramref name="prefix"/>.
/// </summary>
private async Task WriteBatchAsync(string prefix, int count)
{
for (var i = 0; i < count; i++)
{
string userId = $"{prefix}-{i:D6}";
await _onlineNode.UpsertUserAsync(CreateUser(userId));
}
}
/// <summary>
/// Polls the offline node until the expected number of prefixed users has replicated,
/// or throws <see cref="TimeoutException"/> after the deadline.
/// </summary>
private async Task WaitForReplicationAsync(string prefix, int expectedCount, TimeSpan timeout)
{
DateTime deadline = DateTime.UtcNow.Add(timeout);
while (DateTime.UtcNow < deadline)
{
if (_offlineNode.CountUsersWithPrefix(prefix) >= expectedCount)
return;
await Task.Delay(250);
}
int replicatedCount = _offlineNode.CountUsersWithPrefix(prefix);
throw new TimeoutException(
$"Timed out waiting for re-sync. Expected {expectedCount}, replicated {replicatedCount}.");
}
// Builds a deterministic benchmark user for the given id.
private static User CreateUser(string userId)
{
return new User
{
Id = userId,
Name = $"user-{userId}",
Age = 30,
Address = new Address { City = "OfflineBenchmarkCity" }
};
}
// Reserves an ephemeral port by binding to port 0, then releases it for the node to use.
// NOTE(review): the port is only free until another process binds it — small race window.
private static int GetAvailableTcpPort()
{
using var listener = new TcpListener(IPAddress.Loopback, 0);
listener.Start();
return ((IPEndPoint)listener.LocalEndpoint).Port;
}
/// <summary>
/// Creates an online/offline node pair on distinct loopback ports, each configured with the
/// other as a known peer; only the online node is started here.
/// </summary>
private async Task InitializeIterationNodesAsync()
{
int onlinePort = GetAvailableTcpPort();
int offlinePort = GetAvailableTcpPort();
while (offlinePort == onlinePort)
offlinePort = GetAvailableTcpPort();
string clusterToken = Guid.NewGuid().ToString("N");
_onlineNode = BenchmarkPeerNode.Create(
"offline-benchmark-online",
onlinePort,
clusterToken,
[
new KnownPeerConfiguration
{
NodeId = "offline-benchmark-offline",
Host = "127.0.0.1",
Port = offlinePort
}
]);
_offlineNode = BenchmarkPeerNode.Create(
"offline-benchmark-offline",
offlinePort,
clusterToken,
[
new KnownPeerConfiguration
{
NodeId = "offline-benchmark-online",
Host = "127.0.0.1",
Port = onlinePort
}
]);
await _onlineNode.StartAsync();
// Brief settle delay so the online node's listener is ready before the iteration body runs.
await Task.Delay(250);
}
}

View File

@@ -0,0 +1,50 @@
using System;
using System.Collections.Generic;
namespace ZB.MOM.WW.CDBBC.E2E.Benchmark.Tests;
/// <summary>
/// Plain data holder mirroring the shape of a typical Serilog log event.
/// </summary>
public sealed class SerilogLogEntry
{
    /// <summary>
    /// Moment the log event was emitted.
    /// </summary>
    public DateTimeOffset Timestamp { get; set; }

    /// <summary>
    /// Severity of the event (for example: Information, Warning, Error).
    /// </summary>
    public string Level { get; set; } = "Information";

    /// <summary>
    /// Category/logger name, usually the emitting class.
    /// </summary>
    public string? LoggerName { get; set; }

    /// <summary>
    /// Identifier tying this entry to a correlation context such as a request.
    /// </summary>
    public string? ContextId { get; set; }

    /// <summary>
    /// Raw Serilog message template.
    /// </summary>
    public string MessageTemplate { get; set; } = string.Empty;

    /// <summary>
    /// Message text after template rendering.
    /// </summary>
    public string? RenderedMessage { get; set; }

    /// <summary>
    /// Serialized exception details, when present.
    /// </summary>
    public string? Exception { get; set; }

    /// <summary>
    /// Structured properties captured from the Serilog context; keys compare ordinally.
    /// </summary>
    public Dictionary<string, object?> ContextProperties { get; set; } =
        new Dictionary<string, object?>(StringComparer.Ordinal);
}

View File

@@ -0,0 +1,440 @@
using System.Diagnostics;
using System.Text;
using BenchmarkDotNet.Attributes;
using SurrealDb.Net.Models;
using SurrealDb.Net.Models.Response;
using ZB.MOM.WW.CBDDC.Persistence.Surreal;
namespace ZB.MOM.WW.CDBBC.E2E.Benchmark.Tests;
/// <summary>
/// Benchmarks a Serilog-style logging workload against the embedded SurrealDB
/// (RocksDB-backed) client: seeds 100,000 log rows plus an exploded key/value
/// side table, then measures four indexed query patterns and on-disk size.
/// </summary>
/// <remarks>
/// NOTE(review): this file's namespace spells "CDBBC" while sibling projects use
/// "CBDDC" — confirm the spelling is intentional.
/// </remarks>
[MemoryDiagnoser]
[SimpleJob(launchCount: 1, warmupCount: 1, iterationCount: 3)]
public class SurrealLogStorageBenchmarks
{
    // Size of the seeded corpus and the number of log rows upserted per SurrealQL transaction.
    private const int LogRecordCount = 100_000;
    private const int InsertBatchSize = 500;

    // Main log table, and the key/value table that explodes contextValues for arbitrary-key lookups.
    private const string LogTable = "benchmark_log_entry";
    private const string LogKvTable = "benchmark_log_kv";

    // Fixed pool of logger/category names cycled through the seed data.
    private static readonly string[] LoggerNames =
    [
        "Api.RequestHandler",
        "Api.AuthController",
        "Api.OrderController",
        "Api.InventoryController",
        "Api.CustomerController",
        "Workers.OutboxPublisher",
        "Workers.NotificationDispatcher",
        "Infrastructure.SqlRepository",
        "Infrastructure.CacheService",
        "Infrastructure.HttpClient",
        "Domain.OrderService",
        "Domain.PricingService"
    ];

    // Fixed pool of tenant identifiers cycled through the seeded context values.
    private static readonly string[] TenantIds =
    [
        "tenant-01",
        "tenant-02",
        "tenant-03",
        "tenant-04",
        "tenant-05",
        "tenant-06",
        "tenant-07",
        "tenant-08"
    ];

    // Embedded client and its on-disk location; created in GlobalSetup, never disposed (see GlobalCleanup).
    private CBDDCSurrealEmbeddedClient _surrealClient = null!;
    private string _databasePath = string.Empty;
    private string _workDir = string.Empty;

    // Base timestamp of the seed corpus; each row is offset by sequence * 10 ms from this.
    private DateTime _seedBaseUtc;

    // Time window used by the range-filtered benchmark queries.
    private DateTime _queryRangeStartUtc;
    private DateTime _queryRangeEndUtc;

    // Pre-computed query parameters so benchmark iterations do no per-call setup work.
    private string _contextIdQueryValue = string.Empty;
    private string _loggerQueryValue = string.Empty;
    private string _contextStringKeyQueryValue = string.Empty;
    private string _contextStringValueQueryValue = string.Empty;
    private string _contextNumericKeyQueryValue = string.Empty;
    private int _contextNumericValueQueryValue;

    /// <summary>
    /// Creates a throwaway RocksDB store, defines the schema/indexes, seeds the
    /// full corpus, and prints seed duration plus resulting database size.
    /// </summary>
    [GlobalSetup]
    public async Task GlobalSetupAsync()
    {
        _workDir = Path.Combine(Path.GetTempPath(), $"cbddc-serilog-benchmark-{Guid.NewGuid():N}");
        Directory.CreateDirectory(_workDir);
        _databasePath = Path.Combine(_workDir, "serilog.rocksdb");
        _surrealClient = new CBDDCSurrealEmbeddedClient(
            new CBDDCSurrealEmbeddedOptions
            {
                Endpoint = "rocksdb://local",
                DatabasePath = _databasePath,
                // Unique database name per run so repeated runs never share state.
                Namespace = "cbddc_benchmark",
                Database = $"serilog_{Guid.NewGuid():N}",
                // CDC is disabled: this benchmark measures raw storage, not change capture.
                Cdc = new CBDDCSurrealCdcOptions { Enabled = false }
            });
        await _surrealClient.InitializeAsync();
        await DefineSchemaAndIndexesAsync();
        _seedBaseUtc = DateTime.UtcNow.AddDays(-1);
        // Target a contextId from the middle of the corpus (each contextId spans 10 rows).
        _contextIdQueryValue = BuildContextId(LogRecordCount / 2);
        _loggerQueryValue = LoggerNames[3];
        _contextStringKeyQueryValue = "tenantId";
        _contextStringValueQueryValue = TenantIds[5];
        _contextNumericKeyQueryValue = "statusCode";
        _contextNumericValueQueryValue = 500;
        // 30-second window starting 6 minutes into the seeded timeline.
        _queryRangeStartUtc = _seedBaseUtc.AddMinutes(6);
        _queryRangeEndUtc = _queryRangeStartUtc.AddSeconds(30);
        var seedTimer = Stopwatch.StartNew();
        await InsertLogRecordsAsync();
        seedTimer.Stop();
        long sizeBytes = CalculatePathSizeBytes(_databasePath);
        Console.WriteLine(
            $"Seeded {LogRecordCount:N0} records in {seedTimer.Elapsed.TotalSeconds:F2}s. " +
            $"RocksDB size: {sizeBytes / (1024d * 1024d):F2} MiB ({sizeBytes:N0} bytes). Path: {_databasePath}");
    }

    /// <summary>
    /// Intentionally skips client disposal; the temp directory is left behind.
    /// </summary>
    [GlobalCleanup]
    public Task GlobalCleanupAsync()
    {
        // Avoid explicit Surreal embedded disposal in benchmark child processes due to a known native callback race.
        return Task.CompletedTask;
    }

    /// <summary>
    /// Measures lookup by correlation id, served by the (contextId, timestamp) index.
    /// </summary>
    [Benchmark(Description = "Query by contextId (latest 200 rows)")]
    public async Task QueryByContextIdAsync()
    {
        await ExecuteQueryAsync(
            $"""
            SELECT * FROM {LogTable}
            WHERE contextId = $contextId
            ORDER BY timestamp DESC
            LIMIT 200;
            """,
            new Dictionary<string, object?> { ["contextId"] = _contextIdQueryValue });
    }

    /// <summary>
    /// Measures lookup by logger name within a time window, served by the
    /// (loggerName, timestamp) index.
    /// </summary>
    [Benchmark(Description = "Query by loggerName + timestamp range (latest 200 rows)")]
    public async Task QueryByLoggerAndTimestampAsync()
    {
        await ExecuteQueryAsync(
            $"""
            SELECT * FROM {LogTable}
            WHERE loggerName = $loggerName
                AND timestamp >= $fromTs
                AND timestamp <= $toTs
            ORDER BY timestamp DESC
            LIMIT 200;
            """,
            new Dictionary<string, object?>
            {
                ["loggerName"] = _loggerQueryValue,
                ["fromTs"] = _queryRangeStartUtc,
                ["toTs"] = _queryRangeEndUtc
            });
    }

    /// <summary>
    /// Measures a two-step lookup on an arbitrary string context property:
    /// first resolve matching log ids from the KV side table (indexed on
    /// loggerName, key, valueStr, timestamp), then fetch the full rows.
    /// </summary>
    [Benchmark(Description = "Query by loggerName + timestamp + arbitrary context string key/value")]
    public async Task QueryByLoggerTimestampAndContextKeyAsync()
    {
        await ExecuteQueryAsync(
            $"""
            LET $logIds = (
                SELECT VALUE logId FROM {LogKvTable}
                WHERE loggerName = $loggerName
                    AND key = $contextKey
                    AND valueStr = $contextValueStr
                    AND timestamp >= $fromTs
                    AND timestamp <= $toTs
                ORDER BY timestamp DESC
                LIMIT 200
            );
            SELECT * FROM {LogTable}
            WHERE id INSIDE $logIds
            ORDER BY timestamp DESC
            LIMIT 200;
            """,
            new Dictionary<string, object?>
            {
                ["loggerName"] = _loggerQueryValue,
                ["fromTs"] = _queryRangeStartUtc,
                ["toTs"] = _queryRangeEndUtc,
                ["contextKey"] = _contextStringKeyQueryValue,
                ["contextValueStr"] = _contextStringValueQueryValue
            });
    }

    /// <summary>
    /// Same two-step shape as the string-valued variant, but matching on the
    /// numeric column (valueNum) of the KV side table.
    /// </summary>
    [Benchmark(Description = "Query by loggerName + timestamp + arbitrary context number key/value")]
    public async Task QueryByLoggerTimestampAndNumericContextKeyAsync()
    {
        await ExecuteQueryAsync(
            $"""
            LET $logIds = (
                SELECT VALUE logId FROM {LogKvTable}
                WHERE loggerName = $loggerName
                    AND key = $contextKey
                    AND valueNum = $contextValueNum
                    AND timestamp >= $fromTs
                    AND timestamp <= $toTs
                ORDER BY timestamp DESC
                LIMIT 200
            );
            SELECT * FROM {LogTable}
            WHERE id INSIDE $logIds
            ORDER BY timestamp DESC
            LIMIT 200;
            """,
            new Dictionary<string, object?>
            {
                ["loggerName"] = _loggerQueryValue,
                ["fromTs"] = _queryRangeStartUtc,
                ["toTs"] = _queryRangeEndUtc,
                ["contextKey"] = _contextNumericKeyQueryValue,
                ["contextValueNum"] = _contextNumericValueQueryValue
            });
    }

    /// <summary>
    /// Reports the current on-disk size of the RocksDB directory in bytes.
    /// </summary>
    [Benchmark(Description = "RocksDB size (bytes)")]
    public long GetDatabaseFileSizeBytes()
    {
        return CalculatePathSizeBytes(_databasePath);
    }

    /// <summary>
    /// Defines both SCHEMAFULL tables and their indexes. Uses OVERWRITE so the
    /// definitions are idempotent across setup runs.
    /// </summary>
    private async Task DefineSchemaAndIndexesAsync()
    {
        string schemaSql =
            $"""
            DEFINE TABLE OVERWRITE {LogTable} SCHEMAFULL;
            DEFINE FIELD OVERWRITE timestamp ON TABLE {LogTable} TYPE datetime;
            DEFINE FIELD OVERWRITE level ON TABLE {LogTable} TYPE string;
            DEFINE FIELD OVERWRITE loggerName ON TABLE {LogTable} TYPE option<string>;
            DEFINE FIELD OVERWRITE contextId ON TABLE {LogTable} TYPE option<string>;
            DEFINE FIELD OVERWRITE messageTemplate ON TABLE {LogTable} TYPE string;
            DEFINE FIELD OVERWRITE renderedMessage ON TABLE {LogTable} TYPE option<string>;
            DEFINE FIELD OVERWRITE exception ON TABLE {LogTable} TYPE option<string>;
            DEFINE FIELD OVERWRITE contextValues ON TABLE {LogTable} TYPE object FLEXIBLE;
            DEFINE INDEX OVERWRITE idx_log_contextid_ts
                ON TABLE {LogTable} COLUMNS contextId, timestamp;
            DEFINE INDEX OVERWRITE idx_log_logger_ts
                ON TABLE {LogTable} COLUMNS loggerName, timestamp;
            DEFINE TABLE OVERWRITE {LogKvTable} SCHEMAFULL;
            DEFINE FIELD OVERWRITE logId ON TABLE {LogKvTable} TYPE record<{LogTable}>;
            DEFINE FIELD OVERWRITE loggerName ON TABLE {LogKvTable} TYPE string;
            DEFINE FIELD OVERWRITE timestamp ON TABLE {LogKvTable} TYPE datetime;
            DEFINE FIELD OVERWRITE key ON TABLE {LogKvTable} TYPE string;
            DEFINE FIELD OVERWRITE valueStr ON TABLE {LogKvTable} TYPE option<string>;
            DEFINE FIELD OVERWRITE valueNum ON TABLE {LogKvTable} TYPE option<number>;
            DEFINE FIELD OVERWRITE valueBool ON TABLE {LogKvTable} TYPE option<bool>;
            DEFINE INDEX OVERWRITE idx_logkv_logger_key_str_ts
                ON TABLE {LogKvTable} COLUMNS loggerName, key, valueStr, timestamp;
            DEFINE INDEX OVERWRITE idx_logkv_logger_key_num_ts
                ON TABLE {LogKvTable} COLUMNS loggerName, key, valueNum, timestamp;
            DEFINE INDEX OVERWRITE idx_logkv_logger_key_bool_ts
                ON TABLE {LogKvTable} COLUMNS loggerName, key, valueBool, timestamp;
            DEFINE INDEX OVERWRITE idx_logkv_logid
                ON TABLE {LogKvTable} COLUMNS logId;
            """;
        var response = await _surrealClient.RawQueryAsync(schemaSql);
        EnsureSuccessfulResponse(response, "Schema definition");
    }

    /// <summary>
    /// Seeds the corpus in batched SurrealQL transactions: each batch UPSERTs
    /// up to <see cref="InsertBatchSize"/> log rows plus their exploded KV rows,
    /// all passed as bound parameters rather than inlined literals.
    /// </summary>
    private async Task InsertLogRecordsAsync()
    {
        for (var batchStart = 0; batchStart < LogRecordCount; batchStart += InsertBatchSize)
        {
            int batchCount = Math.Min(InsertBatchSize, LogRecordCount - batchStart);
            var sqlBuilder = new StringBuilder();
            sqlBuilder.AppendLine("BEGIN TRANSACTION;");
            // Capacity hint covers only the 2 params per log row; KV rows add more (harmless undershoot).
            var parameters = new Dictionary<string, object?>(batchCount * 2);
            for (var offset = 0; offset < batchCount; offset++)
            {
                int sequence = batchStart + offset;
                string idParameterName = $"id{offset}";
                string recordParameterName = $"record{offset}";
                // Zero-padded ids keep record keys lexically ordered by sequence.
                string logId = $"log-{sequence:D8}";
                RecordId logRecordId = RecordId.From(LogTable, logId);
                IReadOnlyDictionary<string, object?> logRecord = CreateLogRecord(sequence);
                parameters[idParameterName] = logRecordId;
                parameters[recordParameterName] = logRecord;
                sqlBuilder.Append("UPSERT $")
                    .Append(idParameterName)
                    .Append(" CONTENT $")
                    .Append(recordParameterName)
                    .AppendLine(";");
                int kvOrdinal = 0;
                foreach (IReadOnlyDictionary<string, object?> kvRow in CreateKvRows(logId, logRecordId, logRecord))
                {
                    string kvIdParameterName = $"kvid{offset}_{kvOrdinal}";
                    string kvRecordParameterName = $"kvrecord{offset}_{kvOrdinal}";
                    parameters[kvIdParameterName] = RecordId.From(LogKvTable, $"{logId}-{kvOrdinal:D2}");
                    parameters[kvRecordParameterName] = kvRow;
                    sqlBuilder.Append("UPSERT $")
                        .Append(kvIdParameterName)
                        .Append(" CONTENT $")
                        .Append(kvRecordParameterName)
                        .AppendLine(";");
                    kvOrdinal++;
                }
            }
            sqlBuilder.AppendLine("COMMIT TRANSACTION;");
            var response = await _surrealClient.RawQueryAsync(sqlBuilder.ToString(), parameters);
            EnsureSuccessfulResponse(response, $"Insert batch starting at row {batchStart}");
        }
    }

    /// <summary>
    /// Builds one synthetic log row. Timestamps advance 10 ms per sequence from
    /// the seed base; every 20th row gets statusCode 500, every 2,500th row gets
    /// an exception (and, via <see cref="ResolveLogLevel"/>, level "Error").
    /// </summary>
    private IReadOnlyDictionary<string, object?> CreateLogRecord(int sequence)
    {
        DateTime timestamp = _seedBaseUtc.AddMilliseconds(sequence * 10L);
        string loggerName = LoggerNames[sequence % LoggerNames.Length];
        string tenantId = TenantIds[sequence % TenantIds.Length];
        bool isBackground = sequence % 7 == 0;
        string? exception = sequence % 2_500 == 0
            ? "System.InvalidOperationException: simulated benchmark exception."
            : null;
        var record = new Dictionary<string, object?>
        {
            ["timestamp"] = timestamp,
            ["level"] = ResolveLogLevel(sequence),
            ["contextId"] = BuildContextId(sequence),
            ["loggerName"] = loggerName,
            ["messageTemplate"] = "Processed request {RequestId} for {Route}",
            ["renderedMessage"] = $"Processed request req-{sequence:D8} for /api/items/{sequence % 250}",
            ["contextValues"] = new Dictionary<string, object?>
            {
                ["tenantId"] = tenantId,
                ["requestId"] = $"req-{sequence:D8}",
                ["route"] = $"/api/items/{sequence % 250}",
                ["statusCode"] = sequence % 20 == 0 ? 500 : 200,
                ["elapsedMs"] = 5 + (sequence % 200),
                ["nodeId"] = $"node-{sequence % 8:D2}",
                ["isBackground"] = isBackground
            }
        };
        // "exception" is omitted (not null) when absent, matching the option<string> field.
        if (!string.IsNullOrEmpty(exception))
            record["exception"] = exception;
        return record;
    }

    /// <summary>
    /// Explodes a log record's contextValues into typed KV rows, one per
    /// non-null property, copying loggerName/timestamp so the KV indexes can
    /// answer filtered lookups without joining back to the log table first.
    /// Yields nothing if the record is missing its expected fields.
    /// </summary>
    private static IEnumerable<IReadOnlyDictionary<string, object?>> CreateKvRows(
        string logId,
        RecordId logRecordId,
        IReadOnlyDictionary<string, object?> logRecord)
    {
        if (!logRecord.TryGetValue("loggerName", out object? loggerNameValue) || loggerNameValue is not string loggerName)
            yield break;
        if (!logRecord.TryGetValue("timestamp", out object? timestampValue) || timestampValue is not DateTime timestamp)
            yield break;
        if (!logRecord.TryGetValue("contextValues", out object? contextValuesObject) ||
            contextValuesObject is not IReadOnlyDictionary<string, object?> contextValues)
            yield break;
        foreach ((string key, object? value) in contextValues)
        {
            if (value == null) continue;
            var row = new Dictionary<string, object?>
            {
                ["logId"] = logRecordId,
                ["loggerName"] = loggerName,
                ["timestamp"] = timestamp,
                ["key"] = key
            };
            // Route the value into the column matching its runtime type; numbers
            // are widened to double so valueNum is uniformly comparable.
            switch (value)
            {
                case string stringValue:
                    row["valueStr"] = stringValue;
                    break;
                case bool boolValue:
                    row["valueBool"] = boolValue;
                    break;
                case sbyte or byte or short or ushort or int or uint or long or ulong or float or double or decimal:
                    row["valueNum"] = Convert.ToDouble(value);
                    break;
                default:
                    // Fallback: unknown types are stored by their string representation.
                    row["valueStr"] = value.ToString();
                    break;
            }
            yield return row;
        }
    }

    /// <summary>
    /// Runs a parameterized query and throws if SurrealDB reports any errors.
    /// </summary>
    private async Task ExecuteQueryAsync(string query, IReadOnlyDictionary<string, object?> parameters)
    {
        var response = await _surrealClient.RawQueryAsync(query, parameters);
        EnsureSuccessfulResponse(response, "Query execution");
    }

    /// <summary>
    /// Groups every 10 consecutive sequence numbers under one correlation id.
    /// </summary>
    private static string BuildContextId(int sequence)
    {
        return $"ctx-{sequence / 10:D6}";
    }

    /// <summary>
    /// Maps a sequence number to a level: Error every 2,500 rows, Warning every
    /// 500, otherwise Information.
    /// </summary>
    private static string ResolveLogLevel(int sequence)
    {
        if (sequence % 2_500 == 0) return "Error";
        if (sequence % 500 == 0) return "Warning";
        return "Information";
    }

    /// <summary>
    /// Returns the size in bytes of a file, or the recursive total of a
    /// directory; 0 when the path does not exist.
    /// </summary>
    private static long CalculatePathSizeBytes(string path)
    {
        if (File.Exists(path)) return new FileInfo(path).Length;
        if (!Directory.Exists(path)) return 0;
        long size = 0;
        foreach (string file in Directory.EnumerateFiles(path, "*", SearchOption.AllDirectories))
            size += new FileInfo(file).Length;
        return size;
    }

    /// <summary>
    /// Throws with a summary of (at most) the first three SurrealDB errors when
    /// the response reports any; no-op on success.
    /// </summary>
    private static void EnsureSuccessfulResponse(SurrealDbResponse response, string operation)
    {
        if (!response.HasErrors) return;
        string errorSummary = string.Join(
            " | ",
            response.Errors.Take(3).Select((error, index) => DescribeError(error, index)));
        throw new InvalidOperationException(
            $"{operation} failed with SurrealDB errors. Details: {errorSummary}");
    }

    /// <summary>
    /// Formats one error object for the failure message. Uses reflection over a
    /// fixed set of property names because the concrete error type comes from
    /// SurrealDb.Net and its shape is not referenced directly here
    /// (presumably it varies by driver version — confirm against the package).
    /// </summary>
    private static string DescribeError(object error, int index)
    {
        Type errorType = error.GetType();
        string[] fieldsToExtract = ["Status", "Details", "Description", "Information", "Code"];
        var extracted = new List<string>();
        foreach (string field in fieldsToExtract)
        {
            object? value = errorType.GetProperty(field)?.GetValue(error);
            if (value != null) extracted.Add($"{field}={value}");
        }
        if (extracted.Count == 0) return $"error[{index}] type={errorType.Name}";
        return $"error[{index}] type={errorType.Name} {string.Join(", ", extracted)}";
    }
}