Initial import of the CBDDC codebase with docs and tests. Add a .NET-focused gitignore to keep generated artifacts out of source control.
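The .gitignore file itself is not shown in this diff, so the following is only an illustrative sketch of the kind of entries a .NET-focused ignore file typically carries (build output, IDE state, test results); it is not the actual file added by this commit.

# Illustrative only - typical .NET ignore patterns, not the committed file
bin/
obj/
.vs/
*.user
TestResults/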
Some checks failed
CI / verify (push) Has been cancelled
505 tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/BLiteStoreExportImportTests.cs (Executable file)
@@ -0,0 +1,505 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
using Xunit;
using ZB.MOM.WW.CBDDC.Persistence;

namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;

/// <summary>
/// Tests for BLite persistence stores: Export, Import, Merge, Drop operations.
/// </summary>
public class BLiteStoreExportImportTests : IDisposable
{
    private readonly string _testDbPath;
    private readonly SampleDbContext _context;
    private readonly SampleDocumentStore _documentStore;
    private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
    private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
    private readonly BLiteSnapshotMetadataStore<SampleDbContext> _snapshotMetadataStore;
    private readonly IPeerNodeConfigurationProvider _configProvider;

    /// <summary>
    /// Initializes a new instance of the <see cref="BLiteStoreExportImportTests"/> class.
    /// </summary>
    public BLiteStoreExportImportTests()
    {
        _testDbPath = Path.Combine(Path.GetTempPath(), $"test-export-import-{Guid.NewGuid()}.blite");
        _context = new SampleDbContext(_testDbPath);
        _configProvider = CreateConfigProvider("test-node");
        var vectorClock = new VectorClockService();

        _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger<SampleDocumentStore>.Instance);
        _snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
            _context, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
        _oplogStore = new BLiteOplogStore<SampleDbContext>(
            _context, _documentStore, new LastWriteWinsConflictResolver(),
            vectorClock,
            _snapshotMetadataStore,
            NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
        _peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
            _context, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
    }

    #region OplogStore Tests

    /// <summary>
    /// Verifies that exporting oplog entries returns all persisted records.
    /// </summary>
    [Fact]
    public async Task OplogStore_ExportAsync_ReturnsAllEntries()
    {
        // Arrange
        var entry1 = CreateOplogEntry("col1", "key1", "node1", 1000);
        var entry2 = CreateOplogEntry("col2", "key2", "node1", 2000);
        await _oplogStore.AppendOplogEntryAsync(entry1);
        await _oplogStore.AppendOplogEntryAsync(entry2);

        // Act
        var exported = (await _oplogStore.ExportAsync()).ToList();

        // Assert
        exported.Count.ShouldBe(2);
        exported.ShouldContain(e => e.Key == "key1");
        exported.ShouldContain(e => e.Key == "key2");
    }

    /// <summary>
    /// Verifies that importing oplog entries adds them to the store.
    /// </summary>
    [Fact]
    public async Task OplogStore_ImportAsync_AddsEntries()
    {
        // Arrange
        var entries = new[]
        {
            CreateOplogEntry("col1", "imported1", "node1", 1000),
            CreateOplogEntry("col2", "imported2", "node1", 2000)
        };

        // Act
        await _oplogStore.ImportAsync(entries);

        // Assert
        var exported = (await _oplogStore.ExportAsync()).ToList();
        exported.Count.ShouldBe(2);
        exported.ShouldContain(e => e.Key == "imported1");
        exported.ShouldContain(e => e.Key == "imported2");
    }

    /// <summary>
    /// Verifies that merging oplog entries adds only entries that are not already present.
    /// </summary>
    [Fact]
    public async Task OplogStore_MergeAsync_OnlyAddsNewEntries()
    {
        // Arrange - Add existing entry
        var existing = CreateOplogEntry("col1", "existing", "node1", 1000);
        await _oplogStore.AppendOplogEntryAsync(existing);

        // Create entries to merge (one duplicate hash, one new)
        var toMerge = new[]
        {
            existing, // Same hash - should be skipped
            CreateOplogEntry("col2", "new-entry", "node1", 2000)
        };

        // Act
        await _oplogStore.MergeAsync(toMerge);

        // Assert
        var exported = (await _oplogStore.ExportAsync()).ToList();
        exported.Count.ShouldBe(2); // existing + new, not 3
    }

    /// <summary>
    /// Verifies that chain range lookup resolves entries by hash and returns the expected range.
    /// </summary>
    [Fact]
    public async Task OplogStore_GetChainRangeAsync_UsesHashLookup()
    {
        // Arrange
        var payload1 = JsonDocument.Parse("{\"test\":\"k1\"}").RootElement;
        var payload2 = JsonDocument.Parse("{\"test\":\"k2\"}").RootElement;
        var entry1 = new OplogEntry("col1", "k1", OperationType.Put, payload1, new HlcTimestamp(1000, 0, "node1"), "");
        var entry2 = new OplogEntry("col1", "k2", OperationType.Put, payload2, new HlcTimestamp(2000, 0, "node1"), entry1.Hash);

        await _oplogStore.AppendOplogEntryAsync(entry1);
        await _oplogStore.AppendOplogEntryAsync(entry2);
        await _context.SaveChangesAsync();

        // Act
        var range = (await _oplogStore.GetChainRangeAsync(entry1.Hash, entry2.Hash)).ToList();

        // Assert
        range.Count.ShouldBe(1);
        range[0].Hash.ShouldBe(entry2.Hash);
    }

    /// <summary>
    /// Verifies that dropping the oplog store removes all entries.
    /// </summary>
    [Fact]
    public async Task OplogStore_DropAsync_ClearsAllEntries()
    {
        // Arrange
        await _oplogStore.AppendOplogEntryAsync(CreateOplogEntry("col1", "key1", "node1", 1000));
        await _oplogStore.AppendOplogEntryAsync(CreateOplogEntry("col2", "key2", "node1", 2000));
        await _context.SaveChangesAsync();

        // Act
        await _oplogStore.DropAsync();

        // Assert
        var exported = (await _oplogStore.ExportAsync()).ToList();
        exported.ShouldBeEmpty();
    }

    #endregion

    #region PeerConfigurationStore Tests

    /// <summary>
    /// Verifies that exporting peer configurations returns all persisted peers.
    /// </summary>
    [Fact]
    public async Task PeerConfigStore_ExportAsync_ReturnsAllPeers()
    {
        // Arrange
        await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer1", "host1:5000"));
        await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer2", "host2:5000"));

        // Act
        var exported = (await _peerConfigStore.ExportAsync()).ToList();

        // Assert
        exported.Count.ShouldBe(2);
        exported.ShouldContain(p => p.NodeId == "peer1");
        exported.ShouldContain(p => p.NodeId == "peer2");
    }

    /// <summary>
    /// Verifies that importing peer configurations adds peers to the store.
    /// </summary>
    [Fact]
    public async Task PeerConfigStore_ImportAsync_AddsPeers()
    {
        // Arrange
        var peers = new[]
        {
            CreatePeerConfig("imported1", "host1:5000"),
            CreatePeerConfig("imported2", "host2:5000")
        };

        // Act
        await _peerConfigStore.ImportAsync(peers);

        // Assert
        var exported = (await _peerConfigStore.ExportAsync()).ToList();
        exported.Count.ShouldBe(2);
    }

    /// <summary>
    /// Verifies that merging peer configurations adds only new peers.
    /// </summary>
    [Fact]
    public async Task PeerConfigStore_MergeAsync_OnlyAddsNewPeers()
    {
        // Arrange - Add existing peer
        var existing = CreatePeerConfig("existing-peer", "host1:5000");
        await _peerConfigStore.SaveRemotePeerAsync(existing);
        await _context.SaveChangesAsync();

        var toMerge = new[]
        {
            CreatePeerConfig("existing-peer", "host1-updated:5000"), // Same ID - should be skipped
            CreatePeerConfig("new-peer", "host2:5000")
        };

        // Act
        await _peerConfigStore.MergeAsync(toMerge);

        // Assert
        var exported = (await _peerConfigStore.ExportAsync()).ToList();
        exported.Count.ShouldBe(2);
    }

    /// <summary>
    /// Verifies that dropping peer configurations removes all peers.
    /// </summary>
    [Fact]
    public async Task PeerConfigStore_DropAsync_ClearsAllPeers()
    {
        // Arrange
        await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer1", "host1:5000"));
        await _peerConfigStore.SaveRemotePeerAsync(CreatePeerConfig("peer2", "host2:5000"));
        await _context.SaveChangesAsync();

        // Act
        await _peerConfigStore.DropAsync();

        // Assert
        var exported = (await _peerConfigStore.ExportAsync()).ToList();
        exported.ShouldBeEmpty();
    }

    #endregion

    #region SnapshotMetadataStore Tests

    /// <summary>
    /// Verifies that exporting snapshot metadata returns all persisted metadata entries.
    /// </summary>
    [Fact]
    public async Task SnapshotMetadataStore_ExportAsync_ReturnsAllMetadata()
    {
        // Arrange
        var meta1 = CreateSnapshotMetadata("node1", 1000);
        var meta2 = CreateSnapshotMetadata("node2", 2000);
        await _snapshotMetadataStore.InsertSnapshotMetadataAsync(meta1);
        await _snapshotMetadataStore.InsertSnapshotMetadataAsync(meta2);

        // Act
        var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();

        // Assert
        exported.Count.ShouldBe(2);
        exported.ShouldContain(m => m.NodeId == "node1");
        exported.ShouldContain(m => m.NodeId == "node2");
    }

    /// <summary>
    /// Verifies that importing snapshot metadata adds metadata entries to the store.
    /// </summary>
    [Fact]
    public async Task SnapshotMetadataStore_ImportAsync_AddsMetadata()
    {
        // Arrange
        var metadata = new[]
        {
            CreateSnapshotMetadata("imported1", 1000),
            CreateSnapshotMetadata("imported2", 2000)
        };

        // Act
        await _snapshotMetadataStore.ImportAsync(metadata);

        // Assert
        var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();
        exported.Count.ShouldBe(2);
    }

    /// <summary>
    /// Verifies that merging snapshot metadata adds only entries with new node identifiers.
    /// </summary>
    [Fact]
    public async Task SnapshotMetadataStore_MergeAsync_OnlyAddsNewMetadata()
    {
        // Arrange - Add existing metadata
        var existing = CreateSnapshotMetadata("existing-node", 1000);
        await _snapshotMetadataStore.InsertSnapshotMetadataAsync(existing);

        var toMerge = new[]
        {
            CreateSnapshotMetadata("existing-node", 9999), // Same NodeId - should be skipped
            CreateSnapshotMetadata("new-node", 2000)
        };

        // Act
        await _snapshotMetadataStore.MergeAsync(toMerge);

        // Assert
        var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();
        exported.Count.ShouldBe(2);
    }

    /// <summary>
    /// Verifies that dropping snapshot metadata removes all metadata entries.
    /// </summary>
    [Fact]
    public async Task SnapshotMetadataStore_DropAsync_ClearsAllMetadata()
    {
        // Arrange
        await _snapshotMetadataStore.InsertSnapshotMetadataAsync(CreateSnapshotMetadata("node1", 1000));
        await _snapshotMetadataStore.InsertSnapshotMetadataAsync(CreateSnapshotMetadata("node2", 2000));

        // Act
        await _snapshotMetadataStore.DropAsync();

        // Assert
        var exported = (await _snapshotMetadataStore.ExportAsync()).ToList();
        exported.ShouldBeEmpty();
    }

    #endregion

    #region DocumentStore Tests

    /// <summary>
    /// Verifies that exporting documents returns all persisted documents.
    /// </summary>
    [Fact]
    public async Task DocumentStore_ExportAsync_ReturnsAllDocuments()
    {
        // Arrange
        await _context.Users.InsertAsync(new User { Id = "u1", Name = "User 1", Age = 20 });
        await _context.Users.InsertAsync(new User { Id = "u2", Name = "User 2", Age = 25 });
        await _context.SaveChangesAsync();

        // Act
        var exported = (await _documentStore.ExportAsync()).ToList();

        // Assert
        exported.Count.ShouldBe(2);
        exported.ShouldContain(d => d.Key == "u1");
        exported.ShouldContain(d => d.Key == "u2");
    }

    /// <summary>
    /// Verifies that importing documents adds them to the underlying store.
    /// </summary>
    [Fact]
    public async Task DocumentStore_ImportAsync_AddsDocuments()
    {
        // Arrange
        var docs = new[]
        {
            CreateDocument("Users", "imported1", new User { Id = "imported1", Name = "Imported 1", Age = 30 }),
            CreateDocument("Users", "imported2", new User { Id = "imported2", Name = "Imported 2", Age = 35 })
        };

        // Act
        await _documentStore.ImportAsync(docs);

        // Assert
        var u1 = _context.Users.FindById("imported1");
        var u2 = _context.Users.FindById("imported2");
        u1.ShouldNotBeNull();
        u2.ShouldNotBeNull();
        u1.Name.ShouldBe("Imported 1");
        u2.Name.ShouldBe("Imported 2");
    }

    /// <summary>
    /// Verifies that document merge behavior honors conflict resolution.
    /// </summary>
    [Fact]
    public async Task DocumentStore_MergeAsync_UsesConflictResolution()
    {
        // Arrange - Add existing document
        await _context.Users.InsertAsync(new User { Id = "merge-user", Name = "Original", Age = 20 });
        await _context.SaveChangesAsync();

        // Create document to merge with newer timestamp
        var newerDoc = new Document(
            "Users",
            "merge-user",
            JsonDocument.Parse("{\"Id\":\"merge-user\",\"Name\":\"Updated\",\"Age\":25}").RootElement,
            new HlcTimestamp(DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() + 10000, 0, "other-node"),
            false
        );

        // Act
        await _documentStore.MergeAsync([newerDoc]);

        // Assert - With LastWriteWins, newer document should win
        var user = _context.Users.FindById("merge-user");
        user.ShouldNotBeNull();
        user.Name.ShouldBe("Updated");
        user.Age.ShouldBe(25);
    }

    /// <summary>
    /// Verifies that dropping documents removes all persisted documents.
    /// </summary>
    [Fact]
    public async Task DocumentStore_DropAsync_ClearsAllDocuments()
    {
        // Arrange
        await _context.Users.InsertAsync(new User { Id = "drop1", Name = "User 1", Age = 20 });
        await _context.Users.InsertAsync(new User { Id = "drop2", Name = "User 2", Age = 25 });
        await _context.SaveChangesAsync();

        // Act
        await _documentStore.DropAsync();

        // Assert
        var exported = (await _documentStore.ExportAsync()).ToList();
        exported.ShouldBeEmpty();
    }

    #endregion

    #region Helpers

    private static OplogEntry CreateOplogEntry(string collection, string key, string nodeId, long physicalTime)
    {
        var payload = JsonDocument.Parse($"{{\"test\": \"{key}\"}}").RootElement;
        var timestamp = new HlcTimestamp(physicalTime, 0, nodeId);
        return new OplogEntry(collection, key, OperationType.Put, payload, timestamp, "");
    }

    private static RemotePeerConfiguration CreatePeerConfig(string nodeId, string address)
    {
        return new RemotePeerConfiguration
        {
            NodeId = nodeId,
            Address = address,
            Type = PeerType.StaticRemote,
            IsEnabled = true,
            InterestingCollections = new List<string> { "Users" }
        };
    }

    private static SnapshotMetadata CreateSnapshotMetadata(string nodeId, long physicalTime)
    {
        return new SnapshotMetadata
        {
            NodeId = nodeId,
            TimestampPhysicalTime = physicalTime,
            TimestampLogicalCounter = 0,
            Hash = $"hash-{nodeId}"
        };
    }

    private static Document CreateDocument<T>(string collection, string key, T entity) where T : class
    {
        var json = JsonSerializer.Serialize(entity);
        var content = JsonDocument.Parse(json).RootElement;
        return new Document(collection, key, content, new HlcTimestamp(0, 0, ""), false);
    }

    #endregion

    /// <summary>
    /// Disposes test resources and removes the temporary database file.
    /// </summary>
    public void Dispose()
    {
        _documentStore?.Dispose();
        _context?.Dispose();

        if (File.Exists(_testDbPath))
        {
            try { File.Delete(_testDbPath); } catch { }
        }
    }

    private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId)
    {
        var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
        configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = nodeId,
            TcpPort = 5000,
            AuthToken = "test-token",
            OplogRetentionHours = 24,
            MaintenanceIntervalMinutes = 60
        });
        return configProvider;
    }
}
@@ -0,0 +1,3 @@
global using ZB.MOM.WW.CBDDC.Sample.Console;
global using NSubstitute;
global using Shouldly;
@@ -0,0 +1,108 @@
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Persistence.BLite;

namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;

public class PeerOplogConfirmationStoreTests : IDisposable
{
    private readonly string _testDbPath;
    private readonly SampleDbContext _context;
    private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _store;

    /// <summary>
    /// Initializes a new instance of the <see cref="PeerOplogConfirmationStoreTests"/> class.
    /// </summary>
    public PeerOplogConfirmationStoreTests()
    {
        _testDbPath = Path.Combine(Path.GetTempPath(), $"test-peer-confirmation-{Guid.NewGuid()}.blite");
        _context = new SampleDbContext(_testDbPath);
        _store = new BLitePeerOplogConfirmationStore<SampleDbContext>(
            _context,
            NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
    }

    /// <summary>
    /// Verifies that ensuring peer registration multiple times remains idempotent.
    /// </summary>
    [Fact]
    public async Task EnsurePeerRegisteredAsync_IsIdempotent()
    {
        await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);

        var active = (await _store.GetActiveTrackedPeersAsync()).ToList();
        var exported = (await _store.ExportAsync()).ToList();

        active.Count.ShouldBe(1);
        active[0].ShouldBe("peer-a");
        exported.Count(x => x.PeerNodeId == "peer-a" && x.SourceNodeId == "__peer_registration__").ShouldBe(1);
    }

    /// <summary>
    /// Verifies create, update, and read flows for peer oplog confirmations.
    /// </summary>
    [Fact]
    public async Task ConfirmationStore_CrudFlow_Works()
    {
        await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await _store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 1, "source-1"), "hash-1");

        var firstRead = (await _store.GetConfirmationsForPeerAsync("peer-a")).ToList();
        firstRead.Count.ShouldBe(1);
        firstRead[0].ConfirmedWall.ShouldBe(100);
        firstRead[0].ConfirmedLogic.ShouldBe(1);
        firstRead[0].ConfirmedHash.ShouldBe("hash-1");

        await _store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(120, 2, "source-1"), "hash-2");
        await _store.UpdateConfirmationAsync("peer-a", "source-2", new HlcTimestamp(130, 0, "source-2"), "hash-3");

        var secondRead = (await _store.GetConfirmationsForPeerAsync("peer-a")).OrderBy(x => x.SourceNodeId).ToList();
        var allConfirmations = (await _store.GetConfirmationsAsync()).ToList();

        secondRead.Count.ShouldBe(2);
        secondRead[0].SourceNodeId.ShouldBe("source-1");
        secondRead[0].ConfirmedWall.ShouldBe(120);
        secondRead[0].ConfirmedLogic.ShouldBe(2);
        secondRead[0].ConfirmedHash.ShouldBe("hash-2");
        secondRead[1].SourceNodeId.ShouldBe("source-2");
        secondRead[1].ConfirmedWall.ShouldBe(130);
        secondRead[1].ConfirmedLogic.ShouldBe(0);
        secondRead[1].ConfirmedHash.ShouldBe("hash-3");
        allConfirmations.Count.ShouldBe(2);
    }

    /// <summary>
    /// Verifies that removing peer tracking deactivates tracking records for that peer.
    /// </summary>
    [Fact]
    public async Task RemovePeerTrackingAsync_DeactivatesPeerTracking()
    {
        await _store.EnsurePeerRegisteredAsync("peer-a", "10.0.0.10:5050", PeerType.StaticRemote);
        await _store.EnsurePeerRegisteredAsync("peer-b", "10.0.0.11:5050", PeerType.StaticRemote);
        await _store.UpdateConfirmationAsync("peer-a", "source-1", new HlcTimestamp(100, 0, "source-1"), "hash-a");
        await _store.UpdateConfirmationAsync("peer-b", "source-1", new HlcTimestamp(100, 0, "source-1"), "hash-b");

        await _store.RemovePeerTrackingAsync("peer-a");

        var activePeers = (await _store.GetActiveTrackedPeersAsync()).ToList();
        var exported = (await _store.ExportAsync()).ToList();
        var peerARows = exported.Where(x => x.PeerNodeId == "peer-a").ToList();

        activePeers.ShouldContain("peer-b");
        activePeers.ShouldNotContain("peer-a");
        peerARows.ShouldNotBeEmpty();
        peerARows.All(x => !x.IsActive).ShouldBeTrue();
    }

    /// <inheritdoc />
    public void Dispose()
    {
        _context?.Dispose();
        if (File.Exists(_testDbPath))
        {
            try { File.Delete(_testDbPath); } catch { }
        }
    }
}
226 tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SampleDbContextTests.cs (Executable file)
@@ -0,0 +1,226 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;

namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;

public class SampleDbContextTests : IDisposable
{
    private readonly string _dbPath;
    private readonly SampleDbContext _context;

    /// <summary>
    /// Initializes a new test context backed by a temporary database file.
    /// </summary>
    public SampleDbContextTests()
    {
        _dbPath = Path.Combine(Path.GetTempPath(), $"test_sample_{Guid.NewGuid()}.db");
        _context = new SampleDbContext(_dbPath);
    }

    /// <summary>
    /// Releases test resources and removes the temporary database file.
    /// </summary>
    public void Dispose()
    {
        _context?.Dispose();
        if (File.Exists(_dbPath))
        {
            try { File.Delete(_dbPath); } catch { }
        }
    }

    /// <summary>
    /// Verifies that required collections are initialized in the context.
    /// </summary>
    [Fact]
    public void Context_ShouldInitializeCollections()
    {
        // Verify that the collections have been initialized
        _context.ShouldNotBeNull();
        _context.Users.ShouldNotBeNull("Users collection should be initialized by BLite");
        _context.TodoLists.ShouldNotBeNull("TodoLists collection should be initialized by BLite");
        _context.OplogEntries.ShouldNotBeNull("OplogEntries collection should be initialized by BLite");
    }

    /// <summary>
    /// Verifies that inserting a user persists the document.
    /// </summary>
    [Fact]
    public async Task Users_Insert_ShouldPersist()
    {
        // Arrange
        var user = new User
        {
            Id = "user1",
            Name = "Alice",
            Age = 30,
            Address = new Address { City = "Rome" }
        };

        // Act
        await _context.Users.InsertAsync(user);
        await _context.SaveChangesAsync();

        // Assert
        var retrieved = _context.Users.FindById("user1");
        retrieved.ShouldNotBeNull();
        retrieved!.Name.ShouldBe("Alice");
        retrieved.Age.ShouldBe(30);
        retrieved.Address?.City.ShouldBe("Rome");
    }

    /// <summary>
    /// Verifies that updating a user modifies the existing document.
    /// </summary>
    [Fact]
    public async Task Users_Update_ShouldModifyExisting()
    {
        // Arrange
        var user = new User { Id = "user2", Name = "Bob", Age = 25 };
        await _context.Users.InsertAsync(user);
        await _context.SaveChangesAsync();

        // Act
        user.Age = 26;
        user.Address = new Address { City = "Milan" };
        await _context.Users.UpdateAsync(user);
        await _context.SaveChangesAsync();

        // Assert
        var retrieved = _context.Users.FindById("user2");
        retrieved.ShouldNotBeNull();
        retrieved!.Age.ShouldBe(26);
        retrieved.Address?.City.ShouldBe("Milan");
    }

    /// <summary>
    /// Verifies that deleting a user removes the document.
    /// </summary>
    [Fact]
    public async Task Users_Delete_ShouldRemove()
    {
        // Arrange
        var user = new User { Id = "user3", Name = "Charlie", Age = 35 };
        await _context.Users.InsertAsync(user);
        await _context.SaveChangesAsync();

        // Act
        await _context.Users.DeleteAsync("user3");
        await _context.SaveChangesAsync();

        // Assert
        var retrieved = _context.Users.FindById("user3");
        retrieved.ShouldBeNull();
    }

    /// <summary>
    /// Verifies that inserting a todo list with items persists nested data.
    /// </summary>
    [Fact]
    public async Task TodoLists_InsertWithItems_ShouldPersist()
    {
        // Arrange
        var todoList = new TodoList
        {
            Id = "list1",
            Name = "Shopping",
            Items = new List<TodoItem>
            {
                new() { Task = "Buy milk", Completed = false },
                new() { Task = "Buy bread", Completed = true }
            }
        };

        // Act
        await _context.TodoLists.InsertAsync(todoList);
        await _context.SaveChangesAsync();

        // Assert
        var retrieved = _context.TodoLists.FindById("list1");
        retrieved.ShouldNotBeNull();
        retrieved!.Name.ShouldBe("Shopping");
        retrieved.Items.Count.ShouldBe(2);
        retrieved.Items.ShouldContain(i => i.Task == "Buy milk" && !i.Completed);
        retrieved.Items.ShouldContain(i => i.Task == "Buy bread" && i.Completed);
    }

    /// <summary>
    /// Verifies that updating todo items modifies the nested collection.
    /// </summary>
    [Fact]
    public async Task TodoLists_UpdateItems_ShouldModifyNestedCollection()
    {
        // Arrange
        var todoList = new TodoList
        {
            Id = "list2",
            Name = "Work Tasks",
            Items = new List<TodoItem>
            {
                new() { Task = "Write report", Completed = false }
            }
        };
        await _context.TodoLists.InsertAsync(todoList);
        await _context.SaveChangesAsync();

        // Act - Mark task as completed and add new task
        todoList.Items[0].Completed = true;
        todoList.Items.Add(new TodoItem { Task = "Review report", Completed = false });
        await _context.TodoLists.UpdateAsync(todoList);
        await _context.SaveChangesAsync();

        // Assert
        var retrieved = _context.TodoLists.FindById("list2");
        retrieved.ShouldNotBeNull();
        retrieved!.Items.Count.ShouldBe(2);
        retrieved.Items.First().Completed.ShouldBe(true);
        retrieved.Items.Last().Completed.ShouldBe(false);
    }

    /// <summary>
    /// Verifies that querying all users returns all inserted users.
    /// </summary>
    [Fact]
    public void Users_FindAll_ShouldReturnAllUsers()
    {
        // Arrange
        _context.Users.InsertAsync(new User { Id = "u1", Name = "User1", Age = 20 }).Wait();
        _context.Users.InsertAsync(new User { Id = "u2", Name = "User2", Age = 30 }).Wait();
        _context.Users.InsertAsync(new User { Id = "u3", Name = "User3", Age = 40 }).Wait();
        _context.SaveChangesAsync().Wait();

        // Act
        var allUsers = _context.Users.FindAll().ToList();

        // Assert
        allUsers.Count.ShouldBe(3);
        allUsers.Select(u => u.Name).ShouldContain("User1");
        allUsers.Select(u => u.Name).ShouldContain("User2");
        allUsers.Select(u => u.Name).ShouldContain("User3");
    }

    /// <summary>
    /// Verifies that predicate-based queries return only matching users.
    /// </summary>
    [Fact]
    public void Users_Find_WithPredicate_ShouldFilterCorrectly()
    {
        // Arrange
        _context.Users.InsertAsync(new User { Id = "f1", Name = "Young", Age = 18 }).Wait();
        _context.Users.InsertAsync(new User { Id = "f2", Name = "Adult", Age = 30 }).Wait();
        _context.Users.InsertAsync(new User { Id = "f3", Name = "Senior", Age = 65 }).Wait();
        _context.SaveChangesAsync().Wait();

        // Act
        var adults = _context.Users.Find(u => u.Age >= 30).ToList();

        // Assert
        adults.Count.ShouldBe(2);
        adults.Select(u => u.Name).ShouldContain("Adult");
        adults.Select(u => u.Name).ShouldContain("Senior");
    }
}
431 tests/ZB.MOM.WW.CBDDC.Sample.Console.Tests/SnapshotStoreTests.cs (Executable file)
@@ -0,0 +1,431 @@
using ZB.MOM.WW.CBDDC.Core;
using ZB.MOM.WW.CBDDC.Core.Network;
using ZB.MOM.WW.CBDDC.Core.Storage;
using ZB.MOM.WW.CBDDC.Core.Sync;
using ZB.MOM.WW.CBDDC.Persistence.BLite;
using Microsoft.Extensions.Logging.Abstractions;
using System.Text.Json;
using System.Text.Json.Nodes;
using Xunit;
using ZB.MOM.WW.CBDDC.Persistence;

namespace ZB.MOM.WW.CBDDC.Sample.Console.Tests;

public class SnapshotStoreTests : IDisposable
{
    private readonly string _testDbPath;
    private readonly SampleDbContext _context;
    private readonly SampleDocumentStore _documentStore;
    private readonly BLiteOplogStore<SampleDbContext> _oplogStore;
    private readonly BLitePeerConfigurationStore<SampleDbContext> _peerConfigStore;
    private readonly BLitePeerOplogConfirmationStore<SampleDbContext> _peerConfirmationStore;
    private readonly SnapshotStore _snapshotStore;
    private readonly IPeerNodeConfigurationProvider _configProvider;

    /// <summary>
    /// Initializes a new instance of the <see cref="SnapshotStoreTests"/> class.
    /// </summary>
    public SnapshotStoreTests()
    {
        _testDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-{Guid.NewGuid()}.blite");
        _context = new SampleDbContext(_testDbPath);
        _configProvider = CreateConfigProvider("test-node");
        var vectorClock = new VectorClockService();

        _documentStore = new SampleDocumentStore(_context, _configProvider, vectorClock, NullLogger<SampleDocumentStore>.Instance);
        var snapshotMetadataStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
            _context,
            NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
        _oplogStore = new BLiteOplogStore<SampleDbContext>(
            _context,
            _documentStore,
            new LastWriteWinsConflictResolver(),
            vectorClock,
            snapshotMetadataStore,
            NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
        _peerConfigStore = new BLitePeerConfigurationStore<SampleDbContext>(
            _context,
            NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
        _peerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
            _context,
            NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);

        _snapshotStore = new SnapshotStore(
            _documentStore,
            _peerConfigStore,
            _oplogStore,
            new LastWriteWinsConflictResolver(),
            NullLogger<SnapshotStore>.Instance,
            _peerConfirmationStore);
    }

    /// <summary>
    /// Verifies that creating a snapshot writes valid JSON to the output stream.
    /// </summary>
    [Fact]
    public async Task CreateSnapshotAsync_WritesValidJsonToStream()
    {
        // Arrange - Add some data
        var user = new User { Id = "user-1", Name = "Alice", Age = 30 };
        await _context.Users.InsertAsync(user);
        await _context.SaveChangesAsync();

        // Act - Create snapshot
        using var stream = new MemoryStream();
        await _snapshotStore.CreateSnapshotAsync(stream);

        // Assert - Stream should contain valid JSON
        (stream.Length > 0).ShouldBeTrue("Snapshot stream should not be empty");

        // Reset stream position and verify JSON is valid
        stream.Position = 0;
        var json = await new StreamReader(stream).ReadToEndAsync();

        string.IsNullOrWhiteSpace(json).ShouldBeFalse("Snapshot JSON should not be empty");
        json.Trim().ShouldStartWith("{");

        // Verify it's valid JSON by parsing
        var doc = JsonDocument.Parse(json);
        doc.ShouldNotBeNull();

        // Verify structure
        doc.RootElement.TryGetProperty("Version", out _).ShouldBeTrue("Should have Version property");
        doc.RootElement.TryGetProperty("Documents", out _).ShouldBeTrue("Should have Documents property");
        doc.RootElement.TryGetProperty("Oplog", out _).ShouldBeTrue("Should have Oplog property");
        doc.RootElement.TryGetProperty("PeerConfirmations", out _).ShouldBeTrue("Should have PeerConfirmations property");
    }

    /// <summary>
    /// Verifies that snapshot creation includes all persisted documents.
    /// </summary>
    [Fact]
    public async Task CreateSnapshotAsync_IncludesAllDocuments()
    {
        // Arrange - Add multiple documents
        await _context.Users.InsertAsync(new User { Id = "u1", Name = "User 1", Age = 20 });
        await _context.Users.InsertAsync(new User { Id = "u2", Name = "User 2", Age = 25 });
        await _context.TodoLists.InsertAsync(new TodoList
        {
            Id = "t1",
            Name = "My List",
            Items = [new TodoItem { Task = "Task 1", Completed = false }]
        });
        await _context.SaveChangesAsync();

        // Act
        using var stream = new MemoryStream();
        await _snapshotStore.CreateSnapshotAsync(stream);

        // Assert
        stream.Position = 0;
        var json = await new StreamReader(stream).ReadToEndAsync();
        var doc = JsonDocument.Parse(json);

        var documents = doc.RootElement.GetProperty("Documents");
        documents.GetArrayLength().ShouldBe(3);
    }

    /// <summary>
    /// Verifies that creating and replacing a snapshot preserves document data.
    /// </summary>
    [Fact]
    public async Task RoundTrip_CreateAndReplace_PreservesData()
    {
        // Arrange - Add data to source
        var originalUser = new User { Id = "user-rt", Name = "RoundTrip User", Age = 42 };
        await _context.Users.InsertAsync(originalUser);
        await _peerConfirmationStore.UpdateConfirmationAsync(
            "peer-rt",
            "source-rt",
            new HlcTimestamp(500, 2, "source-rt"),
            "hash-rt");
        await _context.SaveChangesAsync();

        // Create snapshot
        using var snapshotStream = new MemoryStream();
        await _snapshotStore.CreateSnapshotAsync(snapshotStream);
        snapshotStream.Position = 0;
        var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();
        var snapshotDoc = JsonDocument.Parse(snapshotJson);
        snapshotDoc.RootElement.GetProperty("PeerConfirmations").GetArrayLength().ShouldBe(1);
        snapshotStream.Position = 0;

        // Create a new context/stores (simulating a different node)
        var newDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-target-{Guid.NewGuid()}.blite");
        try
        {
            using var newContext = new SampleDbContext(newDbPath);
            var newConfigProvider = CreateConfigProvider("test-new-node");
            var newVectorClock = new VectorClockService();
            var newDocStore = new SampleDocumentStore(newContext, newConfigProvider, newVectorClock, NullLogger<SampleDocumentStore>.Instance);
            var newSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
                newContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
            var newOplogStore = new BLiteOplogStore<SampleDbContext>(
                newContext, newDocStore, new LastWriteWinsConflictResolver(),
                newVectorClock,
                newSnapshotMetaStore,
                NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
            var newPeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
                newContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
            var newPeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
                newContext,
                NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);

            var newSnapshotStore = new SnapshotStore(
                newDocStore,
                newPeerStore,
                newOplogStore,
                new LastWriteWinsConflictResolver(),
                NullLogger<SnapshotStore>.Instance,
                newPeerConfirmationStore);

            // Act - Replace database with snapshot
            await newSnapshotStore.ReplaceDatabaseAsync(snapshotStream);

            // Assert - Data should be restored
            var restoredUser = newContext.Users.FindById("user-rt");
            restoredUser.ShouldNotBeNull();
            restoredUser.Name.ShouldBe("RoundTrip User");
            restoredUser.Age.ShouldBe(42);

            var restoredConfirmations = (await newPeerConfirmationStore.GetConfirmationsAsync()).ToList();
            restoredConfirmations.Count.ShouldBe(1);
            restoredConfirmations[0].PeerNodeId.ShouldBe("peer-rt");
            restoredConfirmations[0].SourceNodeId.ShouldBe("source-rt");
            restoredConfirmations[0].ConfirmedWall.ShouldBe(500);
            restoredConfirmations[0].ConfirmedLogic.ShouldBe(2);
            restoredConfirmations[0].ConfirmedHash.ShouldBe("hash-rt");
        }
        finally
        {
            if (File.Exists(newDbPath))
                try { File.Delete(newDbPath); } catch { }
        }
    }

    /// <summary>
    /// Verifies that merging a snapshot preserves existing data and adds new data.
    /// </summary>
    [Fact]
    public async Task MergeSnapshotAsync_MergesWithExistingData()
    {
        // Arrange - Add initial data
        await _context.Users.InsertAsync(new User { Id = "existing", Name = "Existing User", Age = 30 });
        await _peerConfirmationStore.UpdateConfirmationAsync(
            "peer-merge",
            "source-a",
            new HlcTimestamp(100, 0, "source-a"),
            "target-hash-old");
        await _peerConfirmationStore.UpdateConfirmationAsync(
            "peer-local-only",
            "source-local",
            new HlcTimestamp(50, 0, "source-local"),
            "target-local-hash");
        await _context.SaveChangesAsync();

        // Create snapshot with different data
        var sourceDbPath = Path.Combine(Path.GetTempPath(), $"test-snapshot-source-{Guid.NewGuid()}.blite");
        MemoryStream snapshotStream;

        try
        {
            using var sourceContext = new SampleDbContext(sourceDbPath);
            await sourceContext.Users.InsertAsync(new User { Id = "new-user", Name = "New User", Age = 25 });
            await sourceContext.SaveChangesAsync();

            var sourceConfigProvider = CreateConfigProvider("test-source-node");
            var sourceVectorClock = new VectorClockService();
            var sourceDocStore = new SampleDocumentStore(sourceContext, sourceConfigProvider, sourceVectorClock, NullLogger<SampleDocumentStore>.Instance);
            var sourceSnapshotMetaStore = new BLiteSnapshotMetadataStore<SampleDbContext>(
                sourceContext, NullLogger<BLiteSnapshotMetadataStore<SampleDbContext>>.Instance);
            var sourceOplogStore = new BLiteOplogStore<SampleDbContext>(
                sourceContext, sourceDocStore, new LastWriteWinsConflictResolver(),
                sourceVectorClock,
                sourceSnapshotMetaStore,
                NullLogger<BLiteOplogStore<SampleDbContext>>.Instance);
            var sourcePeerStore = new BLitePeerConfigurationStore<SampleDbContext>(
                sourceContext, NullLogger<BLitePeerConfigurationStore<SampleDbContext>>.Instance);
            var sourcePeerConfirmationStore = new BLitePeerOplogConfirmationStore<SampleDbContext>(
                sourceContext,
                NullLogger<BLitePeerOplogConfirmationStore<SampleDbContext>>.Instance);
            await sourcePeerConfirmationStore.UpdateConfirmationAsync(
                "peer-merge",
                "source-a",
                new HlcTimestamp(200, 1, "source-a"),
                "source-hash-new");
            await sourcePeerConfirmationStore.UpdateConfirmationAsync(
                "peer-merge",
                "source-b",
                new HlcTimestamp(300, 0, "source-b"),
                "source-hash-b");

            var sourceSnapshotStore = new SnapshotStore(
                sourceDocStore,
                sourcePeerStore,
                sourceOplogStore,
                new LastWriteWinsConflictResolver(),
                NullLogger<SnapshotStore>.Instance,
                sourcePeerConfirmationStore);

            snapshotStream = new MemoryStream();
            await sourceSnapshotStore.CreateSnapshotAsync(snapshotStream);
            snapshotStream.Position = 0;
        }
        finally
        {
            if (File.Exists(sourceDbPath))
                try { File.Delete(sourceDbPath); } catch { }
        }

        // Act - Merge snapshot into existing data
        await _snapshotStore.MergeSnapshotAsync(snapshotStream);

        // Assert - Both users should exist
        var existingUser = _context.Users.FindById("existing");
        var newUser = _context.Users.FindById("new-user");

        existingUser.ShouldNotBeNull();
        newUser.ShouldNotBeNull();
        existingUser.Name.ShouldBe("Existing User");
        newUser.Name.ShouldBe("New User");

        var confirmations = (await _peerConfirmationStore.GetConfirmationsAsync())
            .OrderBy(c => c.PeerNodeId)
            .ThenBy(c => c.SourceNodeId)
            .ToList();

        confirmations.Count.ShouldBe(3);
        confirmations[0].PeerNodeId.ShouldBe("peer-local-only");
        confirmations[0].SourceNodeId.ShouldBe("source-local");
        confirmations[0].ConfirmedWall.ShouldBe(50);
        confirmations[0].ConfirmedHash.ShouldBe("target-local-hash");

        confirmations[1].PeerNodeId.ShouldBe("peer-merge");
        confirmations[1].SourceNodeId.ShouldBe("source-a");
        confirmations[1].ConfirmedWall.ShouldBe(200);
        confirmations[1].ConfirmedLogic.ShouldBe(1);
        confirmations[1].ConfirmedHash.ShouldBe("source-hash-new");

        confirmations[2].PeerNodeId.ShouldBe("peer-merge");
        confirmations[2].SourceNodeId.ShouldBe("source-b");
        confirmations[2].ConfirmedWall.ShouldBe(300);
        confirmations[2].ConfirmedHash.ShouldBe("source-hash-b");
    }

    /// <summary>
    /// Verifies that replace can consume legacy snapshots that do not include peer confirmations.
    /// </summary>
    [Fact]
    public async Task ReplaceDatabaseAsync_LegacySnapshotWithoutPeerConfirmations_IsSupported()
    {
        // Arrange
        await _context.Users.InsertAsync(new User { Id = "legacy-user", Name = "Legacy User", Age = 33 });
        await _context.SaveChangesAsync();

        using var snapshotStream = new MemoryStream();
        await _snapshotStore.CreateSnapshotAsync(snapshotStream);
        snapshotStream.Position = 0;
        var snapshotJson = await new StreamReader(snapshotStream).ReadToEndAsync();

        var legacySnapshot = JsonNode.Parse(snapshotJson)!.AsObject();
        legacySnapshot.Remove("PeerConfirmations");

        using var legacyStream = new MemoryStream();
        await using (var writer = new Utf8JsonWriter(legacyStream))
        {
            legacySnapshot.WriteTo(writer);
        }
        legacyStream.Position = 0;

        // Act
        await _snapshotStore.ReplaceDatabaseAsync(legacyStream);

        // Assert
        _context.Users.FindById("legacy-user").ShouldNotBeNull();
        (await _peerConfirmationStore.GetConfirmationsAsync()).Count().ShouldBe(0);
    }

    /// <summary>
    /// Verifies that snapshot creation succeeds for an empty database.
    /// </summary>
    [Fact]
    public async Task CreateSnapshotAsync_HandlesEmptyDatabase()
    {
        // Act - Create snapshot from empty database
        using var stream = new MemoryStream();
        await _snapshotStore.CreateSnapshotAsync(stream);

        // Assert - Should still produce valid JSON
        (stream.Length > 0).ShouldBeTrue();

        stream.Position = 0;
        var json = await new StreamReader(stream).ReadToEndAsync();
        var doc = JsonDocument.Parse(json);

        var documents = doc.RootElement.GetProperty("Documents");
        documents.GetArrayLength().ShouldBe(0);
    }

    /// <summary>
    /// Verifies that snapshot creation includes oplog entries.
    /// </summary>
    [Fact]
    public async Task CreateSnapshotAsync_IncludesOplogEntries()
    {
        // Arrange - Create some oplog entries via document changes
        await _context.Users.InsertAsync(new User { Id = "op-user", Name = "Oplog User", Age = 20 });
        await _context.SaveChangesAsync();

        // Manually add an oplog entry to ensure it's captured
        var oplogEntry = new OplogEntry(
            "Users",
            "manual-key",
            OperationType.Put,
            JsonDocument.Parse("{\"test\": true}").RootElement,
            new HlcTimestamp(DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(), 0, "test-node"),
            ""
        );
        await _oplogStore.AppendOplogEntryAsync(oplogEntry);

        // Act
        using var stream = new MemoryStream();
        await _snapshotStore.CreateSnapshotAsync(stream);

        // Assert
        stream.Position = 0;
        var json = await new StreamReader(stream).ReadToEndAsync();
        var doc = JsonDocument.Parse(json);

        var oplog = doc.RootElement.GetProperty("Oplog");
        (oplog.GetArrayLength() >= 1).ShouldBeTrue("Should have at least one oplog entry");
    }

    /// <summary>
    /// Releases resources created for test execution.
    /// </summary>
    public void Dispose()
    {
        _documentStore?.Dispose();
        _context?.Dispose();

        if (File.Exists(_testDbPath))
        {
            try { File.Delete(_testDbPath); } catch { }
        }
    }

    private static IPeerNodeConfigurationProvider CreateConfigProvider(string nodeId)
    {
        var configProvider = Substitute.For<IPeerNodeConfigurationProvider>();
        configProvider.GetConfiguration().Returns(new PeerNodeConfiguration
        {
            NodeId = nodeId,
            TcpPort = 5000,
            AuthToken = "test-token",
            OplogRetentionHours = 24,
            MaintenanceIntervalMinutes = 60
        });
        return configProvider;
    }
}
@@ -0,0 +1,32 @@
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <AssemblyName>ZB.MOM.WW.CBDDC.Sample.Console.Tests</AssemblyName>
    <RootNamespace>ZB.MOM.WW.CBDDC.Sample.Console.Tests</RootNamespace>
    <PackageId>ZB.MOM.WW.CBDDC.Sample.Console.Tests</PackageId>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <NoWarn>$(NoWarn);xUnit1031;xUnit1051</NoWarn>
    <IsPackable>false</IsPackable>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="coverlet.collector" Version="6.0.4" />
    <PackageReference Include="NSubstitute" Version="5.3.0" />
    <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.14.1" />
    <PackageReference Include="Shouldly" Version="4.3.0" />
    <PackageReference Include="xunit.runner.visualstudio" Version="3.1.4" />
    <PackageReference Include="xunit.v3" Version="3.2.0" />
  </ItemGroup>

  <ItemGroup>
    <Using Include="Xunit" />
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\..\samples\ZB.MOM.WW.CBDDC.Sample.Console\ZB.MOM.WW.CBDDC.Sample.Console.csproj" />
    <ProjectReference Include="..\..\src\ZB.MOM.WW.CBDDC.Persistence\ZB.MOM.WW.CBDDC.Persistence.csproj" />
  </ItemGroup>

</Project>