Implement checkpoint modes with docs/tests and reorganize project file layout
All checks were successful
NuGet Publish / build-and-pack (push) Successful in 46s
NuGet Publish / publish-to-gitea (push) Successful in 53s

This commit is contained in:
Joseph Doherty
2026-02-21 07:56:36 -05:00
parent 3ffd468c79
commit 4c6aaa5a3f
96 changed files with 744 additions and 249 deletions

View File

@@ -0,0 +1,228 @@
using System.Reflection;
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Core.Transactions;
using ZB.MOM.WW.CBDD.Shared;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Exercises the WAL checkpoint modes (Truncate, Passive, Full, Restart) exposed
/// by the storage engine and the db-context wrappers, including async and
/// post-checkpoint recovery behavior.
/// </summary>
public class CheckpointModeTests
{
    /// <summary>
    /// Verifies default checkpoint mode truncates WAL.
    /// </summary>
    [Fact]
    public void Checkpoint_Default_ShouldUseTruncate()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            db.Users.Insert(new User { Name = "checkpoint-default", Age = 42 });
            db.SaveChanges();
            // A committed write must leave bytes in the WAL before we checkpoint.
            db.Storage.GetWalSize().ShouldBeGreaterThan(0);
            var result = db.Checkpoint();
            // No explicit mode: Truncate is expected to be the default.
            result.Mode.ShouldBe(CheckpointMode.Truncate);
            result.Executed.ShouldBeTrue();
            result.Truncated.ShouldBeTrue();
            db.Storage.GetWalSize().ShouldBe(0);
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }
    /// <summary>
    /// Verifies passive mode skips when checkpoint lock is contended.
    /// </summary>
    [Fact]
    public void Checkpoint_Passive_ShouldSkip_WhenLockIsContended()
    {
        var dbPath = NewDbPath();
        try
        {
            using var storage = new StorageEngine(dbPath, PageFileConfig.Default);
            // Hold the engine's commit gate so the passive checkpoint sees contention.
            var gate = GetCommitGate(storage);
            gate.Wait(TestContext.Current.CancellationToken);
            try
            {
                var result = storage.Checkpoint(CheckpointMode.Passive);
                result.Mode.ShouldBe(CheckpointMode.Passive);
                // Passive mode must back off instead of blocking: nothing executed.
                result.Executed.ShouldBeFalse();
                result.Truncated.ShouldBeFalse();
                result.Restarted.ShouldBeFalse();
            }
            finally
            {
                // Always release so engine disposal is not blocked by a held gate.
                gate.Release();
            }
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }
    /// <summary>
    /// Verifies full checkpoint applies data and appends a checkpoint marker without truncating WAL.
    /// </summary>
    [Fact]
    public void Checkpoint_Full_ShouldAppendMarker_AndPreserveWal()
    {
        var dbPath = NewDbPath();
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        try
        {
            using (var db = new TestDbContext(dbPath))
            {
                db.Users.Insert(new User { Name = "checkpoint-full", Age = 50 });
                db.SaveChanges();
                var walBefore = db.Storage.GetWalSize();
                walBefore.ShouldBeGreaterThan(0);
                var result = db.Checkpoint(CheckpointMode.Full);
                result.Mode.ShouldBe(CheckpointMode.Full);
                result.Executed.ShouldBeTrue();
                // Full mode preserves the WAL: no truncation, size stays non-zero.
                result.Truncated.ShouldBeFalse();
                result.WalBytesAfter.ShouldBeGreaterThan(0);
                db.Storage.GetWalSize().ShouldBeGreaterThan(0);
            }
            // Re-open the WAL directly (db disposed above) and look for the marker record.
            using var wal = new WriteAheadLog(walPath);
            wal.ReadAll().Any(r => r.Type == WalRecordType.Checkpoint).ShouldBeTrue();
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }
    /// <summary>
    /// Verifies restart checkpoint clears WAL and allows subsequent writes.
    /// </summary>
    [Fact]
    public void Checkpoint_Restart_ShouldResetWal_AndAcceptNewWrites()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            db.Users.Insert(new User { Name = "restart-before", Age = 30 });
            db.SaveChanges();
            db.Storage.GetWalSize().ShouldBeGreaterThan(0);
            var result = db.Checkpoint(CheckpointMode.Restart);
            result.Mode.ShouldBe(CheckpointMode.Restart);
            result.Executed.ShouldBeTrue();
            result.Truncated.ShouldBeTrue();
            result.Restarted.ShouldBeTrue();
            db.Storage.GetWalSize().ShouldBe(0);
            // The WAL must accept fresh commits after the restart boundary.
            db.Users.Insert(new User { Name = "restart-after", Age = 31 });
            db.SaveChanges();
            db.Storage.GetWalSize().ShouldBeGreaterThan(0);
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }
    /// <summary>
    /// Verifies recovery remains deterministic after a full checkpoint boundary.
    /// </summary>
    [Fact]
    public void Recover_AfterFullCheckpoint_ShouldApplyLatestCommitDeterministically()
    {
        var dbPath = NewDbPath();
        try
        {
            uint pageId;
            using (var storage = new StorageEngine(dbPath, PageFileConfig.Default))
            {
                pageId = storage.AllocatePage();
                // First commit writes marker byte 1, then a full checkpoint, then
                // a second commit writes marker byte 2 to the same page.
                using (var tx1 = storage.BeginTransaction())
                {
                    var first = new byte[storage.PageSize];
                    first[0] = 1;
                    storage.WritePage(pageId, tx1.TransactionId, first);
                    tx1.Commit();
                }
                storage.Checkpoint(CheckpointMode.Full);
                using (var tx2 = storage.BeginTransaction())
                {
                    var second = new byte[storage.PageSize];
                    second[0] = 2;
                    storage.WritePage(pageId, tx2.TransactionId, second);
                    tx2.Commit();
                }
            }
            // Re-opening triggers recovery; the post-checkpoint commit must win.
            using (var recovered = new StorageEngine(dbPath, PageFileConfig.Default))
            {
                var buffer = new byte[recovered.PageSize];
                recovered.ReadPage(pageId, 0, buffer);
                buffer[0].ShouldBe((byte)2);
                recovered.GetWalSize().ShouldBe(0);
            }
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }
    /// <summary>
    /// Verifies asynchronous mode-based checkpoints return expected result metadata.
    /// </summary>
    [Fact]
    public async Task CheckpointAsync_Full_ShouldReturnResult()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            db.Users.Insert(new User { Name = "checkpoint-async", Age = 38 });
            db.SaveChanges();
            var result = await db.CheckpointAsync(CheckpointMode.Full, TestContext.Current.CancellationToken);
            result.Mode.ShouldBe(CheckpointMode.Full);
            result.Executed.ShouldBeTrue();
            result.Truncated.ShouldBeFalse();
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }
    /// <summary>
    /// Extracts the engine's private commit semaphore so tests can simulate contention.
    /// NOTE(review): reflection-coupled to StorageEngine internals — assumes the
    /// private field is named "_commitLock"; update if the engine renames it.
    /// </summary>
    private static SemaphoreSlim GetCommitGate(StorageEngine storage)
    {
        var field = typeof(StorageEngine).GetField("_commitLock", BindingFlags.Instance | BindingFlags.NonPublic);
        field.ShouldNotBeNull();
        return (SemaphoreSlim)field!.GetValue(storage)!;
    }
    /// <summary>
    /// Builds a unique temp-file path so parallel tests never collide.
    /// </summary>
    private static string NewDbPath()
        => Path.Combine(Path.GetTempPath(), $"checkpoint_mode_{Guid.NewGuid():N}.db");
    /// <summary>
    /// Deletes the database file plus its WAL and compaction-marker companions.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        if (File.Exists(dbPath)) File.Delete(dbPath);
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        if (File.Exists(walPath)) File.Delete(walPath);
        var markerPath = $"{dbPath}.compact.state";
        if (File.Exists(markerPath)) File.Delete(markerPath);
    }
}

View File

@@ -0,0 +1,229 @@
using ZB.MOM.WW.CBDD.Core;
using ZB.MOM.WW.CBDD.Core.Storage;
using System.Text;
using Xunit;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Tests the slotted dictionary page layout: initialization, sorted insert,
/// lookup, overflow behavior, and cross-page (chained) lookup/enumeration.
/// </summary>
public class DictionaryPageTests
{
    private const int PageSize = 16384;

    /// <summary>
    /// Verifies dictionary page initialization sets expected defaults.
    /// </summary>
    [Fact]
    public void Initialize_ShouldSetupEmptyPage()
    {
        var page = new byte[PageSize];
        DictionaryPage.Initialize(page, 1);
        var header = PageHeader.ReadFrom(page);
        header.PageType.ShouldBe(PageType.Dictionary);
        header.PageId.ShouldBe(1u);
        // Entry count lives at offset 32 (CountOffset) and must start at zero.
        var count = BitConverter.ToUInt16(page, 32); // CountOffset
        count.ShouldBe((ushort)0);
        // Payload space grows downward from the end of the page.
        var freeSpaceEnd = BitConverter.ToUInt16(page, 34); // FreeSpaceEndOffset
        freeSpaceEnd.ShouldBe((ushort)PageSize);
    }

    /// <summary>
    /// Verifies insert adds entries and keeps them ordered.
    /// </summary>
    [Fact]
    public void Insert_ShouldAddEntryAndSort()
    {
        var page = new byte[PageSize];
        DictionaryPage.Initialize(page, 1);
        // Insert out of order; the page must keep entries key-sorted.
        bool inserted = DictionaryPage.Insert(page, "B", 20);
        inserted.ShouldBeTrue();
        inserted = DictionaryPage.Insert(page, "A", 10); // should land before B
        inserted.ShouldBeTrue();
        inserted = DictionaryPage.Insert(page, "C", 30); // should land after B
        inserted.ShouldBeTrue();
        // Verify order
        var entries = DictionaryPage.GetAll(page).ToList();
        entries.Count.ShouldBe(3);
        entries[0].Key.ShouldBe("A");
        entries[0].Value.ShouldBe((ushort)10);
        entries[1].Key.ShouldBe("B");
        entries[1].Value.ShouldBe((ushort)20);
        entries[2].Key.ShouldBe("C");
        entries[2].Value.ShouldBe((ushort)30);
    }

    /// <summary>
    /// Verifies key lookup returns the expected value.
    /// </summary>
    [Fact]
    public void TryFind_ShouldReturnCorrectValue()
    {
        var page = new byte[PageSize];
        DictionaryPage.Initialize(page, 1);
        DictionaryPage.Insert(page, "Key1", 100);
        DictionaryPage.Insert(page, "Key2", 200);
        DictionaryPage.Insert(page, "Key3", 300);
        bool found = DictionaryPage.TryFind(page, Encoding.UTF8.GetBytes("Key2"), out ushort value);
        found.ShouldBeTrue();
        value.ShouldBe((ushort)200);
        // A missing key must report not-found rather than a stale value.
        found = DictionaryPage.TryFind(page, Encoding.UTF8.GetBytes("Key999"), out value);
        found.ShouldBeFalse();
    }

    /// <summary>
    /// Verifies inserts fail when the page is full.
    /// </summary>
    [Fact]
    public void Overflow_ShouldReturnFalse_WhenFull()
    {
        var page = new byte[PageSize];
        DictionaryPage.Initialize(page, 1);
        string bigKey = new string('X', 250);
        int count = 0;
        while (true)
        {
            // Use unique keys
            var key = bigKey + count;
            if (!DictionaryPage.Insert(page, key, (ushort)count))
            {
                // Should fail here
                break;
            }
            count++;
            // Safety valve: a 16KB page cannot hold 1000 ~250-byte keys.
            if (count > 1000) throw new ShouldAssertException("Should have filled the page much earlier");
        }
        // Now page is full enough that `bigKey` (250 bytes) shouldn't fit.
        // We can't guarantee a small key won't fit (fragmentation/remaining space),
        // but a key of the SAME size that triggered the break should definitely fail.
        bool inserted = DictionaryPage.Insert(page, bigKey + "X", 9999);
        inserted.ShouldBeFalse();
    }

    /// <summary>
    /// Verifies global lookup finds keys across chained dictionary pages.
    /// </summary>
    [Fact]
    public void Chaining_ShouldFindKeysInLinkedPages()
    {
        var dbPath = Path.Combine(Path.GetTempPath(), $"test_dict_chain_{Guid.NewGuid()}.db");
        try
        {
            // `using var` disposes the engine when this try block exits, BEFORE the
            // finally runs — so the files are unlocked when CleanupFiles deletes them.
            // (Previously the engine was disposed both explicitly and by `using`,
            // and cleanup was skipped entirely when an assertion failed.)
            using var storage = new StorageEngine(dbPath, PageFileConfig.Default);
            // 1. Create First Page
            var page1Id = storage.AllocatePage();
            var pageBuffer = new byte[storage.PageSize];
            DictionaryPage.Initialize(pageBuffer, page1Id);
            // Fill Page 1
            DictionaryPage.Insert(pageBuffer, "Key1", 100);
            DictionaryPage.Insert(pageBuffer, "KeyA", 200);
            // 2. Create Second Page
            var page2Id = storage.AllocatePage();
            var page2Buffer = new byte[storage.PageSize];
            DictionaryPage.Initialize(page2Buffer, page2Id);
            // Fill Page 2
            DictionaryPage.Insert(page2Buffer, "Key2", 300);
            DictionaryPage.Insert(page2Buffer, "KeyB", 400);
            // 3. Link Page 1 -> Page 2
            var header1 = PageHeader.ReadFrom(pageBuffer);
            header1.NextPageId = page2Id;
            header1.WriteTo(pageBuffer);
            // 4. Write pages to storage
            storage.WritePageImmediate(page1Id, pageBuffer);
            storage.WritePageImmediate(page2Id, page2Buffer);
            // 5. Test Global Find: head page, linked page, and a miss.
            bool found = DictionaryPage.TryFindGlobal(storage, page1Id, "Key1", out ushort val);
            found.ShouldBeTrue();
            val.ShouldBe((ushort)100);
            found = DictionaryPage.TryFindGlobal(storage, page1Id, "KeyB", out val);
            found.ShouldBeTrue();
            val.ShouldBe((ushort)400);
            found = DictionaryPage.TryFindGlobal(storage, page1Id, "KeyMissing", out val);
            found.ShouldBeFalse();
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Verifies global enumeration returns keys across chained dictionary pages.
    /// </summary>
    [Fact]
    public void FindAllGlobal_ShouldRetrieveAllKeys()
    {
        var dbPath = Path.Combine(Path.GetTempPath(), $"test_dict_findall_{Guid.NewGuid()}.db");
        try
        {
            // Same lifecycle pattern as Chaining test: dispose-before-finally so
            // cleanup always runs and never hits a locked file.
            using var storage = new StorageEngine(dbPath, PageFileConfig.Default);
            // 1. Create Chain of 3 Pages
            var page1Id = storage.AllocatePage();
            var page2Id = storage.AllocatePage();
            var page3Id = storage.AllocatePage();
            var buf = new byte[storage.PageSize];
            // Page 1 (links to page 2)
            DictionaryPage.Initialize(buf, page1Id);
            DictionaryPage.Insert(buf, "P1_A", 10);
            DictionaryPage.Insert(buf, "P1_B", 11);
            var h1 = PageHeader.ReadFrom(buf);
            h1.NextPageId = page2Id;
            h1.WriteTo(buf);
            storage.WritePageImmediate(page1Id, buf);
            // Page 2 (links to page 3)
            DictionaryPage.Initialize(buf, page2Id);
            DictionaryPage.Insert(buf, "P2_A", 20);
            var h2 = PageHeader.ReadFrom(buf);
            h2.NextPageId = page3Id;
            h2.WriteTo(buf);
            storage.WritePageImmediate(page2Id, buf);
            // Page 3 (tail of the chain)
            DictionaryPage.Initialize(buf, page3Id);
            DictionaryPage.Insert(buf, "P3_A", 30);
            DictionaryPage.Insert(buf, "P3_B", 31);
            DictionaryPage.Insert(buf, "P3_C", 32);
            storage.WritePageImmediate(page3Id, buf);
            // 2. Execute FindAllGlobal
            var allEntries = DictionaryPage.FindAllGlobal(storage, page1Id).ToList();
            // 3. Verify all 6 entries across all 3 pages are returned.
            allEntries.Count.ShouldBe(6);
            allEntries.ShouldContain(e => e.Key == "P1_A" && e.Value == 10);
            allEntries.ShouldContain(e => e.Key == "P2_A" && e.Value == 20);
            allEntries.ShouldContain(e => e.Key == "P3_C" && e.Value == 32);
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Deletes the database file and its WAL companion, if present.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        if (File.Exists(dbPath)) File.Delete(dbPath);
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        if (File.Exists(walPath)) File.Delete(walPath);
    }
}

View File

@@ -0,0 +1,168 @@
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Collections;
using ZB.MOM.WW.CBDD.Core.Storage;
using Xunit;
using System.Collections.Generic;
using System.Linq;
using ZB.MOM.WW.CBDD.Bson.Schema;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Tests the key-dictionary subsystem of the storage engine: mapper key
/// registration, unique id assignment, nested-schema keys, and persistence
/// of dictionary entries across engine restarts.
/// </summary>
public class DictionaryPersistenceTests : IDisposable
{
    // Per-test-class temp database path and its owning engine (xUnit creates a
    // fresh instance of this class per test, so each test gets its own files).
    private readonly string _dbPath;
    private readonly StorageEngine _storage;
    /// <summary>
    /// Initializes a new instance of the <see cref="DictionaryPersistenceTests"/> class.
    /// </summary>
    public DictionaryPersistenceTests()
    {
        _dbPath = Path.Combine(Path.GetTempPath(), $"cbdd_dict_{Guid.NewGuid():N}.db");
        _storage = new StorageEngine(_dbPath, PageFileConfig.Default);
    }
    /// <summary>
    /// Disposes test resources and removes temporary files.
    /// </summary>
    public void Dispose()
    {
        // NOTE(review): Dictionary_Keys_Persist_Across_Restarts already disposes
        // _storage mid-test, so this is a second Dispose call — assumes
        // StorageEngine.Dispose is idempotent; confirm against the engine.
        _storage.Dispose();
        if (File.Exists(_dbPath)) File.Delete(_dbPath);
        var walPath = Path.ChangeExtension(_dbPath, ".wal");
        if (File.Exists(walPath)) File.Delete(walPath);
    }
    // Minimal mapper stub: only CollectionName/UsedKeys/GetSchema matter for key
    // registration; all (de)serialization members intentionally throw.
    private class MockMapper : DocumentMapperBase<ObjectId, Dictionary<string, object>>
    {
        private readonly string _collectionName;
        private readonly List<string> _keys;
        /// <summary>
        /// Initializes a new instance of the <see cref="MockMapper"/> class.
        /// </summary>
        /// <param name="name">The collection name.</param>
        /// <param name="keys">The mapper keys.</param>
        public MockMapper(string name, params string[] keys)
        {
            _collectionName = name;
            _keys = keys.ToList();
        }
        /// <inheritdoc />
        public override string CollectionName => _collectionName;
        /// <inheritdoc />
        public override IEnumerable<string> UsedKeys => _keys;
        /// <inheritdoc />
        public override BsonSchema GetSchema() => new BsonSchema { Title = _collectionName };
        /// <inheritdoc />
        public override ObjectId GetId(Dictionary<string, object> entity) => throw new NotImplementedException();
        /// <inheritdoc />
        public override void SetId(Dictionary<string, object> entity, ObjectId id) => throw new NotImplementedException();
        /// <inheritdoc />
        public override int Serialize(Dictionary<string, object> entity, BsonSpanWriter writer) => throw new NotImplementedException();
        /// <inheritdoc />
        public override Dictionary<string, object> Deserialize(BsonSpanReader reader) => throw new NotImplementedException();
    }
    /// <summary>
    /// Verifies mapper registration adds all unique dictionary keys.
    /// </summary>
    [Fact]
    public void RegisterMappers_Registers_All_Unique_Keys()
    {
        // "Name" is shared by both mappers and must be registered only once.
        var mapper1 = new MockMapper("Coll1", "Name", "Age");
        var mapper2 = new MockMapper("Coll2", "Name", "Address", "City");
        _storage.RegisterMappers(new IDocumentMapper[] { mapper1, mapper2 });
        // Verify keys in cache (0 is treated as "not registered")
        _storage.GetOrAddDictionaryEntry("Name").ShouldNotBe((ushort)0);
        _storage.GetOrAddDictionaryEntry("Age").ShouldNotBe((ushort)0);
        _storage.GetOrAddDictionaryEntry("Address").ShouldNotBe((ushort)0);
        _storage.GetOrAddDictionaryEntry("City").ShouldNotBe((ushort)0);
        // Verify they have unique IDs (at least 4 unique IDs for 4 unique keys + internal ones)
        var ids = new HashSet<ushort>
        {
            _storage.GetOrAddDictionaryEntry("Name"),
            _storage.GetOrAddDictionaryEntry("Age"),
            _storage.GetOrAddDictionaryEntry("Address"),
            _storage.GetOrAddDictionaryEntry("City")
        };
        ids.Count.ShouldBe(4);
    }
    /// <summary>
    /// Verifies dictionary keys persist across storage restarts.
    /// </summary>
    [Fact]
    public void Dictionary_Keys_Persist_Across_Restarts()
    {
        var mapper = new MockMapper("Coll1", "PersistedKey");
        _storage.RegisterMappers(new IDocumentMapper[] { mapper });
        var originalId = _storage.GetOrAddDictionaryEntry("PersistedKey");
        originalId.ShouldNotBe((ushort)0);
        // Explicitly close the engine so the second instance can open the file.
        _storage.Dispose();
        // Re-open: the same key must resolve to the same persisted id, proving
        // the dictionary was durably written rather than rebuilt with new ids.
        using var storage2 = new StorageEngine(_dbPath, PageFileConfig.Default);
        var recoveredId = storage2.GetOrAddDictionaryEntry("PersistedKey");
        recoveredId.ShouldBe(originalId);
    }
    // Mapper whose schema contains a nested document field, used to check that
    // key registration walks nested schemas recursively.
    private class NestedMockMapper : DocumentMapperBase<ObjectId, object>
    {
        /// <inheritdoc />
        public override string CollectionName => "Nested";
        /// <inheritdoc />
        public override BsonSchema GetSchema()
        {
            var schema = new BsonSchema { Title = "Nested" };
            schema.Fields.Add(new BsonField
            {
                Name = "Top",
                Type = BsonType.Document,
                NestedSchema = new BsonSchema
                {
                    Fields = { new BsonField { Name = "Child", Type = BsonType.String } }
                }
            });
            return schema;
        }
        /// <inheritdoc />
        public override ObjectId GetId(object entity) => throw new NotImplementedException();
        /// <inheritdoc />
        public override void SetId(object entity, ObjectId id) => throw new NotImplementedException();
        /// <inheritdoc />
        public override int Serialize(object entity, BsonSpanWriter writer) => throw new NotImplementedException();
        /// <inheritdoc />
        public override object Deserialize(BsonSpanReader reader) => throw new NotImplementedException();
    }
    /// <summary>
    /// Verifies nested schema fields are registered as dictionary keys.
    /// </summary>
    [Fact]
    public void RegisterMappers_Handles_Nested_Keys()
    {
        var mapper = new NestedMockMapper();
        _storage.RegisterMappers(new IDocumentMapper[] { mapper });
        // Both the outer field and its nested child field must get ids.
        _storage.GetOrAddDictionaryEntry("Top").ShouldNotBe((ushort)0);
        _storage.GetOrAddDictionaryEntry("Child").ShouldNotBe((ushort)0);
    }
}

View File

@@ -0,0 +1,260 @@
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Collections;
using ZB.MOM.WW.CBDD.Core.Compression;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Core.Transactions;
using ZB.MOM.WW.CBDD.Shared;
using ZB.MOM.WW.CBDD.Shared.TestDbContext_TestDbContext_Mappers;
using System.IO.Compression;
using System.IO.MemoryMappedFiles;
using Xunit;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Tests documents larger than a page (overflow pages), buffer-growth retries
/// for very large payloads, and overflow interaction with compression.
/// </summary>
public class DocumentOverflowTests : IDisposable
{
    private readonly string _dbPath;
    private readonly Shared.TestDbContext _db;
    /// <summary>
    /// Initializes a new instance of the <see cref="DocumentOverflowTests"/> class.
    /// </summary>
    public DocumentOverflowTests()
    {
        _dbPath = Path.Combine(Path.GetTempPath(), $"test_overflow_{Guid.NewGuid()}.db");
        // Use default PageSize (16KB)
        _db = new Shared.TestDbContext(_dbPath);
    }
    /// <summary>
    /// Releases test resources.
    /// </summary>
    public void Dispose()
    {
        _db.Dispose();
        // Fix: previously only the .db file was deleted here, leaking the .wal
        // and .compact.state companions in the temp directory. Reuse the same
        // helper the compression tests use so all three files are removed.
        CleanupLocalFiles(_dbPath);
    }
    /// <summary>
    /// Verifies inserting a medium-sized document succeeds.
    /// </summary>
    [Fact]
    public void Insert_MediumDoc_64KB_ShouldSucceed()
    {
        // 20KB - Fits in 64KB buffer (First attempt)
        // But triggers overflow pages in storage (20KB > 16KB PageSize)
        var largeString = new string('A', 20 * 1024);
        var user = new User
        {
            Id = ObjectId.NewObjectId(),
            Name = largeString,
            Age = 10
        };
        var id = _db.Users.Insert(user);
        _db.SaveChanges();
        var retrieved = _db.Users.FindById(id);
        retrieved.ShouldNotBeNull();
        retrieved.Name.ShouldBe(largeString);
    }
    /// <summary>
    /// Verifies inserting a large document succeeds.
    /// </summary>
    [Fact]
    public void Insert_LargeDoc_100KB_ShouldSucceed()
    {
        // 100KB - Fails 64KB buffer, Retries with 2MB
        var largeString = new string('B', 100 * 1024);
        var user = new User
        {
            Id = ObjectId.NewObjectId(),
            Name = largeString,
            Age = 20
        };
        var id = _db.Users.Insert(user);
        _db.SaveChanges();
        var retrieved = _db.Users.FindById(id);
        retrieved.ShouldNotBeNull();
        retrieved.Name.ShouldBe(largeString);
    }
    /// <summary>
    /// Verifies inserting a very large document succeeds.
    /// </summary>
    [Fact]
    public void Insert_HugeDoc_3MB_ShouldSucceed()
    {
        // 3MB - Fails 64KB, Fails 2MB, Retries with 16MB
        var largeString = new string('C', 3 * 1024 * 1024);
        var user = new User
        {
            Id = ObjectId.NewObjectId(),
            Name = largeString,
            Age = 30
        };
        var id = _db.Users.Insert(user);
        _db.SaveChanges();
        var retrieved = _db.Users.FindById(id);
        retrieved.ShouldNotBeNull();
        retrieved.Name.Length.ShouldBe(largeString.Length);
        // Checking full string might be slow, length check + substring check is faster
        retrieved.Name.Substring(0, 100).ShouldBe(largeString.Substring(0, 100));
        retrieved.Name.Substring(retrieved.Name.Length - 100).ShouldBe(largeString.Substring(largeString.Length - 100));
    }
    /// <summary>
    /// Verifies updating from a small payload to a huge payload succeeds.
    /// </summary>
    [Fact]
    public void Update_SmallToHuge_ShouldSucceed()
    {
        // Insert Small
        var user = new User { Id = ObjectId.NewObjectId(), Name = "Small", Age = 1 };
        var id = _db.Users.Insert(user);
        _db.SaveChanges();
        // Update to Huge (3MB) — forces an in-place record to become overflow.
        var hugeString = new string('U', 3 * 1024 * 1024);
        user.Name = hugeString;
        var updated = _db.Users.Update(user);
        _db.SaveChanges();
        updated.ShouldBeTrue();
        var retrieved = _db.Users.FindById(id);
        retrieved.ShouldNotBeNull();
        retrieved.Name.Length.ShouldBe(hugeString.Length);
    }
    /// <summary>
    /// Verifies bulk inserts with mixed payload sizes succeed.
    /// </summary>
    [Fact]
    public void InsertBulk_MixedSizes_ShouldSucceed()
    {
        // Interleave small and overflow-sized documents in one bulk call.
        var users = new List<User>
        {
            new User { Id = ObjectId.NewObjectId(), Name = "Small 1", Age = 1 },
            new User { Id = ObjectId.NewObjectId(), Name = new string('M', 100 * 1024), Age = 2 }, // 100KB
            new User { Id = ObjectId.NewObjectId(), Name = "Small 2", Age = 3 },
            new User { Id = ObjectId.NewObjectId(), Name = new string('H', 3 * 1024 * 1024), Age = 4 } // 3MB
        };
        var ids = _db.Users.InsertBulk(users);
        ids.Count.ShouldBe(4);
        foreach (var u in users)
        {
            var r = _db.Users.FindById(u.Id);
            r.ShouldNotBeNull();
            r.Name.Length.ShouldBe(u.Name.Length);
        }
    }
    /// <summary>
    /// Verifies huge inserts succeed with compression enabled and small page configuration.
    /// </summary>
    [Fact]
    public void Insert_HugeDoc_WithCompressionEnabledAndSmallPages_ShouldSucceed()
    {
        var localDbPath = Path.Combine(Path.GetTempPath(), $"test_overflow_compression_{Guid.NewGuid():N}.db");
        // Thresholds of 0 force compression for every document regardless of size/savings.
        var options = new CompressionOptions
        {
            EnableCompression = true,
            MinSizeBytes = 0,
            MinSavingsPercent = 0,
            Codec = CompressionCodec.Brotli,
            Level = CompressionLevel.Fastest
        };
        try
        {
            using var db = new Shared.TestDbContext(localDbPath, TinyPageConfig(), options);
            var huge = new string('Z', 2 * 1024 * 1024);
            var id = db.Users.Insert(new User
            {
                Id = ObjectId.NewObjectId(),
                Name = huge,
                Age = 50
            });
            db.SaveChanges();
            var loaded = db.Users.FindById(id);
            loaded.ShouldNotBeNull();
            loaded.Name.ShouldBe(huge);
            db.GetCompressionStats().CompressedDocumentCount.ShouldBeGreaterThanOrEqualTo(1);
        }
        finally
        {
            CleanupLocalFiles(localDbPath);
        }
    }
    /// <summary>
    /// Verifies updates from huge to small payloads succeed with compression enabled.
    /// </summary>
    [Fact]
    public void Update_HugeToSmall_WithCompressionEnabled_ShouldSucceed()
    {
        var localDbPath = Path.Combine(Path.GetTempPath(), $"test_overflow_compression_update_{Guid.NewGuid():N}.db");
        var options = new CompressionOptions
        {
            EnableCompression = true,
            MinSizeBytes = 1024,
            MinSavingsPercent = 0,
            Codec = CompressionCodec.Deflate,
            Level = CompressionLevel.Fastest
        };
        try
        {
            using var db = new Shared.TestDbContext(localDbPath, TinyPageConfig(), options);
            var user = new User
            {
                Id = ObjectId.NewObjectId(),
                Name = new string('Q', 256 * 1024),
                Age = 44
            };
            var id = db.Users.Insert(user);
            db.SaveChanges();
            // Shrink back below both the page size and the compression threshold.
            user.Name = "small-after-overflow";
            db.Users.Update(user).ShouldBeTrue();
            db.SaveChanges();
            var loaded = db.Users.FindById(id);
            loaded.ShouldNotBeNull();
            loaded.Name.ShouldBe("small-after-overflow");
        }
        finally
        {
            CleanupLocalFiles(localDbPath);
        }
    }
    /// <summary>
    /// Builds a small-file page configuration (16KB pages, 1MB initial size).
    /// </summary>
    private static PageFileConfig TinyPageConfig()
    {
        return new PageFileConfig
        {
            PageSize = 16 * 1024,
            InitialFileSize = 1024 * 1024,
            Access = MemoryMappedFileAccess.ReadWrite
        };
    }
    /// <summary>
    /// Deletes the database file plus its WAL and compaction-marker companions.
    /// </summary>
    private static void CleanupLocalFiles(string dbPath)
    {
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        var markerPath = $"{dbPath}.compact.state";
        if (File.Exists(dbPath)) File.Delete(dbPath);
        if (File.Exists(walPath)) File.Delete(walPath);
        if (File.Exists(markerPath)) File.Delete(markerPath);
    }
}

View File

@@ -0,0 +1,166 @@
using System.IO.Compression;
using ZB.MOM.WW.CBDD.Core.Compression;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Shared;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Covers the maintenance surface of the database: page-usage / compression /
/// fragmentation diagnostics, and the compression migration (dry-run + apply).
/// </summary>
public class MaintenanceDiagnosticsAndMigrationTests
{
    /// <summary>
    /// Verifies diagnostics APIs return page usage, compression, and fragmentation data.
    /// </summary>
    [Fact]
    public void DiagnosticsApis_ShouldReturnPageUsageCompressionAndFragmentationData()
    {
        var databasePath = NewDbPath();
        try
        {
            // Zero thresholds force every document through the Brotli codec.
            var compressionOptions = new CompressionOptions
            {
                EnableCompression = true,
                MinSizeBytes = 0,
                MinSavingsPercent = 0,
                Codec = CompressionCodec.Brotli,
                Level = CompressionLevel.Fastest
            };
            using var context = new TestDbContext(databasePath, compressionOptions);
            // Seed enough sizeable rows that data pages and compression both engage.
            for (var index = 0; index < 40; index++)
            {
                context.Users.Insert(new User
                {
                    Name = BuildPayload(index, 9000),
                    Age = index
                });
            }
            context.SaveChanges();
            context.ForceCheckpoint();

            // Page usage grouped by page type must include populated data pages.
            var usageByType = context.GetPageUsageByPageType();
            usageByType.Count.ShouldBeGreaterThan(0);
            usageByType.Any(entry => entry.PageType == PageType.Data && entry.PageCount > 0).ShouldBeTrue();

            // Per-collection usage must surface the seeded "users" collection.
            var usageByCollection = context.GetPageUsageByCollection();
            usageByCollection.Any(entry => entry.CollectionName.Equals("users", StringComparison.OrdinalIgnoreCase)).ShouldBeTrue();

            // Compression ratios must report non-trivial before/after byte counts.
            var ratios = context.GetCompressionRatioByCollection();
            var usersRatio = ratios.First(entry => entry.CollectionName.Equals("users", StringComparison.OrdinalIgnoreCase));
            usersRatio.DocumentCount.ShouldBeGreaterThan(0);
            usersRatio.BytesBeforeCompression.ShouldBeGreaterThan(0);
            usersRatio.BytesAfterCompression.ShouldBeGreaterThan(0);

            var freeListSummary = context.GetFreeListSummary();
            freeListSummary.PageCount.ShouldBeGreaterThan(0u);

            var fragmentationMap = context.GetFragmentationMap();
            fragmentationMap.Pages.Count.ShouldBeGreaterThan(0);
        }
        finally
        {
            CleanupFiles(databasePath);
        }
    }

    /// <summary>
    /// Verifies compression migration dry-run and apply modes return deterministic stats and preserve data.
    /// </summary>
    [Fact]
    public void MigrateCompression_DryRunAndApply_ShouldReturnDeterministicStatsAndPreserveData()
    {
        var databasePath = NewDbPath();
        try
        {
            using var context = new TestDbContext(databasePath, CompressionOptions.Default);
            var insertedIds = new List<ZB.MOM.WW.CBDD.Bson.ObjectId>();
            for (var index = 0; index < 60; index++)
            {
                insertedIds.Add(context.Users.Insert(new User
                {
                    Name = BuildPayload(index, 12000),
                    Age = index % 17
                }));
            }
            context.SaveChanges();
            context.ForceCheckpoint();

            // Dry run: scan only, reporting estimated savings without rewriting.
            var preview = context.MigrateCompression(new CompressionMigrationOptions
            {
                DryRun = true,
                Codec = CompressionCodec.Deflate,
                Level = CompressionLevel.Fastest,
                MinSizeBytes = 0,
                MinSavingsPercent = 0,
                IncludeCollections = ["users"]
            });
            preview.DryRun.ShouldBeTrue();
            preview.DocumentsScanned.ShouldBeGreaterThan(0);
            preview.BytesBefore.ShouldBeGreaterThan(0);
            preview.BytesEstimatedAfter.ShouldBeGreaterThan(0);

            // Apply: same options, but this time documents are actually rewritten.
            var applied = context.MigrateCompression(new CompressionMigrationOptions
            {
                DryRun = false,
                Codec = CompressionCodec.Deflate,
                Level = CompressionLevel.Fastest,
                MinSizeBytes = 0,
                MinSavingsPercent = 0,
                IncludeCollections = ["users"]
            });
            applied.DryRun.ShouldBeFalse();
            applied.DocumentsScanned.ShouldBeGreaterThan(0);

            // Every inserted document must still round-trip with its full payload.
            foreach (var id in insertedIds)
            {
                var reloaded = context.Users.FindById(id);
                reloaded.ShouldNotBeNull();
                reloaded!.Name.Length.ShouldBeGreaterThan(1000);
            }
        }
        finally
        {
            CleanupFiles(databasePath);
        }
    }

    /// <summary>
    /// Builds a deterministic, compressible payload of roughly
    /// <paramref name="approxLength"/> characters seeded by <paramref name="seed"/>.
    /// </summary>
    private static string BuildPayload(int seed, int approxLength)
    {
        var payload = new System.Text.StringBuilder(approxLength + 128);
        for (var chunk = 0; payload.Length < approxLength; chunk++)
        {
            payload.Append("diag-migrate-")
                .Append(seed.ToString("D4"))
                .Append('-')
                .Append(chunk.ToString("D6"))
                .Append('|');
        }
        return payload.ToString();
    }

    /// <summary>
    /// Produces a collision-free temp path for one test's database file.
    /// </summary>
    private static string NewDbPath()
        => Path.Combine(Path.GetTempPath(), $"maint_diag_migrate_{Guid.NewGuid():N}.db");

    /// <summary>
    /// Deletes the database file and every maintenance side file it may have produced.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        string[] candidates =
        [
            dbPath,
            Path.ChangeExtension(dbPath, ".wal"),
            $"{dbPath}.compact.state",
            $"{dbPath}.compact.tmp",
            $"{dbPath}.compact.bak"
        ];
        foreach (var candidate in candidates)
        {
            if (File.Exists(candidate)) File.Delete(candidate);
        }
    }
}

View File

@@ -0,0 +1,111 @@
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Collections;
using ZB.MOM.WW.CBDD.Core.Indexing;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Core.Transactions;
using ZB.MOM.WW.CBDD.Shared;
using ZB.MOM.WW.CBDD.Shared.TestDbContext_TestDbContext_Mappers;
using Xunit;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Verifies that index metadata (definitions, uniqueness, property paths) is
/// durably persisted and correctly reloaded across storage/context restarts.
/// </summary>
public class MetadataPersistenceTests : IDisposable
{
    private readonly string _dbPath;
    private readonly string _walPath;
    /// <summary>
    /// Initializes a new instance of the <see cref="MetadataPersistenceTests"/> class.
    /// </summary>
    public MetadataPersistenceTests()
    {
        _dbPath = Path.Combine(Path.GetTempPath(), $"docdb_meta_{Guid.NewGuid()}.db");
        _walPath = Path.ChangeExtension(_dbPath, ".wal");
    }
    /// <summary>
    /// Tests index definitions are persisted and reloaded.
    /// </summary>
    [Fact]
    public void IndexDefinitions_ArePersisted_AndReloaded()
    {
        // 1. Create index in first session
        using (var storage = new StorageEngine(_dbPath, PageFileConfig.Default))
        {
            // Disable auto-checkpoint to ensure cleaner test tracing, though not strictly required
            var mapper = new ZB_MOM_WW_CBDD_Shared_UserMapper();
            var indexManager = new CollectionIndexManager<ObjectId, User>(storage, mapper, nameof(User));
            // Create 2 indexes: one named, one with an auto-generated name.
            indexManager.CreateIndex(u => u.Age, "idx_age");
            indexManager.CreateIndex(u => u.Name, unique: true); // name auto-generated
        }
        // 2. Re-open storage and verify indexes exist
        using (var storage = new StorageEngine(_dbPath, PageFileConfig.Default))
        {
            var mapper = new ZB_MOM_WW_CBDD_Shared_UserMapper();
            // Assuming Page 1 was allocated above in clean DB
            var indexManager = new CollectionIndexManager<ObjectId, User>(storage, mapper, nameof(User));
            var indexes = indexManager.GetAllIndexes().ToList();
            indexes.Count.ShouldBe(2);
            var ageIdx = indexManager.GetIndex("idx_age");
            ageIdx.ShouldNotBeNull();
            ageIdx.Definition.IsUnique.ShouldBeFalse();
            ageIdx.Definition.PropertyPaths.Count().ShouldBe(1);
            ageIdx.Definition.PropertyPaths[0].ShouldBe("Age");
            // Check auto-generated name index
            var nameIdx = indexes.FirstOrDefault(i => i.Definition.PropertyPaths[0] == "Name");
            nameIdx.ShouldNotBeNull();
            nameIdx.Definition.IsUnique.ShouldBeTrue();
        }
    }
    /// <summary>
    /// Tests ensure index does not recreate if index exists.
    /// </summary>
    [Fact]
    public void EnsureIndex_DoesNotRecreate_IfIndexExists()
    {
        // 1. Create index
        using (var context = new Shared.TestDbContext(_dbPath))
        {
            context.Users.EnsureIndex(u => u.Age);
        }
        // 2. Re-open and EnsureIndex again - should be fast/no-op
        using (var context = new Shared.TestDbContext(_dbPath))
        {
            var mapper = new ZB_MOM_WW_CBDD_Shared_UserMapper();
            // Use reflection or diagnostic to check if it triggered rebuild?
            // Currently hard to verify "no rebuild" without logs or mocking.
            // But we can verify it doesn't throw and index is still valid.
            var idx = context.Users.EnsureIndex(u => u.Age);
            idx.ShouldNotBeNull();
            // Verify functioning
            using var txn = context.BeginTransaction();
            context.Users.Insert(new User { Name = "Bob", Age = 50 });
            txn.Commit();
            // Should find it via index
            var results = context.Users.Find(u => u.Age == 50).ToList();
            // Fix (CA1829): use List<T>.Count property, not the Count() extension.
            results.Count.ShouldBe(1);
        }
    }
    /// <summary>
    /// Disposes the resources used by this instance.
    /// </summary>
    public void Dispose()
    {
        if (File.Exists(_dbPath)) File.Delete(_dbPath);
        if (File.Exists(_walPath)) File.Delete(_walPath);
    }
}

View File

@@ -0,0 +1,86 @@
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Collections;
using Xunit;
using System.Collections.Generic;
using System;
using System.Linq;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Schema-generation robustness checks for awkward property shapes: nullable
/// value types in collections, dictionaries, bare IEnumerable, and structs.
/// </summary>
public class RobustnessTests
{
    public struct Point
    {
        /// <summary>
        /// Gets or sets the X.
        /// </summary>
        public int X { get; set; }
        /// <summary>
        /// Gets or sets the Y.
        /// </summary>
        public int Y { get; set; }
    }
    public class RobustEntity
    {
        /// <summary>
        /// Gets or sets the NullableInts.
        /// </summary>
        public List<int?> NullableInts { get; set; } = new();
        /// <summary>
        /// Gets or sets the Map.
        /// </summary>
        public Dictionary<string, int> Map { get; set; } = new();
        /// <summary>
        /// Gets or sets the EnumerableStrings.
        /// </summary>
        public IEnumerable<string> EnumerableStrings { get; set; } = Array.Empty<string>();
        /// <summary>
        /// Gets or sets the Location.
        /// </summary>
        public Point Location { get; set; }
        /// <summary>
        /// Gets or sets the NullableLocation.
        /// </summary>
        public Point? NullableLocation { get; set; }
    }
    /// <summary>
    /// Executes GenerateSchema_RobustnessChecks.
    /// </summary>
    [Fact]
    public void GenerateSchema_RobustnessChecks()
    {
        var schema = BsonSchemaGenerator.FromType<RobustEntity>();

        // Case 1 — List<int?>: reported as an Int32 array. The schema model has
        // no "item is nullable" flag; this asserts it neither crashes nor
        // degrades to Undefined.
        var nullableIntsField = schema.Fields.First(field => field.Name == "nullableints");
        nullableIntsField.Type.ShouldBe(BsonType.Array);
        nullableIntsField.ArrayItemType.ShouldBe(BsonType.Int32);

        // Case 2 — Dictionary<string,int>: implements IEnumerable<KeyValuePair>,
        // so the generator sees it as an array; the KVP struct item type may be
        // Undefined depending on struct handling, so only the array shape is checked.
        var mapField = schema.Fields.First(field => field.Name == "map");
        mapField.Type.ShouldBe(BsonType.Array);

        // Case 3 — bare IEnumerable<string> property.
        var enumerableField = schema.Fields.First(field => field.Name == "enumerablestrings");
        enumerableField.Type.ShouldBe(BsonType.Array);
        enumerableField.ArrayItemType.ShouldBe(BsonType.String);

        // Case 4 — plain struct: non-primitive structs map to BSON documents
        // with a nested schema describing their members.
        var locationField = schema.Fields.First(field => field.Name == "location");
        locationField.Type.ShouldBe(BsonType.Document);
        locationField.NestedSchema.ShouldNotBeNull();
        locationField.NestedSchema.Fields.ShouldContain(field => field.Name == "x");

        // Case 5 — Nullable<struct>: same document mapping, plus the nullable flag.
        var nullableLocationField = schema.Fields.First(field => field.Name == "nullablelocation");
        nullableLocationField.Type.ShouldBe(BsonType.Document);
        nullableLocationField.IsNullable.ShouldBeTrue();
        nullableLocationField.NestedSchema.ShouldNotBeNull();
    }
}

View File

@@ -0,0 +1,113 @@
using ZB.MOM.WW.CBDD.Core.Storage;
using Xunit;
namespace ZB.MOM.WW.CBDD.Tests;
public class StorageEngineDictionaryTests
{
    /// <summary>
    /// Builds a unique temp-file path so parallel test runs never collide.
    /// </summary>
    private static string GetTempDbPath() => Path.Combine(Path.GetTempPath(), $"test_storage_dict_{Guid.NewGuid()}.db");

    /// <summary>
    /// Deletes the database file and its companion WAL file, if they exist.
    /// </summary>
    private static void Cleanup(string path)
    {
        if (File.Exists(path)) File.Delete(path);
        // Compute the WAL path once instead of twice.
        var walPath = Path.ChangeExtension(path, ".wal");
        if (File.Exists(walPath)) File.Delete(walPath);
    }

    /// <summary>
    /// Verifies dictionary pages are initialized and return normalized (lower-cased) keys.
    /// </summary>
    [Fact]
    public void StorageEngine_ShouldInitializeDictionary()
    {
        var path = GetTempDbPath();
        try
        {
            using (var storage = new StorageEngine(path, PageFileConfig.Default))
            {
                // New entries must be allocated above the reserved ID range.
                var id = storage.GetOrAddDictionaryEntry("TestKey");
                id.ShouldBeGreaterThan(DictionaryPage.ReservedValuesEnd);
                var key = storage.GetDictionaryKey(id);
                key.ShouldBe("testkey");
            }
        }
        finally { Cleanup(path); }
    }

    /// <summary>
    /// Verifies dictionary entries persist across reopen.
    /// </summary>
    [Fact]
    public void StorageEngine_ShouldPersistDictionary()
    {
        var path = GetTempDbPath();
        try
        {
            ushort id1, id2;
            using (var storage = new StorageEngine(path, PageFileConfig.Default))
            {
                id1 = storage.GetOrAddDictionaryEntry("Key1");
                id2 = storage.GetOrAddDictionaryEntry("Key2");
            }
            // Reopen: the same keys must resolve to the same IDs.
            using (var storage = new StorageEngine(path, PageFileConfig.Default))
            {
                var val1 = storage.GetOrAddDictionaryEntry("Key1");
                var val2 = storage.GetOrAddDictionaryEntry("Key2");
                val1.ShouldBe(id1);
                val2.ShouldBe(id2);
                storage.GetDictionaryKey(val1).ShouldBe("key1");
                storage.GetDictionaryKey(val2).ShouldBe("key2");
            }
        }
        finally { Cleanup(path); }
    }

    /// <summary>
    /// Verifies dictionary handling scales to many keys and remains durable.
    /// </summary>
    [Fact]
    public void StorageEngine_ShouldHandleManyKeys()
    {
        var path = GetTempDbPath();
        try
        {
            const int keyCount = 3000;
            var expectedIds = new Dictionary<string, ushort>();
            using (var storage = new StorageEngine(path, PageFileConfig.Default))
            {
                for (int i = 0; i < keyCount; i++)
                {
                    var key = $"Key_{i}";
                    var id = storage.GetOrAddDictionaryEntry(key);
                    expectedIds[key] = id;
                }
            }
            // Reopen and verify every key resolves to its original ID.
            using (var storage = new StorageEngine(path, PageFileConfig.Default))
            {
                for (int i = 0; i < keyCount; i++)
                {
                    var key = $"Key_{i}";
                    var id = storage.GetOrAddDictionaryEntry(key); // Should get existing
                    id.ShouldBe(expectedIds[key]);
                    var loadedKey = storage.GetDictionaryKey(id);
                    loadedKey.ShouldBe(key.ToLowerInvariant());
                }
                // Adding a fresh key after reopen must yield a new, non-zero ID.
                var newId = storage.GetOrAddDictionaryEntry("NewKeyAfterReopen");
                newId.ShouldBeGreaterThan((ushort)0);
                expectedIds.ContainsValue(newId).ShouldBeFalse();
            }
        }
        finally { Cleanup(path); }
    }
}

View File

@@ -0,0 +1,235 @@
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Core.Transactions;
namespace ZB.MOM.WW.CBDD.Tests;
public class StorageEngineTransactionProtocolTests
{
    /// <summary>
    /// Verifies preparing an unknown transaction returns false.
    /// </summary>
    [Fact]
    public void PrepareTransaction_Should_ReturnFalse_For_Unknown_Transaction()
        => WithTempStorage(storage => storage.PrepareTransaction(999_999).ShouldBeFalse());

    /// <summary>
    /// Verifies committing a detached transaction object throws.
    /// </summary>
    [Fact]
    public void CommitTransaction_With_TransactionObject_Should_Throw_When_Not_Active()
        => WithTempStorage(storage =>
        {
            // This transaction was never registered with the engine via BeginTransaction.
            var detached = new Transaction(123, storage);
            Should.Throw<InvalidOperationException>(() => storage.CommitTransaction(detached));
        });

    /// <summary>
    /// Verifies committing a transaction object persists writes and clears active state.
    /// </summary>
    [Fact]
    public void CommitTransaction_With_TransactionObject_Should_Commit_Writes()
        => WithTempStorage(storage =>
        {
            using var txn = storage.BeginTransaction();
            var pageId = storage.AllocatePage();
            var data = new byte[storage.PageSize];
            data[0] = 0xAB;
            storage.WritePage(pageId, txn.TransactionId, data);
            storage.CommitTransaction(txn);
            storage.ActiveTransactionCount.ShouldBe(0);
            var readBuffer = new byte[storage.PageSize];
            storage.ReadPage(pageId, 0, readBuffer);
            readBuffer[0].ShouldBe((byte)0xAB);
        });

    /// <summary>
    /// Verifies committing by identifier with no writes is a harmless no-op.
    /// </summary>
    [Fact]
    public void CommitTransaction_ById_With_NoWrites_Should_Not_Throw()
        => WithTempStorage(storage =>
            // Explicit assertion: previously this test passed merely by not crashing.
            Should.NotThrow(() => storage.CommitTransaction(424242)));

    /// <summary>
    /// Verifies committed transaction cache moves into readable state and active count is cleared.
    /// </summary>
    [Fact]
    public void MarkTransactionCommitted_Should_Move_Cache_And_Clear_ActiveCount()
        => WithTempStorage(storage =>
        {
            using var txn = storage.BeginTransaction();
            var pageId = storage.AllocatePage();
            var data = new byte[storage.PageSize];
            data[5] = 0x5A;
            storage.WritePage(pageId, txn.TransactionId, data);
            storage.ActiveTransactionCount.ShouldBe(1);
            storage.MarkTransactionCommitted(txn.TransactionId);
            storage.ActiveTransactionCount.ShouldBe(0);
            // The write must now be visible to readers outside the transaction.
            var readBuffer = new byte[storage.PageSize];
            storage.ReadPage(pageId, 0, readBuffer);
            readBuffer[5].ShouldBe((byte)0x5A);
        });

    /// <summary>
    /// Verifies rollback discards uncommitted page writes.
    /// </summary>
    [Fact]
    public void RollbackTransaction_Should_Discard_Uncommitted_Write()
        => WithTempStorage(storage =>
        {
            var pageId = storage.AllocatePage();
            var baseline = new byte[storage.PageSize];
            baseline[0] = 0x11;
            storage.WritePageImmediate(pageId, baseline);
            using var txn = storage.BeginTransaction();
            var changed = new byte[storage.PageSize];
            changed[0] = 0x99;
            storage.WritePage(pageId, txn.TransactionId, changed);
            storage.ActiveTransactionCount.ShouldBe(1);
            storage.RollbackTransaction(txn.TransactionId);
            storage.ActiveTransactionCount.ShouldBe(0);
            // The baseline value must survive; the rolled-back write must not.
            var readBuffer = new byte[storage.PageSize];
            storage.ReadPage(pageId, 0, readBuffer);
            readBuffer[0].ShouldBe((byte)0x11);
        });

    /// <summary>
    /// Verifies marking a transaction committed transitions state correctly.
    /// </summary>
    [Fact]
    public void Transaction_MarkCommitted_Should_Transition_State()
        => WithTempStorage(storage =>
        {
            using var txn = storage.BeginTransaction();
            var pageId = storage.AllocatePage();
            var data = new byte[storage.PageSize];
            data[3] = 0x33;
            storage.WritePage(pageId, txn.TransactionId, data);
            txn.MarkCommitted();
            txn.State.ShouldBe(TransactionState.Committed);
            storage.ActiveTransactionCount.ShouldBe(0);
            var readBuffer = new byte[storage.PageSize];
            storage.ReadPage(pageId, 0, readBuffer);
            readBuffer[3].ShouldBe((byte)0x33);
        });

    /// <summary>
    /// Verifies preparing then committing writes WAL data and updates transaction state.
    /// </summary>
    [Fact]
    public void Transaction_Prepare_Should_Write_Wal_And_Transition_State()
        => WithTempStorage(storage =>
        {
            using var txn = storage.BeginTransaction();
            var pageId = storage.AllocatePage();
            var data = new byte[storage.PageSize];
            data[11] = 0x7B;
            storage.WritePage(pageId, txn.TransactionId, data);
            txn.Prepare().ShouldBeTrue();
            txn.State.ShouldBe(TransactionState.Preparing);
            txn.Commit();
            txn.State.ShouldBe(TransactionState.Committed);
        });

    /// <summary>
    /// Creates a storage engine on a fresh temp database, runs the test body,
    /// and removes the database/WAL files afterwards. Centralizes the
    /// setup/teardown previously duplicated in every test.
    /// </summary>
    private static void WithTempStorage(Action<StorageEngine> body)
    {
        var dbPath = NewDbPath();
        try
        {
            using var storage = new StorageEngine(dbPath, PageFileConfig.Default);
            body(storage);
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Builds a unique temp-file path so parallel test runs never collide.
    /// </summary>
    private static string NewDbPath()
        => Path.Combine(Path.GetTempPath(), $"storage_txn_{Guid.NewGuid():N}.db");

    /// <summary>
    /// Removes the database file plus both possible WAL naming variants.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        if (File.Exists(dbPath)) File.Delete(dbPath);
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        if (File.Exists(walPath)) File.Delete(walPath);
        var altWalPath = dbPath + "-wal";
        if (File.Exists(altWalPath)) File.Delete(altWalPath);
    }
}