Implement checkpoint modes with docs/tests and reorganize project file layout
All checks were successful
NuGet Publish / build-and-pack (push) Successful in 46s
NuGet Publish / publish-to-gitea (push) Successful in 53s

This commit is contained in:
Joseph Doherty
2026-02-21 07:56:36 -05:00
parent 3ffd468c79
commit 4c6aaa5a3f
96 changed files with 744 additions and 249 deletions

View File

@@ -0,0 +1,134 @@
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Shared;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Crash-recovery tests for compaction: an interrupted compaction must be
/// resumable from its on-disk marker file, and a corrupted marker must be
/// handled deterministically without losing data.
/// </summary>
public class CompactionCrashRecoveryTests
{
    /// <summary>
    /// Verifies compaction resumes from marker phases and preserves data.
    /// </summary>
    /// <param name="phase">The crash marker phase to resume from.</param>
    [Theory]
    [InlineData("Started")]
    [InlineData("Copied")]
    [InlineData("Swapped")]
    public void ResumeCompaction_FromCrashMarkerPhases_ShouldFinalizeAndPreserveData(string phase)
    {
        var dbPath = NewDbPath();
        var markerPath = MarkerPath(dbPath);
        try
        {
            using var db = new TestDbContext(dbPath);
            var ids = SeedData(db);
            db.ForceCheckpoint();

            // Simulate a crash mid-compaction by hand-writing a marker file
            // in the requested phase, then ask storage to resume from it.
            WriteMarker(markerPath, dbPath, phase);
            var resumed = db.Storage.ResumeCompactionIfNeeded(new CompactionOptions
            {
                EnableTailTruncation = true,
                DefragmentSlottedPages = true,
                NormalizeFreeList = true
            });

            resumed.ShouldNotBeNull();
            resumed!.ResumedFromMarker.ShouldBeTrue();
            // The marker must be cleared once recovery has finalized.
            File.Exists(markerPath).ShouldBeFalse();
            db.Users.Count().ShouldBe(ids.Count);
            var recoveredDoc = ids
                .Select(id => db.Users.FindById(id))
                .FirstOrDefault(x => x != null);
            recoveredDoc.ShouldNotBeNull();
            recoveredDoc!.Name.ShouldContain("user-");

            // A second resume attempt with no marker present must be a no-op.
            db.Storage.ResumeCompactionIfNeeded().ShouldBeNull();
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Verifies corrupted compaction markers are recovered deterministically.
    /// </summary>
    [Fact]
    public void ResumeCompaction_WithCorruptedMarker_ShouldRecoverDeterministically()
    {
        var dbPath = NewDbPath();
        var markerPath = MarkerPath(dbPath);
        try
        {
            using var db = new TestDbContext(dbPath);
            var ids = SeedData(db);
            db.ForceCheckpoint();

            // A marker that is not valid JSON must still trigger deterministic recovery.
            File.WriteAllText(markerPath, "{invalid-json-marker");
            var resumed = db.Storage.ResumeCompactionIfNeeded(new CompactionOptions
            {
                EnableTailTruncation = true
            });

            resumed.ShouldNotBeNull();
            resumed!.ResumedFromMarker.ShouldBeTrue();
            File.Exists(markerPath).ShouldBeFalse();
            db.Users.Count().ShouldBe(ids.Count);
            var recoveredDoc = ids
                .Select(id => db.Users.FindById(id))
                .FirstOrDefault(x => x != null);
            recoveredDoc.ShouldNotBeNull();
            recoveredDoc!.Name.ShouldContain("user-");
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Inserts 120 users with a padded payload so pages hold non-trivial data,
    /// saves, and returns the inserted ids.
    /// </summary>
    private static List<ObjectId> SeedData(TestDbContext db)
    {
        var ids = new List<ObjectId>();
        for (var i = 0; i < 120; i++)
        {
            ids.Add(db.Users.Insert(new User
            {
                Name = $"user-{i:D4}-payload-{new string('x', 120)}",
                Age = i % 20
            }));
        }
        db.SaveChanges();
        return ids;
    }

    /// <summary>
    /// Writes a synthetic compaction marker file in the given phase,
    /// escaping backslashes so Windows temp paths stay valid JSON.
    /// </summary>
    private static void WriteMarker(string markerPath, string dbPath, string phase)
    {
        var safeDbPath = dbPath.Replace("\\", "\\\\", StringComparison.Ordinal);
        var now = DateTimeOffset.UtcNow.ToString("O");
        var json = $$"""
        {"version":1,"phase":"{{phase}}","databasePath":"{{safeDbPath}}","startedAtUtc":"{{now}}","lastUpdatedUtc":"{{now}}","onlineMode":false,"mode":"InPlace"}
        """;
        File.WriteAllText(markerPath, json);
    }

    // Marker file naming convention used by the compaction engine.
    private static string MarkerPath(string dbPath) => $"{dbPath}.compact.state";

    // Unique per-test database path under the system temp directory.
    private static string NewDbPath()
        => Path.Combine(Path.GetTempPath(), $"compaction_crash_{Guid.NewGuid():N}.db");

    /// <summary>
    /// Removes the database and all compaction side files. Fix: also deletes
    /// the .compact.tmp/.compact.bak files a resumed compaction may leave
    /// behind (matching CompactionOfflineTests.CleanupFiles) so failed runs
    /// do not leak temp files.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        var markerPath = MarkerPath(dbPath);
        var tempPath = $"{dbPath}.compact.tmp";
        var backupPath = $"{dbPath}.compact.bak";
        if (File.Exists(dbPath)) File.Delete(dbPath);
        if (File.Exists(walPath)) File.Delete(walPath);
        if (File.Exists(markerPath)) File.Delete(markerPath);
        if (File.Exists(tempPath)) File.Delete(tempPath);
        if (File.Exists(backupPath)) File.Delete(backupPath);
    }
}

View File

@@ -0,0 +1,474 @@
using System.IO.MemoryMappedFiles;
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Indexing;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Shared;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Offline (exclusive) compaction tests: logical data equivalence, index
/// consistency, hash-index metadata rebuild, tail truncation, metadata
/// validation failures, and relocation/throughput telemetry.
/// </summary>
public class CompactionOfflineTests
{
    /// <summary>
    /// Tests offline compact should preserve logical data equivalence.
    /// </summary>
    [Fact]
    public void OfflineCompact_ShouldPreserveLogicalDataEquivalence()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            var ids = new List<ObjectId>();
            for (var i = 0; i < 160; i++)
            {
                ids.Add(db.Users.Insert(new User { Name = $"user-{i:D4}", Age = i % 31 }));
            }
            // Delete every ninth document so compaction has gaps to reclaim.
            for (var i = 0; i < ids.Count; i += 9)
            {
                if (db.Users.FindById(ids[i]) != null)
                {
                    db.Users.Delete(ids[i]).ShouldBeTrue();
                }
            }
            // Update a subset so compaction also has to carry modified rows.
            var updateTargets = db.Users.FindAll(u => u.Age % 4 == 0)
                .Select(u => u.Id)
                .ToList();
            foreach (var id in updateTargets)
            {
                var user = db.Users.FindById(id);
                if (user == null)
                {
                    continue;
                }
                user.Name += "-updated";
                db.Users.Update(user).ShouldBeTrue();
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            // Snapshot the expected logical state right before compacting.
            var expected = db.Users.FindAll()
                .ToDictionary(u => u.Id, u => (u.Name, u.Age));
            db.SaveChanges(); // NOTE(review): looks redundant after a read-only snapshot — confirm intent.
            var stats = db.Compact();
            stats.OnlineMode.ShouldBeFalse();
            var actual = db.Users.FindAll()
                .ToDictionary(u => u.Id, u => (u.Name, u.Age));
            actual.Count.ShouldBe(expected.Count);
            foreach (var kvp in expected)
            {
                actual.ShouldContainKey(kvp.Key);
                actual[kvp.Key].ShouldBe(kvp.Value);
            }
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Tests offline compact should keep index results consistent.
    /// </summary>
    [Fact]
    public void OfflineCompact_ShouldKeepIndexResultsConsistent()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            for (var i = 0; i < 300; i++)
            {
                db.People.Insert(new Person
                {
                    Name = $"person-{i:D4}",
                    Age = i % 12
                });
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            // Expected per-age name groupings before compaction.
            var expectedByAge = db.People.FindAll()
                .GroupBy(p => p.Age)
                .ToDictionary(g => g.Key, g => g.Select(x => x.Name).OrderBy(x => x).ToArray());
            db.SaveChanges(); // NOTE(review): looks redundant after a read-only snapshot — confirm intent.
            var indexNamesBefore = db.People.GetIndexes().Select(x => x.Name).OrderBy(x => x).ToArray();
            var stats = db.Compact(new CompactionOptions
            {
                DefragmentSlottedPages = true,
                NormalizeFreeList = true,
                EnableTailTruncation = true
            });
            // Compaction must never grow the page count.
            stats.PrePageCount.ShouldBeGreaterThanOrEqualTo(stats.PostPageCount);
            // Index set and per-age query results must be unchanged.
            var indexNamesAfter = db.People.GetIndexes().Select(x => x.Name).OrderBy(x => x).ToArray();
            indexNamesAfter.ShouldBe(indexNamesBefore);
            foreach (var age in expectedByAge.Keys.OrderBy(x => x))
            {
                var actual = db.People.FindAll(p => p.Age == age)
                    .Select(x => x.Name)
                    .OrderBy(x => x)
                    .ToArray();
                actual.ShouldBe(expectedByAge[age]);
            }
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Tests offline compact should rebuild hash index metadata and preserve results.
    /// </summary>
    [Fact]
    public void OfflineCompact_ShouldRebuildHashIndexMetadataAndPreserveResults()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            for (var i = 0; i < 300; i++)
            {
                db.People.Insert(new Person
                {
                    Name = $"hash-person-{i:D4}",
                    Age = i % 12
                });
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            var expectedByAge = db.People.FindAll()
                .GroupBy(p => p.Age)
                .ToDictionary(g => g.Key, g => g.Select(x => x.Name).OrderBy(x => x).ToArray());
            // Flip the Age index's type to Hash directly in collection metadata,
            // forcing compaction to rebuild it under the new type.
            var metadata = db.Storage.GetCollectionMetadata("people_collection");
            metadata.ShouldNotBeNull();
            var targetIndex = metadata!.Indexes
                .FirstOrDefault(index => index.PropertyPaths.Any(path => path.Equals("Age", StringComparison.OrdinalIgnoreCase)));
            targetIndex.ShouldNotBeNull();
            targetIndex!.Type = IndexType.Hash;
            db.Storage.SaveCollectionMetadata(metadata);
            db.SaveChanges();
            var stats = db.Compact(new CompactionOptions
            {
                DefragmentSlottedPages = true,
                NormalizeFreeList = true,
                EnableTailTruncation = true
            });
            stats.PrePageCount.ShouldBeGreaterThanOrEqualTo(stats.PostPageCount);
            // The rebuilt index must keep the Hash type and point at a real root page.
            var reloadedMetadata = db.Storage.GetCollectionMetadata("people_collection");
            reloadedMetadata.ShouldNotBeNull();
            var rebuiltIndex = reloadedMetadata!.Indexes.FirstOrDefault(index => index.Name == targetIndex.Name);
            rebuiltIndex.ShouldNotBeNull();
            rebuiltIndex!.Type.ShouldBe(IndexType.Hash);
            rebuiltIndex.RootPageId.ShouldBeGreaterThan(0u);
            var runtimeIndex = db.People.GetIndexes().FirstOrDefault(index => index.Name == targetIndex.Name);
            runtimeIndex.ShouldNotBeNull();
            runtimeIndex!.Type.ShouldBe(IndexType.Hash);
            foreach (var age in expectedByAge.Keys.OrderBy(x => x))
            {
                var actual = db.People.FindAll(p => p.Age == age)
                    .Select(x => x.Name)
                    .OrderBy(x => x)
                    .ToArray();
                actual.ShouldBe(expectedByAge[age]);
            }
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Tests offline compact when tail is reclaimable should reduce file size.
    /// </summary>
    [Fact]
    public void OfflineCompact_WhenTailIsReclaimable_ShouldReduceFileSize()
    {
        var dbPath = NewDbPath();
        var ids = new List<ObjectId>();
        try
        {
            using var db = new TestDbContext(dbPath, SmallPageConfig());
            // Large payloads with a small page size force the file to grow.
            for (var i = 0; i < 240; i++)
            {
                var id = db.Users.Insert(new User
                {
                    Name = BuildPayload(i, 18_000),
                    Age = i
                });
                ids.Add(id);
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            // Delete from the tail backwards (keep the first 60) so the end
            // of the file becomes reclaimable by tail truncation.
            for (var i = ids.Count - 1; i >= 60; i--)
            {
                if (db.Users.FindById(ids[i]) != null)
                {
                    db.Users.Delete(ids[i]).ShouldBeTrue();
                }
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            var preCompactSize = new FileInfo(dbPath).Length;
            var stats = db.Compact(new CompactionOptions
            {
                EnableTailTruncation = true,
                MinimumRetainedPages = 2
            });
            var postCompactSize = new FileInfo(dbPath).Length;
            postCompactSize.ShouldBeLessThanOrEqualTo(preCompactSize);
            stats.ReclaimedFileBytes.ShouldBeGreaterThanOrEqualTo(0);
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Tests offline compact with invalid primary root metadata should fail validation.
    /// </summary>
    [Fact]
    public void OfflineCompact_WithInvalidPrimaryRootMetadata_ShouldFailValidation()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            for (var i = 0; i < 32; i++)
            {
                db.Users.Insert(new User { Name = $"invalid-primary-{i:D3}", Age = i });
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            var metadata = db.Storage.GetCollectionMetadata("users");
            metadata.ShouldNotBeNull();
            metadata!.PrimaryRootPageId = 1; // Metadata page, not an index page.
            db.Storage.SaveCollectionMetadata(metadata);
            Should.Throw<InvalidDataException>(() => db.Compact())
                .Message.ShouldContain("primary index root page id");
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Tests offline compact with invalid secondary root metadata should fail validation.
    /// </summary>
    [Fact]
    public void OfflineCompact_WithInvalidSecondaryRootMetadata_ShouldFailValidation()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath);
            for (var i = 0; i < 48; i++)
            {
                db.People.Insert(new Person { Name = $"invalid-secondary-{i:D3}", Age = i % 10 });
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            var metadata = db.Storage.GetCollectionMetadata("people_collection");
            metadata.ShouldNotBeNull();
            metadata!.Indexes.Count.ShouldBeGreaterThan(0);
            metadata.Indexes[0].RootPageId = uint.MaxValue; // Out-of-range page id.
            db.Storage.SaveCollectionMetadata(metadata);
            Should.Throw<InvalidDataException>(() => db.Compact())
                .Message.ShouldContain("out of range");
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Tests offline compact should report live bytes relocation and throughput telemetry.
    /// </summary>
    [Fact]
    public void OfflineCompact_ShouldReportLiveBytesRelocationAndThroughputTelemetry()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath, SmallPageConfig());
            var ids = new List<ObjectId>();
            for (var i = 0; i < 160; i++)
            {
                ids.Add(db.Users.Insert(new User
                {
                    Name = BuildPayload(i, 9_000),
                    Age = i
                }));
            }
            // Delete every seventh document to create fragmentation.
            for (var i = 0; i < ids.Count; i += 7)
            {
                if (db.Users.FindById(ids[i]) != null)
                {
                    db.Users.Delete(ids[i]).ShouldBeTrue();
                }
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            var stats = db.Compact(new CompactionOptions
            {
                DefragmentSlottedPages = true,
                NormalizeFreeList = true,
                EnableTailTruncation = true
            });
            // Live bytes must equal file size minus free bytes, clamped at zero.
            stats.PreLiveBytes.ShouldBe(Math.Max(0, stats.PreFileSizeBytes - stats.PreFreeBytes));
            stats.PostLiveBytes.ShouldBe(Math.Max(0, stats.PostFileSizeBytes - stats.PostFreeBytes));
            stats.DocumentsRelocated.ShouldBeGreaterThanOrEqualTo(0);
            stats.PagesRelocated.ShouldBeGreaterThanOrEqualTo(0);
            // Byte throughput is strictly positive; page/document rates may be zero.
            stats.ThroughputBytesPerSecond.ShouldBeGreaterThan(0);
            stats.ThroughputPagesPerSecond.ShouldBeGreaterThanOrEqualTo(0);
            stats.ThroughputDocumentsPerSecond.ShouldBeGreaterThanOrEqualTo(0);
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Tests offline compact when primary index points to deleted slot should fail validation.
    /// </summary>
    [Fact]
    public void OfflineCompact_WhenPrimaryIndexPointsToDeletedSlot_ShouldFailValidation()
    {
        var dbPath = NewDbPath();
        try
        {
            using var db = new TestDbContext(dbPath, SmallPageConfig());
            var id = db.Users.Insert(new User { Name = BuildPayload(1, 7_500), Age = 9 });
            db.SaveChanges();
            db.ForceCheckpoint();
            var metadata = db.Storage.GetCollectionMetadata("users");
            metadata.ShouldNotBeNull();
            metadata!.PrimaryRootPageId.ShouldBeGreaterThan(0u);
            // Locate the document's physical slot via the primary B-tree index.
            var primaryIndex = new BTreeIndex(db.Storage, IndexOptions.CreateUnique("_id"), metadata.PrimaryRootPageId);
            primaryIndex.TryFind(new IndexKey(id), out var location).ShouldBeTrue();
            var page = new byte[db.Storage.PageSize];
            db.Storage.ReadPage(location.PageId, null, page);
            var header = SlottedPageHeader.ReadFrom(page);
            var slotOffset = SlottedPageHeader.Size + (location.SlotIndex * SlotEntry.Size);
            var slot = SlotEntry.ReadFrom(page.AsSpan(slotOffset, SlotEntry.Size));
            // Corrupt the page: mark the slot deleted while the index still points at it.
            slot.Flags |= SlotFlags.Deleted;
            slot.WriteTo(page.AsSpan(slotOffset, SlotEntry.Size));
            header.WriteTo(page); // NOTE(review): header is written back unmodified — confirm whether this is required.
            db.Storage.WritePageImmediate(location.PageId, page);
            var ex = Should.Throw<InvalidDataException>(() => db.Compact(new CompactionOptions
            {
                DefragmentSlottedPages = true,
                NormalizeFreeList = true,
                EnableTailTruncation = true
            }));
            ex.Message.ShouldContain("Compaction validation failed");
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    /// <summary>
    /// Small 4 KiB pages so modest payloads span many pages and force file growth.
    /// </summary>
    private static PageFileConfig SmallPageConfig()
    {
        return new PageFileConfig
        {
            PageSize = 4096,
            InitialFileSize = 1024 * 1024,
            Access = MemoryMappedFileAccess.ReadWrite
        };
    }

    /// <summary>
    /// Builds a deterministic filler string of at least <paramref name="approxLength"/>
    /// characters, seeded so payloads differ per document.
    /// </summary>
    private static string BuildPayload(int seed, int approxLength)
    {
        var builder = new System.Text.StringBuilder(approxLength + 256);
        var i = 0;
        while (builder.Length < approxLength)
        {
            builder.Append("compact-tail-");
            builder.Append(seed.ToString("D4"));
            builder.Append('-');
            builder.Append(i.ToString("D6"));
            builder.Append('|');
            i++;
        }
        return builder.ToString();
    }

    // Unique per-test database path under the system temp directory.
    private static string NewDbPath()
        => Path.Combine(Path.GetTempPath(), $"compaction_offline_{Guid.NewGuid():N}.db");

    /// <summary>
    /// Deletes the database, WAL, and all compaction side files if present.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        var markerPath = $"{dbPath}.compact.state";
        var tempPath = $"{dbPath}.compact.tmp";
        var backupPath = $"{dbPath}.compact.bak";
        if (File.Exists(dbPath)) File.Delete(dbPath);
        if (File.Exists(walPath)) File.Delete(walPath);
        if (File.Exists(markerPath)) File.Delete(markerPath);
        if (File.Exists(tempPath)) File.Delete(tempPath);
        if (File.Exists(backupPath)) File.Delete(backupPath);
    }
}

View File

@@ -0,0 +1,140 @@
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Shared;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// Online compaction concurrency tests: compaction running alongside an
/// active insert/update/delete workload must complete without deadlock and
/// without losing any document the workload still considers live.
/// </summary>
public class CompactionOnlineConcurrencyTests
{
    /// <summary>
    /// Verifies online compaction completes without deadlock under concurrent workload.
    /// </summary>
    [Fact]
    public async Task OnlineCompaction_WithConcurrentishWorkload_ShouldCompleteWithoutDeadlock()
    {
        var dbPath = NewDbPath();
        // Ids of documents believed to still exist; all access guarded by 'sync'.
        var activeIds = new List<ObjectId>();
        var sync = new object();
        var completedOps = 0;
        try
        {
            using var db = new TestDbContext(dbPath);
            var testCancellation = TestContext.Current.CancellationToken;
            for (var i = 0; i < 120; i++)
            {
                var id = db.Users.Insert(new User { Name = $"seed-{i:D4}", Age = i % 40 });
                activeIds.Add(id);
            }
            db.SaveChanges();
            db.ForceCheckpoint();
            // Background workload: round-robins insert / update / delete by i % 3.
            var workloadTask = Task.Run(() =>
            {
                for (var i = 0; i < 150; i++)
                {
                    if (i % 3 == 0)
                    {
                        // Insert a new document and track it as live.
                        var id = db.Users.Insert(new User { Name = $"insert-{i:D4}", Age = i % 60 });
                        lock (sync)
                        {
                            activeIds.Add(id);
                        }
                    }
                    else if (i % 3 == 1)
                    {
                        // Pick a live candidate under the lock, mutate outside it.
                        ObjectId? candidate = null;
                        lock (sync)
                        {
                            if (activeIds.Count > 0)
                            {
                                candidate = activeIds[i % activeIds.Count];
                            }
                        }
                        if (candidate.HasValue)
                        {
                            var entity = db.Users.FindById(candidate.Value);
                            if (entity != null)
                            {
                                entity.Age += 1;
                                db.Users.Update(entity).ShouldBeTrue();
                            }
                        }
                    }
                    else
                    {
                        // Delete from the tail, keeping a floor of 60 live documents
                        // so updates always have targets.
                        ObjectId? candidate = null;
                        lock (sync)
                        {
                            if (activeIds.Count > 60)
                            {
                                candidate = activeIds[^1];
                                activeIds.RemoveAt(activeIds.Count - 1);
                            }
                        }
                        if (candidate.HasValue)
                        {
                            db.Users.Delete(candidate.Value);
                        }
                    }
                    db.SaveChanges();
                    _ = db.Users.Count(); // Interleave a read to widen lock contention.
                    db.SaveChanges();
                    Interlocked.Increment(ref completedOps);
                }
            }, testCancellation);
            // Run compaction concurrently with the workload, in small time-boxed batches.
            var compactionTask = Task.Run(() => db.Compact(new CompactionOptions
            {
                OnlineMode = true,
                OnlineBatchPageLimit = 4,
                OnlineBatchDelay = TimeSpan.FromMilliseconds(2),
                MaxOnlineDuration = TimeSpan.FromMilliseconds(400),
                EnableTailTruncation = true
            }), testCancellation);
            // Bound the whole run so a deadlock fails the test instead of hanging it.
            await Task.WhenAll(workloadTask, compactionTask).WaitAsync(TimeSpan.FromSeconds(20), testCancellation);
            var stats = await compactionTask;
            stats.OnlineMode.ShouldBeTrue();
            completedOps.ShouldBeGreaterThanOrEqualTo(100);
            var allUsers = db.Users.FindAll().ToList();
            allUsers.Count.ShouldBeGreaterThan(0);
            db.SaveChanges();
            // Snapshot the live-id list under the lock before verifying.
            List<ObjectId> snapshotIds;
            lock (sync)
            {
                snapshotIds = activeIds.ToList();
            }
            // Every id the workload believes is live must still be retrievable.
            var actualIds = allUsers.Select(x => x.Id).ToHashSet();
            foreach (var id in snapshotIds)
            {
                actualIds.ShouldContain(id);
            }
        }
        finally
        {
            CleanupFiles(dbPath);
        }
    }

    // Unique per-test database path under the system temp directory.
    private static string NewDbPath()
        => Path.Combine(Path.GetTempPath(), $"compaction_online_{Guid.NewGuid():N}.db");

    /// <summary>
    /// Deletes the database, WAL, and compaction marker files if present.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        var walPath = Path.ChangeExtension(dbPath, ".wal");
        var markerPath = $"{dbPath}.compact.state";
        if (File.Exists(dbPath)) File.Delete(dbPath);
        if (File.Exists(walPath)) File.Delete(walPath);
        if (File.Exists(markerPath)) File.Delete(markerPath);
    }
}

View File

@@ -0,0 +1,115 @@
using ZB.MOM.WW.CBDD.Bson;
using ZB.MOM.WW.CBDD.Core.Storage;
using ZB.MOM.WW.CBDD.Shared;
namespace ZB.MOM.WW.CBDD.Tests;
/// <summary>
/// WAL coordination tests for compaction: offline compaction must checkpoint
/// (leaving the WAL empty), and compacting after a WAL recovery must keep
/// every recovered document durable.
/// </summary>
public class CompactionWalCoordinationTests
{
    /// <summary>
    /// Verifies offline compaction checkpoints and leaves the WAL empty.
    /// </summary>
    [Fact]
    public void OfflineCompact_ShouldCheckpointAndLeaveWalEmpty()
    {
        var databasePath = NewDbPath();
        var markerFile = $"{databasePath}.compact.state";
        try
        {
            using var context = new TestDbContext(databasePath);
            var seeded = 0;
            while (seeded < 80)
            {
                context.Users.Insert(new User { Name = $"wal-compact-{seeded:D3}", Age = seeded });
                seeded++;
            }
            context.SaveChanges();

            // The inserts must have produced WAL entries before compaction runs.
            context.Storage.GetWalSize().ShouldBeGreaterThan(0);

            var result = context.Compact(new CompactionOptions
            {
                DefragmentSlottedPages = true,
                NormalizeFreeList = true,
                EnableTailTruncation = true
            });

            result.OnlineMode.ShouldBeFalse();
            // Compaction checkpoints: WAL drained, no marker left, data intact.
            context.Storage.GetWalSize().ShouldBe(0);
            File.Exists(markerFile).ShouldBeFalse();
            context.Users.Count().ShouldBe(80);
        }
        finally
        {
            CleanupFiles(databasePath);
        }
    }

    /// <summary>
    /// Verifies compaction after WAL recovery preserves durable data.
    /// </summary>
    [Fact]
    public void Compact_AfterWalRecovery_ShouldKeepDataDurable()
    {
        var databasePath = NewDbPath();
        var walFile = Path.ChangeExtension(databasePath, ".wal");
        var seededUsers = new List<(ObjectId Id, string Name)>();
        try
        {
            // Phase 1: write documents and dispose while the WAL is non-empty.
            using (var writer = new TestDbContext(databasePath))
            {
                for (var index = 0; index < 48; index++)
                {
                    var userName = $"recoverable-{index:D3}";
                    var userId = writer.Users.Insert(new User { Name = userName, Age = index % 13 });
                    seededUsers.Add((userId, userName));
                }
                writer.SaveChanges();
                writer.Storage.GetWalSize().ShouldBeGreaterThan(0);
            }

            // The WAL file survives disposal and still holds entries to replay.
            new FileInfo(walFile).Length.ShouldBeGreaterThan(0);

            // Phase 2: reopen (triggering WAL recovery), verify, then compact.
            using (var recovered = new TestDbContext(databasePath))
            {
                recovered.Users.Count().ShouldBe(seededUsers.Count);
                foreach (var (userId, userName) in seededUsers)
                {
                    recovered.Users.FindById(userId)!.Name.ShouldBe(userName);
                }
                recovered.SaveChanges();
                recovered.Compact();
                recovered.Storage.GetWalSize().ShouldBe(0);
            }

            // Phase 3: reopen once more and confirm compaction lost nothing.
            using (var verify = new TestDbContext(databasePath))
            {
                verify.Users.Count().ShouldBe(seededUsers.Count);
                foreach (var (userId, userName) in seededUsers)
                {
                    verify.Users.FindById(userId)!.Name.ShouldBe(userName);
                }
            }
        }
        finally
        {
            CleanupFiles(databasePath);
        }
    }

    // Unique per-test database path under the system temp directory.
    private static string NewDbPath()
        => Path.Combine(Path.GetTempPath(), $"compaction_wal_{Guid.NewGuid():N}.db");

    /// <summary>
    /// Deletes the database, WAL, and compaction marker files if present.
    /// </summary>
    private static void CleanupFiles(string dbPath)
    {
        var candidates = new[]
        {
            dbPath,
            Path.ChangeExtension(dbPath, ".wal"),
            $"{dbPath}.compact.state"
        };
        foreach (var candidate in candidates)
        {
            if (File.Exists(candidate)) File.Delete(candidate);
        }
    }
}