Add enterprise docs structure and include pending core maintenance updates.
This commit is contained in:
@@ -124,14 +124,28 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
_indexManager.SetPrimaryRootPageId(_primaryIndex.RootPageId);
|
||||
}
|
||||
|
||||
// Register keys used by the mapper to ensure they are available for compression
|
||||
_storage.RegisterKeys(_mapper.UsedKeys);
|
||||
}
|
||||
|
||||
private void EnsureSchema()
|
||||
{
|
||||
var currentSchema = _mapper.GetSchema();
|
||||
var metadata = _indexManager.GetMetadata();
|
||||
// Register keys used by the mapper to ensure they are available for compression
|
||||
_storage.RegisterKeys(_mapper.UsedKeys);
|
||||
}
|
||||
|
||||
/// <summary>
/// Re-reads collection metadata from storage and, when a primary root page
/// has been persisted there, rebinds the in-memory primary index to it.
/// </summary>
private void RefreshPrimaryIndexRootFromMetadata()
{
    _indexManager.RefreshFromStorageMetadata();

    var persistedRoot = _indexManager.PrimaryRootPageId;

    // A zero root means no primary root has been persisted yet — nothing to sync.
    // Skip the rebind as well when the in-memory root already matches.
    if (persistedRoot != 0 && persistedRoot != _primaryIndex.RootPageId)
        _primaryIndex.SetRootPageId(persistedRoot);
}
|
||||
|
||||
private void EnsureSchema()
|
||||
{
|
||||
var currentSchema = _mapper.GetSchema();
|
||||
var metadata = _indexManager.GetMetadata();
|
||||
|
||||
var persistedSchemas = _storage.GetSchemas(metadata.SchemaRootPageId);
|
||||
var latestPersisted = persistedSchemas.Count > 0 ? persistedSchemas[persistedSchemas.Count - 1] : null;
|
||||
@@ -363,12 +377,13 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
/// Rebuilds an index by scanning all existing documents and re-inserting them.
|
||||
/// Called automatically when creating a new index.
|
||||
/// </summary>
|
||||
private void RebuildIndex(CollectionSecondaryIndex<TId, T> index)
|
||||
{
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
// Iterate all documents in the collection via primary index
|
||||
var minKey = new IndexKey(Array.Empty<byte>());
|
||||
var maxKey = new IndexKey(Enumerable.Repeat((byte)0xFF, 32).ToArray());
|
||||
private void RebuildIndex(CollectionSecondaryIndex<TId, T> index)
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
// Iterate all documents in the collection via primary index
|
||||
var minKey = new IndexKey(Array.Empty<byte>());
|
||||
var maxKey = new IndexKey(Enumerable.Repeat((byte)0xFF, 32).ToArray());
|
||||
|
||||
foreach (var entry in _primaryIndex.Range(minKey, maxKey, IndexDirection.Forward, transaction.TransactionId))
|
||||
{
|
||||
@@ -967,6 +982,7 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
|
||||
private void InsertDataCore(TId id, T entity, ReadOnlySpan<byte> docData)
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
var (storedPayloadOverride, storedPayloadFlags) = PreparePayloadForStorage(docData);
|
||||
ReadOnlySpan<byte> storedPayload = storedPayloadOverride is null ? docData : storedPayloadOverride;
|
||||
@@ -1005,11 +1021,12 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
/// <param name="id">ObjectId of the document</param>
|
||||
/// <param name="transaction">Optional transaction for isolation (supports Read Your Own Writes)</param>
|
||||
/// <returns>The document, or null if not found</returns>
|
||||
public T? FindById(TId id)
|
||||
{
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
try
|
||||
{
|
||||
public T? FindById(TId id)
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
try
|
||||
{
|
||||
var key = _mapper.ToIndexKey(id);
|
||||
|
||||
if (!_primaryIndex.TryFind(key, out var location, transaction.TransactionId))
|
||||
@@ -1031,11 +1048,12 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
/// </summary>
|
||||
/// <param name="transaction">Transaction for isolation (REQUIRED for consistent reads during concurrent writes)</param>
|
||||
/// <returns>Enumerable of all documents</returns>
|
||||
public IEnumerable<T> FindAll()
|
||||
{
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
var txnId = transaction?.TransactionId ?? 0;
|
||||
var minKey = new IndexKey(Array.Empty<byte>());
|
||||
public IEnumerable<T> FindAll()
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
var txnId = transaction?.TransactionId ?? 0;
|
||||
var minKey = new IndexKey(Array.Empty<byte>());
|
||||
var maxKey = new IndexKey(Enumerable.Repeat((byte)0xFF, 32).ToArray());
|
||||
|
||||
foreach (var entry in _primaryIndex.Range(minKey, maxKey, IndexDirection.Forward, txnId))
|
||||
@@ -1202,11 +1220,12 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
}
|
||||
}
|
||||
|
||||
private int UpdateBulkInternal(List<T> entityList)
|
||||
{
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
int updateCount = 0;
|
||||
const int BATCH_SIZE = 50;
|
||||
private int UpdateBulkInternal(List<T> entityList)
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
int updateCount = 0;
|
||||
const int BATCH_SIZE = 50;
|
||||
|
||||
for (int batchStart = 0; batchStart < entityList.Count; batchStart += BATCH_SIZE)
|
||||
{
|
||||
@@ -1272,6 +1291,7 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
|
||||
private bool UpdateDataCore(TId id, T entity, ReadOnlySpan<byte> docData)
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
var key = _mapper.ToIndexKey(id);
|
||||
var (storedPayloadOverride, storedPayloadFlags) = PreparePayloadForStorage(docData);
|
||||
@@ -1438,11 +1458,12 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
return deleteCount;
|
||||
}
|
||||
|
||||
private bool DeleteCore(TId id, bool notifyCdc = true)
|
||||
{
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
var key = _mapper.ToIndexKey(id);
|
||||
if (!_primaryIndex.TryFind(key, out var location, transaction.TransactionId))
|
||||
private bool DeleteCore(TId id, bool notifyCdc = true)
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
var key = _mapper.ToIndexKey(id);
|
||||
if (!_primaryIndex.TryFind(key, out var location, transaction.TransactionId))
|
||||
return false;
|
||||
|
||||
// Notify secondary indexes BEFORE deleting document from storage
|
||||
@@ -1524,11 +1545,12 @@ public partial class DocumentCollection<TId, T> : IDisposable where T : class
|
||||
/// </summary>
|
||||
/// <param name="transaction">Optional transaction for isolation</param>
|
||||
/// <returns>Number of documents</returns>
|
||||
public int Count()
|
||||
{
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
// Count all entries in primary index
|
||||
// Use generic min/max keys for the index
|
||||
public int Count()
|
||||
{
|
||||
RefreshPrimaryIndexRootFromMetadata();
|
||||
var transaction = _transactionHolder.GetCurrentTransactionOrStart();
|
||||
// Count all entries in primary index
|
||||
// Use generic min/max keys for the index
|
||||
var minKey = IndexKey.MinKey;
|
||||
var maxKey = IndexKey.MaxKey;
|
||||
return _primaryIndex.Range(minKey, maxKey, IndexDirection.Forward, transaction.TransactionId).Count();
|
||||
|
||||
@@ -82,6 +82,18 @@ public sealed class BTreeIndex
|
||||
/// Gets the current root page identifier for the B+Tree.
|
||||
/// </summary>
|
||||
public uint RootPageId => _rootPageId;
|
||||
|
||||
/// <summary>
/// Updates the in-memory root page identifier.
/// </summary>
/// <param name="rootPageId">The root page identifier to use for subsequent operations.</param>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="rootPageId"/> is zero.</exception>
internal void SetRootPageId(uint rootPageId)
{
    // Page id 0 denotes "no root" here, so it is rejected before assignment.
    _rootPageId = rootPageId != 0
        ? rootPageId
        : throw new ArgumentOutOfRangeException(nameof(rootPageId));
}
|
||||
|
||||
/// <summary>
|
||||
/// Reads a page using StorageEngine for transaction isolation.
|
||||
|
||||
@@ -504,6 +504,37 @@ public sealed class CollectionIndexManager<TId, T> : IDisposable where T : class
|
||||
/// </summary>
|
||||
public uint PrimaryRootPageId => _metadata.PrimaryRootPageId;
|
||||
|
||||
/// <summary>
/// Refreshes cached metadata and index root bindings from storage.
/// Secondary indexes are torn down and recreated only when the persisted
/// metadata actually differs from the cached snapshot.
/// </summary>
/// <exception cref="ObjectDisposedException">Thrown when the manager has been disposed.</exception>
internal void RefreshFromStorageMetadata()
{
    lock (_lock)
    {
        if (_disposed)
            throw new ObjectDisposedException(nameof(CollectionIndexManager<TId, T>));

        // Fall back to a fresh, empty metadata record when this collection
        // has never been persisted.
        var latest = _storage.GetCollectionMetadata(_collectionName) ?? new CollectionMetadata { Name = _collectionName };
        // No change on disk: keep the current in-memory index bindings intact.
        if (MetadataEquals(_metadata, latest))
            return;

        // Metadata changed: drop every cached secondary index so each can be
        // rebound to its freshly persisted root page. Disposal failures are
        // swallowed deliberately — the indexes are being discarded anyway.
        foreach (var index in _indexes.Values)
        {
            try { index.Dispose(); } catch { /* Best effort */ }
        }

        _indexes.Clear();
        _metadata = latest;

        // Recreate each secondary index from its persisted definition and root page.
        foreach (var idxMeta in _metadata.Indexes)
        {
            var definition = RebuildDefinition(idxMeta.Name, idxMeta.PropertyPaths, idxMeta.IsUnique, idxMeta.Type, idxMeta.Dimensions, idxMeta.Metric);
            var index = new CollectionSecondaryIndex<TId, T>(definition, _storage, _mapper, idxMeta.RootPageId);
            _indexes[idxMeta.Name] = index;
        }
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Sets the root page identifier for the primary index.
|
||||
/// </summary>
|
||||
@@ -526,11 +557,52 @@ public sealed class CollectionIndexManager<TId, T> : IDisposable where T : class
|
||||
/// <returns>The collection metadata.</returns>
|
||||
public CollectionMetadata GetMetadata() => _metadata;
|
||||
|
||||
// Persists the current metadata snapshot: refresh derived fields first,
// then write the record through the storage engine.
private void SaveMetadata()
{
    UpdateMetadata();
    _storage.SaveCollectionMetadata(_metadata);
}
|
||||
// Persists the current metadata snapshot: refresh derived fields first,
// then write the record through the storage engine.
private void SaveMetadata()
{
    UpdateMetadata();
    _storage.SaveCollectionMetadata(_metadata);
}
|
||||
|
||||
/// <summary>
/// Determines whether two collection metadata snapshots describe the same
/// persisted state: name, root pages, and every index definition, compared
/// positionally (a reordered index list counts as a change).
/// </summary>
private static bool MetadataEquals(CollectionMetadata left, CollectionMetadata right)
{
    if (!string.Equals(left.Name, right.Name, StringComparison.OrdinalIgnoreCase))
        return false;

    var sameShape = left.PrimaryRootPageId == right.PrimaryRootPageId
        && left.SchemaRootPageId == right.SchemaRootPageId
        && left.Indexes.Count == right.Indexes.Count;
    if (!sameShape)
        return false;

    for (var i = 0; i < left.Indexes.Count; i++)
    {
        var a = left.Indexes[i];
        var b = right.Indexes[i];

        var sameDefinition = string.Equals(a.Name, b.Name, StringComparison.OrdinalIgnoreCase)
            && a.RootPageId == b.RootPageId
            && a.Type == b.Type
            && a.IsUnique == b.IsUnique
            && a.Dimensions == b.Dimensions
            && a.Metric == b.Metric;
        if (!sameDefinition)
            return false;

        // Property paths are compared case-sensitively, unlike index names.
        var aPaths = a.PropertyPaths ?? Array.Empty<string>();
        var bPaths = b.PropertyPaths ?? Array.Empty<string>();
        if (!aPaths.SequenceEqual(bPaths, StringComparer.Ordinal))
            return false;
    }

    return true;
}
|
||||
|
||||
/// <summary>
|
||||
/// Releases resources used by the index manager.
|
||||
|
||||
@@ -645,6 +645,39 @@ public sealed class PageFile : IDisposable
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Trims excess physical file capacity beyond the current logical page count.
/// </summary>
/// <returns>The number of bytes removed from the file; 0 when there was no excess capacity.</returns>
public long TrimExcessCapacityToLogicalPageCount()
{
    lock (_lock)
    {
        EnsureFileOpen();

        // Logical size = allocated page count × page size; anything beyond
        // that is preallocated slack eligible for trimming.
        var targetLengthBytes = (long)_nextPageId * _config.PageSize;
        var currentLengthBytes = _fileStream!.Length;
        if (currentLengthBytes <= targetLengthBytes)
            return 0;

        // The memory-mapped view pins the current file length, so it must be
        // released before the underlying stream can shrink.
        _mappedFile?.Dispose();
        _mappedFile = null;

        _fileStream.SetLength(targetLengthBytes);
        _fileStream.Flush(flushToDisk: true);

        // Re-map over the truncated file. leaveOpen keeps ownership of the
        // stream with this class rather than the mapping.
        // NOTE(review): if CreateFromFile throws, _mappedFile stays null and a
        // later page access would fail — presumably callers treat that as fatal; confirm.
        _mappedFile = MemoryMappedFile.CreateFromFile(
            _fileStream,
            null,
            targetLengthBytes,
            _config.Access,
            HandleInheritability.None,
            leaveOpen: true);

        return currentLengthBytes - targetLengthBytes;
    }
}
|
||||
|
||||
/// <summary>
|
||||
/// Defragments a slotted page in place by packing live slot payloads densely at the end of the page.
|
||||
/// </summary>
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user