refactor: extract NATS.Server.Transport.Tests project

Move TLS, OCSP, WebSocket, Networking, and IO test files from
NATS.Server.Tests into a dedicated NATS.Server.Transport.Tests
project. Update namespaces, replace private GetFreePort/ReadUntilAsync
with shared TestUtilities helpers, extract TestCertHelper to
TestUtilities, and replace Task.Delay polling loops with
PollHelper.WaitUntilAsync/YieldForAsync for proper synchronization.
This commit is contained in:
Joseph Doherty
2026-03-12 14:57:35 -04:00
parent 5c608f07e3
commit d2c04fcca5
36 changed files with 157 additions and 152 deletions

View File

@@ -0,0 +1,126 @@
using NATS.Server.IO;
using Shouldly;
namespace NATS.Server.Transport.Tests.IO;
/// <summary>
/// Tests for the consecutive short-read counter in AdaptiveReadBuffer.
/// Go reference: server/client.go — readLoop buffer sizing with short-read counter.
/// </summary>
public class AdaptiveReadBufferShortReadTests
{
    [Fact]
    public void Initial_size_is_4096()
    {
        var buffer = new AdaptiveReadBuffer();

        buffer.CurrentSize.ShouldBe(4096);
    }

    [Fact]
    public void Full_read_doubles_size()
    {
        var buffer = new AdaptiveReadBuffer();

        buffer.RecordRead(4096);

        buffer.CurrentSize.ShouldBe(8192);
    }

    [Fact]
    public void Single_short_read_does_not_shrink()
    {
        // "Short" means fewer than target/4 = 4096/4 = 1024 bytes.
        var buffer = new AdaptiveReadBuffer();

        buffer.RecordRead(100);

        buffer.CurrentSize.ShouldBe(4096);
    }

    [Fact]
    public void Three_short_reads_do_not_shrink()
    {
        var buffer = new AdaptiveReadBuffer();

        for (var i = 0; i < 3; i++)
        {
            buffer.RecordRead(100);
        }

        buffer.CurrentSize.ShouldBe(4096);
    }

    [Fact]
    public void Four_short_reads_triggers_shrink()
    {
        var buffer = new AdaptiveReadBuffer();

        for (var i = 0; i < 4; i++)
        {
            buffer.RecordRead(100);
        }

        buffer.CurrentSize.ShouldBe(2048);
    }

    [Fact]
    public void Short_read_counter_resets_on_full_read()
    {
        // Three short reads, then a full read clears the counter, so a single
        // further short read must not shrink the (now doubled) buffer.
        var buffer = new AdaptiveReadBuffer();
        buffer.RecordRead(100); // 1st short
        buffer.RecordRead(100); // 2nd short
        buffer.RecordRead(100); // 3rd short — still below the threshold of 4

        buffer.RecordRead(4096); // full read: size doubles, counter resets
        buffer.RecordRead(512);  // short vs. new size 8192 (512 < 8192/4 = 2048) — run of one

        buffer.CurrentSize.ShouldBe(8192);
    }

    [Fact]
    public void Short_read_counter_resets_on_medium_read()
    {
        // A "medium" read falls in [target/4, target); for target 4096 that is [1024, 4096).
        var buffer = new AdaptiveReadBuffer();
        buffer.RecordRead(100); // 1st short
        buffer.RecordRead(100); // 2nd short
        buffer.RecordRead(100); // 3rd short

        buffer.RecordRead(2000); // medium (>= 1024, < 4096) — clears the run
        buffer.RecordRead(100);  // a lone short read must not shrink

        buffer.CurrentSize.ShouldBe(4096);
    }

    [Fact]
    public void Short_read_counter_resets_after_shrink()
    {
        // The fourth consecutive short read shrinks 4096 → 2048 and zeroes the counter.
        var buffer = new AdaptiveReadBuffer();
        for (var i = 0; i < 4; i++)
        {
            buffer.RecordRead(100);
        }

        buffer.ConsecutiveShortReads.ShouldBe(0);

        // One more short read (50 < 2048/4 = 512) starts a fresh run of one,
        // without triggering another shrink.
        buffer.RecordRead(50);

        buffer.ConsecutiveShortReads.ShouldBe(1);
        buffer.CurrentSize.ShouldBe(2048);
    }

    [Fact]
    public void Size_never_goes_below_512()
    {
        // Walk the target down through successive shrink cycles:
        // 4096 → 2048 → 1024 → 512 (one shrink per four short reads).
        var buffer = new AdaptiveReadBuffer();
        for (var i = 0; i < 12; i++)
        {
            buffer.RecordRead(1);
        }

        buffer.CurrentSize.ShouldBe(512);

        // A further full shrink cycle must be clamped at the 512-byte floor.
        for (var i = 0; i < 4; i++)
        {
            buffer.RecordRead(1);
        }

        buffer.CurrentSize.ShouldBe(512);
    }

    [Fact]
    public void ConsecutiveShortReads_property_reflects_count()
    {
        var buffer = new AdaptiveReadBuffer();

        buffer.RecordRead(100);
        buffer.RecordRead(100);

        buffer.ConsecutiveShortReads.ShouldBe(2);
    }
}

View File

@@ -0,0 +1,17 @@
using NATS.Server.IO;
namespace NATS.Server.Transport.Tests;
public class AdaptiveReadBufferTests
{
    [Fact]
    public void Read_buffer_scales_between_512_and_65536_based_on_recent_payload_pattern()
    {
        // Feed progressively larger reads and confirm the target size grows past
        // the initial 4096 tier while staying within the 64 KiB ceiling.
        var buffer = new AdaptiveReadBuffer();

        buffer.RecordRead(512);
        buffer.RecordRead(4096);
        buffer.RecordRead(32000);

        buffer.CurrentSize.ShouldBeGreaterThan(4096);
        buffer.CurrentSize.ShouldBeLessThanOrEqualTo(64 * 1024);
    }
}

View File

@@ -0,0 +1,179 @@
using System.Text;
using NATS.Server.IO;
using Shouldly;
// Go reference: client.go — dynamic buffer sizing and broadcast flush coalescing for fan-out.
namespace NATS.Server.Transport.Tests.IO;
public class DynamicBufferPoolTests
{
    // -----------------------------------------------------------------------
    // Rent (IMemoryOwner<byte>)
    // -----------------------------------------------------------------------

    [Fact]
    public void Rent_returns_buffer_of_requested_size_or_larger()
    {
        // Go ref: client.go — dynamic buffer sizing (512 → 65536).
        var pool = new OutboundBufferPool();

        using var owner = pool.Rent(100);

        owner.Memory.Length.ShouldBeGreaterThanOrEqualTo(100);
    }

    // -----------------------------------------------------------------------
    // RentBuffer — tier sizing
    // -----------------------------------------------------------------------

    [Fact]
    public void RentBuffer_returns_small_buffer()
    {
        // Go ref: client.go — initial 512 B write buffer per connection.
        var pool = new OutboundBufferPool();

        var rented = pool.RentBuffer(100);

        rented.Length.ShouldBeGreaterThanOrEqualTo(512);
        pool.ReturnBuffer(rented);
    }

    [Fact]
    public void RentBuffer_returns_medium_buffer()
    {
        // Go ref: client.go — 4 KiB write buffer growth step.
        var pool = new OutboundBufferPool();

        var rented = pool.RentBuffer(1000);

        rented.Length.ShouldBeGreaterThanOrEqualTo(4096);
        pool.ReturnBuffer(rented);
    }

    [Fact]
    public void RentBuffer_returns_large_buffer()
    {
        // Go ref: client.go — max 64 KiB write buffer per connection.
        var pool = new OutboundBufferPool();

        var rented = pool.RentBuffer(10000);

        rented.Length.ShouldBeGreaterThanOrEqualTo(65536);
        pool.ReturnBuffer(rented);
    }

    // -----------------------------------------------------------------------
    // ReturnBuffer + reuse
    // -----------------------------------------------------------------------

    [Fact]
    public void ReturnBuffer_and_reuse()
    {
        // A buffer handed back to the pool should be the very instance served
        // on the next same-tier rent.
        // Go ref: client.go — buffer pooling to avoid GC pressure.
        var pool = new OutboundBufferPool();

        var first = pool.RentBuffer(100); // small tier → 512 B
        first.Length.ShouldBe(512);
        pool.ReturnBuffer(first);

        var second = pool.RentBuffer(100);

        second.Length.ShouldBe(512);
        // Reference identity proves the exact same array was recycled.
        second.ShouldBeSameAs(first);
        pool.ReturnBuffer(second);
    }

    // -----------------------------------------------------------------------
    // BroadcastDrain — coalescing
    // -----------------------------------------------------------------------

    [Fact]
    public void BroadcastDrain_coalesces_writes()
    {
        // Go ref: client.go — broadcast flush for fan-out publish.
        var pool = new OutboundBufferPool();
        IReadOnlyList<ReadOnlyMemory<byte>> pending =
        [
            Encoding.UTF8.GetBytes("Hello").AsMemory(),
            Encoding.UTF8.GetBytes(", ").AsMemory(),
            Encoding.UTF8.GetBytes("World").AsMemory(),
        ];
        var destination = new byte[OutboundBufferPool.CalculateBroadcastSize(pending)];

        pool.BroadcastDrain(pending, destination);

        Encoding.UTF8.GetString(destination).ShouldBe("Hello, World");
    }

    [Fact]
    public void BroadcastDrain_returns_correct_byte_count()
    {
        // Go ref: client.go — total bytes written during coalesced drain.
        var pool = new OutboundBufferPool();
        IReadOnlyList<ReadOnlyMemory<byte>> pending =
        [
            new byte[10].AsMemory(),
            new byte[20].AsMemory(),
            new byte[30].AsMemory(),
        ];
        var destination = new byte[60];

        var written = pool.BroadcastDrain(pending, destination);

        written.ShouldBe(60);
    }

    // -----------------------------------------------------------------------
    // CalculateBroadcastSize
    // -----------------------------------------------------------------------

    [Fact]
    public void CalculateBroadcastSize_sums_all_writes()
    {
        // Go ref: client.go — pre-check buffer capacity before coalesced drain.
        IReadOnlyList<ReadOnlyMemory<byte>> pending =
        [
            new byte[7].AsMemory(),
            new byte[13].AsMemory(),
        ];

        OutboundBufferPool.CalculateBroadcastSize(pending).ShouldBe(20);
    }

    // -----------------------------------------------------------------------
    // Stats counters
    // -----------------------------------------------------------------------

    [Fact]
    public void RentCount_increments()
    {
        // Go ref: client.go — observability for buffer allocation rate.
        var pool = new OutboundBufferPool();
        pool.RentCount.ShouldBe(0L);

        using var _ = pool.Rent(100);
        pool.RentBuffer(200);

        pool.RentCount.ShouldBe(2L);
    }

    [Fact]
    public void BroadcastCount_increments()
    {
        // Go ref: client.go — observability for fan-out drain operations.
        var pool = new OutboundBufferPool();
        pool.BroadcastCount.ShouldBe(0L);
        IReadOnlyList<ReadOnlyMemory<byte>> pending = [new byte[4].AsMemory()];
        var destination = new byte[4];

        for (var i = 0; i < 3; i++)
        {
            pool.BroadcastDrain(pending, destination);
        }

        pool.BroadcastCount.ShouldBe(3L);
    }
}

View File

@@ -0,0 +1,17 @@
using NATS.Server.IO;
namespace NATS.Server.Transport.Tests;
public class OutboundBufferPoolTests
{
    [Theory]
    [InlineData(100, 512)]
    [InlineData(1000, 4096)]
    [InlineData(10000, 64 * 1024)]
    public void Rent_uses_three_tier_buffer_buckets(int requested, int expectedMinimum)
    {
        // Each expected minimum corresponds to one of the pool's three size tiers.
        var pool = new OutboundBufferPool();

        using var rented = pool.Rent(requested);

        rented.Memory.Length.ShouldBeGreaterThanOrEqualTo(expectedMinimum);
    }
}