@@ -0,0 +1,131 @@
|
||||
using S7NetCpuType = global::S7.Net.CpuType;
|
||||
using Shouldly;
|
||||
using Xunit;
|
||||
|
||||
namespace ZB.MOM.WW.OtOpcUa.Driver.S7.IntegrationTests.S7_1500;
|
||||
|
||||
/// <summary>
/// Integration coverage for the block-read coalescing planner (PR-S7-B2),
/// exercised against the python-snap7 S7-1500 simulator. The key claim under
/// test: 50 contiguous DBW tags (DB1.DBW0..DB1.DBW98) collapse into a single
/// <c>Plc.ReadBytesAsync</c> round-trip rather than 50 individual reads —
/// a 50:1 reduction on the wire.
/// </summary>
[Collection(Snap7ServerCollection.Name)]
[Trait("Category", "Integration")]
[Trait("Device", "S7_1500")]
public sealed class S7_1500BlockCoalescingTests(Snap7ServerFixture sim)
{
    [Fact]
    public async Task Driver_coalesces_contiguous_DBWs_into_single_byte_range_read()
    {
        if (sim.SkipReason is not null) Assert.Skip(sim.SkipReason);

        // 50 tags at DB1.DBW0, DBW2, ..., DBW98: every offset sits exactly
        // 2 bytes after the previous one, so the planner sees 50 adjacent
        // ranges (gap = 0) and should fold them into one 100-byte
        // ReadBytesAsync. The multi-var packer alone (PR-S7-B1) would need
        // ceil(50/19) = 3 batches — the coalescer beats that by an order
        // of magnitude.
        List<S7TagDefinition> tags =
        [
            .. Enumerable.Range(0, 50)
                .Select(i => new S7TagDefinition($"BulkDBW{i:D2}", $"DB1.DBW{i * 2}", S7DataType.UInt16)),
        ];

        var driverOptions = new S7DriverOptions
        {
            Host = sim.Host,
            Port = sim.Port,
            CpuType = S7NetCpuType.S71500,
            Rack = 0,
            Slot = 0,
            Timeout = TimeSpan.FromSeconds(5),
            Probe = new S7ProbeOptions { Enabled = false },
            Tags = tags,
        };

        await using var driver = new S7Driver(driverOptions, driverInstanceId: "s7-block-coalesce");
        await driver.InitializeAsync("{}", TestContext.Current.CancellationToken);

        // Capture counters before the read so the assertions measure only
        // the traffic produced by this ReadAsync call.
        var blockReadsBaseline = driver.TotalBlockReads;
        var multiVarBaseline = driver.TotalMultiVarBatches;

        var requestedNames = tags.Select(t => t.Name).ToList();
        var snapshots = await driver.ReadAsync(
            requestedNames,
            TestContext.Current.CancellationToken);

        snapshots.Count.ShouldBe(50);
        snapshots.ShouldAllBe(s => s.StatusCode == 0u, "every coalesced read must surface a Good status");

        // Headline check: exactly one byte-range PDU for the whole 50-tag
        // fan-in. A regression would show up as 3 multi-var batches (and no
        // block reads), or 50 single reads at worst.
        var blockReadsDelta = driver.TotalBlockReads - blockReadsBaseline;
        var multiVarDelta = driver.TotalMultiVarBatches - multiVarBaseline;

        blockReadsDelta.ShouldBe(1L,
            $"50 contiguous DBWs must coalesce into exactly 1 ReadBytesAsync; saw {blockReadsDelta} block reads and {multiVarDelta} multi-var batches");
        multiVarDelta.ShouldBe(0L,
            "no singletons should fall through to the multi-var packer when every tag merged");

        // Seed-file spot checks to prove the per-tag slice math inside the
        // coalesced buffer is right: the simulator zero-fills DB1 except the
        // seeded offsets — DBW0 holds 4242 and DBW10 holds -12345, which a
        // UInt16 read surfaces as the wire pattern 53191.
        Convert.ToInt32(snapshots[0].Value).ShouldBe(4242, "DB1.DBW0 carries the seeded 4242 probe value");
        Convert.ToInt32(snapshots[5].Value).ShouldBe(unchecked((ushort)(short)-12345),
            "DB1.DBW10 carries the seeded -12345 (read as UInt16 wire pattern)");
    }

    [Fact]
    public async Task Driver_skips_coalescing_when_gap_threshold_is_zero_and_layout_is_sparse()
    {
        if (sim.SkipReason is not null) Assert.Skip(sim.SkipReason);

        // Sparse layout: three DBWs spaced 100 bytes apart. With the gap
        // threshold forced to 0, only perfectly adjacent ranges may merge —
        // the planner must keep these three apart instead of spanning one
        // giant over-fetched range across the whole DB.
        S7TagDefinition[] tags =
        [
            new("Sparse_0", "DB1.DBW0", S7DataType.UInt16),
            new("Sparse_100", "DB1.DBW100", S7DataType.UInt16),
            new("Sparse_200", "DB1.DBW200", S7DataType.UInt16),
        ];

        var driverOptions = new S7DriverOptions
        {
            Host = sim.Host,
            Port = sim.Port,
            CpuType = S7NetCpuType.S71500,
            Rack = 0,
            Slot = 0,
            Timeout = TimeSpan.FromSeconds(5),
            Probe = new S7ProbeOptions { Enabled = false },
            BlockCoalescingGapBytes = 0, // strict: only adjacent ranges merge
            Tags = tags,
        };

        await using var driver = new S7Driver(driverOptions, driverInstanceId: "s7-block-coalesce-sparse");
        await driver.InitializeAsync("{}", TestContext.Current.CancellationToken);

        var blockReadsBaseline = driver.TotalBlockReads;
        var multiVarBaseline = driver.TotalMultiVarBatches;

        var requestedNames = tags.Select(t => t.Name).ToList();
        var snapshots = await driver.ReadAsync(
            requestedNames,
            TestContext.Current.CancellationToken);

        snapshots.ShouldAllBe(s => s.StatusCode == 0u);

        // Each tag is a singleton range, and singletons are routed through
        // the multi-var packer rather than paying a one-tag ReadBytesAsync
        // round-trip each: expect 0 block reads and exactly 1 multi-var
        // batch covering all three tags.
        (driver.TotalBlockReads - blockReadsBaseline).ShouldBe(0L,
            "singletons must not pay for a one-tag ReadBytesAsync round-trip");
        (driver.TotalMultiVarBatches - multiVarBaseline).ShouldBe(1L,
            "3 singleton tags should pack into a single multi-var batch");
    }
}