using System;
using System.Linq;
using Shouldly;
using Xunit;

namespace ZB.MOM.WW.OtOpcUa.Driver.OpcUaClient.Tests;

/// <summary>
/// Unit tests for the OperationLimits chunking surface (PR #275 / opcuaclient-3). Focused
/// on the static <see cref="OpcUaClientDriver.ChunkBy"/> helper and the null/zero cap
/// sentinel semantics. Live end-to-end tests against an in-process server land in the
/// integration suite.
/// </summary>
[Trait("Category", "Unit")]
public sealed class OpcUaClientOperationLimitsTests
{
    [Fact]
    public void ChunkBy_with_cap_5_splits_12_items_into_3_slices_of_5_5_2()
    {
        // The PR-3 acceptance scenario: server advertises MaxNodesPerRead=5, client batches a
        // 12-tag read; driver must issue exactly 3 wire calls of sizes 5/5/2 in order.
        var input = Enumerable.Range(0, 12).ToArray();

        var slices = OpcUaClientDriver.ChunkBy(input, cap: 5).ToArray();

        slices.Length.ShouldBe(3);
        slices[0].Count.ShouldBe(5);
        slices[1].Count.ShouldBe(5);
        slices[2].Count.ShouldBe(2);

        // Order + offsets must reflect the original sequence — chunking must not reorder
        // tags, otherwise the indexMap ↔ result-index alignment breaks.
        slices[0].ShouldBe(new[] { 0, 1, 2, 3, 4 });
        slices[1].ShouldBe(new[] { 5, 6, 7, 8, 9 });
        slices[2].ShouldBe(new[] { 10, 11 });
    }

    [Fact]
    public void ChunkBy_with_null_cap_yields_single_slice_no_chunking()
    {
        // cap=null is the "fetch hasn't completed" / "server reports 0 = no limit" sentinel.
        // Both must collapse to a single SDK call so the wire path doesn't change when the
        // server doesn't impose a cap.
        var input = Enumerable.Range(0, 12).ToArray();

        var slices = OpcUaClientDriver.ChunkBy(input, cap: null).ToArray();

        slices.Length.ShouldBe(1, "null cap means no chunking — single SDK call");
        slices[0].Count.ShouldBe(12);
    }

    [Fact]
    public void ChunkBy_with_zero_cap_yields_single_slice_no_chunking()
    {
        // OPC UA Part 5: 0 is the wire-level "no limit" sentinel. NormalizeLimit folds it
        // into null upstream of ChunkBy, but the chunker itself must also treat 0 as
        // no-chunking — defence in depth in case a caller bypasses NormalizeLimit.
        var input = Enumerable.Range(0, 7).ToArray();

        var slices = OpcUaClientDriver.ChunkBy(input, cap: 0).ToArray();

        slices.Length.ShouldBe(1);
        slices[0].Count.ShouldBe(7);
    }

    [Fact]
    public void ChunkBy_with_cap_larger_than_input_yields_single_slice()
    {
        var input = new[] { 1, 2, 3 };

        var slices = OpcUaClientDriver.ChunkBy(input, cap: 100).ToArray();

        slices.Length.ShouldBe(1);
        slices[0].Count.ShouldBe(3);
    }

    [Fact]
    public void ChunkBy_with_empty_input_yields_no_slices()
    {
        // Empty batch must short-circuit before the wire call — saves a round-trip and
        // matches the toSend.Count == 0 guard in the driver.
        var input = Array.Empty<int>();

        var slices = OpcUaClientDriver.ChunkBy(input, cap: 5).ToArray();

        slices.Length.ShouldBe(0);
    }

    [Fact]
    public void ChunkBy_with_cap_equal_to_input_size_yields_single_slice()
    {
        // Edge case: exactly N items at cap N. Must NOT produce an extra empty slice.
        var input = Enumerable.Range(0, 5).ToArray();

        var slices = OpcUaClientDriver.ChunkBy(input, cap: 5).ToArray();

        slices.Length.ShouldBe(1);
        slices[0].Count.ShouldBe(5);
    }

    [Fact]
    public void ChunkBy_with_cap_1_splits_each_item_into_its_own_slice()
    {
        // Pathological cap — degrades to N wire calls. Verifies the chunker handles the
        // boundary cleanly without off-by-one.
        var input = new[] { 10, 20, 30 };

        var slices = OpcUaClientDriver.ChunkBy(input, cap: 1).ToArray();

        slices.Length.ShouldBe(3);
        slices[0].ShouldBe(new[] { 10 });
        slices[1].ShouldBe(new[] { 20 });
        slices[2].ShouldBe(new[] { 30 });
    }

    [Fact]
    public void OperationLimitsCache_records_all_four_caps_as_nullable_uint()
    {
        // The cache surfaces the four limits the driver chunks against. Storing as uint?
        // lets the chunker distinguish "not yet fetched" / "no limit" (null) from "limit=N".
        var cache = new OpcUaClientDriver.OperationLimitsCache(
            MaxNodesPerRead: 100u,
            MaxNodesPerWrite: 50u,
            MaxNodesPerBrowse: null,
            MaxNodesPerHistoryReadData: 10u);

        cache.MaxNodesPerRead.ShouldBe(100u);
        cache.MaxNodesPerWrite.ShouldBe(50u);
        cache.MaxNodesPerBrowse.ShouldBeNull();
        cache.MaxNodesPerHistoryReadData.ShouldBe(10u);
    }

    [Fact]
    public void Driver_starts_with_no_cached_OperationLimits()
    {
        // Pre-init / pre-first-batch state: cache is null so callers fall through to
        // single-call behaviour. Lazy fetch happens on the first ReadAsync/WriteAsync.
        using var drv = new OpcUaClientDriver(new OpcUaClientDriverOptions(), "opcua-cache-init");

        drv.OperationLimitsForTest.ShouldBeNull();
    }
}