Files
lmxopcua/tests/ZB.MOM.WW.OtOpcUa.Tests.v1Archive/Integration/MultiClientTests.cs
Joseph Doherty a3d16a28f1 Phase 2 Stream D Option B — archive v1 surface + new Driver.Galaxy.E2E parity suite. Non-destructive intermediate state: the v1 OtOpcUa.Host + Historian.Aveva + Tests + IntegrationTests projects all still build (494 v1 unit + 6 v1 integration tests still pass when run explicitly), but solution-level dotnet test ZB.MOM.WW.OtOpcUa.slnx now skips them via IsTestProject=false on the test projects + archive-status PropertyGroup comments on the src projects. The destructive deletion is reserved for Phase 2 PR 3 with explicit operator review per CLAUDE.md "only use destructive operations when truly the best approach". tests/ZB.MOM.WW.OtOpcUa.Tests/ renamed via git mv to tests/ZB.MOM.WW.OtOpcUa.Tests.v1Archive/; csproj <AssemblyName> kept as the original ZB.MOM.WW.OtOpcUa.Tests so v1 OtOpcUa.Host's [InternalsVisibleTo("ZB.MOM.WW.OtOpcUa.Tests")] still matches and the project rebuilds clean. tests/ZB.MOM.WW.OtOpcUa.IntegrationTests gets <IsTestProject>false</IsTestProject>. src/ZB.MOM.WW.OtOpcUa.Host + src/ZB.MOM.WW.OtOpcUa.Historian.Aveva get PropertyGroup archive-status comments documenting they're functionally superseded but kept in-build because cascading dependencies (Historian.Aveva → Host; IntegrationTests → Host) make a single-PR deletion high blast-radius. New tests/ZB.MOM.WW.OtOpcUa.Driver.Galaxy.E2E/ project (.NET 10) with ParityFixture that spawns OtOpcUa.Driver.Galaxy.Host.exe (net48 x86) as a Process.Start subprocess with OTOPCUA_GALAXY_BACKEND=db env vars, awaits 2s for the PipeServer to bind, then exposes a connected GalaxyProxyDriver; skips on non-Windows / Administrator shells (PipeAcl denies admins per decision #76) / ZB unreachable / Host EXE not built — each skip carries a SkipReason string the test method reads via Assert.Skip(SkipReason). RecordingAddressSpaceBuilder captures every Folder/Variable/AddProperty registration so parity tests can assert on the same shape v1 LmxNodeManager produced. 
HierarchyParityTests (3) — Discover returns gobjects with attributes; attribute full references match the tag.attribute Galaxy reference grammar; HistoryExtension flag flows through correctly. StabilityFindingsRegressionTests (4) — one test per 2026-04-13 stability finding from commits c76ab8f and 7310925: phantom probe subscription doesn't corrupt unrelated host status; HostStatusChangedEventArgs structurally carries a specific HostName + OldState + NewState (event signature mathematically prevents the v1 cross-host quality-clear bug); all GalaxyProxyDriver capability methods return Task or Task<T> (sync-over-async would deadlock OPC UA stack thread); AcknowledgeAsync completes before returning (no fire-and-forget background work that could race shutdown). Solution test count: 470 pass / 7 skip (E2E on admin shell) / 1 pre-existing Phase 0 baseline. Run archived suites explicitly: dotnet test tests/ZB.MOM.WW.OtOpcUa.Tests.v1Archive (494 pass) + dotnet test tests/ZB.MOM.WW.OtOpcUa.IntegrationTests (6 pass). docs/v2/V1_ARCHIVE_STATUS.md inventories every archived surface with run-it-explicitly instructions + a 10-step deletion plan for PR 3 + rollback procedure (git revert restores all four projects). docs/v2/implementation/exit-gate-phase-2-final.md supersedes the two partial-exit docs with the per-stream status table (A/B/C/D/E all addressed, D split across PR 2/3 per safety protocol), the test count breakdown, fresh adversarial review of PR 2 deltas (4 new findings: medium IsTestProject=false safety net loss, medium structural-vs-behavioral stability tests, low backend=db default, low Process.Start env inheritance), the 8 carried-forward findings from exit-gate-phase-2.md, the recommended PR order (1 → 2 → 3 → 4). docs/v2/implementation/pr-2-body.md is the Gitea web-UI paste-in for opening PR 2 once pushed.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-18 00:56:21 -04:00

430 lines
17 KiB
C#

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Opc.Ua;
using Opc.Ua.Client;
using Shouldly;
using Xunit;
using ZB.MOM.WW.OtOpcUa.Tests.Helpers;
namespace ZB.MOM.WW.OtOpcUa.Tests.Integration
{
/// <summary>
/// Integration tests verifying multi-client subscription sync and concurrent operations.
/// </summary>
/// <summary>
/// Integration tests verifying multi-client subscription sync and concurrent operations
/// against an in-process OPC UA server backed by fake MxAccess proxies
/// (<see cref="OpcUaServerFixture.WithFakes"/>).
/// </summary>
/// <remarks>
/// These tests use fixed <c>Task.Delay</c> settle times rather than polling, so they are
/// timing-sensitive by design; the delays are sized generously for CI. All client and
/// fixture cleanup happens in <c>finally</c> blocks so a failed assertion cannot leak
/// live sessions into subsequent tests. Disposing a client tears down its remaining
/// subscriptions, so explicit <c>DeleteAsync</c> calls are the happy-path courtesy only.
/// </remarks>
public class MultiClientTests
{
    // ── Subscription Sync ─────────────────────────────────────────────

    /// <summary>
    /// Confirms that multiple OPC UA clients subscribed to the same tag all receive the same runtime update.
    /// </summary>
    [Fact]
    public async Task MultipleClients_SubscribeToSameTag_AllReceiveDataChanges()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            // ConcurrentBag per client: notification callbacks fire on the client's
            // publish thread while the test thread reads counts, so a plain List<T>
            // would be a data race (and the other tests in this file use bags).
            var notifications = new ConcurrentDictionary<int, ConcurrentBag<MonitoredItemNotification>>();
            var subscriptions = new List<Subscription>();
            for (var i = 0; i < 3; i++)
            {
                var client = new OpcUaTestClient();
                await client.ConnectAsync(fixture.EndpointUrl);
                clients.Add(client);
                var nodeId = client.MakeNodeId("TestMachine_001.MachineID");
                var (sub, item) = await client.SubscribeAsync(nodeId, 100);
                subscriptions.Add(sub);
                var clientIndex = i; // stable copy for the closure below
                notifications[clientIndex] = new ConcurrentBag<MonitoredItemNotification>();
                item.Notification += (_, e) =>
                {
                    if (e.NotificationValue is MonitoredItemNotification n)
                        notifications[clientIndex].Add(n);
                };
            }
            await Task.Delay(500); // let subscriptions settle

            // Simulate data change
            fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "MACHINE_42");
            await Task.Delay(1000); // let publish cycle deliver

            // All 3 clients should have received the notification
            for (var i = 0; i < 3; i++)
                notifications[i].Count.ShouldBeGreaterThan(0, $"Client {i} did not receive notification");

            foreach (var sub in subscriptions) await sub.DeleteAsync(true);
        }
        finally
        {
            // Dispose clients before the server so sessions close cleanly even when
            // an assertion above failed part-way through.
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    /// <summary>
    /// Confirms that one client disconnecting does not stop remaining clients from receiving updates.
    /// </summary>
    [Fact]
    public async Task Client_Disconnects_OtherClientsStillReceive()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            var client1 = new OpcUaTestClient();
            var client2 = new OpcUaTestClient();
            var client3 = new OpcUaTestClient();
            clients.Add(client1);
            clients.Add(client2);
            clients.Add(client3);
            await client1.ConnectAsync(fixture.EndpointUrl);
            await client2.ConnectAsync(fixture.EndpointUrl);
            await client3.ConnectAsync(fixture.EndpointUrl);
            var notifications1 = new ConcurrentBag<MonitoredItemNotification>();
            var notifications3 = new ConcurrentBag<MonitoredItemNotification>();
            var (sub1, item1) = await client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 100);
            // Client 2's subscription only needs to exist so the disconnect abandons it;
            // the handle itself is never used, so the tuple result is discarded.
            await client2.SubscribeAsync(client2.MakeNodeId("TestMachine_001.MachineID"), 100);
            var (sub3, item3) = await client3.SubscribeAsync(client3.MakeNodeId("TestMachine_001.MachineID"), 100);
            item1.Notification += (_, e) =>
            {
                if (e.NotificationValue is MonitoredItemNotification n) notifications1.Add(n);
            };
            item3.Notification += (_, e) =>
            {
                if (e.NotificationValue is MonitoredItemNotification n) notifications3.Add(n);
            };
            await Task.Delay(500);

            // Disconnect client 2 (and drop it from cleanup so it isn't double-disposed)
            client2.Dispose();
            clients.Remove(client2);
            await Task.Delay(500); // let server process disconnect

            // Simulate data change — should not crash, clients 1+3 should still receive
            fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "AFTER_DISCONNECT");
            await Task.Delay(1000);
            notifications1.Count.ShouldBeGreaterThan(0,
                "Client 1 should still receive after client 2 disconnected");
            notifications3.Count.ShouldBeGreaterThan(0,
                "Client 3 should still receive after client 2 disconnected");

            await sub1.DeleteAsync(true);
            await sub3.DeleteAsync(true);
        }
        finally
        {
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    /// <summary>
    /// Confirms that one client unsubscribing does not interrupt delivery to other subscribed clients.
    /// </summary>
    [Fact]
    public async Task Client_Unsubscribes_OtherClientsStillReceive()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            var client1 = new OpcUaTestClient();
            var client2 = new OpcUaTestClient();
            clients.Add(client1);
            clients.Add(client2);
            await client1.ConnectAsync(fixture.EndpointUrl);
            await client2.ConnectAsync(fixture.EndpointUrl);
            var notifications2 = new ConcurrentBag<MonitoredItemNotification>();
            var (sub1, _) = await client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 100);
            var (sub2, item2) = await client2.SubscribeAsync(client2.MakeNodeId("TestMachine_001.MachineID"), 100);
            item2.Notification += (_, e) =>
            {
                if (e.NotificationValue is MonitoredItemNotification n) notifications2.Add(n);
            };
            await Task.Delay(500);

            // Client 1 unsubscribes
            await sub1.DeleteAsync(true);
            await Task.Delay(500);

            // Simulate data change — client 2 should still receive
            fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "AFTER_UNSUB");
            await Task.Delay(1000);
            notifications2.Count.ShouldBeGreaterThan(0,
                "Client 2 should still receive after client 1 unsubscribed");

            await sub2.DeleteAsync(true);
        }
        finally
        {
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    /// <summary>
    /// Confirms that clients subscribed to different tags only receive updates for their own monitored data.
    /// </summary>
    [Fact]
    public async Task MultipleClients_SubscribeToDifferentTags_EachGetsOwnData()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            var client1 = new OpcUaTestClient();
            var client2 = new OpcUaTestClient();
            clients.Add(client1);
            clients.Add(client2);
            await client1.ConnectAsync(fixture.EndpointUrl);
            await client2.ConnectAsync(fixture.EndpointUrl);
            var notifications1 = new ConcurrentBag<MonitoredItemNotification>();
            var notifications2 = new ConcurrentBag<MonitoredItemNotification>();
            var (sub1, item1) = await client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 100);
            var (sub2, item2) =
                await client2.SubscribeAsync(client2.MakeNodeId("DelmiaReceiver_001.DownloadPath"), 100);
            item1.Notification += (_, e) =>
            {
                if (e.NotificationValue is MonitoredItemNotification n) notifications1.Add(n);
            };
            item2.Notification += (_, e) =>
            {
                if (e.NotificationValue is MonitoredItemNotification n) notifications2.Add(n);
            };
            await Task.Delay(500);

            // Only change MachineID
            fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "CHANGED");
            await Task.Delay(1000);
            notifications1.Count.ShouldBeGreaterThan(0, "Client 1 should receive MachineID change");

            // Client 2 subscribed to DownloadPath, should NOT receive MachineID change
            // (it may have received initial BadWaitingForInitialData, but not the "CHANGED" value)
            var client2HasMachineIdValue = notifications2.Any(n =>
                n.Value.Value is string s && s == "CHANGED");
            client2HasMachineIdValue.ShouldBe(false, "Client 2 should not receive MachineID data");

            await sub1.DeleteAsync(true);
            await sub2.DeleteAsync(true);
        }
        finally
        {
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    // ── Concurrent Operation Tests ────────────────────────────────────

    /// <summary>
    /// Confirms that concurrent browse operations from several clients all complete successfully.
    /// </summary>
    [Fact]
    public async Task ConcurrentBrowseFromMultipleClients_AllSucceed()
    {
        // Tests concurrent browse operations from 5 clients — browses don't go through MxAccess
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            for (var i = 0; i < 5; i++)
            {
                var c = new OpcUaTestClient();
                await c.ConnectAsync(fixture.EndpointUrl);
                clients.Add(c);
            }
            var nodes = new[]
            {
                "ZB", "TestMachine_001", "DelmiaReceiver_001",
                "MESReceiver_001", "TestMachine_001"
            };
            // All 5 clients browse simultaneously
            var browseTasks = clients.Select((c, i) =>
                c.BrowseAsync(c.MakeNodeId(nodes[i]))).ToArray();
            var results = await Task.WhenAll(browseTasks);
            results.Length.ShouldBe(5);
            foreach (var r in results)
                r.ShouldNotBeEmpty();
        }
        finally
        {
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    /// <summary>
    /// Confirms that concurrent browse requests return consistent results across clients.
    /// </summary>
    [Fact]
    public async Task ConcurrentBrowse_AllReturnSameResults()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            for (var i = 0; i < 5; i++)
            {
                var c = new OpcUaTestClient();
                await c.ConnectAsync(fixture.EndpointUrl);
                clients.Add(c);
            }
            // All browse TestMachine_001 simultaneously
            var browseTasks = clients.Select(c =>
                c.BrowseAsync(c.MakeNodeId("TestMachine_001"))).ToArray();
            var results = await Task.WhenAll(browseTasks);
            // All should get identical child lists (order-insensitive compare)
            var firstResult = results[0].Select(r => r.Name).OrderBy(n => n).ToList();
            for (var i = 1; i < results.Length; i++)
            {
                var thisResult = results[i].Select(r => r.Name).OrderBy(n => n).ToList();
                thisResult.ShouldBe(firstResult, $"Client {i} got different browse results");
            }
        }
        finally
        {
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    /// <summary>
    /// Confirms that simultaneous browse and subscribe operations do not interfere with one another.
    /// </summary>
    [Fact]
    public async Task ConcurrentBrowseAndSubscribe_NoInterference()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            for (var i = 0; i < 4; i++)
            {
                var c = new OpcUaTestClient();
                await c.ConnectAsync(fixture.EndpointUrl);
                clients.Add(c);
            }
            // 2 browse + 2 subscribe simultaneously
            var tasks = new Task[]
            {
                clients[0].BrowseAsync(clients[0].MakeNodeId("TestMachine_001")),
                clients[1].BrowseAsync(clients[1].MakeNodeId("ZB")),
                clients[2].SubscribeAsync(clients[2].MakeNodeId("TestMachine_001.MachineID"), 200),
                clients[3].SubscribeAsync(clients[3].MakeNodeId("DelmiaReceiver_001.DownloadPath"), 200)
            };
            // All should complete without errors
            await Task.WhenAll(tasks);
        }
        finally
        {
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    /// <summary>
    /// Confirms that concurrent subscribe, read, and browse operations complete without deadlocking the server.
    /// </summary>
    [Fact]
    public async Task ConcurrentSubscribeAndRead_NoDeadlock()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        var clients = new List<OpcUaTestClient>();
        try
        {
            var client1 = new OpcUaTestClient();
            var client2 = new OpcUaTestClient();
            var client3 = new OpcUaTestClient();
            clients.Add(client1);
            clients.Add(client2);
            clients.Add(client3);
            await client1.ConnectAsync(fixture.EndpointUrl);
            await client2.ConnectAsync(fixture.EndpointUrl);
            await client3.ConnectAsync(fixture.EndpointUrl);
            // All three operate simultaneously — should not deadlock.
            // Task.WhenAll(params Task[]) accepts heterogeneous Task<T> instances
            // directly, so no ContinueWith/boxing shims are needed.
            var timeout = Task.Delay(TimeSpan.FromSeconds(15));
            var operations = Task.WhenAll(
                client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 200),
                Task.Run(() => client2.Read(client2.MakeNodeId("DelmiaReceiver_001.DownloadPath"))),
                client3.BrowseAsync(client3.MakeNodeId("TestMachine_001")));
            var completed = await Task.WhenAny(operations, timeout);
            completed.ShouldBe(operations, "Operations should complete before timeout (possible deadlock)");
        }
        finally
        {
            foreach (var c in clients) c.Dispose();
            await fixture.DisposeAsync();
        }
    }

    /// <summary>
    /// Confirms that repeated client churn does not leave the server in an unstable state.
    /// </summary>
    [Fact]
    public async Task RapidConnectDisconnect_ServerStaysStable()
    {
        var fixture = OpcUaServerFixture.WithFakes();
        await fixture.InitializeAsync();
        try
        {
            // Rapidly connect, browse, disconnect — 10 iterations
            for (var i = 0; i < 10; i++)
            {
                using var client = new OpcUaTestClient();
                await client.ConnectAsync(fixture.EndpointUrl);
                var children = await client.BrowseAsync(client.MakeNodeId("ZB"));
                children.ShouldNotBeEmpty();
            }
            // After all that churn, server should still be responsive
            using var finalClient = new OpcUaTestClient();
            await finalClient.ConnectAsync(fixture.EndpointUrl);
            var finalChildren = await finalClient.BrowseAsync(finalClient.MakeNodeId("TestMachine_001"));
            finalChildren.ShouldContain(c => c.Name == "MachineID");
            finalChildren.ShouldContain(c => c.Name == "DelmiaReceiver");
        }
        finally
        {
            await fixture.DisposeAsync();
        }
    }
}
}