Phase 0 — mechanical rename ZB.MOM.WW.LmxOpcUa.* → ZB.MOM.WW.OtOpcUa.*
Renames all 11 projects (5 src + 6 tests), the .slnx solution file, all source-file namespaces, all .axaml namespace references, and all v1 documentation references in CLAUDE.md and docs/*.md (excluding docs/v2/, which is already in OtOpcUa form). Also updates the TopShelf service registration name from "LmxOpcUa" to "OtOpcUa" per Phase 0 Task 0.6; a sketch of that change follows.
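For reference, the Task 0.6 change is essentially a one-line edit in the TopShelf host setup. A minimal sketch of roughly what it looks like; the `OpcUaService` wrapper class and the display-name string here are illustrative, not the project's actual values:

```csharp
using Topshelf;

// Hypothetical service wrapper; the real host class differs.
internal class OpcUaService
{
    public void Start() { /* start OPC UA server */ }
    public void Stop()  { /* stop OPC UA server */ }
}

internal static class Program
{
    private static void Main()
    {
        HostFactory.Run(x =>
        {
            x.Service<OpcUaService>(s =>
            {
                s.ConstructUsing(_ => new OpcUaService());
                s.WhenStarted(svc => svc.Start());
                s.WhenStopped(svc => svc.Stop());
            });
            x.RunAsLocalSystem();

            // Phase 0 Task 0.6: the registration name is the actual change.
            x.SetServiceName("OtOpcUa");        // was "LmxOpcUa"
            x.SetDisplayName("OtOpcUa Server"); // illustrative
        });
    }
}
```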
Preserves runtime identifiers per the Phase 0 Out-of-Scope rules, so v1/v2 client trust is not broken during coexistence:

- OPC UA `ApplicationUri` defaults (`urn:{GalaxyName}:LmxOpcUa`)
- server `EndpointPath` (`/LmxOpcUa`) and the `ServerName` default (feeds the cert subject CN)
- `MxAccessConfiguration.ClientName` default (defensive: stays "LmxOpcUa" for MxAccess audit-trail consistency)
- client OPC UA identifiers (`ApplicationName = "LmxOpcUaClient"`, `ApplicationUri = "urn:localhost:LmxOpcUaClient"`, cert directory `%LocalAppData%\LmxOpcUaClient\pki\`)
- the `LmxOpcUaServer` class name (class renames are out of Phase 0 scope per the Task 0.5 sed pattern; this rename happens in Phase 1 alongside the `LmxNodeManager → GenericDriverNodeManager` Core extraction)

In total, 23 LmxOpcUa references are retained, each enumerated and justified in `docs/v2/implementation/exit-gate-phase-0.md`.
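The preserved defaults, sketched as plain configuration classes. Only `MxAccessConfiguration.ClientName`, the cited property names, and the quoted default values come from the commit itself; the class groupings are assumptions for illustration:

```csharp
// Sketch only: groups the preserved v1 identifiers in one place.
public class ServerIdentitySketch // hypothetical type
{
    public string ApplicationUri { get; set; } = "urn:{GalaxyName}:LmxOpcUa";
    public string EndpointPath { get; set; } = "/LmxOpcUa";
    // The ServerName default (feeds the cert subject CN) is preserved
    // as well; its value is not restated here.
}

public class MxAccessConfiguration // real type name per the commit text
{
    // Defensive: keeps the MxAccess audit trail consistent across v1/v2.
    public string ClientName { get; set; } = "LmxOpcUa";
}

public class ClientIdentitySketch // hypothetical type
{
    public string ApplicationName { get; set; } = "LmxOpcUaClient";
    public string ApplicationUri { get; set; } = "urn:localhost:LmxOpcUaClient";
    // Client certificate store: %LocalAppData%\LmxOpcUaClient\pki\
}
```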
Build is clean: 0 errors, 30 warnings (down from the 167-warning baseline). Tests are a strict improvement over baseline: 821 passing / 1 failing vs 820 / 2 at baseline (one flaky pre-existing failure happened to pass this run; the other still fails; both are pre-existing and unrelated to the rename). `Client.UI.Tests`, `Historian.Aveva.Tests`, `Client.Shared.Tests`, and `IntegrationTests` all match baseline exactly. Exit-gate compliance results are recorded in `docs/v2/implementation/exit-gate-phase-0.md`, with all 7 checks PASS or DEFERRED-to-PR-review (check #7, service install verification, needs Windows service permissions on the reviewer's box).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
tests/ZB.MOM.WW.OtOpcUa.Tests/Integration/MultiClientTests.cs (new file, 430 lines)
@@ -0,0 +1,430 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Opc.Ua;
using Opc.Ua.Client;
using Shouldly;
using Xunit;
using ZB.MOM.WW.OtOpcUa.Tests.Helpers;

namespace ZB.MOM.WW.OtOpcUa.Tests.Integration
{
    /// <summary>
    /// Integration tests verifying multi-client subscription sync and concurrent operations.
    /// </summary>
    public class MultiClientTests
    {
        // ── Subscription Sync ─────────────────────────────────────────────

        /// <summary>
        /// Confirms that multiple OPC UA clients subscribed to the same tag all receive the same runtime update.
        /// </summary>
        [Fact]
        public async Task MultipleClients_SubscribeToSameTag_AllReceiveDataChanges()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var clients = new List<OpcUaTestClient>();
                var notifications = new ConcurrentDictionary<int, List<MonitoredItemNotification>>();
                var subscriptions = new List<Subscription>();

                for (var i = 0; i < 3; i++)
                {
                    var client = new OpcUaTestClient();
                    await client.ConnectAsync(fixture.EndpointUrl);
                    clients.Add(client);

                    var nodeId = client.MakeNodeId("TestMachine_001.MachineID");
                    var (sub, item) = await client.SubscribeAsync(nodeId, 100);
                    subscriptions.Add(sub);

                    var clientIndex = i;
                    notifications[clientIndex] = new List<MonitoredItemNotification>();
                    item.Notification += (_, e) =>
                    {
                        if (e.NotificationValue is MonitoredItemNotification n)
                            notifications[clientIndex].Add(n);
                    };
                }

                await Task.Delay(500); // let subscriptions settle

                // Simulate data change
                fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "MACHINE_42");
                await Task.Delay(1000); // let publish cycle deliver

                // All 3 clients should have received the notification
                for (var i = 0; i < 3; i++)
                    notifications[i].Count.ShouldBeGreaterThan(0, $"Client {i} did not receive notification");

                foreach (var sub in subscriptions) await sub.DeleteAsync(true);
                foreach (var c in clients) c.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        /// <summary>
        /// Confirms that one client disconnecting does not stop remaining clients from receiving updates.
        /// </summary>
        [Fact]
        public async Task Client_Disconnects_OtherClientsStillReceive()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var client1 = new OpcUaTestClient();
                var client2 = new OpcUaTestClient();
                var client3 = new OpcUaTestClient();
                await client1.ConnectAsync(fixture.EndpointUrl);
                await client2.ConnectAsync(fixture.EndpointUrl);
                await client3.ConnectAsync(fixture.EndpointUrl);

                var notifications1 = new ConcurrentBag<MonitoredItemNotification>();
                var notifications3 = new ConcurrentBag<MonitoredItemNotification>();

                var (sub1, item1) = await client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 100);
                var (sub2, _) = await client2.SubscribeAsync(client2.MakeNodeId("TestMachine_001.MachineID"), 100);
                var (sub3, item3) = await client3.SubscribeAsync(client3.MakeNodeId("TestMachine_001.MachineID"), 100);

                item1.Notification += (_, e) =>
                {
                    if (e.NotificationValue is MonitoredItemNotification n) notifications1.Add(n);
                };
                item3.Notification += (_, e) =>
                {
                    if (e.NotificationValue is MonitoredItemNotification n) notifications3.Add(n);
                };

                await Task.Delay(500);

                // Disconnect client 2
                client2.Dispose();

                await Task.Delay(500); // let server process disconnect

                // Simulate data change — should not crash, clients 1+3 should still receive
                fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "AFTER_DISCONNECT");
                await Task.Delay(1000);

                notifications1.Count.ShouldBeGreaterThan(0,
                    "Client 1 should still receive after client 2 disconnected");
                notifications3.Count.ShouldBeGreaterThan(0,
                    "Client 3 should still receive after client 2 disconnected");

                await sub1.DeleteAsync(true);
                await sub3.DeleteAsync(true);
                client1.Dispose();
                client3.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        /// <summary>
        /// Confirms that one client unsubscribing does not interrupt delivery to other subscribed clients.
        /// </summary>
        [Fact]
        public async Task Client_Unsubscribes_OtherClientsStillReceive()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var client1 = new OpcUaTestClient();
                var client2 = new OpcUaTestClient();
                await client1.ConnectAsync(fixture.EndpointUrl);
                await client2.ConnectAsync(fixture.EndpointUrl);

                var notifications2 = new ConcurrentBag<MonitoredItemNotification>();

                var (sub1, _) = await client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 100);
                var (sub2, item2) = await client2.SubscribeAsync(client2.MakeNodeId("TestMachine_001.MachineID"), 100);
                item2.Notification += (_, e) =>
                {
                    if (e.NotificationValue is MonitoredItemNotification n) notifications2.Add(n);
                };

                await Task.Delay(500);

                // Client 1 unsubscribes
                await sub1.DeleteAsync(true);
                await Task.Delay(500);

                // Simulate data change — client 2 should still receive
                fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "AFTER_UNSUB");
                await Task.Delay(1000);

                notifications2.Count.ShouldBeGreaterThan(0,
                    "Client 2 should still receive after client 1 unsubscribed");

                await sub2.DeleteAsync(true);
                client1.Dispose();
                client2.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        /// <summary>
        /// Confirms that clients subscribed to different tags only receive updates for their own monitored data.
        /// </summary>
        [Fact]
        public async Task MultipleClients_SubscribeToDifferentTags_EachGetsOwnData()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var client1 = new OpcUaTestClient();
                var client2 = new OpcUaTestClient();
                await client1.ConnectAsync(fixture.EndpointUrl);
                await client2.ConnectAsync(fixture.EndpointUrl);

                var notifications1 = new ConcurrentBag<MonitoredItemNotification>();
                var notifications2 = new ConcurrentBag<MonitoredItemNotification>();

                var (sub1, item1) = await client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 100);
                var (sub2, item2) =
                    await client2.SubscribeAsync(client2.MakeNodeId("DelmiaReceiver_001.DownloadPath"), 100);

                item1.Notification += (_, e) =>
                {
                    if (e.NotificationValue is MonitoredItemNotification n) notifications1.Add(n);
                };
                item2.Notification += (_, e) =>
                {
                    if (e.NotificationValue is MonitoredItemNotification n) notifications2.Add(n);
                };

                await Task.Delay(500);

                // Only change MachineID
                fixture.MxProxy!.SimulateDataChangeByAddress("TestMachine_001.MachineID", "CHANGED");
                await Task.Delay(1000);

                notifications1.Count.ShouldBeGreaterThan(0, "Client 1 should receive MachineID change");
                // Client 2 subscribed to DownloadPath, should NOT receive MachineID change
                // (it may have received initial BadWaitingForInitialData, but not the "CHANGED" value)
                var client2HasMachineIdValue = notifications2.Any(n =>
                    n.Value.Value is string s && s == "CHANGED");
                client2HasMachineIdValue.ShouldBe(false, "Client 2 should not receive MachineID data");

                await sub1.DeleteAsync(true);
                await sub2.DeleteAsync(true);
                client1.Dispose();
                client2.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        // ── Concurrent Operation Tests ────────────────────────────────────

        /// <summary>
        /// Confirms that concurrent browse operations from several clients all complete successfully.
        /// </summary>
        [Fact]
        public async Task ConcurrentBrowseFromMultipleClients_AllSucceed()
        {
            // Tests concurrent browse operations from 5 clients — browses don't go through MxAccess
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var clients = new List<OpcUaTestClient>();
                for (var i = 0; i < 5; i++)
                {
                    var c = new OpcUaTestClient();
                    await c.ConnectAsync(fixture.EndpointUrl);
                    clients.Add(c);
                }

                var nodes = new[]
                {
                    "ZB", "TestMachine_001", "DelmiaReceiver_001",
                    "MESReceiver_001", "TestMachine_001"
                };

                // All 5 clients browse simultaneously
                var browseTasks = clients.Select((c, i) =>
                    c.BrowseAsync(c.MakeNodeId(nodes[i]))).ToArray();

                var results = await Task.WhenAll(browseTasks);

                results.Length.ShouldBe(5);
                foreach (var r in results)
                    r.ShouldNotBeEmpty();

                foreach (var c in clients) c.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        /// <summary>
        /// Confirms that concurrent browse requests return consistent results across clients.
        /// </summary>
        [Fact]
        public async Task ConcurrentBrowse_AllReturnSameResults()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var clients = new List<OpcUaTestClient>();
                for (var i = 0; i < 5; i++)
                {
                    var c = new OpcUaTestClient();
                    await c.ConnectAsync(fixture.EndpointUrl);
                    clients.Add(c);
                }

                // All browse TestMachine_001 simultaneously
                var browseTasks = clients.Select(c =>
                    c.BrowseAsync(c.MakeNodeId("TestMachine_001"))).ToArray();

                var results = await Task.WhenAll(browseTasks);

                // All should get identical child lists
                var firstResult = results[0].Select(r => r.Name).OrderBy(n => n).ToList();
                for (var i = 1; i < results.Length; i++)
                {
                    var thisResult = results[i].Select(r => r.Name).OrderBy(n => n).ToList();
                    thisResult.ShouldBe(firstResult, $"Client {i} got different browse results");
                }

                foreach (var c in clients) c.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        /// <summary>
        /// Confirms that simultaneous browse and subscribe operations do not interfere with one another.
        /// </summary>
        [Fact]
        public async Task ConcurrentBrowseAndSubscribe_NoInterference()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var clients = new List<OpcUaTestClient>();
                for (var i = 0; i < 4; i++)
                {
                    var c = new OpcUaTestClient();
                    await c.ConnectAsync(fixture.EndpointUrl);
                    clients.Add(c);
                }

                // 2 browse + 2 subscribe simultaneously
                var tasks = new Task[]
                {
                    clients[0].BrowseAsync(clients[0].MakeNodeId("TestMachine_001")),
                    clients[1].BrowseAsync(clients[1].MakeNodeId("ZB")),
                    clients[2].SubscribeAsync(clients[2].MakeNodeId("TestMachine_001.MachineID"), 200),
                    clients[3].SubscribeAsync(clients[3].MakeNodeId("DelmiaReceiver_001.DownloadPath"), 200)
                };

                await Task.WhenAll(tasks);
                // All should complete without errors

                foreach (var c in clients) c.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        /// <summary>
        /// Confirms that concurrent subscribe, read, and browse operations complete without deadlocking the server.
        /// </summary>
        [Fact]
        public async Task ConcurrentSubscribeAndRead_NoDeadlock()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                var client1 = new OpcUaTestClient();
                var client2 = new OpcUaTestClient();
                var client3 = new OpcUaTestClient();
                await client1.ConnectAsync(fixture.EndpointUrl);
                await client2.ConnectAsync(fixture.EndpointUrl);
                await client3.ConnectAsync(fixture.EndpointUrl);

                // All three operate simultaneously — should not deadlock
                var timeout = Task.Delay(TimeSpan.FromSeconds(15));
                var operations = Task.WhenAll(
                    client1.SubscribeAsync(client1.MakeNodeId("TestMachine_001.MachineID"), 200)
                        .ContinueWith(t => (object)t.Result),
                    Task.Run(() => (object)client2.Read(client2.MakeNodeId("DelmiaReceiver_001.DownloadPath"))),
                    client3.BrowseAsync(client3.MakeNodeId("TestMachine_001"))
                        .ContinueWith(t => (object)t.Result)
                );

                var completed = await Task.WhenAny(operations, timeout);
                completed.ShouldBe(operations, "Operations should complete before timeout (possible deadlock)");

                client1.Dispose();
                client2.Dispose();
                client3.Dispose();
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }

        /// <summary>
        /// Confirms that repeated client churn does not leave the server in an unstable state.
        /// </summary>
        [Fact]
        public async Task RapidConnectDisconnect_ServerStaysStable()
        {
            var fixture = OpcUaServerFixture.WithFakes();
            await fixture.InitializeAsync();
            try
            {
                // Rapidly connect, browse, disconnect — 10 iterations
                for (var i = 0; i < 10; i++)
                {
                    using var client = new OpcUaTestClient();
                    await client.ConnectAsync(fixture.EndpointUrl);
                    var children = await client.BrowseAsync(client.MakeNodeId("ZB"));
                    children.ShouldNotBeEmpty();
                }

                // After all that churn, server should still be responsive
                using var finalClient = new OpcUaTestClient();
                await finalClient.ConnectAsync(fixture.EndpointUrl);
                var finalChildren = await finalClient.BrowseAsync(finalClient.MakeNodeId("TestMachine_001"));
                finalChildren.ShouldContain(c => c.Name == "MachineID");
                finalChildren.ShouldContain(c => c.Name == "DelmiaReceiver");
            }
            finally
            {
                await fixture.DisposeAsync();
            }
        }
    }
}