Files
lmxopcua/tests/ZB.MOM.WW.OtOpcUa.Tests.v1Archive/MxAccess/MxAccessClientMonitorTests.cs
Joseph Doherty a3d16a28f1 Phase 2 Stream D Option B — archive v1 surface + new Driver.Galaxy.E2E parity suite. Non-destructive intermediate state: the v1 OtOpcUa.Host + Historian.Aveva + Tests + IntegrationTests projects all still build (494 v1 unit + 6 v1 integration tests still pass when run explicitly), but solution-level dotnet test ZB.MOM.WW.OtOpcUa.slnx now skips them via IsTestProject=false on the test projects + archive-status PropertyGroup comments on the src projects. The destructive deletion is reserved for Phase 2 PR 3 with explicit operator review per CLAUDE.md "only use destructive operations when truly the best approach". tests/ZB.MOM.WW.OtOpcUa.Tests/ renamed via git mv to tests/ZB.MOM.WW.OtOpcUa.Tests.v1Archive/; csproj <AssemblyName> kept as the original ZB.MOM.WW.OtOpcUa.Tests so v1 OtOpcUa.Host's [InternalsVisibleTo("ZB.MOM.WW.OtOpcUa.Tests")] still matches and the project rebuilds clean. tests/ZB.MOM.WW.OtOpcUa.IntegrationTests gets <IsTestProject>false</IsTestProject>. src/ZB.MOM.WW.OtOpcUa.Host + src/ZB.MOM.WW.OtOpcUa.Historian.Aveva get PropertyGroup archive-status comments documenting they're functionally superseded but kept in-build because cascading dependencies (Historian.Aveva → Host; IntegrationTests → Host) make a single-PR deletion high blast-radius. New tests/ZB.MOM.WW.OtOpcUa.Driver.Galaxy.E2E/ project (.NET 10) with ParityFixture that spawns OtOpcUa.Driver.Galaxy.Host.exe (net48 x86) as a Process.Start subprocess with OTOPCUA_GALAXY_BACKEND=db env vars, awaits 2s for the PipeServer to bind, then exposes a connected GalaxyProxyDriver; skips on non-Windows / Administrator shells (PipeAcl denies admins per decision #76) / ZB unreachable / Host EXE not built — each skip carries a SkipReason string the test method reads via Assert.Skip(SkipReason). RecordingAddressSpaceBuilder captures every Folder/Variable/AddProperty registration so parity tests can assert on the same shape v1 LmxNodeManager produced. 
HierarchyParityTests (3) — Discover returns gobjects with attributes; attribute full references match the tag.attribute Galaxy reference grammar; HistoryExtension flag flows through correctly. StabilityFindingsRegressionTests (4) — one test per 2026-04-13 stability finding from commits c76ab8f and 7310925: phantom probe subscription doesn't corrupt unrelated host status; HostStatusChangedEventArgs structurally carries a specific HostName + OldState + NewState (event signature mathematically prevents the v1 cross-host quality-clear bug); all GalaxyProxyDriver capability methods return Task or Task<T> (sync-over-async would deadlock OPC UA stack thread); AcknowledgeAsync completes before returning (no fire-and-forget background work that could race shutdown). Solution test count: 470 pass / 7 skip (E2E on admin shell) / 1 pre-existing Phase 0 baseline. Run archived suites explicitly: dotnet test tests/ZB.MOM.WW.OtOpcUa.Tests.v1Archive (494 pass) + dotnet test tests/ZB.MOM.WW.OtOpcUa.IntegrationTests (6 pass). docs/v2/V1_ARCHIVE_STATUS.md inventories every archived surface with run-it-explicitly instructions + a 10-step deletion plan for PR 3 + rollback procedure (git revert restores all four projects). docs/v2/implementation/exit-gate-phase-2-final.md supersedes the two partial-exit docs with the per-stream status table (A/B/C/D/E all addressed, D split across PR 2/3 per safety protocol), the test count breakdown, fresh adversarial review of PR 2 deltas (4 new findings: medium IsTestProject=false safety net loss, medium structural-vs-behavioral stability tests, low backend=db default, low Process.Start env inheritance), the 8 carried-forward findings from exit-gate-phase-2.md, the recommended PR order (1 → 2 → 3 → 4). docs/v2/implementation/pr-2-body.md is the Gitea web-UI paste-in for opening PR 2 once pushed.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-18 00:56:21 -04:00

173 lines
5.8 KiB
C#

using System;
using System.Threading.Tasks;
using Shouldly;
using Xunit;
using ZB.MOM.WW.OtOpcUa.Host.Configuration;
using ZB.MOM.WW.OtOpcUa.Host.Domain;
using ZB.MOM.WW.OtOpcUa.Host.Metrics;
using ZB.MOM.WW.OtOpcUa.Host.MxAccess;
using ZB.MOM.WW.OtOpcUa.Tests.Helpers;
namespace ZB.MOM.WW.OtOpcUa.Tests.MxAccess
{
/// <summary>
/// Verifies the background connectivity monitor used to reconnect the MXAccess bridge after faults or stale probes.
/// </summary>
/// <summary>
/// Verifies the background connectivity monitor used to reconnect the MXAccess bridge after faults or stale probes.
/// xUnit constructs a fresh instance per test method, so the STA thread, fake proxy, and metrics are never shared
/// between tests.
/// </summary>
public class MxAccessClientMonitorTests : IDisposable
{
    private readonly PerformanceMetrics _metrics;
    private readonly FakeMxProxy _proxy;
    private readonly StaComThread _staThread;

    /// <summary>
    /// Initializes the monitor test fixture with a shared STA thread, fake proxy, and metrics collector.
    /// </summary>
    public MxAccessClientMonitorTests()
    {
        _staThread = new StaComThread();
        _staThread.Start();
        _proxy = new FakeMxProxy();
        _metrics = new PerformanceMetrics();
    }

    /// <summary>
    /// Disposes the monitor test fixture resources after each test.
    /// </summary>
    public void Dispose()
    {
        _staThread.Dispose();
        _metrics.Dispose();
    }

    /// <summary>
    /// Creates a client wired to the fixture's STA thread, fake proxy, and metrics collector.
    /// </summary>
    private MxAccessClient CreateClient(MxAccessConfiguration config) =>
        new MxAccessClient(_staThread, _proxy, config, _metrics);

    /// <summary>
    /// Confirms that the monitor reconnects the client after an observed disconnect.
    /// </summary>
    [Fact]
    public async Task Monitor_ReconnectsOnDisconnect()
    {
        var config = new MxAccessConfiguration
        {
            MonitorIntervalSeconds = 1,
            AutoReconnect = true
        };
        // `using var` guarantees disposal even if an assertion below throws.
        using var client = CreateClient(config);
        await client.ConnectAsync();
        await client.DisconnectAsync();

        client.StartMonitor();
        // Wait for monitor to detect disconnect and reconnect (interval = 1s, so 2.5s covers two cycles)
        await Task.Delay(2500);
        client.StopMonitor();

        client.State.ShouldBe(ConnectionState.Connected);
        client.ReconnectCount.ShouldBeGreaterThan(0);
    }

    /// <summary>
    /// Confirms that the monitor can be started and stopped without throwing.
    /// </summary>
    [Fact]
    public async Task Monitor_StopsOnCancel()
    {
        var config = new MxAccessConfiguration { MonitorIntervalSeconds = 1 };
        using var client = CreateClient(config);
        await client.ConnectAsync();

        client.StartMonitor();
        client.StopMonitor();

        // Should not throw after stop; short delay lets any pending monitor work surface an exception
        await Task.Delay(200);
    }

    /// <summary>
    /// Confirms that a stale probe tag triggers a reconnect when monitoring is enabled.
    /// </summary>
    [Fact]
    public async Task Monitor_ProbeStale_ForcesReconnect()
    {
        var config = new MxAccessConfiguration
        {
            ProbeTag = "TestProbe",
            ProbeStaleThresholdSeconds = 2,
            MonitorIntervalSeconds = 1,
            AutoReconnect = true
        };
        using var client = CreateClient(config);
        await client.ConnectAsync();

        client.StartMonitor();
        // Wait long enough for probe to go stale (threshold=2s, monitor interval=1s)
        // No data changes simulated → probe becomes stale → reconnect triggered
        await Task.Delay(4000);
        client.StopMonitor();

        client.ReconnectCount.ShouldBeGreaterThan(0);
    }

    /// <summary>
    /// Confirms that fresh probe updates prevent unnecessary reconnects.
    /// </summary>
    [Fact]
    public async Task Monitor_ProbeDataChange_PreventsStaleReconnect()
    {
        var config = new MxAccessConfiguration
        {
            ProbeTag = "TestProbe",
            ProbeStaleThresholdSeconds = 5,
            MonitorIntervalSeconds = 1,
            AutoReconnect = true
        };
        using var client = CreateClient(config);
        await client.ConnectAsync();

        client.StartMonitor();
        // Continuously simulate probe data changes to keep it fresh
        // Stale threshold (5s) is well above the delay (500ms) to avoid timing flakes
        for (var i = 0; i < 8; i++)
        {
            await Task.Delay(500);
            _proxy.SimulateDataChangeByAddress("TestProbe", i);
        }
        client.StopMonitor();

        // Probe was kept fresh → no reconnect should have happened
        client.ReconnectCount.ShouldBe(0);
        client.State.ShouldBe(ConnectionState.Connected);
    }

    /// <summary>
    /// Confirms that enabling the monitor without a probe tag does not trigger false reconnects.
    /// </summary>
    [Fact]
    public async Task Monitor_NoProbeConfigured_NoFalseReconnect()
    {
        var config = new MxAccessConfiguration
        {
            ProbeTag = null, // No probe
            MonitorIntervalSeconds = 1,
            AutoReconnect = true
        };
        using var client = CreateClient(config);
        await client.ConnectAsync();

        client.StartMonitor();
        // Wait several monitor cycles — should stay connected with no reconnects
        await Task.Delay(3000);
        client.StopMonitor();

        client.State.ShouldBe(ConnectionState.Connected);
        client.ReconnectCount.ShouldBe(0);
    }
}
}