Renames all 11 projects (5 src + 6 tests), the .slnx solution file, all source-file namespaces, all axaml namespace references, and all v1 documentation references in CLAUDE.md and docs/*.md (excluding docs/v2/ which is already in OtOpcUa form). Also updates the TopShelf service registration name from "LmxOpcUa" to "OtOpcUa" per Phase 0 Task 0.6.
Preserves runtime identifiers per Phase 0 Out-of-Scope rules to avoid breaking v1/v2 client trust during coexistence: OPC UA `ApplicationUri` defaults (`urn:{GalaxyName}:LmxOpcUa`), server `EndpointPath` (`/LmxOpcUa`), `ServerName` default (feeds cert subject CN), `MxAccessConfiguration.ClientName` default (defensive — stays "LmxOpcUa" for MxAccess audit-trail consistency), client OPC UA identifiers (`ApplicationName = "LmxOpcUaClient"`, `ApplicationUri = "urn:localhost:LmxOpcUaClient"`, cert directory `%LocalAppData%\LmxOpcUaClient\pki\`), and the `LmxOpcUaServer` class name (class rename out of Phase 0 scope per Task 0.5 sed pattern; happens in Phase 1 alongside `LmxNodeManager → GenericDriverNodeManager` Core extraction). 23 LmxOpcUa references retained, all enumerated and justified in `docs/v2/implementation/exit-gate-phase-0.md`.
Build clean: 0 errors, 30 warnings (lower than baseline 167). Tests at strict improvement over baseline: 821 passing / 1 failing vs baseline 820 / 2 (one flaky pre-existing failure passed this run; the other still fails — both pre-existing and unrelated to the rename). `Client.UI.Tests`, `Historian.Aveva.Tests`, `Client.Shared.Tests`, `IntegrationTests` all match baseline exactly. Exit gate compliance results recorded in `docs/v2/implementation/exit-gate-phase-0.md` with all 7 checks PASS or DEFERRED-to-PR-review (#7 service install verification needs Windows service permissions on the reviewer's box).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
292 lines
10 KiB
C#
292 lines
10 KiB
C#
using System;
|
|
using System.Collections.Generic;
|
|
using System.Linq;
|
|
using System.Threading;
|
|
using System.Threading.Tasks;
|
|
using Shouldly;
|
|
using Xunit;
|
|
using ZB.MOM.WW.OtOpcUa.Host.Configuration;
|
|
|
|
namespace ZB.MOM.WW.OtOpcUa.Historian.Aveva.Tests
{
    /// <summary>
    /// Full coverage of <c>HistorianClusterEndpointPicker</c>: configuration parsing
    /// (legacy single-name fallback, trimming, case-insensitive de-duplication, ordering),
    /// failure cooldown driven by an injected clock delegate, explicit recovery via
    /// <c>MarkHealthy</c>, tolerance of unknown node names, state snapshots, and a
    /// thread-safety smoke test with concurrent writers.
    /// </summary>
    public class HistorianClusterEndpointPickerTests
    {
        // ---------- Construction / config parsing ----------

        [Fact]
        public void SingleServerName_FallbackWhenServerNamesEmpty()
        {
            // When the ServerNames list is empty, the legacy ServerName seeds the pool.
            var sut = new HistorianClusterEndpointPicker(Config(serverName: "host-a"));

            sut.NodeCount.ShouldBe(1);
            sut.GetHealthyNodes().ShouldBe(new[] { "host-a" });
        }

        [Fact]
        public void ServerNames_TakesPrecedenceOverLegacyServerName()
        {
            // A populated ServerNames list wins; the legacy ServerName is ignored.
            var sut = new HistorianClusterEndpointPicker(
                Config(serverName: "legacy", serverNames: new[] { "host-a", "host-b" }));

            sut.NodeCount.ShouldBe(2);
            sut.GetHealthyNodes().ShouldBe(new[] { "host-a", "host-b" });
        }

        [Fact]
        public void ServerNames_OrderedAsConfigured()
        {
            // Pool order mirrors configuration order — no implicit sorting.
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "c", "a", "b" }));

            sut.GetHealthyNodes().ShouldBe(new[] { "c", "a", "b" });
        }

        [Fact]
        public void ServerNames_WhitespaceTrimmedAndEmptyDropped()
        {
            // Entries are trimmed; blank/whitespace-only entries are discarded.
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { " host-a ", "", " ", "host-b" }));

            sut.GetHealthyNodes().ShouldBe(new[] { "host-a", "host-b" });
        }

        [Fact]
        public void ServerNames_CaseInsensitiveDeduplication()
        {
            // Names differing only by case collapse to a single node.
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "Host-A", "HOST-A", "host-a" }));

            sut.NodeCount.ShouldBe(1);
        }

        [Fact]
        public void EmptyConfig_ProducesEmptyPool()
        {
            // Neither source configured → an empty (but valid) pool.
            var sut = new HistorianClusterEndpointPicker(
                Config(serverName: "", serverNames: Array.Empty<string>()));

            sut.NodeCount.ShouldBe(0);
            sut.GetHealthyNodes().ShouldBeEmpty();
        }

        // ---------- MarkFailed / cooldown window ----------

        [Fact]
        public void MarkFailed_RemovesNodeFromHealthyList()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a", "b" }, cooldownSeconds: 60), time.Now);

            sut.MarkFailed("a", "boom");

            sut.GetHealthyNodes().ShouldBe(new[] { "b" });
            sut.HealthyNodeCount.ShouldBe(1);
        }

        [Fact]
        public void MarkFailed_RecordsErrorAndTimestamp()
        {
            var time = new FakeClock { UtcNow = new DateTime(2026, 4, 13, 10, 0, 0, DateTimeKind.Utc) };
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a", "b" }), time.Now);

            sut.MarkFailed("a", "connection refused");

            var nodeA = sut.SnapshotNodeStates().First(s => s.Name == "a");
            nodeA.IsHealthy.ShouldBeFalse();
            nodeA.FailureCount.ShouldBe(1);
            nodeA.LastError.ShouldBe("connection refused");
            nodeA.LastFailureTime.ShouldBe(time.UtcNow);
        }

        [Fact]
        public void MarkFailed_CooldownExpiryRestoresNode()
        {
            var time = new FakeClock { UtcNow = new DateTime(2026, 4, 13, 10, 0, 0, DateTimeKind.Utc) };
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a", "b" }, cooldownSeconds: 60), time.Now);

            sut.MarkFailed("a", "boom");
            sut.GetHealthyNodes().ShouldBe(new[] { "b" });

            // One second short of the 60s window — node is still benched.
            time.UtcNow = time.UtcNow.AddSeconds(59);
            sut.GetHealthyNodes().ShouldBe(new[] { "b" });

            // Step past expiry — node rejoins the pool automatically.
            time.UtcNow = time.UtcNow.AddSeconds(2);
            sut.GetHealthyNodes().ShouldBe(new[] { "a", "b" });
        }

        [Fact]
        public void ZeroCooldown_NeverBenchesNode()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a", "b" }, cooldownSeconds: 0), time.Now);

            sut.MarkFailed("a", "boom");

            // cooldownSeconds == 0 means failures are recorded but never bench the node.
            sut.GetHealthyNodes().ShouldBe(new[] { "a", "b" });
            var nodeA = sut.SnapshotNodeStates().First(s => s.Name == "a");
            nodeA.FailureCount.ShouldBe(1);
            nodeA.LastError.ShouldBe("boom");
        }

        [Fact]
        public void AllNodesFailed_HealthyListIsEmpty()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a", "b" }, cooldownSeconds: 60), time.Now);

            sut.MarkFailed("a", "boom");
            sut.MarkFailed("b", "boom");

            // Every node benched → empty healthy list rather than an exception.
            sut.GetHealthyNodes().ShouldBeEmpty();
            sut.HealthyNodeCount.ShouldBe(0);
        }

        [Fact]
        public void MarkFailed_AccumulatesFailureCount()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a" }, cooldownSeconds: 10), time.Now);

            sut.MarkFailed("a", "error 1");
            time.UtcNow = time.UtcNow.AddSeconds(20); // let the cooldown lapse
            sut.MarkFailed("a", "error 2");

            // FailureCount is cumulative across cooldown cycles; LastError is most recent.
            sut.SnapshotNodeStates().First().FailureCount.ShouldBe(2);
            sut.SnapshotNodeStates().First().LastError.ShouldBe("error 2");
        }

        // ---------- MarkHealthy ----------

        [Fact]
        public void MarkHealthy_ClearsCooldownImmediately()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a", "b" }, cooldownSeconds: 3600), time.Now);

            sut.MarkFailed("a", "boom");
            sut.GetHealthyNodes().ShouldBe(new[] { "b" });

            // Explicit recovery overrides the (hour-long) cooldown with no clock movement.
            sut.MarkHealthy("a");
            sut.GetHealthyNodes().ShouldBe(new[] { "a", "b" });
        }

        [Fact]
        public void MarkHealthy_PreservesCumulativeFailureCount()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a" }), time.Now);

            sut.MarkFailed("a", "boom");
            sut.MarkHealthy("a");

            // Recovery restores health but keeps failure history for diagnostics.
            var nodeA = sut.SnapshotNodeStates().First();
            nodeA.IsHealthy.ShouldBeTrue();
            nodeA.FailureCount.ShouldBe(1);
        }

        // ---------- Unknown node handling ----------

        [Fact]
        public void MarkFailed_UnknownNode_IsIgnored()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a" }), time.Now);

            // Unrecognized node names are a silent no-op, not an error.
            Should.NotThrow(() => sut.MarkFailed("not-configured", "boom"));
            sut.GetHealthyNodes().ShouldBe(new[] { "a" });
        }

        [Fact]
        public void MarkHealthy_UnknownNode_IsIgnored()
        {
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a" }));

            Should.NotThrow(() => sut.MarkHealthy("not-configured"));
        }

        // ---------- SnapshotNodeStates ----------

        [Fact]
        public void SnapshotNodeStates_ReflectsConfigurationOrder()
        {
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "z", "m", "a" }));

            sut.SnapshotNodeStates().Select(s => s.Name).ShouldBe(new[] { "z", "m", "a" });
        }

        [Fact]
        public void SnapshotNodeStates_HealthyEntriesHaveNoCooldown()
        {
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a" }));

            // A never-failed node carries no cooldown, error, or failure timestamp.
            var state = sut.SnapshotNodeStates().First();
            state.IsHealthy.ShouldBeTrue();
            state.CooldownUntil.ShouldBeNull();
            state.LastError.ShouldBeNull();
            state.LastFailureTime.ShouldBeNull();
        }

        // ---------- Thread safety smoke test ----------

        [Fact]
        public void ConcurrentMarkAndQuery_DoesNotCorrupt()
        {
            var time = new FakeClock();
            var sut = new HistorianClusterEndpointPicker(
                Config(serverNames: new[] { "a", "b", "c", "d" }, cooldownSeconds: 5), time.Now);

            // Eight workers hammer one node with fail/heal cycles while reading state.
            var workers = Enumerable.Range(0, 8)
                .Select(_ => Task.Run(() =>
                {
                    for (var j = 0; j < 1000; j++)
                    {
                        sut.MarkFailed("a", "boom");
                        sut.MarkHealthy("a");
                        _ = sut.GetHealthyNodes();
                        _ = sut.SnapshotNodeStates();
                    }
                }))
                .ToArray();

            Task.WaitAll(workers);

            // Smoke assertion only: pool is intact and readable after the storm.
            // "a" may land failed or healthy depending on the final interleaving.
            sut.NodeCount.ShouldBe(4);
            sut.GetHealthyNodes().Count.ShouldBeInRange(3, 4);
        }

        // ---------- Helpers ----------

        /// <summary>Builds a minimal configuration for the picker under test.</summary>
        private static HistorianConfiguration Config(
            string serverName = "localhost",
            string[]? serverNames = null,
            int cooldownSeconds = 60) =>
            new HistorianConfiguration
            {
                ServerName = serverName,
                ServerNames = (serverNames ?? Array.Empty<string>()).ToList(),
                FailureCooldownSeconds = cooldownSeconds
            };

        /// <summary>
        /// Mutable test clock. <see cref="Now"/> matches the clock-delegate signature the
        /// picker accepts, so tests control time by assigning <see cref="UtcNow"/>.
        /// </summary>
        private sealed class FakeClock
        {
            public DateTime UtcNow { get; set; } = new DateTime(2026, 1, 1, 0, 0, 0, DateTimeKind.Utc);

            public DateTime Now() => UtcNow;
        }
    }
}
|