Closes the per-device isolation gap flagged at the Phase 6.1 Stream A wire-up (PR #78 used driver.DriverInstanceId as the pipeline host for every call, so multi-host drivers like Modbus with N PLCs shared one pipeline — one dead PLC poisoned sibling breakers). Decision #144 requires per-device isolation; this PR wires it without breaking single-host drivers. Core.Abstractions: - IPerCallHostResolver interface. Optional driver capability. Drivers with multi-host topology (Modbus across N PLCs, AB CIP across a rack, etc.) implement this; single-host drivers (Galaxy, S7 against one PLC, OpcUaClient against one remote server) leave it alone. Must be fast + allocation-free — called once per tag on the hot path. Unknown refs return empty so dispatch falls back to single-host without throwing. Server/OpcUa/DriverNodeManager: - Captures `driver as IPerCallHostResolver` at construction alongside the existing capability casts. - New `ResolveHostFor(fullReference)` helper returns either the resolver's answer or the driver's DriverInstanceId (single-host fallback). Empty / whitespace resolver output also falls back to DriverInstanceId. - Every dispatch site now passes `ResolveHostFor(fullRef)` to the invoker instead of `_driver.DriverInstanceId` — OnReadValue, OnWriteValue, all four HistoryRead paths. The HistoryRead Events path tolerates fullRef=null and falls back to DriverInstanceId for those cluster-wide event queries. - Drivers without IPerCallHostResolver observe zero behavioural change: every call still keys on DriverInstanceId, same as before. Tests (4 new PerCallHostResolverDispatchTests, all pass): - DeadPlc_DoesNotOpenBreaker_For_HealthyPlc_With_Resolver — 2 PLCs behind one driver; hammer the dead PLC past its breaker threshold; assert the healthy PLC's first call succeeds on its first attempt (decision #144). - EmptyString / unknown-ref fallback behaviour documented via test. 
- WithoutResolver_SameHost_Shares_One_Pipeline — regression guard for the single-host pre-existing behaviour. - WithResolver_TwoHosts_Get_Two_Pipelines — builds the CachedPipelineCount assertion to confirm the shared-builder cache keys correctly. Full solution dotnet test: 1219 passing (was 1215, +4). Pre-existing Client.CLI Subscribe flake unchanged. Adoption: Modbus driver (#120 follow-up), AB CIP / AB Legacy / TwinCAT drivers (also #120) implement the interface and return the per-tag PLC host string. Single-host drivers stay silent and pay zero cost. Remaining sub-items of #160 still deferred: - IAlarmSource.SubscribeAlarmsAsync + AcknowledgeAsync invoker wrapping. Non-trivial because alarm subscription is push-based from driver through IAlarmConditionSink — the wrap has to happen at the driver-to-server glue rather than a synchronous dispatch site. - Roslyn analyzer asserting every capability-interface call routes through CapabilityInvoker. Substantial (separate analyzer project + test harness); noise-value ratio favors shipping this post-v2-GA once the coverage is known-stable. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
111 lines
4.6 KiB
C#
using Shouldly;
|
|
using Xunit;
|
|
using ZB.MOM.WW.OtOpcUa.Core.Abstractions;
|
|
using ZB.MOM.WW.OtOpcUa.Core.Resilience;
|
|
|
|
namespace ZB.MOM.WW.OtOpcUa.Core.Tests.Resilience;
|
|
|
|
/// <summary>
/// Verifies the per-call host resolver contract against the shared
/// <see cref="DriverResiliencePipelineBuilder"/> + <see cref="CapabilityInvoker"/>:
/// a single dead PLC behind a multi-device driver must never open the circuit
/// breaker for its healthy sibling PLCs (decision #144).
/// </summary>
[Trait("Category", "Unit")]
public sealed class PerCallHostResolverDispatchTests
{
    /// <summary>
    /// Minimal <see cref="IPerCallHostResolver"/> backed by a fixed lookup table.
    /// References missing from the table resolve to the empty string, which the
    /// dispatch layer treats as "fall back to the single-host pipeline".
    /// </summary>
    private sealed class MapBackedResolver : IPerCallHostResolver
    {
        private readonly Dictionary<string, string> _hostsByReference;

        public MapBackedResolver(Dictionary<string, string> hostsByReference) =>
            _hostsByReference = hostsByReference;

        public string ResolveHost(string fullReference)
        {
            return _hostsByReference.TryGetValue(fullReference, out var host)
                ? host
                : string.Empty;
        }
    }

    [Fact]
    public async Task DeadPlc_DoesNotOpenBreaker_For_HealthyPlc_With_Resolver()
    {
        // Arrange: one driver fronting two PLCs, each tag routed to its own host.
        var pipelineBuilder = new DriverResiliencePipelineBuilder();
        var resilienceOptions = new DriverResilienceOptions { Tier = DriverTier.B };
        var capabilityInvoker = new CapabilityInvoker(pipelineBuilder, "drv-modbus", () => resilienceOptions);

        var hostResolver = new MapBackedResolver(new Dictionary<string, string>
        {
            { "tag-on-dead", "plc-dead" },
            { "tag-on-alive", "plc-alive" },
        });

        // Act: hammer the dead PLC well past its breaker failure threshold.
        var failureThreshold = resilienceOptions.Resolve(DriverCapability.Read).BreakerFailureThreshold;
        var attempt = 0;
        while (attempt < failureThreshold + 3)
        {
            await Should.ThrowAsync<Exception>(async () =>
                await capabilityInvoker.ExecuteAsync(
                    DriverCapability.Read,
                    hostName: hostResolver.ResolveHost("tag-on-dead"),
                    _ => throw new InvalidOperationException("plc-dead unreachable"),
                    CancellationToken.None));
            attempt++;
        }

        // Assert: the healthy PLC lives in its own pipeline bucket, so its very
        // first call succeeds without tripping over the dead PLC's open breaker.
        var healthyCallCount = 0;
        await capabilityInvoker.ExecuteAsync(
            DriverCapability.Read,
            hostName: hostResolver.ResolveHost("tag-on-alive"),
            _ => { healthyCallCount++; return ValueTask.FromResult("ok"); },
            CancellationToken.None);

        healthyCallCount.ShouldBe(1, "decision #144 — per-PLC isolation keeps healthy PLCs serving");
    }

    [Fact]
    public void Resolver_EmptyString_Treated_As_Single_Host_Fallback()
    {
        // A mapped-but-empty host and an unmapped reference both surface as "",
        // documenting the contract: empty means "use the DriverInstanceId pipeline".
        var hostResolver = new MapBackedResolver(new Dictionary<string, string>
        {
            { "tag-unknown", "" },
        });

        hostResolver.ResolveHost("tag-unknown").ShouldBe("");
        hostResolver.ResolveHost("not-in-map").ShouldBe("", "unknown refs return empty so dispatch falls back to single-host");
    }

    [Fact]
    public async Task WithoutResolver_SameHost_Shares_One_Pipeline()
    {
        // No resolver in play: every call keys on the DriverInstanceId — exactly
        // the pre-decision-#144 behavior that single-host drivers must keep.
        var pipelineBuilder = new DriverResiliencePipelineBuilder();
        var resilienceOptions = new DriverResilienceOptions { Tier = DriverTier.A };
        var capabilityInvoker = new CapabilityInvoker(pipelineBuilder, "drv-single", () => resilienceOptions);

        await capabilityInvoker.ExecuteAsync(
            DriverCapability.Read, "drv-single", _ => ValueTask.FromResult("a"), CancellationToken.None);
        await capabilityInvoker.ExecuteAsync(
            DriverCapability.Read, "drv-single", _ => ValueTask.FromResult("b"), CancellationToken.None);

        pipelineBuilder.CachedPipelineCount.ShouldBe(1, "single-host drivers share one pipeline");
    }

    [Fact]
    public async Task WithResolver_TwoHosts_Get_Two_Pipelines()
    {
        // Two tags resolving to two distinct hosts must produce two cached
        // pipelines — the shared builder keys per (driver, host) bucket.
        var pipelineBuilder = new DriverResiliencePipelineBuilder();
        var resilienceOptions = new DriverResilienceOptions { Tier = DriverTier.B };
        var capabilityInvoker = new CapabilityInvoker(pipelineBuilder, "drv-modbus", () => resilienceOptions);
        var hostResolver = new MapBackedResolver(new Dictionary<string, string>
        {
            { "tag-a", "plc-a" },
            { "tag-b", "plc-b" },
        });

        await capabilityInvoker.ExecuteAsync(
            DriverCapability.Read, hostResolver.ResolveHost("tag-a"), _ => ValueTask.FromResult(1), CancellationToken.None);
        await capabilityInvoker.ExecuteAsync(
            DriverCapability.Read, hostResolver.ResolveHost("tag-b"), _ => ValueTask.FromResult(2), CancellationToken.None);

        pipelineBuilder.CachedPipelineCount.ShouldBe(2, "each host keyed on its own pipeline");
    }
}
|