Files
lmxopcua/tests/ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client.Tests/FakeSidecarServer.cs
Joseph Doherty 14947fde51 PR 3.4 — Wonderware historian sidecar .NET 10 client
New project Driver.Historian.Wonderware.Client (net10 x64) implements both
Core.Abstractions.IHistorianDataSource (read paths consumed by the server's
IHistoryRouter) and Core.AlarmHistorian.IAlarmHistorianWriter (alarm-event
drain consumed by SqliteStoreAndForwardSink) against the sidecar's PR 3.3
pipe protocol.

Wire-format files (Framing/MessageKind, Hello, Contracts, FrameReader,
FrameWriter) are byte-identical mirrors of the sidecar's net48 originals —
the sidecar can't be referenced as a ProjectReference because of the
runtime/bitness gap, so we duplicate and pin the wire bytes via tests.

PipeChannel owns one bidirectional NamedPipeClientStream + Hello handshake +
serializes calls. Only one call is in flight at a time (semaphore); a transport
failure triggers a single reconnect-and-retry attempt before the error propagates. Connect is
abstracted behind a Func<CancellationToken, Task<Stream>> so tests inject
in-process pipes.

WonderwareHistorianClient maps:
- HistorianSampleDto.Quality (raw OPC DA byte) → OPC UA StatusCode uint via
  QualityMapper (port of HistorianQualityMapper from sidecar).
- HistorianAggregateSampleDto.Value=null → BadNoData (0x800E0000).
- WriteAlarmEventsReply.PerEventOk[i]=true → Ack, false → RetryPlease.
  Whole-call failure or transport exception → RetryPlease for every event in
  the batch (drain worker handles backoff).
- AlarmHistorianEvent → AlarmHistorianEventDto with severity bucketed via
  AlarmSeverity-to-ushort mapping (Low=250, Medium=500, High=700, Crit=900).

GetHealthSnapshot tracks transport success + sidecar-reported failure
separately; ConsecutiveFailures rises on operation-level errors, not just
transport drops.

10 round-trip tests via FakeSidecarServer (in-process net10 fake using the
client's own framing): byte→uint quality mapping, null-bucket BadNoData,
at-time order preservation, event-field round-trip, sidecar error surfacing,
WriteBatch per-event status, whole-call retry-please mapping, Hello
shared-secret rejection, transport-drop reconnect-and-retry, health snapshot
counters.

PR 3.W will register this client as IHistorianDataSource + IAlarmHistorianWriter
in OpcUaServerService DI.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-29 14:40:56 -04:00

146 lines
6.6 KiB
C#

using System.IO.Pipes;
using MessagePack;
using ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client.Ipc;
namespace ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client.Tests;
/// <summary>
/// In-process fake of the Wonderware historian sidecar. Reuses the client-side framing
/// code (which is byte-identical to the real sidecar) so the wire bytes round-trip
/// correctly without requiring the .NET 4.8 sidecar binary at test time.
/// </summary>
internal sealed class FakeSidecarServer : IAsyncDisposable
{
    private readonly string _pipeName;
    private readonly string _expectedSecret;
    private readonly CancellationTokenSource _cts = new();

    // Completes once the first NamedPipeServerStream instance has been created — from
    // that point a client ConnectAsync can succeed. RunContinuationsAsynchronously keeps
    // the StartAsync awaiter off the accept-loop thread.
    private readonly TaskCompletionSource _listening =
        new(TaskCreationOptions.RunContinuationsAsynchronously);

    private Task? _loop;

    // volatile: armed by the test thread, read and cleared by the accept-loop thread.
    private volatile bool _disconnectAfterHandshake;

    // Per-operation reply factories; tests override these to shape sidecar behavior.
    // Defaults return an empty successful reply.
    public Func<ReadRawRequest, ReadRawReply> OnReadRaw { get; set; } = _ => new ReadRawReply { Success = true };
    public Func<ReadProcessedRequest, ReadProcessedReply> OnReadProcessed { get; set; } = _ => new ReadProcessedReply { Success = true };
    public Func<ReadAtTimeRequest, ReadAtTimeReply> OnReadAtTime { get; set; } = _ => new ReadAtTimeReply { Success = true };
    public Func<ReadEventsRequest, ReadEventsReply> OnReadEvents { get; set; } = _ => new ReadEventsReply { Success = true };
    public Func<WriteAlarmEventsRequest, WriteAlarmEventsReply> OnWriteAlarmEvents { get; set; } = req
        => new WriteAlarmEventsReply { Success = true, PerEventOk = Enumerable.Repeat(true, req.Events.Length).ToArray() };

    /// <summary>Force-disconnect the next accepted client mid-call to exercise reconnect. Self-resets after firing once.</summary>
    public bool DisconnectAfterHandshake
    {
        get => _disconnectAfterHandshake;
        set => _disconnectAfterHandshake = value;
    }

    public FakeSidecarServer(string pipeName, string expectedSecret)
    {
        _pipeName = pipeName;
        _expectedSecret = expectedSecret;
    }

    public string PipeName => _pipeName;

    /// <summary>
    /// Starts the accept loop. The returned task completes as soon as the first pipe
    /// server instance exists, so a client ConnectAsync issued after awaiting it cannot
    /// race the listener. (Replaces a fixed 50 ms delay, which was flaky on slow agents.)
    /// </summary>
    public Task StartAsync()
    {
        _loop = Task.Run(() => RunAsync(_cts.Token));
        return _listening.Task;
    }

    /// <summary>Accept loop: one client at a time; a dropped client is replaced by accepting again.</summary>
    private async Task RunAsync(CancellationToken ct)
    {
        try
        {
            while (!ct.IsCancellationRequested)
            {
                // maxNumberOfServerInstances: 1 mirrors the real sidecar, which serves a
                // single client over one bidirectional byte-mode pipe.
                await using var pipe = new NamedPipeServerStream(
                    _pipeName, PipeDirection.InOut, maxNumberOfServerInstances: 1,
                    PipeTransmissionMode.Byte, PipeOptions.Asynchronous,
                    inBufferSize: 64 * 1024, outBufferSize: 64 * 1024);

                // The pipe instance now exists, so a client connect can succeed: unblock StartAsync.
                _listening.TrySetResult();

                try { await pipe.WaitForConnectionAsync(ct).ConfigureAwait(false); }
                catch (OperationCanceledException) { break; }

                try
                {
                    await ServeConnectionAsync(pipe, ct).ConfigureAwait(false);
                }
                catch (OperationCanceledException) { break; }
                catch (IOException) { /* peer dropped — accept next */ }
                catch (ObjectDisposedException) { /* shutdown raced the read — accept next or exit */ }
            }
        }
        finally
        {
            // Guarantee StartAsync can never hang, even if the loop exits before a pipe was created.
            _listening.TrySetResult();
        }
    }

    /// <summary>Handles one accepted client: Hello handshake, then the request/reply dispatch loop.</summary>
    private async Task ServeConnectionAsync(NamedPipeServerStream pipe, CancellationToken ct)
    {
        using var reader = new FrameReader(pipe, leaveOpen: true);
        using var writer = new FrameWriter(pipe, leaveOpen: true);

        // Hello handshake: the very first frame must be a Hello carrying the shared secret.
        var first = await reader.ReadFrameAsync(ct).ConfigureAwait(false);
        if (first is null || first.Value.Kind != MessageKind.Hello) return;

        var hello = MessagePackSerializer.Deserialize<Hello>(first.Value.Body);
        if (!string.Equals(hello.SharedSecret, _expectedSecret, StringComparison.Ordinal))
        {
            await writer.WriteAsync(MessageKind.HelloAck, new HelloAck { Accepted = false, RejectReason = "shared-secret-mismatch" }, ct);
            return;
        }
        await writer.WriteAsync(MessageKind.HelloAck, new HelloAck { Accepted = true, HostName = "fake-sidecar" }, ct);

        if (_disconnectAfterHandshake)
        {
            _disconnectAfterHandshake = false; // arm once
            pipe.Disconnect();
            return;
        }

        while (!ct.IsCancellationRequested)
        {
            var frame = await reader.ReadFrameAsync(ct).ConfigureAwait(false);
            if (frame is null) break; // clean EOF from the peer

            // Every reply echoes the request's CorrelationId so the client can match
            // replies to in-flight calls. Frames of unknown kind are silently ignored.
            switch (frame.Value.Kind)
            {
                case MessageKind.ReadRawRequest:
                {
                    var req = MessagePackSerializer.Deserialize<ReadRawRequest>(frame.Value.Body);
                    var reply = OnReadRaw(req);
                    reply.CorrelationId = req.CorrelationId;
                    await writer.WriteAsync(MessageKind.ReadRawReply, reply, ct);
                    break;
                }
                case MessageKind.ReadProcessedRequest:
                {
                    var req = MessagePackSerializer.Deserialize<ReadProcessedRequest>(frame.Value.Body);
                    var reply = OnReadProcessed(req);
                    reply.CorrelationId = req.CorrelationId;
                    await writer.WriteAsync(MessageKind.ReadProcessedReply, reply, ct);
                    break;
                }
                case MessageKind.ReadAtTimeRequest:
                {
                    var req = MessagePackSerializer.Deserialize<ReadAtTimeRequest>(frame.Value.Body);
                    var reply = OnReadAtTime(req);
                    reply.CorrelationId = req.CorrelationId;
                    await writer.WriteAsync(MessageKind.ReadAtTimeReply, reply, ct);
                    break;
                }
                case MessageKind.ReadEventsRequest:
                {
                    var req = MessagePackSerializer.Deserialize<ReadEventsRequest>(frame.Value.Body);
                    var reply = OnReadEvents(req);
                    reply.CorrelationId = req.CorrelationId;
                    await writer.WriteAsync(MessageKind.ReadEventsReply, reply, ct);
                    break;
                }
                case MessageKind.WriteAlarmEventsRequest:
                {
                    var req = MessagePackSerializer.Deserialize<WriteAlarmEventsRequest>(frame.Value.Body);
                    var reply = OnWriteAlarmEvents(req);
                    reply.CorrelationId = req.CorrelationId;
                    await writer.WriteAsync(MessageKind.WriteAlarmEventsReply, reply, ct);
                    break;
                }
            }
        }
    }

    /// <summary>Cancels the accept loop and waits for it to drain; shutdown errors are ignored.</summary>
    public async ValueTask DisposeAsync()
    {
        _cts.Cancel();
        if (_loop is not null)
        {
            try { await _loop.ConfigureAwait(false); } catch { /* ignore shutdown errors */ }
        }
        _cts.Dispose();
    }
}