Files
lmxopcua/src/ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client/WonderwareHistorianClient.cs
Joseph Doherty 14947fde51 PR 3.4 — Wonderware historian sidecar .NET 10 client
New project Driver.Historian.Wonderware.Client (net10 x64) implements both
Core.Abstractions.IHistorianDataSource (read paths consumed by the server's
IHistoryRouter) and Core.AlarmHistorian.IAlarmHistorianWriter (alarm-event
drain consumed by SqliteStoreAndForwardSink) against the sidecar's PR 3.3
pipe protocol.

Wire-format files (Framing/MessageKind, Hello, Contracts, FrameReader,
FrameWriter) are byte-identical mirrors of the sidecar's net48 originals —
the sidecar can't be referenced as a ProjectReference because of the
runtime/bitness gap, so we duplicate and pin the wire bytes via tests.
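
A pinning test, sketched (FrameWriter/FrameReader method names and the Hello
payload here are assumptions — the mirrored API isn't shown in this view — and
only the committed byte array is load-bearing):

    // xunit + MessagePack; if the net10 mirror ever drifts from the net48
    // original, this assertion fires.
    [Fact]
    public async Task Hello_frame_bytes_are_pinned()
    {
        using var ms = new MemoryStream();
        await FrameWriter.WriteFrameAsync(
            ms, MessageKind.Hello, MessagePackSerializer.Serialize(new Hello()), CancellationToken.None);
        // Hex captured once from the net48 sidecar and committed alongside the test.
        byte[] pinned = Convert.FromHexString("...");
        Assert.Equal(pinned, ms.ToArray());
    }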

PipeChannel owns one bidirectional NamedPipeClientStream + Hello handshake +
call serialization. One call in flight at a time (semaphore-gated); transport
failures trigger a single reconnect-and-retry before propagating. Connect is
abstracted behind a Func<CancellationToken, Task<Stream>> so tests can inject
in-process pipes.
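
The gate-and-retry shape, as a minimal sketch (the real PipeChannel lives in
Ipc/ and is not shown here; SendAndReceiveAsync and ReconnectAsync are
illustrative names):

    // Illustrative only — not the shipped implementation.
    private readonly SemaphoreSlim _gate = new(1, 1);

    public async Task<TReply> InvokeAsync<TRequest, TReply>(
        MessageKind requestKind, MessageKind replyKind, TRequest request, CancellationToken ct)
    {
        await _gate.WaitAsync(ct).ConfigureAwait(false); // single in-flight call
        try
        {
            try
            {
                return await SendAndReceiveAsync<TRequest, TReply>(requestKind, replyKind, request, ct).ConfigureAwait(false);
            }
            catch (IOException)
            {
                await ReconnectAsync(ct).ConfigureAwait(false); // re-dials + redoes Hello
                // Exactly one retry; a second failure propagates to the caller.
                return await SendAndReceiveAsync<TRequest, TReply>(requestKind, replyKind, request, ct).ConfigureAwait(false);
            }
        }
        finally
        {
            _gate.Release();
        }
    }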

WonderwareHistorianClient maps:
- HistorianSampleDto.Quality (raw OPC DA byte) → OPC UA StatusCode uint via
  QualityMapper (port of HistorianQualityMapper from sidecar); core shape
  sketched after this list.
- HistorianAggregateSampleDto.Value=null → BadNoData (0x809B0000).
- WriteAlarmEventsReply.PerEventOk[i]=true → Ack, false → RetryPlease.
  Whole-call failure or transport exception → RetryPlease for every event in
  the batch (drain worker handles backoff).
- AlarmHistorianEvent → AlarmHistorianEventDto with severity bucketed via
  AlarmSeverity-to-ushort mapping (Low=250, Medium=500, High=700, Crit=900).
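
QualityMapper's core shape, hedged (the shipped mapper in Internal/ may
translate DA substatus bits more finely; only the major-quality bucketing is
shown):

    // Bits 7..6 of the OPC DA quality byte carry the major quality:
    // 00 = Bad, 01 = Uncertain, 11 = Good (10 is reserved, treated as Bad).
    public static uint Map(byte daQuality) => (daQuality >> 6) switch
    {
        0b11 => 0x00000000u, // Good
        0b01 => 0x40000000u, // Uncertain
        _ => 0x80000000u,    // Bad (covers 0b00 and reserved 0b10)
    };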

GetHealthSnapshot tracks transport success + sidecar-reported failure
separately; ConsecutiveFailures rises on operation-level errors, not just
transport drops.
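
A hypothetical consumer of the snapshot (the threshold and the poller are
illustrative, not part of this PR):

    // Watchdog sketch: treat the sidecar as degraded after N operation-level
    // failures in a row, whether transport drops or sidecar-reported errors.
    var snap = client.GetHealthSnapshot();
    if (snap.ConsecutiveFailures >= 5)
    {
        logger.LogWarning(
            "Historian sidecar degraded: {Count} consecutive failures, last error: {Error}",
            snap.ConsecutiveFailures, snap.LastError);
    }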

10 round-trip tests via FakeSidecarServer (in-process net10 fake using the
client's own framing): byte→uint quality mapping, null-bucket BadNoData,
at-time order preservation, event-field round-trip, sidecar error surfacing,
WriteBatch per-event status, whole-call retry-please mapping, Hello
shared-secret rejection, transport-drop reconnect-and-retry, health snapshot
counters.
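
The connect-injection seam the tests rely on, roughly (pipe name and wiring
are illustrative, FakeSidecarServer's API is not shown in this view, and
options is an already-built WonderwareHistorianClientOptions):

    // using System.IO.Pipes;
    // ForTests swaps the connect factory, so no real sidecar endpoint is needed;
    // FakeSidecarServer would drive the server end of this pipe pair.
    var serverEnd = new NamedPipeServerStream("ww-hist-test", PipeDirection.InOut, 1,
        PipeTransmissionMode.Byte, PipeOptions.Asynchronous);
    var accept = serverEnd.WaitForConnectionAsync(); // fake awaits this, then speaks the protocol
    await using var client = WonderwareHistorianClient.ForTests(options, async ct =>
    {
        var pipe = new NamedPipeClientStream(".", "ww-hist-test", PipeDirection.InOut, PipeOptions.Asynchronous);
        await pipe.ConnectAsync(ct);
        return pipe;
    });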

PR 3.W will register this client as IHistorianDataSource + IAlarmHistorianWriter
in OpcUaServerService DI.
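
A plausible shape for that registration (the exact composition root is not in
this PR; option values are placeholders):

    // using Microsoft.Extensions.DependencyInjection;
    // One client instance backs both interfaces.
    services.AddSingleton(new WonderwareHistorianClientOptions { /* pipe name, secret, timeouts */ });
    services.AddSingleton<WonderwareHistorianClient>();
    services.AddSingleton<IHistorianDataSource>(sp => sp.GetRequiredService<WonderwareHistorianClient>());
    services.AddSingleton<IAlarmHistorianWriter>(sp => sp.GetRequiredService<WonderwareHistorianClient>());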

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-29 14:40:56 -04:00

using MessagePack;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using ZB.MOM.WW.OtOpcUa.Core.Abstractions;
using ZB.MOM.WW.OtOpcUa.Core.AlarmHistorian;
using ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client.Internal;
using ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client.Ipc;
using ClientHistorianEventDto = ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client.Ipc.HistorianEventDto;
namespace ZB.MOM.WW.OtOpcUa.Driver.Historian.Wonderware.Client;
/// <summary>
/// .NET 10 client for the Wonderware historian sidecar (PR 3.3 protocol). Implements both
/// <see cref="IHistorianDataSource"/> (read paths consumed by
/// <c>Server.History.IHistoryRouter</c>) and <see cref="IAlarmHistorianWriter"/>
/// (alarm-event drain consumed by <c>Core.AlarmHistorian.SqliteStoreAndForwardSink</c>).
/// </summary>
/// <remarks>
/// The client owns a single <see cref="PipeChannel"/> with one in-flight call at a time;
/// concurrent calls serialize on the channel's gate. Reconnect is handled inside the
/// channel — transient transport failures retry once before propagating.
/// </remarks>
public sealed class WonderwareHistorianClient : IHistorianDataSource, IAlarmHistorianWriter, IAsyncDisposable
{
private readonly PipeChannel _channel;
private readonly object _healthLock = new();
private DateTime? _lastSuccessUtc;
private DateTime? _lastFailureUtc;
private string? _lastError;
private long _totalQueries;
private long _totalSuccesses;
private long _totalFailures;
private int _consecutiveFailures;
/// <summary>
/// Creates a client over a real named-pipe connection. Tests that need an in-process
/// duplex pair use the <see cref="ForTests"/> factory.
/// </summary>
public WonderwareHistorianClient(WonderwareHistorianClientOptions options, ILogger<WonderwareHistorianClient>? logger = null)
: this(options, ct => PipeChannel.DefaultNamedPipeConnectFactory(options, ct), logger)
{
}
/// <summary>Test seam — inject an arbitrary connect callback.</summary>
public static WonderwareHistorianClient ForTests(
WonderwareHistorianClientOptions options,
Func<CancellationToken, Task<Stream>> connect,
ILogger<WonderwareHistorianClient>? logger = null)
=> new(options, connect, logger);
private WonderwareHistorianClient(
WonderwareHistorianClientOptions options,
Func<CancellationToken, Task<Stream>> connect,
ILogger<WonderwareHistorianClient>? logger)
{
ArgumentNullException.ThrowIfNull(options);
var log = (ILogger?)logger ?? NullLogger.Instance;
_channel = new PipeChannel(options, connect, log);
}
// ===== IHistorianDataSource =====
public async Task<HistoryReadResult> ReadRawAsync(
string fullReference, DateTime startUtc, DateTime endUtc, uint maxValuesPerNode,
CancellationToken cancellationToken)
{
var req = new ReadRawRequest
{
TagName = fullReference,
StartUtcTicks = startUtc.Ticks,
EndUtcTicks = endUtc.Ticks,
MaxValues = (int)Math.Min(maxValuesPerNode, int.MaxValue),
CorrelationId = Guid.NewGuid().ToString("N"),
};
var reply = await Invoke<ReadRawRequest, ReadRawReply>(MessageKind.ReadRawRequest, MessageKind.ReadRawReply, req, cancellationToken).ConfigureAwait(false);
ThrowIfFailed(reply.Success, reply.Error, "ReadRaw");
return new HistoryReadResult(ToSnapshots(reply.Samples), ContinuationPoint: null);
}
public async Task<HistoryReadResult> ReadProcessedAsync(
string fullReference, DateTime startUtc, DateTime endUtc, TimeSpan interval,
HistoryAggregateType aggregate, CancellationToken cancellationToken)
{
var req = new ReadProcessedRequest
{
TagName = fullReference,
StartUtcTicks = startUtc.Ticks,
EndUtcTicks = endUtc.Ticks,
IntervalMs = interval.TotalMilliseconds,
AggregateColumn = MapAggregate(aggregate),
CorrelationId = Guid.NewGuid().ToString("N"),
};
var reply = await Invoke<ReadProcessedRequest, ReadProcessedReply>(MessageKind.ReadProcessedRequest, MessageKind.ReadProcessedReply, req, cancellationToken).ConfigureAwait(false);
ThrowIfFailed(reply.Success, reply.Error, "ReadProcessed");
return new HistoryReadResult(ToAggregateSnapshots(reply.Buckets), ContinuationPoint: null);
}
public async Task<HistoryReadResult> ReadAtTimeAsync(
string fullReference, IReadOnlyList<DateTime> timestampsUtc, CancellationToken cancellationToken)
{
var ticks = new long[timestampsUtc.Count];
for (var i = 0; i < timestampsUtc.Count; i++) ticks[i] = timestampsUtc[i].Ticks;
var req = new ReadAtTimeRequest
{
TagName = fullReference,
TimestampsUtcTicks = ticks,
CorrelationId = Guid.NewGuid().ToString("N"),
};
var reply = await Invoke<ReadAtTimeRequest, ReadAtTimeReply>(MessageKind.ReadAtTimeRequest, MessageKind.ReadAtTimeReply, req, cancellationToken).ConfigureAwait(false);
ThrowIfFailed(reply.Success, reply.Error, "ReadAtTime");
return new HistoryReadResult(ToSnapshots(reply.Samples), ContinuationPoint: null);
}
public async Task<HistoricalEventsResult> ReadEventsAsync(
string? sourceName, DateTime startUtc, DateTime endUtc, int maxEvents,
CancellationToken cancellationToken)
{
var req = new ReadEventsRequest
{
SourceName = sourceName,
StartUtcTicks = startUtc.Ticks,
EndUtcTicks = endUtc.Ticks,
MaxEvents = maxEvents,
CorrelationId = Guid.NewGuid().ToString("N"),
};
var reply = await Invoke<ReadEventsRequest, ReadEventsReply>(MessageKind.ReadEventsRequest, MessageKind.ReadEventsReply, req, cancellationToken).ConfigureAwait(false);
ThrowIfFailed(reply.Success, reply.Error, "ReadEvents");
return new HistoricalEventsResult(ToHistoricalEvents(reply.Events), ContinuationPoint: null);
}
public HistorianHealthSnapshot GetHealthSnapshot()
{
lock (_healthLock)
{
return new HistorianHealthSnapshot(
TotalQueries: _totalQueries,
TotalSuccesses: _totalSuccesses,
TotalFailures: _totalFailures,
ConsecutiveFailures: _consecutiveFailures,
LastSuccessTime: _lastSuccessUtc,
LastFailureTime: _lastFailureUtc,
LastError: _lastError,
ProcessConnectionOpen: _channel.IsConnected,
EventConnectionOpen: _channel.IsConnected,
ActiveProcessNode: null,
ActiveEventNode: null,
Nodes: []);
}
}
// ===== IAlarmHistorianWriter =====
public async Task<IReadOnlyList<HistorianWriteOutcome>> WriteBatchAsync(
IReadOnlyList<AlarmHistorianEvent> batch, CancellationToken cancellationToken)
{
ArgumentNullException.ThrowIfNull(batch);
if (batch.Count == 0) return [];
var dtos = new AlarmHistorianEventDto[batch.Count];
for (var i = 0; i < batch.Count; i++) dtos[i] = ToDto(batch[i]);
var req = new WriteAlarmEventsRequest
{
Events = dtos,
CorrelationId = Guid.NewGuid().ToString("N"),
};
try
{
var reply = await Invoke<WriteAlarmEventsRequest, WriteAlarmEventsReply>(
MessageKind.WriteAlarmEventsRequest, MessageKind.WriteAlarmEventsReply, req, cancellationToken).ConfigureAwait(false);
// Whole-call failure → transient retry for every event in the batch.
if (!reply.Success)
{
var fail = new HistorianWriteOutcome[batch.Count];
Array.Fill(fail, HistorianWriteOutcome.RetryPlease);
return fail;
}
// Per-event status: PerEventOk[i] = true → Ack; false → RetryPlease. A null or
// short array leaves the uncovered tail as RetryPlease.
var perEventOk = reply.PerEventOk ?? [];
var outcomes = new HistorianWriteOutcome[batch.Count];
for (var i = 0; i < batch.Count; i++)
{
var ok = i < perEventOk.Length && perEventOk[i];
outcomes[i] = ok ? HistorianWriteOutcome.Ack : HistorianWriteOutcome.RetryPlease;
}
return outcomes;
}
catch
{
// Transport / deserialization failure — every event is retry-please. The drain
// worker's backoff handles recovery.
var fail = new HistorianWriteOutcome[batch.Count];
Array.Fill(fail, HistorianWriteOutcome.RetryPlease);
return fail;
}
}
// ===== Helpers =====
private async Task<TReply> Invoke<TRequest, TReply>(
MessageKind requestKind, MessageKind expectedReplyKind, TRequest request, CancellationToken ct)
where TReply : class
{
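// _totalQueries is bumped lock-free on the hot path; the remaining counters are
// updated under _healthLock (RecordSuccess/RecordFailure) and read as a set in
// GetHealthSnapshot.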
Interlocked.Increment(ref _totalQueries);
try
{
var reply = await _channel.InvokeAsync<TRequest, TReply>(requestKind, expectedReplyKind, request, ct).ConfigureAwait(false);
RecordSuccess();
return reply;
}
catch (Exception ex)
{
RecordFailure(ex.Message);
throw;
}
}
private void RecordSuccess()
{
lock (_healthLock)
{
_totalSuccesses++;
_consecutiveFailures = 0;
_lastSuccessUtc = DateTime.UtcNow;
}
}
private void RecordFailure(string message)
{
lock (_healthLock)
{
_totalFailures++;
_consecutiveFailures++;
_lastFailureUtc = DateTime.UtcNow;
_lastError = message;
}
}
private void ThrowIfFailed(bool success, string? error, string op)
{
if (!success)
{
// Sidecar-reported failure counts as an operation failure even though the
// transport delivered a reply. The Invoke wrapper already recorded transport
// success — undo that and record the failure so the health snapshot reflects
// operation-level success rates rather than just connectivity.
ReclassifySuccessAsFailure(error);
throw new InvalidOperationException(
$"Sidecar {op} failed: {error ?? "<no message>"}.");
}
}
private void ReclassifySuccessAsFailure(string? message)
{
lock (_healthLock)
{
// Transport-level RecordSuccess happened a moment ago; reverse it.
_totalSuccesses--;
_totalFailures++;
_consecutiveFailures++;
_lastFailureUtc = DateTime.UtcNow;
_lastError = message;
}
}
private static IReadOnlyList<DataValueSnapshot> ToSnapshots(HistorianSampleDto[] dtos)
{
if (dtos.Length == 0) return [];
var snapshots = new DataValueSnapshot[dtos.Length];
for (var i = 0; i < dtos.Length; i++)
{
var dto = dtos[i];
var value = dto.ValueBytes is null ? null : MessagePackSerializer.Deserialize<object>(dto.ValueBytes);
snapshots[i] = new DataValueSnapshot(
Value: value,
StatusCode: QualityMapper.Map(dto.Quality),
SourceTimestampUtc: new DateTime(dto.TimestampUtcTicks, DateTimeKind.Utc),
ServerTimestampUtc: DateTime.UtcNow);
}
return snapshots;
}
private static IReadOnlyList<DataValueSnapshot> ToAggregateSnapshots(HistorianAggregateSampleDto[] dtos)
{
if (dtos.Length == 0) return [];
var snapshots = new DataValueSnapshot[dtos.Length];
for (var i = 0; i < dtos.Length; i++)
{
var dto = dtos[i];
// Null aggregate value → BadNoData per Core.Abstractions HistoryReadResult convention.
snapshots[i] = new DataValueSnapshot(
Value: dto.Value,
StatusCode: dto.Value is null ? 0x809B0000u /* BadNoData */ : 0x00000000u /* Good */,
SourceTimestampUtc: new DateTime(dto.TimestampUtcTicks, DateTimeKind.Utc),
ServerTimestampUtc: DateTime.UtcNow);
}
return snapshots;
}
private static IReadOnlyList<HistoricalEvent> ToHistoricalEvents(ClientHistorianEventDto[] dtos)
{
if (dtos.Length == 0) return [];
var events = new HistoricalEvent[dtos.Length];
for (var i = 0; i < dtos.Length; i++)
{
var dto = dtos[i];
events[i] = new HistoricalEvent(
EventId: dto.EventId,
SourceName: dto.Source,
EventTimeUtc: new DateTime(dto.EventTimeUtcTicks, DateTimeKind.Utc),
ReceivedTimeUtc: new DateTime(dto.ReceivedTimeUtcTicks, DateTimeKind.Utc),
Message: dto.DisplayText,
Severity: dto.Severity);
}
return events;
}
private static AlarmHistorianEventDto ToDto(AlarmHistorianEvent evt) => new()
{
EventId = evt.AlarmId,
SourceName = evt.EquipmentPath,
ConditionId = evt.AlarmName,
AlarmType = evt.AlarmTypeName + ":" + evt.EventKind,
Message = evt.Message,
Severity = MapSeverity(evt.Severity),
EventTimeUtcTicks = evt.TimestampUtc.Ticks,
AckComment = evt.Comment,
};
private static ushort MapSeverity(AlarmSeverity severity) => severity switch
{
AlarmSeverity.Low => 250,
AlarmSeverity.Medium => 500,
AlarmSeverity.High => 700,
AlarmSeverity.Critical => 900,
_ => 500,
};
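// Maps Core aggregate types onto Wonderware AnalogSummary column names; Count is
// exposed by the summary view as ValueCount.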
private static string MapAggregate(HistoryAggregateType aggregate) => aggregate switch
{
HistoryAggregateType.Average => "Average",
HistoryAggregateType.Minimum => "Minimum",
HistoryAggregateType.Maximum => "Maximum",
HistoryAggregateType.Count => "ValueCount",
HistoryAggregateType.Total => throw new NotSupportedException(
"HistoryAggregateType.Total is not supported by the Wonderware AnalogSummary query — use Average/Minimum/Maximum/Count."),
_ => throw new NotSupportedException($"Unknown HistoryAggregateType {aggregate}"),
};
public ValueTask DisposeAsync() => _channel.DisposeAsync();
/// <summary>
/// Synchronous dispose required by <see cref="IDisposable"/> on
/// <see cref="IHistorianDataSource"/>. The underlying channel's async cleanup is
/// non-blocking (just resets transport state + disposes streams), so the
/// GetAwaiter()/GetResult() bridge is safe.
/// </summary>
public void Dispose() => _channel.DisposeAsync().AsTask().GetAwaiter().GetResult();
}