|
|
|
|
@@ -27,8 +27,15 @@ namespace ZB.MOM.WW.OtOpcUa.Driver.OpcUaClient;
|
|
|
|
|
/// </para>
|
|
|
|
|
/// </remarks>
|
|
|
|
|
public sealed class OpcUaClientDriver(OpcUaClientDriverOptions options, string driverInstanceId)
|
|
|
|
|
: IDriver, ITagDiscovery, IReadable, IWritable, ISubscribable, IHostConnectivityProbe, IDisposable, IAsyncDisposable
|
|
|
|
|
: IDriver, ITagDiscovery, IReadable, IWritable, ISubscribable, IHostConnectivityProbe, IAlarmSource, IDisposable, IAsyncDisposable
|
|
|
|
|
{
|
|
|
|
|
// ---- IAlarmSource state ----

/// <summary>
/// Live alarm subscriptions, keyed by the driver-issued handle id created in
/// <c>SubscribeAlarmsAsync</c> and removed in <c>UnsubscribeAlarmsAsync</c>.
/// </summary>
private readonly System.Collections.Concurrent.ConcurrentDictionary<long, RemoteAlarmSubscription> _alarmSubscriptions = new();

/// <summary>Monotonic id source for alarm subscription handles; advanced via <c>Interlocked.Increment</c>.</summary>
private long _nextAlarmSubscriptionId;

/// <summary>
/// Raised from the alarm MonitoredItem's notification callback for each event that
/// passes the per-subscription source-node filter (see <c>OnEventNotification</c>).
/// </summary>
public event EventHandler<AlarmEventArgs>? OnAlarmEvent;
|
|
|
|
|
|
|
|
|
|
// ---- ISubscribable + IHostConnectivityProbe state ----

/// <summary>Live data subscriptions, keyed by a driver-issued id (usage sites are outside this chunk).</summary>
private readonly System.Collections.Concurrent.ConcurrentDictionary<long, RemoteSubscription> _subscriptions = new();
|
|
|
|
|
@@ -61,6 +68,12 @@ public sealed class OpcUaClientDriver(OpcUaClientDriverOptions options, string d
|
|
|
|
|
/// <summary>Dispose guard — NOTE(review): the set/read sites are outside this chunk; confirm it is checked on every public entry point.</summary>
private bool _disposed;

/// <summary>URL of the endpoint the driver actually connected to. Exposed via <see cref="HostName"/>.</summary>
private string? _connectedEndpointUrl;

/// <summary>
/// SDK-provided reconnect handler that owns the retry loop + session-transfer machinery
/// when the session's keep-alive channel reports a bad status. Null outside the
/// reconnecting window; constructed lazily inside the keep-alive handler.
/// </summary>
private SessionReconnectHandler? _reconnectHandler;

/// <summary>Caller-supplied instance id (primary-constructor parameter), echoed verbatim.</summary>
public string DriverInstanceId => driverInstanceId;

/// <summary>Constant discriminator identifying this driver implementation.</summary>
public string DriverType => "OpcUaClient";
|
|
|
|
|
@@ -104,16 +117,13 @@ public sealed class OpcUaClientDriver(OpcUaClientDriverOptions options, string d
|
|
|
|
|
"Tried:\n " + string.Join("\n ", attemptErrors),
|
|
|
|
|
attemptErrors.Select(e => new InvalidOperationException(e)));
|
|
|
|
|
|
|
|
|
|
// Wire the session's keep-alive channel into HostState. OPC UA keep-alives are
|
|
|
|
|
// authoritative for session liveness: the SDK pings on KeepAliveInterval and sets
|
|
|
|
|
// KeepAliveStopped when N intervals elapse without a response. That's strictly
|
|
|
|
|
// better than a driver-side polling probe — no extra round-trip, no duplicate
|
|
|
|
|
// semantic.
|
|
|
|
|
_keepAliveHandler = (_, e) =>
|
|
|
|
|
{
|
|
|
|
|
var healthy = !ServiceResult.IsBad(e.Status);
|
|
|
|
|
TransitionTo(healthy ? HostState.Running : HostState.Stopped);
|
|
|
|
|
};
|
|
|
|
|
// Wire the session's keep-alive channel into HostState + the reconnect trigger.
|
|
|
|
|
// OPC UA keep-alives are authoritative for session liveness: the SDK pings on
|
|
|
|
|
// KeepAliveInterval and sets KeepAliveStopped when N intervals elapse without a
|
|
|
|
|
// response. On a bad keep-alive the driver spins up a SessionReconnectHandler
|
|
|
|
|
// which transparently retries + swaps the underlying session. Subscriptions move
|
|
|
|
|
// via TransferSubscriptions so local MonitoredItem handles stay valid.
|
|
|
|
|
_keepAliveHandler = OnKeepAlive;
|
|
|
|
|
session.KeepAlive += _keepAliveHandler;
|
|
|
|
|
|
|
|
|
|
Session = session;
|
|
|
|
|
@@ -392,6 +402,20 @@ public sealed class OpcUaClientDriver(OpcUaClientDriverOptions options, string d
|
|
|
|
|
}
|
|
|
|
|
_subscriptions.Clear();
|
|
|
|
|
|
|
|
|
|
foreach (var ras in _alarmSubscriptions.Values)
|
|
|
|
|
{
|
|
|
|
|
try { await ras.Subscription.DeleteAsync(silent: true, cancellationToken).ConfigureAwait(false); }
|
|
|
|
|
catch { /* best-effort */ }
|
|
|
|
|
}
|
|
|
|
|
_alarmSubscriptions.Clear();
|
|
|
|
|
|
|
|
|
|
// Abort any in-flight reconnect attempts before touching the session — BeginReconnect's
|
|
|
|
|
// retry loop holds a reference to the current session and would fight Session.CloseAsync
|
|
|
|
|
// if left spinning.
|
|
|
|
|
try { _reconnectHandler?.CancelReconnect(); } catch { }
|
|
|
|
|
_reconnectHandler?.Dispose();
|
|
|
|
|
_reconnectHandler = null;
|
|
|
|
|
|
|
|
|
|
if (_keepAliveHandler is not null && Session is not null)
|
|
|
|
|
{
|
|
|
|
|
try { Session.KeepAlive -= _keepAliveHandler; } catch { }
|
|
|
|
|
@@ -926,6 +950,205 @@ public sealed class OpcUaClientDriver(OpcUaClientDriverOptions options, string d
|
|
|
|
|
public string DiagnosticId => $"opcua-sub-{Id}";
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ---- IAlarmSource ----
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Field positions in the EventFilter SelectClauses below. Used to index into the
/// <c>EventFieldList.EventFields</c> Variant collection when an event arrives.
/// The values here MUST stay in lock-step with the order the clauses are added in
/// <c>SubscribeAlarmsAsync</c> — the server returns event fields positionally, not by name.
/// </summary>
private const int AlarmFieldEventId = 0;     // BaseEventType.EventId
private const int AlarmFieldEventType = 1;   // BaseEventType.EventType — read as string via ToString()
private const int AlarmFieldSourceNode = 2;  // BaseEventType.SourceNode — matched against the caller's filter set
private const int AlarmFieldMessage = 3;     // BaseEventType.Message — read as LocalizedText
private const int AlarmFieldSeverity = 4;    // BaseEventType.Severity — read as ushort
private const int AlarmFieldTime = 5;        // BaseEventType.Time — read as DateTime
private const int AlarmFieldConditionId = 6; // ConditionType node id (empty-BrowsePath operand, AttributeId = NodeId)
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Creates a server-side event subscription on the Server object's EventNotifier and
/// raises <see cref="OnAlarmEvent"/> for each event that matches the source filter.
/// </summary>
/// <param name="sourceNodeIds">
/// Source-node identifiers to match (ordinal string compare) against the event's
/// SourceNode field. An empty list means "all sources" (see OnEventNotification).
/// </param>
/// <param name="cancellationToken">Cancels gate acquisition and subscription creation.</param>
/// <returns>Handle to pass to <see cref="UnsubscribeAlarmsAsync"/> for teardown.</returns>
public async Task<IAlarmSubscriptionHandle> SubscribeAlarmsAsync(
    IReadOnlyList<string> sourceNodeIds, CancellationToken cancellationToken)
{
    var session = RequireSession();
    var id = Interlocked.Increment(ref _nextAlarmSubscriptionId);
    var handle = new OpcUaAlarmSubscriptionHandle(id);

    // Pre-resolve the source-node filter set so the per-event notification handler can
    // match in O(1) without re-parsing on every event.
    var sourceFilter = new HashSet<string>(sourceNodeIds, StringComparer.Ordinal);

    var subscription = new Subscription(telemetry: null!, new SubscriptionOptions
    {
        DisplayName = $"opcua-alarm-sub-{id}",
        PublishingInterval = 500, // 500ms — alarms don't need fast polling; the server pushes
        KeepAliveCount = 10,
        LifetimeCount = 1000,
        MaxNotificationsPerPublish = 0,
        PublishingEnabled = true,
        Priority = 0,
        TimestampsToReturn = TimestampsToReturn.Both,
    });

    // EventFilter SelectClauses — pick the standard BaseEventType fields we need to
    // materialize an AlarmEventArgs. Field positions are indexed by the AlarmField*
    // constants so the notification handler indexes in O(1) without re-examining the
    // QualifiedName BrowsePaths.
    var filter = new EventFilter();
    void AddField(string browseName) => filter.SelectClauses.Add(new SimpleAttributeOperand
    {
        TypeDefinitionId = ObjectTypeIds.BaseEventType,
        BrowsePath = [new QualifiedName(browseName)],
        AttributeId = Attributes.Value,
    });
    AddField("EventId");
    AddField("EventType");
    AddField("SourceNode");
    AddField("Message");
    AddField("Severity");
    AddField("Time");
    // ConditionId on ConditionType nodes is the branch identifier for
    // acknowledgeable conditions. Not a BaseEventType field — reach it via the typed path.
    filter.SelectClauses.Add(new SimpleAttributeOperand
    {
        TypeDefinitionId = ObjectTypeIds.ConditionType,
        BrowsePath = [], // empty path = the condition node itself
        AttributeId = Attributes.NodeId,
    });

    await _gate.WaitAsync(cancellationToken).ConfigureAwait(false);
    try
    {
        session.AddSubscription(subscription);
        try
        {
            await subscription.CreateAsync(cancellationToken).ConfigureAwait(false);

            var eventItem = new MonitoredItem(telemetry: null!, new MonitoredItemOptions
            {
                DisplayName = "Server/Events",
                StartNodeId = ObjectIds.Server,
                AttributeId = Attributes.EventNotifier,
                MonitoringMode = MonitoringMode.Reporting,
                QueueSize = 1000, // deep queue — a server can fire many alarms in bursts
                DiscardOldest = false,
                Filter = filter,
            })
            {
                Handle = handle,
            };
            eventItem.Notification += (mi, args) => OnEventNotification(handle, sourceFilter, mi, args);
            subscription.AddItem(eventItem);
            await subscription.CreateItemsAsync(cancellationToken).ConfigureAwait(false);
        }
        catch
        {
            // Don't leak a half-created subscription: AddSubscription already attached it
            // to the session, and a failure in CreateAsync/CreateItemsAsync would leave it
            // dangling there forever (it is never registered in _alarmSubscriptions, so no
            // teardown path would reach it). Best-effort delete mirrors UnsubscribeAlarmsAsync;
            // use CancellationToken.None because the caller's token may be the reason we're here.
            try { await subscription.DeleteAsync(silent: true, CancellationToken.None).ConfigureAwait(false); }
            catch { /* best-effort */ }
            throw;
        }

        _alarmSubscriptions[id] = new RemoteAlarmSubscription(subscription, handle);
    }
    finally { _gate.Release(); }

    return handle;
}
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Tears down an alarm subscription previously created by <see cref="SubscribeAlarmsAsync"/>.
/// Handles of a foreign type, or handles already removed, are ignored silently.
/// </summary>
/// <param name="handle">Handle returned by <see cref="SubscribeAlarmsAsync"/>.</param>
/// <param name="cancellationToken">Cancels gate acquisition and the delete call.</param>
public async Task UnsubscribeAlarmsAsync(IAlarmSubscriptionHandle handle, CancellationToken cancellationToken)
{
    if (handle is not OpcUaAlarmSubscriptionHandle typedHandle
        || !_alarmSubscriptions.TryRemove(typedHandle.Id, out var removed))
    {
        return;
    }

    await _gate.WaitAsync(cancellationToken).ConfigureAwait(false);
    try
    {
        try
        {
            await removed.Subscription.DeleteAsync(silent: true, cancellationToken).ConfigureAwait(false);
        }
        catch
        {
            // Best-effort — the session may already be gone across a reconnect.
        }
    }
    finally
    {
        _gate.Release();
    }
}
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Acknowledges a batch of alarm conditions by calling the OPC UA
/// <c>AcknowledgeableConditionType.Acknowledge</c> method on each condition node.
/// Deliberately best-effort: service-call failures are swallowed and per-call result
/// statuses are discarded — callers rely on a re-ack mechanism for failures.
/// </summary>
/// <param name="acknowledgements">Acks to send; entries whose ConditionId fails to parse are skipped.</param>
/// <param name="cancellationToken">Cancels gate acquisition and the Call service request.</param>
public async Task AcknowledgeAsync(
    IReadOnlyList<AlarmAcknowledgeRequest> acknowledgements, CancellationToken cancellationToken)
{
    // Short-circuit empty batch BEFORE touching the session so callers can pass an empty
    // list without guarding the size themselves — e.g. a bulk-ack UI that built an empty
    // list because the filter matched nothing.
    if (acknowledgements.Count == 0) return;
    var session = RequireSession();

    // OPC UA A&C: call the AcknowledgeableConditionType.Acknowledge method on each
    // condition node with EventId + Comment arguments. CallAsync accepts a batch —
    // one CallMethodRequest per ack.
    var callRequests = new CallMethodRequestCollection();
    foreach (var ack in acknowledgements)
    {
        // Unparseable condition ids drop the single ack rather than failing the batch.
        if (!TryParseNodeId(session, ack.ConditionId, out var conditionId)) continue;
        callRequests.Add(new CallMethodRequest
        {
            ObjectId = conditionId,
            MethodId = MethodIds.AcknowledgeableConditionType_Acknowledge,
            InputArguments = [
                new Variant(Array.Empty<byte>()), // EventId — NOTE(review): Part 9 defines EventId as identifying the specific event/branch being acked; an empty byte[] "resolves to most recent" is not guaranteed and strict servers may reject it. Confirm, and pass the real EventId if AlarmAcknowledgeRequest carries one.
                new Variant(new LocalizedText(ack.Comment ?? string.Empty)),
            ],
        });
    }

    // Every ack was skipped by the parse guard — nothing to send.
    if (callRequests.Count == 0) return;

    await _gate.WaitAsync(cancellationToken).ConfigureAwait(false);
    try
    {
        try
        {
            // Result statuses intentionally discarded — see the summary: acks are best-effort.
            _ = await session.CallAsync(
                requestHeader: null,
                methodsToCall: callRequests,
                ct: cancellationToken).ConfigureAwait(false);
        }
        catch { /* best-effort — caller's re-ack mechanism catches pathological paths */ }
    }
    finally { _gate.Release(); }
}
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Notification callback for the alarm MonitoredItem. Decodes the positional event
/// fields (indexed by the AlarmField* constants), applies the per-subscription
/// source-node filter, and raises <see cref="OnAlarmEvent"/>.
/// </summary>
/// <param name="handle">Subscription handle the event is attributed to.</param>
/// <param name="sourceFilter">Ordinal source-node filter; empty set means "all sources".</param>
/// <param name="item">The monitored item that fired (unused beyond the SDK signature).</param>
/// <param name="args">Carries the <c>EventFieldList</c> payload.</param>
private void OnEventNotification(
    OpcUaAlarmSubscriptionHandle handle,
    HashSet<string> sourceFilter,
    MonitoredItem item,
    MonitoredItemNotificationEventArgs args)
{
    // Only event notifications carry the positional field list we selected.
    if (args.NotificationValue is not EventFieldList fields) return;

    // Defensive: fewer fields than we selected would index out of range below.
    if (fields.EventFields.Count <= AlarmFieldConditionId) return;

    object? FieldValue(int index) => fields.EventFields[index].Value;

    var source = FieldValue(AlarmFieldSourceNode)?.ToString() ?? string.Empty;
    if (sourceFilter.Count > 0 && !sourceFilter.Contains(source)) return;

    var alarmType = FieldValue(AlarmFieldEventType)?.ToString() ?? "BaseEventType";
    var text = (FieldValue(AlarmFieldMessage) as LocalizedText)?.Text ?? string.Empty;
    var rawSeverity = FieldValue(AlarmFieldSeverity) is ushort s ? s : (ushort)0;
    var timestamp = FieldValue(AlarmFieldTime) is DateTime dt ? dt : DateTime.UtcNow;
    var condition = FieldValue(AlarmFieldConditionId)?.ToString() ?? string.Empty;

    OnAlarmEvent?.Invoke(this, new AlarmEventArgs(
        SubscriptionHandle: handle,
        SourceNodeId: source,
        ConditionId: condition,
        AlarmType: alarmType,
        Message: text,
        Severity: MapSeverity(rawSeverity),
        SourceTimestampUtc: timestamp));
}
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Maps an OPC UA <c>BaseEventType.Severity</c> (1..1000) onto the coarse-grained
/// <see cref="AlarmSeverity"/> buckets. Thresholds follow the OPC UA A&amp;C Part 9
/// guidance: 1-200 Low, 201-500 Medium, 501-800 High, 801-1000 Critical.
/// </summary>
/// <param name="opcSeverity">Raw OPC UA severity value.</param>
/// <returns>The corresponding coarse severity bucket.</returns>
internal static AlarmSeverity MapSeverity(ushort opcSeverity)
{
    if (opcSeverity <= 200)
    {
        return AlarmSeverity.Low;
    }
    if (opcSeverity <= 500)
    {
        return AlarmSeverity.Medium;
    }
    if (opcSeverity <= 800)
    {
        return AlarmSeverity.High;
    }
    return AlarmSeverity.Critical;
}
|
|
|
|
|
|
|
|
|
|
/// <summary>Pairs a live SDK <see cref="Subscription"/> with the public handle it was issued under.</summary>
private sealed record RemoteAlarmSubscription(Subscription Subscription, OpcUaAlarmSubscriptionHandle Handle);
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Opaque handle returned by <c>SubscribeAlarmsAsync</c>; the numeric Id keys into the
/// driver's alarm-subscription table and is used for teardown in <c>UnsubscribeAlarmsAsync</c>.
/// </summary>
private sealed record OpcUaAlarmSubscriptionHandle(long Id) : IAlarmSubscriptionHandle
{
    /// <summary>Stable, human-readable identifier for diagnostics and logging.</summary>
    public string DiagnosticId => $"opcua-alarm-sub-{Id}";
}
|
|
|
|
|
|
|
|
|
|
// ---- IHostConnectivityProbe ----
|
|
|
|
|
|
|
|
|
|
/// <summary>
|
|
|
|
|
@@ -945,6 +1168,76 @@ public sealed class OpcUaClientDriver(OpcUaClientDriverOptions options, string d
|
|
|
|
|
return [new HostConnectivityStatus(HostName, _hostState, _hostStateChangedUtc)];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Session keep-alive handler. On a healthy ping, bumps HostState back to Running
/// (typical bounce after a transient network blip). On a bad ping, flips HostState
/// to Stopped and starts the SDK's <see cref="SessionReconnectHandler"/>, which
/// retries on the configured period and fires <see cref="OnReconnectComplete"/>
/// when it lands a new session.
/// </summary>
/// <param name="sender">The session whose keep-alive fired; passed to BeginReconnect.</param>
/// <param name="e">Keep-alive status from the SDK.</param>
private void OnKeepAlive(ISession sender, KeepAliveEventArgs e)
{
    if (!ServiceResult.IsBad(e.Status))
    {
        TransitionTo(HostState.Running);
        return;
    }

    TransitionTo(HostState.Stopped);

    // Kick off the SDK's reconnect loop exactly once per drop. The handler handles its
    // own retry cadence via ReconnectPeriod; we tear it down in OnReconnectComplete.
    // NOTE(review): this check/assign pair is unsynchronized — if the SDK can deliver
    // keep-alive callbacks concurrently, two reconnect handlers could race here;
    // confirm the SDK's callback threading guarantees.
    if (_reconnectHandler is not null) return;

    _reconnectHandler = new SessionReconnectHandler(telemetry: null!,
        reconnectAbort: false,
        maxReconnectPeriod: (int)TimeSpan.FromMinutes(2).TotalMilliseconds);

    // The reconnect state returned by BeginReconnect is not needed here — discard it
    // rather than binding it to a dead local (the original assigned an unused `state`).
    _ = _reconnectHandler.BeginReconnect(
        sender,
        (int)_options.ReconnectPeriod.TotalMilliseconds,
        OnReconnectComplete);
}
|
|
|
|
|
|
|
|
|
|
/// <summary>
/// Called by <see cref="SessionReconnectHandler"/> when its retry loop has either
/// successfully swapped to a new session or given up. Reads the new session off
/// <c>handler.Session</c>, unwires the old keep-alive hook, rewires for the new
/// one, and tears down the handler. Subscription migration is already handled
/// inside the SDK via <c>TransferSubscriptions</c> (the SDK calls it automatically
/// when <see cref="Session.TransferSubscriptionsOnReconnect"/> is <c>true</c>,
/// which is the default).
/// </summary>
/// <param name="sender">Expected to be the <see cref="SessionReconnectHandler"/>; anything else is ignored.</param>
/// <param name="e">Unused — the SDK signature carries no payload here.</param>
private void OnReconnectComplete(object? sender, EventArgs e)
{
    if (sender is not SessionReconnectHandler handler) return;
    var newSession = handler.Session;
    var oldSession = Session;

    // Rewire keep-alive onto the new session — without this the next drop wouldn't
    // trigger another reconnect attempt.
    if (oldSession is not null && _keepAliveHandler is not null)
    {
        // Old session may already be disposed by the SDK's swap — unhook best-effort.
        try { oldSession.KeepAlive -= _keepAliveHandler; } catch { }
    }
    if (newSession is not null && _keepAliveHandler is not null)
    {
        newSession.KeepAlive += _keepAliveHandler;
    }

    Session = newSession;
    // The handler has finished its job either way; dropping it re-arms OnKeepAlive's
    // "start exactly once per drop" guard for the next outage.
    _reconnectHandler?.Dispose();
    _reconnectHandler = null;

    // Whether the reconnect actually succeeded depends on whether the session is
    // non-null + connected. When it succeeded, flip back to Running so downstream
    // consumers see recovery.
    // NOTE(review): handler.Session may be the same (still-dead) session when the
    // retry loop gave up — non-null alone may not imply recovery. The reference
    // client compares ReferenceEquals(oldSession, handler.Session); confirm the
    // SDK's contract before relying on this transition.
    if (newSession is not null)
    {
        TransitionTo(HostState.Running);
        _health = new DriverHealth(DriverState.Healthy, DateTime.UtcNow, null);
    }
}
|
|
|
|
|
|
|
|
|
|
private void TransitionTo(HostState newState)
|
|
|
|
|
{
|
|
|
|
|
HostState old;
|
|
|
|
|
|