diff --git a/docs/Configuration.md b/docs/Configuration.md index a3f8327..bd7f4d5 100644 --- a/docs/Configuration.md +++ b/docs/Configuration.md @@ -97,13 +97,17 @@ Controls the embedded HTTP status dashboard. Defined in `DashboardConfiguration` ### Historian -Controls the Wonderware Historian connection for OPC UA historical data access. Defined in `HistorianConfiguration`. +Controls the Wonderware Historian SDK connection for OPC UA historical data access. Defined in `HistorianConfiguration`. | Property | Type | Default | Description | |----------|------|---------|-------------| | `Enabled` | `bool` | `false` | Enables OPC UA historical data access | -| `ConnectionString` | `string` | `"Server=localhost;Database=Runtime;Integrated Security=true;"` | Connection string for the Historian Runtime database | -| `CommandTimeoutSeconds` | `int` | `30` | SQL command timeout for historian queries | +| `ServerName` | `string` | `"localhost"` | Historian server hostname | +| `IntegratedSecurity` | `bool` | `true` | Use Windows authentication | +| `UserName` | `string?` | `null` | Username when `IntegratedSecurity` is false | +| `Password` | `string?` | `null` | Password when `IntegratedSecurity` is false | +| `Port` | `int` | `32568` | Historian TCP port | +| `CommandTimeoutSeconds` | `int` | `30` | SDK packet timeout in seconds | | `MaxValuesPerRead` | `int` | `10000` | Maximum values returned per `HistoryRead` request | ### Authentication @@ -228,7 +232,7 @@ Example — two-instance redundant pair (Primary): Three boolean properties act as feature flags that control optional subsystems: - **`OpcUa.AlarmTrackingEnabled`** -- When `true`, the node manager creates `AlarmConditionState` nodes for alarm attributes and monitors `InAlarm` transitions. Disabled by default because alarm tracking adds per-attribute overhead. 
-- **`Historian.Enabled`** -- When `true`, the service creates a `HistorianDataSource` connected to the Wonderware Historian Runtime database and registers it with the OPC UA server host. Disabled by default because not all deployments have a Historian instance. +- **`Historian.Enabled`** -- When `true`, the service creates a `HistorianDataSource` connected to the Wonderware Historian via the aahClientManaged SDK and registers it with the OPC UA server host. Disabled by default because not all deployments have a Historian instance. - **`GalaxyRepository.ExtendedAttributes`** -- When `true`, the repository loads additional Galaxy attribute metadata beyond the core set needed for the address space. Disabled by default to minimize startup query time. ## Configuration Validation @@ -305,7 +309,11 @@ Integration tests use this constructor to inject substitute implementations of ` }, "Historian": { "Enabled": false, - "ConnectionString": "Server=localhost;Database=Runtime;Integrated Security=true;", + "ServerName": "localhost", + "IntegratedSecurity": true, + "UserName": null, + "Password": null, + "Port": 32568, "CommandTimeoutSeconds": 30, "MaxValuesPerRead": 10000 }, diff --git a/docs/HistoricalDataAccess.md b/docs/HistoricalDataAccess.md index eb36392..9c6d6fc 100644 --- a/docs/HistoricalDataAccess.md +++ b/docs/HistoricalDataAccess.md @@ -1,26 +1,29 @@ # Historical Data Access -`LmxNodeManager` exposes OPC UA historical data access (HDA) by querying the Wonderware Historian Runtime database. The `HistorianDataSource` class translates OPC UA history requests into SQL queries against the Historian's `History` and `AnalogSummaryHistory` views, and the node manager overrides wire the results back into the OPC UA response. +`LmxNodeManager` exposes OPC UA historical data access (HDA) by querying the Wonderware Historian via the `aahClientManaged` SDK. 
The `HistorianDataSource` class translates OPC UA history requests into SDK queries using the `ArchestrA.HistorianAccess` API, and the node manager overrides wire the results back into the OPC UA response. -## Wonderware Historian Runtime Database +## Wonderware Historian SDK -The Historian stores time-series data in a SQL Server database named `Runtime`. Two views are relevant: +The server uses the AVEVA Historian managed SDK (`aahClientManaged.dll`) to query historical data. The SDK provides a cursor-based query API through `ArchestrA.HistorianAccess`, replacing direct SQL queries against the Historian Runtime database. Two query types are used: -- **`Runtime.dbo.History`** -- Raw historical samples with columns `DateTime`, `Value` (numeric), `vValue` (string), and `Quality`. -- **`Runtime.dbo.AnalogSummaryHistory`** -- Pre-computed aggregates bucketed by `wwResolution` (milliseconds), with columns like `Average`, `Minimum`, `Maximum`, `ValueCount`, `First`, `Last`, `StdDev`. +- **`HistoryQuery`** -- Raw historical samples with timestamp, value (numeric or string), and OPC quality. +- **`AnalogSummaryQuery`** -- Pre-computed aggregates with properties for Average, Minimum, Maximum, ValueCount, First, Last, StdDev, and more. -Both views require `TagName` in the `WHERE` clause. This is a Historian constraint -- the views are optimized for tag-scoped queries and do not support efficient cross-tag scans. +The SDK DLLs are located in `lib/` and originate from `C:\Program Files (x86)\Wonderware\Historian\`. ## Configuration -`HistorianConfiguration` controls the historian connection: +`HistorianConfiguration` controls the SDK connection: ```csharp public class HistorianConfiguration { public bool Enabled { get; set; } = false; - public string ConnectionString { get; set; } = - "Server=localhost;Database=Runtime;Integrated Security=true;"; + public string ServerName { get; set; } = "localhost"; + public bool IntegratedSecurity { get; set; } = true; + public string? 
UserName { get; set; } + public string? Password { get; set; } + public int Port { get; set; } = 32568; public int CommandTimeoutSeconds { get; set; } = 30; public int MaxValuesPerRead { get; set; } = 10000; } @@ -28,46 +31,59 @@ public class HistorianConfiguration When `Enabled` is `false`, the `HistorianDataSource` is not instantiated and the node manager returns `BadHistoryOperationUnsupported` for history read requests. +### Connection Properties + +| Property | Default | Description | +|---|---|---| +| `ServerName` | `localhost` | Historian server hostname | +| `IntegratedSecurity` | `true` | Use Windows authentication | +| `UserName` | `null` | Username when `IntegratedSecurity` is false | +| `Password` | `null` | Password when `IntegratedSecurity` is false | +| `Port` | `32568` | Historian TCP port | +| `CommandTimeoutSeconds` | `30` | SDK packet timeout in seconds | +| `MaxValuesPerRead` | `10000` | Maximum values per history read request | + +## Connection Lifecycle + +`HistorianDataSource` maintains a persistent connection to the Historian server via `ArchestrA.HistorianAccess`: + +1. **Lazy connect** -- The connection is established on the first query via `EnsureConnected()`. +2. **Connection reuse** -- Subsequent queries reuse the same connection. +3. **Auto-reconnect** -- On connection failure, the connection is disposed and re-established on the next query. +4. **Clean shutdown** -- `Dispose()` closes the connection when the service stops. + +The connection is opened with `ReadOnly = true` and `ConnectionType = Process`. + ## Raw Reads -`HistorianDataSource.ReadRawAsync` queries the `History` view for individual samples within a time range: +`HistorianDataSource.ReadRawAsync` uses a `HistoryQuery` to retrieve individual samples within a time range: -```sql -SELECT TOP (@MaxValues) DateTime, Value, vValue, Quality -FROM Runtime.dbo.History -WHERE TagName = @TagName - AND DateTime >= @StartTime AND DateTime <= @EndTime -ORDER BY DateTime -``` +1. 
Create a `HistoryQuery` via `_connection.CreateHistoryQuery()` +2. Configure `HistoryQueryArgs` with `TagNames`, `StartDateTime`, `EndDateTime`, and `RetrievalMode = Full` +3. Iterate: `StartQuery` -> `MoveNext` loop -> `EndQuery` -The `TOP` clause is included only when `maxValues > 0` (the OPC UA client specified `NumValuesPerNode`). Each row is converted to an OPC UA `DataValue`: +Each result row is converted to an OPC UA `DataValue`: -- `Value` column (double) takes priority over `vValue` (string). If both are null, the value is null. -- `SourceTimestamp` and `ServerTimestamp` are both set to the `DateTime` column. -- `StatusCode` is mapped from the Historian `Quality` byte via `QualityMapper` (the same OPC DA quality byte mapping used for live MXAccess data). +- `QueryResult.Value` (double) takes priority; `QueryResult.StringValue` is used as fallback for string-typed tags. +- `SourceTimestamp` and `ServerTimestamp` are both set to `QueryResult.StartDateTime`. +- `StatusCode` is mapped from the `QueryResult.OpcQuality` (UInt16) via `QualityMapper` (the same OPC DA quality byte mapping used for live MXAccess data). ## Aggregate Reads -`HistorianDataSource.ReadAggregateAsync` queries the `AnalogSummaryHistory` view for pre-computed aggregates: +`HistorianDataSource.ReadAggregateAsync` uses an `AnalogSummaryQuery` to retrieve pre-computed aggregates: -```sql -SELECT StartDateTime, [{aggregateColumn}] -FROM Runtime.dbo.AnalogSummaryHistory -WHERE TagName = @TagName - AND StartDateTime >= @StartTime AND StartDateTime <= @EndTime - AND wwResolution = @Resolution -ORDER BY StartDateTime -``` - -The `aggregateColumn` is interpolated directly into the SQL (it comes from the controlled `MapAggregateToColumn` mapping, not from user input). The `wwResolution` parameter maps from the OPC UA `ProcessingInterval` in milliseconds. +1. Create an `AnalogSummaryQuery` via `_connection.CreateAnalogSummaryQuery()` +2. 
Configure `AnalogSummaryQueryArgs` with `TagNames`, `StartDateTime`, `EndDateTime`, and `Resolution` (milliseconds) +3. Iterate the same `StartQuery` -> `MoveNext` -> `EndQuery` pattern +4. Extract the requested aggregate from named properties on `AnalogSummaryQueryResult` Null aggregate values return `BadNoData` status rather than `Good` with a null variant. ## Quality Mapping -The Historian stores standard OPC DA quality bytes, the same format used by MXAccess at runtime. The quality byte is passed through the shared `QualityMapper` pipeline (`MapFromMxAccessQuality` → `MapToOpcUaStatusCode`), which maps the OPC DA quality families to OPC UA status codes: +The Historian SDK returns standard OPC DA quality values in `QueryResult.OpcQuality` (UInt16). The low byte is passed through the shared `QualityMapper` pipeline (`MapFromMxAccessQuality` -> `MapToOpcUaStatusCode`), which maps the OPC DA quality families to OPC UA status codes: -| Historian Quality Byte | OPC DA Family | OPC UA StatusCode | +| OPC Quality Byte | OPC DA Family | OPC UA StatusCode | |---|---|---| | 0-63 | Bad | `Bad` (with sub-code when an exact enum match exists) | | 64-191 | Uncertain | `Uncertain` (with sub-code when an exact enum match exists) | @@ -77,9 +93,9 @@ See `Domain/QualityMapper.cs` and `Domain/Quality.cs` for the full mapping table ## Aggregate Function Mapping -`MapAggregateToColumn` translates OPC UA aggregate NodeIds to Historian column names: +`MapAggregateToColumn` translates OPC UA aggregate NodeIds to `AnalogSummaryQueryResult` property names: -| OPC UA Aggregate | Historian Column | +| OPC UA Aggregate | Result Property | |---|---| | `AggregateFunction_Average` | `Average` | | `AggregateFunction_Minimum` | `Minimum` | @@ -100,28 +116,14 @@ Unsupported aggregates return `null`, which causes the node manager to return `B 3. Call `ReadRawAsync` with the time range and `NumValuesPerNode` from the `ReadRawModifiedDetails`. 4. 
Pack the resulting `DataValue` list into a `HistoryData` object and wrap it in an `ExtensionObject` for the `HistoryReadResult`. -```csharp -var dataValues = _historianDataSource.ReadRawAsync( - tagRef, details.StartTime, details.EndTime, maxValues) - .GetAwaiter().GetResult(); - -var historyData = new HistoryData(); -historyData.DataValues.AddRange(dataValues); -results[idx] = new HistoryReadResult -{ - StatusCode = StatusCodes.Good, - HistoryData = new ExtensionObject(historyData) -}; -``` - ## HistoryReadProcessed Override `HistoryReadProcessed` handles aggregate history requests with additional validation: 1. Resolve the node and check historian availability (same as raw). 2. Validate that `AggregateType` is present in the `ReadProcessedDetails`. Return `BadAggregateListMismatch` if empty. -3. Map the requested aggregate to a Historian column via `MapAggregateToColumn`. Return `BadAggregateNotSupported` if unmapped. -4. Call `ReadAggregateAsync` with the time range, `ProcessingInterval`, and column name. +3. Map the requested aggregate to a result property via `MapAggregateToColumn`. Return `BadAggregateNotSupported` if unmapped. +4. Call `ReadAggregateAsync` with the time range, `ProcessingInterval`, and property name. 5. Return results in the same `HistoryData` / `ExtensionObject` format. ## Historizing Flag and AccessLevel diff --git a/historiangaps.md b/historiangaps.md new file mode 100644 index 0000000..8799c29 --- /dev/null +++ b/historiangaps.md @@ -0,0 +1,147 @@ +# Historian Implementation Gap Analysis + +Comparison of the current LmxOpcUa server historian implementation against the OPC UA Part 11 Historical Access specification requirements. 
+ +## Current Implementation Summary + +| Feature | Status | +|---------|--------| +| HistoryRead — ReadRawModifiedDetails | Implemented (raw only, no modified) | +| HistoryRead — ReadProcessedDetails | Implemented (7 aggregates) | +| Historizing attribute on nodes | Implemented | +| AccessLevel.HistoryRead on nodes | Implemented | +| Quality mapping (OPC DA → OPC UA) | Implemented | +| Historian SDK (aahClientManaged) | Implemented (replaced direct SQL) | +| Configurable enable/disable | Implemented | +| SDK packet timeout | Implemented | +| Max values per read | Implemented | +| HistoryServerCapabilities node | Implemented | +| AggregateFunctions folder | Implemented (7 functions) | +| Continuation points for history reads | Implemented | + +## Gaps + +### 1. ~~HistoryServerCapabilities Node (Required)~~ — RESOLVED + +**Spec requirement:** All OPC UA servers supporting Historical Access SHALL include a `HistoryServerCapabilities` object under `ServerCapabilities`. This is mandatory, not optional. + +**Current state:** Implemented. The server populates all `HistoryServerCapabilities` variables at startup via `LmxOpcUaServer.ConfigureHistoryCapabilities()`. All boolean capabilities are set, and `MaxReturnDataValues` reflects the configured limit. 
+ +**Required variables:** + +| Variable | Expected Value | Priority | +|----------|---------------|----------| +| `AccessHistoryDataCapability` | `true` | High | +| `AccessHistoryEventsCapability` | `false` | High | +| `MaxReturnDataValues` | configurable (e.g., 10000) | High | +| `MaxReturnEventValues` | 0 | Medium | +| `InsertDataCapability` | `false` | Medium | +| `ReplaceDataCapability` | `false` | Medium | +| `UpdateDataCapability` | `false` | Medium | +| `DeleteRawCapability` | `false` | Medium | +| `DeleteAtTimeCapability` | `false` | Medium | +| `InsertAnnotationCapability` | `false` | Low | +| `InsertEventCapability` | `false` | Low | +| `ReplaceEventCapability` | `false` | Low | +| `UpdateEventCapability` | `false` | Low | +| `DeleteEventCapability` | `false` | Low | +| `ServerTimestampSupported` | `true` | Medium | + +**Files to modify:** `LmxOpcUaServer.cs` or `LmxNodeManager.cs` — create and populate the `HistoryServerCapabilities` node in the server's address space during startup. + +### 2. ~~AggregateFunctions Folder (Required)~~ — RESOLVED + +**Spec requirement:** The `HistoryServerCapabilities` object SHALL contain an `AggregateFunctions` folder listing all supported aggregate functions as child nodes. Clients browse this folder to discover available aggregates. + +**Current state:** Implemented. The `AggregateFunctions` folder under `HistoryServerCapabilities` is populated with references to all 7 supported aggregate function ObjectIds at startup. + +**Required:** Create `AggregateFunctions` folder under `HistoryServerCapabilities` with references to the 7 supported aggregate ObjectIds: +- `AggregateFunction_Average` +- `AggregateFunction_Minimum` +- `AggregateFunction_Maximum` +- `AggregateFunction_Count` +- `AggregateFunction_Start` +- `AggregateFunction_End` +- `AggregateFunction_StandardDeviationPopulation` + +**Priority:** Medium + +### 3. 
~~Continuation Points for History Reads (Required)~~ — RESOLVED + +**Spec requirement:** When a HistoryRead result exceeds `MaxReturnDataValues` or the client's `NumValuesPerNode`, the server SHALL return a `ContinuationPoint` in the result. The client then issues follow-up HistoryRead calls with the continuation point to retrieve remaining data. The server must maintain state for active continuation points and release them when complete or on timeout. + +**Current state:** Implemented. `HistoryContinuationPointManager` stores remaining data keyed by GUID. Both `HistoryReadRawModified` and `HistoryReadProcessed` return a `ContinuationPoint` when results exceed `NumValuesPerNode`. Follow-up requests with the continuation point resume from stored state. Points expire after 5 minutes. Invalid or expired points return `BadContinuationPointInvalid`. + +**Priority:** High (resolved) + +### 4. ~~ReadModified Support~~ — RESOLVED + +**Spec requirement:** `ReadRawModifiedDetails` has an `IsReadModified` flag. When `true`, the server should return the original value before modification along with the modification info (who modified, when, what the original value was). This is part of audit trail / data integrity use cases. + +**Current state:** Implemented. `HistoryReadRawModified` checks `details.IsReadModified` and returns `BadHistoryOperationUnsupported` when true, since the Wonderware Historian does not expose modification history. + +### 5. ~~ReadAtTimeDetails~~ — RESOLVED + +**Spec requirement:** `ReadAtTimeDetails` allows a client to request interpolated values at specific timestamps (not raw samples). The server interpolates between the two nearest raw values for each requested timestamp. + +**Current state:** Implemented. `LmxNodeManager` overrides `HistoryReadAtTime`. `HistorianDataSource.ReadAtTimeAsync` uses the Historian SDK with `HistorianRetrievalMode.Interpolated` to query interpolated values at each requested timestamp. + +### 6. 
~~HistoryUpdate Service (Insert/Replace/Delete)~~ — RESOLVED (N/A) + +**Spec requirement:** The HistoryUpdate service allows clients to insert new values, replace existing values, update (insert or replace), and delete historical data. Each capability is separately advertised via the `HistoryServerCapabilities` node. + +**Current state:** Not applicable. The Historian is read-only. All write capability booleans (`InsertDataCapability`, `ReplaceDataCapability`, `UpdateDataCapability`, `DeleteRawCapability`, `DeleteAtTimeCapability`) are explicitly set to `false` in `ConfigureHistoryCapabilities()`. No `HistoryUpdate` override exists, which is correct. + +### 7. ~~HistoryReadEventDetails (Historical Events)~~ — RESOLVED + +**Spec requirement:** Servers supporting historical event access implement `HistoryReadEventDetails` to retrieve past event notifications (e.g., alarm history). + +**Current state:** Implemented. `LmxNodeManager` overrides `HistoryReadEvents`. `HistorianDataSource.ReadEventsAsync` uses the Historian SDK with a separate `HistorianConnectionType.Event` connection and `EventQuery` to retrieve historical alarm/event records. Events are mapped to OPC UA `HistoryEventFieldList` entries with standard fields (EventId, EventType, SourceNode, SourceName, Time, ReceiveTime, Message, Severity). `AccessHistoryEventsCapability` is set to `true` when alarm tracking is enabled. + +### 8. ~~HistoricalDataConfiguration Node~~ — RESOLVED + +**Spec requirement:** Each historized node SHOULD have a `HistoricalDataConfiguration` child object with properties describing how its history is stored: `Stepped` (interpolation type), `MinTimeInterval`, `MaxTimeInterval`, `ExceptionDeviation`, etc. + +**Current state:** Implemented. Historized variables receive a `HistoricalDataConfigurationState` child node with `Stepped = false` and `Definition = "Wonderware Historian"`. 
Recording parameters (intervals, deadbands) are not available from the Galaxy DB, so default values are used. + +### 9. ~~AggregateConfiguration~~ — RESOLVED (N/A) + +**Spec requirement:** The `AggregateConfiguration` object (child of `HistoricalDataConfiguration` or `HistoryServerCapabilities`) defines default aggregate behavior: `TreatUncertainAsBad`, `PercentDataBad`, `PercentDataGood`, `UseSlopedExtrapolation`. + +**Current state:** Not applicable. The server delegates aggregation entirely to the Wonderware Historian's pre-computed summary tables, so these parameters are not actionable. Aggregate discovery is fully supported via the `AggregateFunctions` folder. + +### 10. ~~ReturnBounds Parameter~~ — RESOLVED + +**Spec requirement:** When `ReturnBounds=true` in `ReadRawModifiedDetails`, the server should return bounding values at the start and end of the requested time range, even if no raw samples exist at those exact times. + +**Current state:** Implemented. When `ReturnBounds` is true, `AddBoundingValues` inserts boundary `DataValue` entries at `StartTime` and `EndTime` with `StatusCodes.BadBoundNotFound` if no sample exists at those exact times. + +### 11. ~~Client-Side: StandardDeviation Aggregate~~ — RESOLVED + +**Current state:** Implemented. `AggregateType.StandardDeviation` added to enum, `AggregateTypeMapper`, CLI parser (aliases: `stddev`, `stdev`), and UI dropdown. Full end-to-end support from client to server for the `AggregateFunction_StandardDeviationPopulation` aggregate. 
+ +## Recommended Implementation Order + +| Priority | Gap | Effort | +|----------|-----|--------| +| ~~**High**~~ | ~~HistoryServerCapabilities node~~ | ~~RESOLVED~~ | +| ~~**High**~~ | ~~Continuation points for history reads~~ | ~~RESOLVED~~ | +| ~~**Medium**~~ | ~~AggregateFunctions folder~~ | ~~RESOLVED~~ | +| ~~**Medium**~~ | ~~ReadAtTimeDetails (interpolation)~~ | ~~RESOLVED~~ | +| ~~**Medium**~~ | ~~Advertise all capabilities as true/false~~ | ~~RESOLVED~~ | +| ~~**Low**~~ | ~~Return BadHistoryOperationUnsupported for ReadModified~~ | ~~RESOLVED~~ | +| ~~**Low**~~ | ~~HistoricalDataConfiguration per node~~ | ~~RESOLVED~~ | +| ~~**Low**~~ | ~~Client StdDev aggregate support~~ | ~~RESOLVED~~ | +| ~~**Low**~~ | ~~HistoryUpdate (write/delete)~~ | ~~RESOLVED (N/A)~~ | +| ~~**Low**~~ | ~~Historical event access~~ | ~~RESOLVED~~ | +| ~~**Low**~~ | ~~AggregateConfiguration~~ | ~~RESOLVED (N/A)~~ | +| ~~**Low**~~ | ~~ReturnBounds~~ | ~~RESOLVED~~ | + +## References + +- [OPC UA Part 11: Historical Access — Concepts](https://reference.opcfoundation.org/Core/Part11/v105/docs/4) +- [OPC UA Part 11: Historical Access — HistoryServerCapabilitiesType](https://reference.opcfoundation.org/Core/Part11/v104/docs/5.4.2) +- [OPC UA Part 11: Historical Access — Service Usage](https://reference.opcfoundation.org/Core/Part11/v104/docs/6) +- [OPC UA Part 11: Historical Access — Data Architecture](https://reference.opcfoundation.org/Core/Part11/v104/docs/4.2) +- [UA-.NETStandard Historical Access Overview](http://opcfoundation.github.io/UA-.NETStandard/help/historical_access_overview.htm) +- [OPC Foundation Historical Data Access Wiki](http://wiki.opcfoundation.org/index.php?title=Historical_Data_Access) diff --git a/lib/ArchestrA.CloudHistorian.Contract.dll b/lib/ArchestrA.CloudHistorian.Contract.dll new file mode 100644 index 0000000..f96d525 Binary files /dev/null and b/lib/ArchestrA.CloudHistorian.Contract.dll differ diff --git a/lib/Historian.CBE.dll b/lib/Historian.CBE.dll new 
file mode 100644 index 0000000..aaedc65 Binary files /dev/null and b/lib/Historian.CBE.dll differ diff --git a/lib/Historian.DPAPI.dll b/lib/Historian.DPAPI.dll new file mode 100644 index 0000000..50f8d59 Binary files /dev/null and b/lib/Historian.DPAPI.dll differ diff --git a/lib/aahClient.dll b/lib/aahClient.dll new file mode 100644 index 0000000..0543572 Binary files /dev/null and b/lib/aahClient.dll differ diff --git a/lib/aahClientCommon.dll b/lib/aahClientCommon.dll new file mode 100644 index 0000000..c36152b Binary files /dev/null and b/lib/aahClientCommon.dll differ diff --git a/lib/aahClientManaged.dll b/lib/aahClientManaged.dll new file mode 100644 index 0000000..2834fca Binary files /dev/null and b/lib/aahClientManaged.dll differ diff --git a/service_info.md b/service_info.md index f0028e3..1ff0c03 100644 --- a/service_info.md +++ b/service_info.md @@ -199,6 +199,89 @@ writeconfig write → denied (WriteConfigure doesn't match Operate) admin write → allowed (has all write roles) ``` +## Historian SDK Migration + +Updated: `2026-04-06` + +Both instances updated to use the Wonderware Historian SDK (`aahClientManaged.dll`) instead of direct SQL queries for historical data access. 
+ +Code changes: +- `HistorianDataSource` rewritten from `SqlConnection`/`SqlDataReader` to `ArchestrA.HistorianAccess` SDK +- Persistent connection with lazy connect and auto-reconnect on failure +- `HistorianConfiguration.ConnectionString` replaced with `ServerName`, `IntegratedSecurity`, `UserName`, `Password`, `Port` +- `HistorianDataSource` now implements `IDisposable`, disposed on service shutdown +- `ConfigurationValidator` validates Historian SDK settings at startup + +SDK DLLs deployed to both instances: +- `aahClientManaged.dll` (primary SDK, v2.0.0.0) +- `aahClient.dll`, `aahClientCommon.dll` (dependencies) +- `Historian.CBE.dll`, `Historian.DPAPI.dll`, `ArchestrA.CloudHistorian.Contract.dll` + +Configuration changes (both instances): +- `Historian.ConnectionString` removed +- `Historian.ServerName`: `"localhost"` +- `Historian.IntegratedSecurity`: `true` +- `Historian.Port`: `32568` +- `Historian.Enabled`: `true` (unchanged) + +Verification (instance1 startup log): +``` +Historian.Enabled=true, ServerName=localhost, IntegratedSecurity=true, Port=32568 +Historian.CommandTimeoutSeconds=30, MaxValuesPerRead=10000 +=== Configuration Valid === +LmxOpcUa service started successfully +``` + +## HistoryServerCapabilities and Continuation Points + +Updated: `2026-04-06` + +Both instances updated with OPC UA Part 11 spec compliance improvements. + +Code changes: +- `HistoryServerCapabilities` node populated under `ServerCapabilities` with all boolean capability properties +- `AggregateFunctions` folder populated with references to 7 supported aggregate functions +- `HistoryContinuationPointManager` added — stores remaining data when results exceed `NumValuesPerNode` +- `HistoryReadRawModified` and `HistoryReadProcessed` now return `ContinuationPoint` in `HistoryReadResult` for partial reads +- Follow-up requests with `ContinuationPoint` resume from stored state; invalid/expired points return `BadContinuationPointInvalid` + +No configuration changes required. 
+ +Verification (instance1 startup log): +``` +HistoryServerCapabilities configured with 7 aggregate functions +LmxOpcUa service started successfully +``` + +## Remaining Historian Gaps Fix + +Updated: `2026-04-06` + +Both instances updated with remaining OPC UA Part 11 spec compliance fixes. + +Code changes: +- **Gap 4**: `HistoryReadRawModified` returns `BadHistoryOperationUnsupported` when `IsReadModified=true` +- **Gap 5**: `HistoryReadAtTime` override added with `ReadAtTimeAsync` using SDK `HistorianRetrievalMode.Interpolated` +- **Gap 8**: `HistoricalDataConfigurationState` child nodes added to historized variables (`Stepped=false`, `Definition="Wonderware Historian"`) +- **Gap 10**: `ReturnBounds` parameter handled — boundary `DataValue` entries with `BadBoundNotFound` inserted at StartTime/EndTime +- **Gap 11**: `StandardDeviation` aggregate added to client enum, mapper, CLI (aliases: `stddev`/`stdev`), and UI dropdown + +No configuration changes required. + +## Historical Event Access + +Updated: `2026-04-06` + +Both instances updated with OPC UA historical event access (Gap 7). + +Code changes: +- `HistorianDataSource.ReadEventsAsync` queries Historian event store via separate `HistorianConnectionType.Event` connection +- `LmxNodeManager.HistoryReadEvents` override maps `HistorianEvent` records to OPC UA `HistoryEventFieldList` entries +- `AccessHistoryEventsCapability` set to `true` when `AlarmTrackingEnabled` is true +- Event fields: EventId, EventType, SourceNode, SourceName, Time, ReceiveTime, Message, Severity + +No configuration changes required. All historian gaps (1-11) are now resolved. + ## Notes The service deployment and restart succeeded. The live CLI checks confirm the endpoint is reachable and that the array node identifier has changed to the bracketless form. 
The array value on the live service still prints as blank even though the status is good, so if this environment should have populated `MoveInPartNumbers`, the runtime data path still needs follow-up investigation. diff --git a/src/ZB.MOM.WW.LmxOpcUa.Client.CLI/Commands/HistoryReadCommand.cs b/src/ZB.MOM.WW.LmxOpcUa.Client.CLI/Commands/HistoryReadCommand.cs index d6a58ea..e942cb7 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Client.CLI/Commands/HistoryReadCommand.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Client.CLI/Commands/HistoryReadCommand.cs @@ -45,7 +45,7 @@ public class HistoryReadCommand : CommandBase /// /// Gets the optional aggregate name used when the operator wants processed history instead of raw samples. /// - [CommandOption("aggregate", Description = "Aggregate function: Average, Minimum, Maximum, Count, Start, End")] + [CommandOption("aggregate", Description = "Aggregate function: Average, Minimum, Maximum, Count, Start, End, StandardDeviation")] public string? Aggregate { get; init; } /// @@ -127,8 +127,9 @@ public class HistoryReadCommand : CommandBase "count" => AggregateType.Count, "start" or "first" => AggregateType.Start, "end" or "last" => AggregateType.End, + "standarddeviation" or "stddev" or "stdev" => AggregateType.StandardDeviation, _ => throw new ArgumentException( - $"Unknown aggregate: '{name}'. Supported: Average, Minimum, Maximum, Count, Start, End") + $"Unknown aggregate: '{name}'. 
Supported: Average, Minimum, Maximum, Count, Start, End, StandardDeviation") }; } } diff --git a/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Helpers/AggregateTypeMapper.cs b/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Helpers/AggregateTypeMapper.cs index e0c0c21..17261f7 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Helpers/AggregateTypeMapper.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Helpers/AggregateTypeMapper.cs @@ -21,6 +21,7 @@ public static class AggregateTypeMapper AggregateType.Count => ObjectIds.AggregateFunction_Count, AggregateType.Start => ObjectIds.AggregateFunction_Start, AggregateType.End => ObjectIds.AggregateFunction_End, + AggregateType.StandardDeviation => ObjectIds.AggregateFunction_StandardDeviationPopulation, _ => throw new ArgumentOutOfRangeException(nameof(aggregate), aggregate, "Unknown AggregateType value.") }; } diff --git a/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Models/AggregateType.cs b/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Models/AggregateType.cs index 2aa7e53..a1c8f2b 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Models/AggregateType.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Client.Shared/Models/AggregateType.cs @@ -21,5 +21,8 @@ public enum AggregateType Start, /// Last value in the interval. - End + End, + + /// Population standard deviation of values in the interval. 
+ StandardDeviation } \ No newline at end of file diff --git a/src/ZB.MOM.WW.LmxOpcUa.Client.UI/ViewModels/HistoryViewModel.cs b/src/ZB.MOM.WW.LmxOpcUa.Client.UI/ViewModels/HistoryViewModel.cs index 9b4268b..6c3090f 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Client.UI/ViewModels/HistoryViewModel.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Client.UI/ViewModels/HistoryViewModel.cs @@ -49,7 +49,8 @@ public partial class HistoryViewModel : ObservableObject AggregateType.Maximum, AggregateType.Count, AggregateType.Start, - AggregateType.End + AggregateType.End, + AggregateType.StandardDeviation ]; public bool IsAggregateRead => SelectedAggregateType != null; diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/ConfigurationValidator.cs b/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/ConfigurationValidator.cs index 6409fef..954a9fd 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/ConfigurationValidator.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/ConfigurationValidator.cs @@ -89,10 +89,9 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Configuration string.Join(", ", config.Security.Profiles), config.Security.AutoAcceptClientCertificates, config.Security.RejectSHA1Certificates, config.Security.MinimumCertificateKeySize); - if (config.Security.PkiRootPath != null) - Log.Information("Security.PkiRootPath={PkiRootPath}", config.Security.PkiRootPath); - if (config.Security.CertificateSubject != null) - Log.Information("Security.CertificateSubject={CertificateSubject}", config.Security.CertificateSubject); + Log.Information("Security.PkiRootPath={PkiRootPath}", config.Security.PkiRootPath ?? "(default)"); + Log.Information("Security.CertificateSubject={CertificateSubject}", config.Security.CertificateSubject ?? 
"(default)"); + Log.Information("Security.CertificateLifetimeMonths={Months}", config.Security.CertificateLifetimeMonths); var unknownProfiles = config.Security.Profiles .Where(p => !SecurityProfileResolver.ValidProfileNames.Contains(p, StringComparer.OrdinalIgnoreCase)) @@ -115,6 +114,37 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Configuration config.Security.Profiles[0].Equals("None", StringComparison.OrdinalIgnoreCase)) Log.Warning("Only the 'None' security profile is configured — transport security is disabled"); + // Historian + Log.Information("Historian.Enabled={Enabled}, ServerName={ServerName}, IntegratedSecurity={IntegratedSecurity}, Port={Port}", + config.Historian.Enabled, config.Historian.ServerName, config.Historian.IntegratedSecurity, + config.Historian.Port); + Log.Information("Historian.CommandTimeoutSeconds={Timeout}, MaxValuesPerRead={MaxValues}", + config.Historian.CommandTimeoutSeconds, config.Historian.MaxValuesPerRead); + + if (config.Historian.Enabled) + { + if (string.IsNullOrWhiteSpace(config.Historian.ServerName)) + { + Log.Error("Historian.ServerName must not be empty when Historian is enabled"); + valid = false; + } + + if (config.Historian.Port < 1 || config.Historian.Port > 65535) + { + Log.Error("Historian.Port must be between 1 and 65535"); + valid = false; + } + + if (!config.Historian.IntegratedSecurity && string.IsNullOrWhiteSpace(config.Historian.UserName)) + { + Log.Error("Historian.UserName must not be empty when IntegratedSecurity is disabled"); + valid = false; + } + + if (!config.Historian.IntegratedSecurity && string.IsNullOrWhiteSpace(config.Historian.Password)) + Log.Warning("Historian.Password is empty — authentication may fail"); + } + // Authentication Log.Information("Authentication.AllowAnonymous={AllowAnonymous}, AnonymousCanWrite={AnonymousCanWrite}", config.Authentication.AllowAnonymous, config.Authentication.AnonymousCanWrite); diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/HistorianConfiguration.cs 
b/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/HistorianConfiguration.cs index 232cb97..788d5a4 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/HistorianConfiguration.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/Configuration/HistorianConfiguration.cs @@ -1,7 +1,7 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Configuration { /// - /// Wonderware Historian database configuration for OPC UA historical data access. + /// Wonderware Historian SDK configuration for OPC UA historical data access. /// public class HistorianConfiguration { @@ -11,12 +11,33 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Configuration public bool Enabled { get; set; } = false; /// - /// Gets or sets the connection string for the Wonderware Historian Runtime database. + /// Gets or sets the Historian server hostname. /// - public string ConnectionString { get; set; } = "Server=localhost;Database=Runtime;Integrated Security=true;"; + public string ServerName { get; set; } = "localhost"; /// - /// Gets or sets the SQL command timeout in seconds for historian queries. + /// Gets or sets a value indicating whether Windows Integrated Security is used. + /// When false, <see cref="UserName"/> and <see cref="Password"/> are used instead. + /// + public bool IntegratedSecurity { get; set; } = true; + + /// + /// Gets or sets the username for Historian authentication when <see cref="IntegratedSecurity"/> is false. + /// + public string? UserName { get; set; } + + /// + /// Gets or sets the password for Historian authentication when <see cref="IntegratedSecurity"/> is false. + /// + public string? Password { get; set; } + + /// + /// Gets or sets the Historian server TCP port. + /// + public int Port { get; set; } = 32568; + + /// + /// Gets or sets the packet timeout in seconds for Historian SDK operations.
/// public int CommandTimeoutSeconds { get; set; } = 30; @@ -25,4 +46,4 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Configuration /// public int MaxValuesPerRead { get; set; } = 10000; } -} \ No newline at end of file +} diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/FodyWeavers.xml b/src/ZB.MOM.WW.LmxOpcUa.Host/FodyWeavers.xml new file mode 100644 index 0000000..5242c91 --- /dev/null +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/FodyWeavers.xml @@ -0,0 +1,13 @@ + + + + ArchestrA.MxAccess + aahClientManaged + aahClientCommon + aahClient + Historian.CBE + Historian.DPAPI + ArchestrA.CloudHistorian.Contract + + + diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/FodyWeavers.xsd b/src/ZB.MOM.WW.LmxOpcUa.Host/FodyWeavers.xsd new file mode 100644 index 0000000..f2dbece --- /dev/null +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/FodyWeavers.xsd @@ -0,0 +1,176 @@ + + + + + + + + + + + + A list of assembly names to exclude from the default action of "embed all Copy Local references", delimited with line breaks + + + + + A list of assembly names to include from the default action of "embed all Copy Local references", delimited with line breaks. + + + + + A list of runtime assembly names to exclude from the default action of "embed all Copy Local references", delimited with line breaks + + + + + A list of runtime assembly names to include from the default action of "embed all Copy Local references", delimited with line breaks. + + + + + Obsolete, use UnmanagedWinX86Assemblies instead + + + + + A list of unmanaged X86 (32 bit) assembly names to include, delimited with line breaks. + + + + + Obsolete, use UnmanagedWinX64Assemblies instead. + + + + + A list of unmanaged X64 (64 bit) assembly names to include, delimited with line breaks. + + + + + A list of unmanaged Arm64 (64 bit) assembly names to include, delimited with line breaks. + + + + + The order of preloaded assemblies, delimited with line breaks. + + + + + + This will copy embedded files to disk before loading them into memory. 
This is helpful for some scenarios that expected an assembly to be loaded from a physical file. + + + + + Controls if .pdbs for reference assemblies are also embedded. + + + + + Controls if runtime assemblies are also embedded. + + + + + Controls whether the runtime assemblies are embedded with their full path or only with their assembly name. + + + + + Embedded assemblies are compressed by default, and uncompressed when they are loaded. You can turn compression off with this option. + + + + + As part of Costura, embedded assemblies are no longer included as part of the build. This cleanup can be turned off. + + + + + The attach method no longer subscribes to the `AppDomain.AssemblyResolve` (.NET 4.x) and `AssemblyLoadContext.Resolving` (.NET 6.0+) events. + + + + + Costura by default will load as part of the module initialization. This flag disables that behavior. Make sure you call CosturaUtility.Initialize() somewhere in your code. + + + + + Costura will by default use assemblies with a name like 'resources.dll' as a satellite resource and prepend the output path. This flag disables that behavior. + + + + + A list of assembly names to exclude from the default action of "embed all Copy Local references", delimited with | + + + + + A list of assembly names to include from the default action of "embed all Copy Local references", delimited with |. + + + + + A list of runtime assembly names to exclude from the default action of "embed all Copy Local references", delimited with | + + + + + A list of runtime assembly names to include from the default action of "embed all Copy Local references", delimited with |. + + + + + Obsolete, use UnmanagedWinX86Assemblies instead + + + + + A list of unmanaged X86 (32 bit) assembly names to include, delimited with |. + + + + + Obsolete, use UnmanagedWinX64Assemblies instead + + + + + A list of unmanaged X64 (64 bit) assembly names to include, delimited with |. 
+ + + + + A list of unmanaged Arm64 (64 bit) assembly names to include, delimited with |. + + + + + The order of preloaded assemblies, delimited with |. + + + + + + + + 'true' to run assembly verification (PEVerify) on the target assembly after all weavers have been executed. + + + + + A comma-separated list of error codes that can be safely ignored in assembly verification. + + + + + 'false' to turn off automatic generation of the XML Schema file. + + + + + \ No newline at end of file diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/Historian/HistorianDataSource.cs b/src/ZB.MOM.WW.LmxOpcUa.Host/Historian/HistorianDataSource.cs index d1896e8..30cc1df 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/Historian/HistorianDataSource.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/Historian/HistorianDataSource.cs @@ -1,8 +1,9 @@ using System; using System.Collections.Generic; -using System.Data.SqlClient; +using StringCollection = System.Collections.Specialized.StringCollection; using System.Threading; using System.Threading.Tasks; +using ArchestrA; using Opc.Ua; using Serilog; using ZB.MOM.WW.LmxOpcUa.Host.Configuration; @@ -11,23 +12,151 @@ using ZB.MOM.WW.LmxOpcUa.Host.Domain; namespace ZB.MOM.WW.LmxOpcUa.Host.Historian { /// - /// Reads historical data from the Wonderware Historian Runtime database. + /// Reads historical data from the Wonderware Historian via the aahClientManaged SDK. /// - public class HistorianDataSource + public class HistorianDataSource : IDisposable { private static readonly ILogger Log = Serilog.Log.ForContext(); private readonly HistorianConfiguration _config; + private readonly object _connectionLock = new object(); + private readonly object _eventConnectionLock = new object(); + private HistorianAccess? _connection; + private HistorianAccess? _eventConnection; + private bool _disposed; /// - /// Initializes a Historian reader that translates OPC UA history requests into Wonderware Historian queries. 
+ /// Initializes a Historian reader that translates OPC UA history requests into Wonderware Historian SDK queries. /// - /// The Historian connection settings and command timeout used for runtime history lookups. + /// The Historian SDK connection settings used for runtime history lookups. public HistorianDataSource(HistorianConfiguration config) { _config = config; } + private void EnsureConnected() + { + if (_disposed) + throw new ObjectDisposedException(nameof(HistorianDataSource)); + + lock (_connectionLock) + { + if (_connection != null) + return; + + var conn = new HistorianAccess(); + var args = new HistorianConnectionArgs + { + ServerName = _config.ServerName, + TcpPort = (ushort)_config.Port, + IntegratedSecurity = _config.IntegratedSecurity, + ConnectionType = HistorianConnectionType.Process, + ReadOnly = true, + PacketTimeout = (uint)(_config.CommandTimeoutSeconds * 1000) + }; + + if (!_config.IntegratedSecurity) + { + args.UserName = _config.UserName ?? string.Empty; + args.Password = _config.Password ?? string.Empty; + } + + if (!conn.OpenConnection(args, out var error)) + { + conn.Dispose(); + throw new InvalidOperationException( + $"Failed to open Historian SDK connection to {_config.ServerName}:{_config.Port}: {error.ErrorCode}"); + } + + _connection = conn; + Log.Information("Historian SDK connection opened to {Server}:{Port}", _config.ServerName, _config.Port); + } + } + + private void HandleConnectionError(Exception? 
ex = null) + { + lock (_connectionLock) + { + if (_connection == null) + return; + + try + { + _connection.CloseConnection(out _); + _connection.Dispose(); + } + catch (Exception disposeEx) + { + Log.Debug(disposeEx, "Error disposing Historian SDK connection during error recovery"); + } + + _connection = null; + Log.Warning(ex, "Historian SDK connection reset — will reconnect on next request"); + } + } + + private void EnsureEventConnected() + { + if (_disposed) + throw new ObjectDisposedException(nameof(HistorianDataSource)); + + lock (_eventConnectionLock) + { + if (_eventConnection != null) + return; + + var conn = new HistorianAccess(); + var args = new HistorianConnectionArgs + { + ServerName = _config.ServerName, + TcpPort = (ushort)_config.Port, + IntegratedSecurity = _config.IntegratedSecurity, + ConnectionType = HistorianConnectionType.Event, + ReadOnly = true, + PacketTimeout = (uint)(_config.CommandTimeoutSeconds * 1000) + }; + + if (!_config.IntegratedSecurity) + { + args.UserName = _config.UserName ?? string.Empty; + args.Password = _config.Password ?? string.Empty; + } + + if (!conn.OpenConnection(args, out var error)) + { + conn.Dispose(); + throw new InvalidOperationException( + $"Failed to open Historian SDK event connection to {_config.ServerName}:{_config.Port}: {error.ErrorCode}"); + } + + _eventConnection = conn; + Log.Information("Historian SDK event connection opened to {Server}:{Port}", + _config.ServerName, _config.Port); + } + } + + private void HandleEventConnectionError(Exception? 
ex = null) + { + lock (_eventConnectionLock) + { + if (_eventConnection == null) + return; + + try + { + _eventConnection.CloseConnection(out _); + _eventConnection.Dispose(); + } + catch (Exception disposeEx) + { + Log.Debug(disposeEx, "Error disposing Historian SDK event connection during error recovery"); + } + + _eventConnection = null; + Log.Warning(ex, "Historian SDK event connection reset — will reconnect on next request"); + } + } + /// /// Reads raw historical values for a tag from the Historian. /// @@ -35,52 +164,89 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Historian /// The inclusive start of the client-requested history window. /// The inclusive end of the client-requested history window. /// The maximum number of samples to return when the OPC UA client limits the result set. - /// The cancellation token that aborts the database call when the OPC UA request is cancelled. - public async Task> ReadRawAsync( + /// The cancellation token that aborts the query when the OPC UA request is cancelled. + public Task> ReadRawAsync( string tagName, DateTime startTime, DateTime endTime, int maxValues, CancellationToken ct = default) { var results = new List(); - var sql = maxValues > 0 - ? 
"SELECT TOP (@MaxValues) DateTime, Value, vValue, OPCQuality FROM Runtime.dbo.History WHERE TagName = @TagName AND wwTimezone='UTC' AND DateTime >= @StartTime AND DateTime <= @EndTime ORDER BY DateTime" - : "SELECT DateTime, Value, vValue, OPCQuality FROM Runtime.dbo.History WHERE TagName = @TagName AND wwTimezone='UTC' AND DateTime >= @StartTime AND DateTime <= @EndTime ORDER BY DateTime"; - using var conn = new SqlConnection(_config.ConnectionString); - await conn.OpenAsync(ct); - - using var cmd = new SqlCommand(sql, conn) { CommandTimeout = _config.CommandTimeoutSeconds }; - cmd.Parameters.AddWithValue("@TagName", tagName); - cmd.Parameters.AddWithValue("@StartTime", startTime); - cmd.Parameters.AddWithValue("@EndTime", endTime); - if (maxValues > 0) - cmd.Parameters.AddWithValue("@MaxValues", maxValues); - - using var reader = await cmd.ExecuteReaderAsync(ct); - while (await reader.ReadAsync(ct)) + try { - var timestamp = DateTime.SpecifyKind(reader.GetDateTime(0), DateTimeKind.Utc); - object? value; - if (!reader.IsDBNull(1)) - value = reader.GetDouble(1); - else if (!reader.IsDBNull(2)) - value = reader.GetString(2); - else - value = null; - var quality = reader.IsDBNull(3) ? 
(byte)0 : Convert.ToByte(reader.GetValue(3)); + EnsureConnected(); - results.Add(new DataValue + using var query = _connection!.CreateHistoryQuery(); + var args = new HistoryQueryArgs { - Value = new Variant(value), - SourceTimestamp = timestamp, - ServerTimestamp = timestamp, - StatusCode = QualityMapper.MapToOpcUaStatusCode(QualityMapper.MapFromMxAccessQuality(quality)) - }); + TagNames = new StringCollection { tagName }, + StartDateTime = startTime, + EndDateTime = endTime, + RetrievalMode = HistorianRetrievalMode.Full + }; + + if (maxValues > 0) + args.BatchSize = (uint)maxValues; + else if (_config.MaxValuesPerRead > 0) + args.BatchSize = (uint)_config.MaxValuesPerRead; + + if (!query.StartQuery(args, out var error)) + { + Log.Warning("Historian SDK raw query start failed for {Tag}: {Error}", tagName, error.ErrorCode); + HandleConnectionError(); + return Task.FromResult(results); + } + + var count = 0; + var limit = maxValues > 0 ? maxValues : _config.MaxValuesPerRead; + + while (query.MoveNext(out error)) + { + ct.ThrowIfCancellationRequested(); + + var result = query.QueryResult; + var timestamp = DateTime.SpecifyKind(result.StartDateTime, DateTimeKind.Utc); + + object? 
value; + if (!string.IsNullOrEmpty(result.StringValue) && result.Value == 0) + value = result.StringValue; + else + value = result.Value; + + var quality = (byte)(result.OpcQuality & 0xFF); + + results.Add(new DataValue + { + Value = new Variant(value), + SourceTimestamp = timestamp, + ServerTimestamp = timestamp, + StatusCode = QualityMapper.MapToOpcUaStatusCode(QualityMapper.MapFromMxAccessQuality(quality)) + }); + + count++; + if (limit > 0 && count >= limit) + break; + } + + query.EndQuery(out _); + } + catch (OperationCanceledException) + { + throw; + } + catch (ObjectDisposedException) + { + throw; + } + catch (Exception ex) + { + Log.Warning(ex, "HistoryRead raw failed for {Tag}", tagName); + HandleConnectionError(ex); } Log.Debug("HistoryRead raw: {Tag} returned {Count} values ({Start} to {End})", tagName, results.Count, startTime, endTime); - return results; + return Task.FromResult(results); } /// @@ -92,46 +258,260 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Historian /// The Wonderware summary resolution, in milliseconds, used to bucket aggregate values. /// The Historian summary column that matches the OPC UA aggregate function being requested. /// The cancellation token that aborts the aggregate query when the client request is cancelled. 
- public async Task> ReadAggregateAsync( + public Task> ReadAggregateAsync( string tagName, DateTime startTime, DateTime endTime, double intervalMs, string aggregateColumn, CancellationToken ct = default) { var results = new List(); - var sql = $"SELECT StartDateTime, [{aggregateColumn}] FROM Runtime.dbo.AnalogSummaryHistory " + - "WHERE TagName = @TagName AND wwTimezone='UTC' AND StartDateTime >= @StartTime AND StartDateTime <= @EndTime " + - "AND wwResolution = @Resolution ORDER BY StartDateTime"; - using var conn = new SqlConnection(_config.ConnectionString); - await conn.OpenAsync(ct); - - using var cmd = new SqlCommand(sql, conn) { CommandTimeout = _config.CommandTimeoutSeconds }; - cmd.Parameters.AddWithValue("@TagName", tagName); - cmd.Parameters.AddWithValue("@StartTime", startTime); - cmd.Parameters.AddWithValue("@EndTime", endTime); - cmd.Parameters.AddWithValue("@Resolution", (int)intervalMs); - - using var reader = await cmd.ExecuteReaderAsync(ct); - while (await reader.ReadAsync(ct)) + try { - var timestamp = DateTime.SpecifyKind(reader.GetDateTime(0), DateTimeKind.Utc); - var value = reader.IsDBNull(1) ? (object?)null : reader.GetDouble(1); + EnsureConnected(); - results.Add(new DataValue + using var query = _connection!.CreateAnalogSummaryQuery(); + var args = new AnalogSummaryQueryArgs { - Value = new Variant(value), - SourceTimestamp = timestamp, - ServerTimestamp = timestamp, - StatusCode = value != null ? 
StatusCodes.Good : StatusCodes.BadNoData - }); + TagNames = new StringCollection { tagName }, + StartDateTime = startTime, + EndDateTime = endTime, + Resolution = (ulong)intervalMs + }; + + if (!query.StartQuery(args, out var error)) + { + Log.Warning("Historian SDK aggregate query start failed for {Tag}: {Error}", tagName, + error.ErrorCode); + HandleConnectionError(); + return Task.FromResult(results); + } + + while (query.MoveNext(out error)) + { + ct.ThrowIfCancellationRequested(); + + var result = query.QueryResult; + var timestamp = DateTime.SpecifyKind(result.StartDateTime, DateTimeKind.Utc); + var value = ExtractAggregateValue(result, aggregateColumn); + + results.Add(new DataValue + { + Value = new Variant(value), + SourceTimestamp = timestamp, + ServerTimestamp = timestamp, + StatusCode = value != null ? StatusCodes.Good : StatusCodes.BadNoData + }); + } + + query.EndQuery(out _); + } + catch (OperationCanceledException) + { + throw; + } + catch (ObjectDisposedException) + { + throw; + } + catch (Exception ex) + { + Log.Warning(ex, "HistoryRead aggregate failed for {Tag}", tagName); + HandleConnectionError(ex); } Log.Debug("HistoryRead aggregate ({Aggregate}): {Tag} returned {Count} values", aggregateColumn, tagName, results.Count); - return results; + return Task.FromResult(results); } + /// + /// Reads interpolated values for a tag at specific timestamps from the Historian. + /// + /// The Wonderware tag name backing the OPC UA node. + /// The specific timestamps at which interpolated values are requested. + /// The cancellation token. 
+ public Task> ReadAtTimeAsync( + string tagName, DateTime[] timestamps, + CancellationToken ct = default) + { + var results = new List(); + + if (timestamps == null || timestamps.Length == 0) + return Task.FromResult(results); + + try + { + EnsureConnected(); + + foreach (var timestamp in timestamps) + { + ct.ThrowIfCancellationRequested(); + + using var query = _connection!.CreateHistoryQuery(); + var args = new HistoryQueryArgs + { + TagNames = new StringCollection { tagName }, + StartDateTime = timestamp, + EndDateTime = timestamp, + RetrievalMode = HistorianRetrievalMode.Interpolated, + BatchSize = 1 + }; + + if (!query.StartQuery(args, out var error)) + { + results.Add(new DataValue + { + Value = Variant.Null, + SourceTimestamp = timestamp, + ServerTimestamp = timestamp, + StatusCode = StatusCodes.BadNoData + }); + continue; + } + + if (query.MoveNext(out error)) + { + var result = query.QueryResult; + object? value; + if (!string.IsNullOrEmpty(result.StringValue) && result.Value == 0) + value = result.StringValue; + else + value = result.Value; + + var quality = (byte)(result.OpcQuality & 0xFF); + results.Add(new DataValue + { + Value = new Variant(value), + SourceTimestamp = timestamp, + ServerTimestamp = timestamp, + StatusCode = QualityMapper.MapToOpcUaStatusCode( + QualityMapper.MapFromMxAccessQuality(quality)) + }); + } + else + { + results.Add(new DataValue + { + Value = Variant.Null, + SourceTimestamp = timestamp, + ServerTimestamp = timestamp, + StatusCode = StatusCodes.BadNoData + }); + } + + query.EndQuery(out _); + } + } + catch (OperationCanceledException) + { + throw; + } + catch (ObjectDisposedException) + { + throw; + } + catch (Exception ex) + { + Log.Warning(ex, "HistoryRead at-time failed for {Tag}", tagName); + HandleConnectionError(ex); + } + + Log.Debug("HistoryRead at-time: {Tag} returned {Count} values for {Timestamps} timestamps", + tagName, results.Count, timestamps.Length); + + return Task.FromResult(results); + } + + /// + /// 
Reads historical alarm/event records from the Historian event store. + /// + /// Optional source name filter. Null returns all events. + /// The inclusive start of the event history window. + /// The inclusive end of the event history window. + /// The maximum number of events to return. + /// The cancellation token. + public Task> ReadEventsAsync( + string? sourceName, DateTime startTime, DateTime endTime, int maxEvents, + CancellationToken ct = default) + { + var results = new List(); + + try + { + EnsureEventConnected(); + + using var query = _eventConnection!.CreateEventQuery(); + var args = new EventQueryArgs + { + StartDateTime = startTime, + EndDateTime = endTime, + EventCount = maxEvents > 0 ? (uint)maxEvents : (uint)_config.MaxValuesPerRead, + QueryType = HistorianEventQueryType.Events, + EventOrder = HistorianEventOrder.Ascending + }; + + if (!string.IsNullOrEmpty(sourceName)) + { + query.AddEventFilter("Source", HistorianComparisionType.Equal, sourceName, out _); + } + + if (!query.StartQuery(args, out var error)) + { + Log.Warning("Historian SDK event query start failed: {Error}", error.ErrorCode); + HandleEventConnectionError(); + return Task.FromResult(results); + } + + var count = 0; + while (query.MoveNext(out error)) + { + ct.ThrowIfCancellationRequested(); + results.Add(query.QueryResult); + count++; + if (maxEvents > 0 && count >= maxEvents) + break; + } + + query.EndQuery(out _); + } + catch (OperationCanceledException) + { + throw; + } + catch (ObjectDisposedException) + { + throw; + } + catch (Exception ex) + { + Log.Warning(ex, "HistoryRead events failed for source {Source}", sourceName ?? "(all)"); + HandleEventConnectionError(ex); + } + + Log.Debug("HistoryRead events: source={Source} returned {Count} events ({Start} to {End})", + sourceName ?? "(all)", results.Count, startTime, endTime); + + return Task.FromResult(results); + } + + /// + /// Extracts the requested aggregate value from an by column name. + /// + internal static double? 
ExtractAggregateValue(AnalogSummaryQueryResult result, string column) + { + switch (column) + { + case "Average": return result.Average; + case "Minimum": return result.Minimum; + case "Maximum": return result.Maximum; + case "ValueCount": return result.ValueCount; + case "First": return result.First; + case "Last": return result.Last; + case "StdDev": return result.StdDev; + default: return null; + } + } /// /// Maps an OPC UA aggregate NodeId to the corresponding Historian column name. @@ -156,5 +536,38 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.Historian return "StdDev"; return null; } + + /// + /// Closes the Historian SDK connection and releases resources. + /// + public void Dispose() + { + if (_disposed) + return; + _disposed = true; + + try + { + _connection?.CloseConnection(out _); + _connection?.Dispose(); + } + catch (Exception ex) + { + Log.Warning(ex, "Error closing Historian SDK connection"); + } + + try + { + _eventConnection?.CloseConnection(out _); + _eventConnection?.Dispose(); + } + catch (Exception ex) + { + Log.Warning(ex, "Error closing Historian SDK event connection"); + } + + _connection = null; + _eventConnection = null; + } } -} \ No newline at end of file +} diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/Historian/HistoryContinuationPoint.cs b/src/ZB.MOM.WW.LmxOpcUa.Host/Historian/HistoryContinuationPoint.cs new file mode 100644 index 0000000..08d4d66 --- /dev/null +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/Historian/HistoryContinuationPoint.cs @@ -0,0 +1,88 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using Opc.Ua; +using Serilog; + +namespace ZB.MOM.WW.LmxOpcUa.Host.Historian +{ + /// + /// Manages continuation points for OPC UA HistoryRead requests that return + /// more data than the per-request limit allows. 
+ /// + internal sealed class HistoryContinuationPointManager + { + private static readonly ILogger Log = Serilog.Log.ForContext(); + + private readonly ConcurrentDictionary _store = new(); + private readonly TimeSpan _timeout = TimeSpan.FromMinutes(5); + + /// + /// Stores remaining data values and returns a continuation point identifier. + /// + public byte[] Store(List remaining) + { + PurgeExpired(); + var id = Guid.NewGuid(); + _store[id] = new StoredContinuation(remaining, DateTime.UtcNow); + Log.Debug("Stored history continuation point {Id} with {Count} remaining values", id, remaining.Count); + return id.ToByteArray(); + } + + /// + /// Retrieves and removes the remaining data values for a continuation point. + /// Returns null if the continuation point is invalid or expired. + /// + public List? Retrieve(byte[] continuationPoint) + { + if (continuationPoint == null || continuationPoint.Length != 16) + return null; + + var id = new Guid(continuationPoint); + if (!_store.TryRemove(id, out var stored)) + return null; + + if (DateTime.UtcNow - stored.CreatedAt > _timeout) + { + Log.Debug("History continuation point {Id} expired", id); + return null; + } + + return stored.Values; + } + + /// + /// Releases a continuation point without retrieving its data. 
+ /// + public void Release(byte[] continuationPoint) + { + if (continuationPoint == null || continuationPoint.Length != 16) + return; + + var id = new Guid(continuationPoint); + _store.TryRemove(id, out _); + } + + private void PurgeExpired() + { + var cutoff = DateTime.UtcNow - _timeout; + foreach (var kvp in _store) + { + if (kvp.Value.CreatedAt < cutoff) + _store.TryRemove(kvp.Key, out _); + } + } + + private sealed class StoredContinuation + { + public StoredContinuation(List values, DateTime createdAt) + { + Values = values; + CreatedAt = createdAt; + } + + public List Values { get; } + public DateTime CreatedAt { get; } + } + } +} diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxNodeManager.cs b/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxNodeManager.cs index f916caa..ad8fc6a 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxNodeManager.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxNodeManager.cs @@ -28,6 +28,7 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa private readonly bool _anonymousCanWrite; private readonly AutoResetEvent _dataChangeSignal = new(false); private readonly Dictionary> _gobjectToTagRefs = new(); + private readonly HistoryContinuationPointManager _historyContinuations = new(); private readonly HistorianDataSource? 
_historianDataSource; private readonly PerformanceMetrics _metrics; @@ -896,6 +897,44 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa variable.AccessLevel = accessLevel; variable.UserAccessLevel = accessLevel; variable.Historizing = attr.IsHistorized; + + if (attr.IsHistorized) + { + var histConfigNodeId = new NodeId(nodeIdString + ".HAConfiguration", NamespaceIndex); + var histConfig = new BaseObjectState(variable) + { + NodeId = histConfigNodeId, + BrowseName = new QualifiedName("HAConfiguration", NamespaceIndex), + DisplayName = "HA Configuration", + TypeDefinitionId = ObjectTypeIds.HistoricalDataConfigurationType + }; + + var steppedProp = new PropertyState(histConfig) + { + NodeId = new NodeId(nodeIdString + ".HAConfiguration.Stepped", NamespaceIndex), + BrowseName = BrowseNames.Stepped, + DisplayName = "Stepped", + Value = false, + AccessLevel = AccessLevels.CurrentRead, + UserAccessLevel = AccessLevels.CurrentRead + }; + histConfig.AddChild(steppedProp); + + var definitionProp = new PropertyState(histConfig) + { + NodeId = new NodeId(nodeIdString + ".HAConfiguration.Definition", NamespaceIndex), + BrowseName = BrowseNames.Definition, + DisplayName = "Definition", + Value = "Wonderware Historian", + AccessLevel = AccessLevels.CurrentRead, + UserAccessLevel = AccessLevels.CurrentRead + }; + histConfig.AddChild(definitionProp); + + variable.AddChild(histConfig); + AddPredefinedNode(SystemContext, histConfig); + } + variable.Value = NormalizePublishedValue(attr.FullTagReference, null); variable.StatusCode = StatusCodes.BadWaitingForInitialData; variable.Timestamp = DateTime.UtcNow; @@ -1390,6 +1429,21 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa foreach (var handle in nodesToProcess) { var idx = handle.Index; + + // Handle continuation point resumption + if (nodesToRead[idx].ContinuationPoint != null && nodesToRead[idx].ContinuationPoint.Length > 0) + { + var remaining = _historyContinuations.Retrieve(nodesToRead[idx].ContinuationPoint); + if (remaining == null) + { + 
errors[idx] = new ServiceResult(StatusCodes.BadContinuationPointInvalid); + continue; + } + + ReturnHistoryPage(remaining, details.NumValuesPerNode, results, errors, idx); + continue; + } + var nodeIdStr = handle.NodeId?.Identifier as string; if (nodeIdStr == null || !_nodeIdToTagReference.TryGetValue(nodeIdStr, out var tagRef)) { @@ -1403,6 +1457,12 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa continue; } + if (details.IsReadModified) + { + errors[idx] = new ServiceResult(StatusCodes.BadHistoryOperationUnsupported); + continue; + } + try { var maxValues = details.NumValuesPerNode > 0 ? (int)details.NumValuesPerNode : 0; @@ -1410,15 +1470,10 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa tagRef, details.StartTime, details.EndTime, maxValues) .GetAwaiter().GetResult(); - var historyData = new HistoryData(); - historyData.DataValues.AddRange(dataValues); + if (details.ReturnBounds) + AddBoundingValues(dataValues, details.StartTime, details.EndTime); - results[idx] = new HistoryReadResult - { - StatusCode = StatusCodes.Good, - HistoryData = new ExtensionObject(historyData) - }; - errors[idx] = ServiceResult.Good; + ReturnHistoryPage(dataValues, details.NumValuesPerNode, results, errors, idx); } catch (Exception ex) { @@ -1442,6 +1497,21 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa foreach (var handle in nodesToProcess) { var idx = handle.Index; + + // Handle continuation point resumption + if (nodesToRead[idx].ContinuationPoint != null && nodesToRead[idx].ContinuationPoint.Length > 0) + { + var remaining = _historyContinuations.Retrieve(nodesToRead[idx].ContinuationPoint); + if (remaining == null) + { + errors[idx] = new ServiceResult(StatusCodes.BadContinuationPointInvalid); + continue; + } + + ReturnHistoryPage(remaining, 0, results, errors, idx); + continue; + } + var nodeIdStr = handle.NodeId?.Identifier as string; if (nodeIdStr == null || !_nodeIdToTagReference.TryGetValue(nodeIdStr, out var tagRef)) { @@ -1476,6 +1546,58 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa 
details.ProcessingInterval, column) .GetAwaiter().GetResult(); + ReturnHistoryPage(dataValues, 0, results, errors, idx); + } + catch (Exception ex) + { + Log.Warning(ex, "HistoryRead processed failed for {TagRef}", tagRef); + errors[idx] = new ServiceResult(StatusCodes.BadInternalError); + } + } + } + + /// + protected override void HistoryReadAtTime( + ServerSystemContext context, + ReadAtTimeDetails details, + TimestampsToReturn timestampsToReturn, + IList nodesToRead, + IList results, + IList errors, + List nodesToProcess, + IDictionary cache) + { + foreach (var handle in nodesToProcess) + { + var idx = handle.Index; + var nodeIdStr = handle.NodeId?.Identifier as string; + if (nodeIdStr == null || !_nodeIdToTagReference.TryGetValue(nodeIdStr, out var tagRef)) + { + errors[idx] = new ServiceResult(StatusCodes.BadNodeIdUnknown); + continue; + } + + if (_historianDataSource == null) + { + errors[idx] = new ServiceResult(StatusCodes.BadHistoryOperationUnsupported); + continue; + } + + if (details.ReqTimes == null || details.ReqTimes.Count == 0) + { + errors[idx] = new ServiceResult(StatusCodes.BadInvalidArgument); + continue; + } + + try + { + var timestamps = new DateTime[details.ReqTimes.Count]; + for (var i = 0; i < details.ReqTimes.Count; i++) + timestamps[i] = details.ReqTimes[i]; + + var dataValues = _historianDataSource.ReadAtTimeAsync(tagRef, timestamps) + .GetAwaiter().GetResult(); + var historyData = new HistoryData(); historyData.DataValues.AddRange(dataValues); @@ -1488,12 +1610,149 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa } catch (Exception ex) { - Log.Warning(ex, "HistoryRead processed failed for {TagRef}", tagRef); + Log.Warning(ex, "HistoryRead at-time failed for {TagRef}", tagRef); errors[idx] = new ServiceResult(StatusCodes.BadInternalError); } } } + /// + protected override void HistoryReadEvents( + ServerSystemContext context, + ReadEventDetails details, + TimestampsToReturn timestampsToReturn, + IList nodesToRead, + IList results, + IList 
errors, + List nodesToProcess, + IDictionary cache) + { + foreach (var handle in nodesToProcess) + { + var idx = handle.Index; + var nodeIdStr = handle.NodeId?.Identifier as string; + + if (_historianDataSource == null) + { + errors[idx] = new ServiceResult(StatusCodes.BadHistoryOperationUnsupported); + continue; + } + + // Resolve the source name for event filtering. + // Alarm condition nodes end with ".Condition" — strip to get the source tag. + // Area/object nodes filter by Source_Name matching the browse name. + string? sourceName = null; + if (nodeIdStr != null) + { + if (nodeIdStr.EndsWith(".Condition")) + { + var baseTag = nodeIdStr.Substring(0, nodeIdStr.Length - ".Condition".Length); + sourceName = baseTag; + } + else if (_nodeIdToTagReference.TryGetValue(nodeIdStr, out var tagRef)) + { + sourceName = tagRef; + } + } + + try + { + var maxEvents = details.NumValuesPerNode > 0 ? (int)details.NumValuesPerNode : 0; + var events = _historianDataSource.ReadEventsAsync( + sourceName, details.StartTime, details.EndTime, maxEvents) + .GetAwaiter().GetResult(); + + var historyEvent = new HistoryEvent(); + foreach (var evt in events) + { + // Build the standard event field list per OPC UA Part 11 + // Fields: EventId, EventType, SourceNode, SourceName, Time, ReceiveTime, + // Message, Severity + var fields = new HistoryEventFieldList(); + fields.EventFields.Add(new Variant(evt.Id.ToByteArray())); + fields.EventFields.Add(new Variant(ObjectTypeIds.AlarmConditionType)); + fields.EventFields.Add(new Variant( + nodeIdStr != null ? new NodeId(nodeIdStr, NamespaceIndex) : NodeId.Null)); + fields.EventFields.Add(new Variant(evt.Source ?? "")); + fields.EventFields.Add(new Variant( + DateTime.SpecifyKind(evt.EventTime, DateTimeKind.Utc))); + fields.EventFields.Add(new Variant( + DateTime.SpecifyKind(evt.ReceivedTime, DateTimeKind.Utc))); + fields.EventFields.Add(new Variant(new LocalizedText(evt.DisplayText ?? 
""))); + fields.EventFields.Add(new Variant((ushort)evt.Severity)); + historyEvent.Events.Add(fields); + } + + results[idx] = new HistoryReadResult + { + StatusCode = StatusCodes.Good, + HistoryData = new ExtensionObject(historyEvent) + }; + errors[idx] = ServiceResult.Good; + } + catch (Exception ex) + { + Log.Warning(ex, "HistoryRead events failed for {NodeId}", nodeIdStr); + errors[idx] = new ServiceResult(StatusCodes.BadInternalError); + } + } + } + + private void ReturnHistoryPage(List dataValues, uint numValuesPerNode, + IList results, IList errors, int idx) + { + var pageSize = numValuesPerNode > 0 ? (int)numValuesPerNode : dataValues.Count; + + var historyData = new HistoryData(); + byte[]? continuationPoint = null; + + if (dataValues.Count > pageSize) + { + historyData.DataValues.AddRange(dataValues.GetRange(0, pageSize)); + var remainder = dataValues.GetRange(pageSize, dataValues.Count - pageSize); + continuationPoint = _historyContinuations.Store(remainder); + } + else + { + historyData.DataValues.AddRange(dataValues); + } + + results[idx] = new HistoryReadResult + { + StatusCode = StatusCodes.Good, + HistoryData = new ExtensionObject(historyData), + ContinuationPoint = continuationPoint + }; + errors[idx] = ServiceResult.Good; + } + + private static void AddBoundingValues(List dataValues, DateTime startTime, DateTime endTime) + { + // Insert start bound if first sample doesn't match start time + if (dataValues.Count == 0 || dataValues[0].SourceTimestamp != startTime) + { + dataValues.Insert(0, new DataValue + { + Value = Variant.Null, + SourceTimestamp = startTime, + ServerTimestamp = startTime, + StatusCode = StatusCodes.BadBoundNotFound + }); + } + + // Append end bound if last sample doesn't match end time + if (dataValues.Count == 0 || dataValues[dataValues.Count - 1].SourceTimestamp != endTime) + { + dataValues.Add(new DataValue + { + Value = Variant.Null, + SourceTimestamp = endTime, + ServerTimestamp = endTime, + StatusCode = 
StatusCodes.BadBoundNotFound + }); + } + } + #endregion #region Subscription Delivery diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxOpcUaServer.cs b/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxOpcUaServer.cs index 1826aaf..0a098e6 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxOpcUaServer.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUa/LmxOpcUaServer.cs @@ -109,6 +109,7 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa server.SessionManager.ImpersonateUser += OnImpersonateUser; ConfigureRedundancy(server); + ConfigureHistoryCapabilities(server); } private void ConfigureRedundancy(IServerInternal server) @@ -162,6 +163,118 @@ namespace ZB.MOM.WW.LmxOpcUa.Host.OpcUa } } + private void ConfigureHistoryCapabilities(IServerInternal server) + { + if (_historianDataSource == null) + return; + + try + { + var dnm = server.DiagnosticsNodeManager; + var ctx = server.DefaultSystemContext; + + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_AccessHistoryDataCapability, true); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_AccessHistoryEventsCapability, + _alarmTrackingEnabled); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_MaxReturnDataValues, + (uint)(_historianDataSource != null ? 
10000 : 0)); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_MaxReturnEventValues, (uint)0); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_InsertDataCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_ReplaceDataCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_UpdateDataCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_DeleteRawCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_DeleteAtTimeCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_InsertEventCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_ReplaceEventCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_UpdateEventCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_DeleteEventCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_InsertAnnotationCapability, false); + SetPredefinedVariable(dnm, ctx, + VariableIds.HistoryServerCapabilities_ServerTimestampSupported, true); + + // Add aggregate function references under the AggregateFunctions folder + var aggFolderNode = dnm?.FindPredefinedNode( + ObjectIds.HistoryServerCapabilities_AggregateFunctions, + typeof(FolderState)) as FolderState; + + if (aggFolderNode != null) + { + var aggregateIds = new[] + { + ObjectIds.AggregateFunction_Average, + ObjectIds.AggregateFunction_Minimum, + ObjectIds.AggregateFunction_Maximum, + ObjectIds.AggregateFunction_Count, + ObjectIds.AggregateFunction_Start, + ObjectIds.AggregateFunction_End, + ObjectIds.AggregateFunction_StandardDeviationPopulation + }; + + foreach (var aggId in aggregateIds) + { + var aggNode = dnm?.FindPredefinedNode(aggId, typeof(BaseObjectState)) as BaseObjectState; + if 
(aggNode != null) + { + try + { + aggFolderNode.AddReference(ReferenceTypeIds.Organizes, false, aggNode.NodeId); + } + catch (ArgumentException) + { + // Reference already exists — skip + } + + try + { + aggNode.AddReference(ReferenceTypeIds.Organizes, true, aggFolderNode.NodeId); + } + catch (ArgumentException) + { + // Reference already exists — skip + } + } + } + + Log.Information("HistoryServerCapabilities configured with {Count} aggregate functions", + aggregateIds.Length); + } + else + { + Log.Warning("AggregateFunctions folder not found in predefined nodes"); + } + } + catch (Exception ex) + { + Log.Warning(ex, + "Failed to configure HistoryServerCapabilities — history discovery may not work for clients"); + } + } + + private static void SetPredefinedVariable(DiagnosticsNodeManager? dnm, ServerSystemContext ctx, + NodeId variableId, object value) + { + var node = dnm?.FindPredefinedNode(variableId, typeof(BaseVariableState)) as BaseVariableState; + if (node != null) + { + node.Value = value; + node.ClearChangeMasks(ctx, false); + } + } + /// /// Updates the server's ServiceLevel based on current runtime health. /// Called by the service layer when MXAccess or DB health changes. diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUaService.cs b/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUaService.cs index ad42c2b..cac0486 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUaService.cs +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/OpcUaService.cs @@ -31,6 +31,7 @@ namespace ZB.MOM.WW.LmxOpcUa.Host private CancellationTokenSource? _cts; private HealthCheckService? _healthCheck; + private HistorianDataSource? _historianDataSource; private MxAccessClient? _mxAccessClient; private IMxAccessClient? _mxAccessClientForWiring; private StaComThread? _staThread; @@ -214,7 +215,7 @@ namespace ZB.MOM.WW.LmxOpcUa.Host // Step 8: Create OPC UA server host + node manager var effectiveMxClient = (IMxAccessClient?)_mxAccessClient ?? _mxAccessClientForWiring ?? 
new NullMxAccessClient(); - var historianDataSource = _config.Historian.Enabled + _historianDataSource = _config.Historian.Enabled ? new HistorianDataSource(_config.Historian) : null; IUserAuthenticationProvider? authProvider = null; @@ -230,7 +231,7 @@ namespace ZB.MOM.WW.LmxOpcUa.Host _config.Authentication.Ldap.BaseDN); } - ServerHost = new OpcUaServerHost(_config.OpcUa, effectiveMxClient, Metrics, historianDataSource, + ServerHost = new OpcUaServerHost(_config.OpcUa, effectiveMxClient, Metrics, _historianDataSource, _config.Authentication, authProvider, _config.Security, _config.Redundancy); // Step 9-10: Query hierarchy, start server, build address space @@ -329,6 +330,7 @@ namespace ZB.MOM.WW.LmxOpcUa.Host } _staThread?.Dispose(); + _historianDataSource?.Dispose(); StatusWeb?.Dispose(); Metrics?.Dispose(); diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/ZB.MOM.WW.LmxOpcUa.Host.csproj b/src/ZB.MOM.WW.LmxOpcUa.Host/ZB.MOM.WW.LmxOpcUa.Host.csproj index 3ecd2ae..ce723a2 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/ZB.MOM.WW.LmxOpcUa.Host.csproj +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/ZB.MOM.WW.LmxOpcUa.Host.csproj @@ -29,6 +29,9 @@ + + + @@ -45,6 +48,30 @@ ..\..\lib\ArchestrA.MxAccess.dll false + + + ..\..\lib\aahClientManaged.dll + false + + + + + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + + + PreserveNewest + diff --git a/src/ZB.MOM.WW.LmxOpcUa.Host/appsettings.json b/src/ZB.MOM.WW.LmxOpcUa.Host/appsettings.json index 2b82f39..43c3a10 100644 --- a/src/ZB.MOM.WW.LmxOpcUa.Host/appsettings.json +++ b/src/ZB.MOM.WW.LmxOpcUa.Host/appsettings.json @@ -71,7 +71,11 @@ }, "Historian": { "Enabled": false, - "ConnectionString": "Server=localhost;Database=Runtime;Integrated Security=true;", + "ServerName": "localhost", + "IntegratedSecurity": true, + "UserName": null, + "Password": null, + "Port": 32568, "CommandTimeoutSeconds": 30, "MaxValuesPerRead": 10000 } diff --git 
a/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Helpers/AggregateTypeMapperTests.cs b/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Helpers/AggregateTypeMapperTests.cs index 9d2968d..1a22c79 100644 --- a/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Helpers/AggregateTypeMapperTests.cs +++ b/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Helpers/AggregateTypeMapperTests.cs @@ -15,6 +15,7 @@ public class AggregateTypeMapperTests [InlineData(AggregateType.Count)] [InlineData(AggregateType.Start)] [InlineData(AggregateType.End)] + [InlineData(AggregateType.StandardDeviation)] public void ToNodeId_ReturnsNonNullForAllValues(AggregateType aggregate) { var nodeId = AggregateTypeMapper.ToNodeId(aggregate); @@ -58,6 +59,13 @@ public class AggregateTypeMapperTests AggregateTypeMapper.ToNodeId(AggregateType.End).ShouldBe(ObjectIds.AggregateFunction_End); } + [Fact] + public void ToNodeId_StandardDeviation_MapsCorrectly() + { + AggregateTypeMapper.ToNodeId(AggregateType.StandardDeviation) + .ShouldBe(ObjectIds.AggregateFunction_StandardDeviationPopulation); + } + [Fact] public void ToNodeId_InvalidValue_Throws() { diff --git a/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Models/ModelConstructionTests.cs b/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Models/ModelConstructionTests.cs index 03798cc..84a112c 100644 --- a/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Models/ModelConstructionTests.cs +++ b/tests/ZB.MOM.WW.LmxOpcUa.Client.Shared.Tests/Models/ModelConstructionTests.cs @@ -125,6 +125,6 @@ public class ModelConstructionTests [Fact] public void AggregateType_Enum_HasExpectedValues() { - Enum.GetValues().Length.ShouldBe(6); + Enum.GetValues().Length.ShouldBe(7); } } \ No newline at end of file diff --git a/tests/ZB.MOM.WW.LmxOpcUa.Client.UI.Tests/HistoryViewModelTests.cs b/tests/ZB.MOM.WW.LmxOpcUa.Client.UI.Tests/HistoryViewModelTests.cs index fa52bd0..c4947fa 100644 --- a/tests/ZB.MOM.WW.LmxOpcUa.Client.UI.Tests/HistoryViewModelTests.cs +++ 
b/tests/ZB.MOM.WW.LmxOpcUa.Client.UI.Tests/HistoryViewModelTests.cs @@ -130,7 +130,7 @@ public class HistoryViewModelTests public void AggregateTypes_ContainsNullForRaw() { _vm.AggregateTypes.ShouldContain((AggregateType?)null); - _vm.AggregateTypes.Count.ShouldBe(7); // null + 6 enum values + _vm.AggregateTypes.Count.ShouldBe(8); // null + 7 enum values } [Fact] diff --git a/tests/ZB.MOM.WW.LmxOpcUa.Client.UI.Tests/Screenshots/DateTimeRangePickerScreenshot.cs b/tests/ZB.MOM.WW.LmxOpcUa.Client.UI.Tests/Screenshots/DateTimeRangePickerScreenshot.cs deleted file mode 100644 index 8303389..0000000 --- a/tests/ZB.MOM.WW.LmxOpcUa.Client.UI.Tests/Screenshots/DateTimeRangePickerScreenshot.cs +++ /dev/null @@ -1,170 +0,0 @@ -using Avalonia; -using Avalonia.Controls; -using Avalonia.Headless; -using Avalonia.Media; -using Avalonia.Threading; -using Shouldly; -using Xunit; -using ZB.MOM.WW.LmxOpcUa.Client.UI.Controls; - -namespace ZB.MOM.WW.LmxOpcUa.Client.UI.Tests.Screenshots; - -public class DateTimeRangePickerScreenshot -{ - private static readonly object Lock = new(); - private static bool _initialized; - - private static void EnsureInitialized() - { - lock (Lock) - { - if (_initialized) return; - _initialized = true; - - AppBuilder.Configure() - .UseSkia() - .UseHeadless(new AvaloniaHeadlessPlatformOptions - { - UseHeadlessDrawing = false - }) - .SetupWithoutStarting(); - } - } - - [Fact] - public void TextBoxes_ShowValues_WhenSetBeforeLoad() - { - EnsureInitialized(); - - Dispatcher.UIThread.Invoke(() => - { - var picker = new DateTimeRangePicker - { - StartDateTime = new DateTimeOffset(2026, 3, 31, 8, 0, 0, TimeSpan.Zero), - EndDateTime = new DateTimeOffset(2026, 3, 31, 14, 0, 0, TimeSpan.Zero) - }; - - var window = new Window - { - Content = picker, - Width = 700, Height = 70 - }; - window.Show(); - Dispatcher.UIThread.RunJobs(); - - var startInput = picker.FindControl("StartInput"); - var endInput = picker.FindControl("EndInput"); - - startInput.ShouldNotBeNull(); - 
endInput.ShouldNotBeNull(); - startInput!.Text.ShouldBe("2026-03-31 08:00:00"); - endInput!.Text.ShouldBe("2026-03-31 14:00:00"); - - window.Close(); - }); - } - - [Fact] - public void TextBoxes_ShowValues_WhenSetAfterLoad() - { - EnsureInitialized(); - - Dispatcher.UIThread.Invoke(() => - { - var picker = new DateTimeRangePicker(); - - var window = new Window - { - Content = picker, - Width = 700, Height = 70 - }; - window.Show(); - Dispatcher.UIThread.RunJobs(); - - // Set values after the control is loaded - picker.StartDateTime = new DateTimeOffset(2026, 1, 15, 10, 30, 0, TimeSpan.Zero); - picker.EndDateTime = new DateTimeOffset(2026, 1, 15, 18, 45, 0, TimeSpan.Zero); - Dispatcher.UIThread.RunJobs(); - - var startInput = picker.FindControl("StartInput"); - var endInput = picker.FindControl("EndInput"); - - startInput!.Text.ShouldBe("2026-01-15 10:30:00"); - endInput!.Text.ShouldBe("2026-01-15 18:45:00"); - - window.Close(); - }); - } - - [Fact] - public void PresetButtons_SetCorrectRange() - { - EnsureInitialized(); - - Dispatcher.UIThread.Invoke(() => - { - var picker = new DateTimeRangePicker(); - - var window = new Window - { - Content = picker, - Width = 700, Height = 70 - }; - window.Show(); - Dispatcher.UIThread.RunJobs(); - - // Click the 1h preset - var lastHourBtn = picker.FindControl