// Copyright 2018-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0
//
// NoRace integration-test placeholders, ported from the Go source file
// golang/nats-server/server/norace_2_test.go (first 24 tests).
//
// The Go originals carry the //go:build !race constraint; these ports
// only execute when NATS_INTEGRATION_ENABLED=true.

using Shouldly;
using Xunit.Abstractions;
using ZB.MOM.NatsNet.Server.IntegrationTests.Helpers;

namespace ZB.MOM.NatsNet.Server.IntegrationTests.NoRace;

[Trait("Category", "NoRace")]
[Trait("Category", "Integration")]
public class NoRace2Tests : IntegrationTestBase
{
    public NoRace2Tests(ITestOutputHelper output)
        : base(output)
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamClusterLeafnodeConnectPerf (skip(t) in Go — never run by default).
    /// 500 leaf-node "vehicles" connect to a 3-server cloud cluster; each creates a source
    /// stream referencing the cloud cluster and must complete connect + stream create
    /// in under 2 seconds.
    /// </summary>
    [SkippableFact]
    public async Task JetStreamClusterLeafnodeConnectPerf_ShouldSucceed()
    {
        Skip.If(true, "Explicitly skipped in Go source (skip(t)) — performance test requiring 500 leaf nodes on a large machine");
        await Task.CompletedTask;
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamClusterDifferentRTTInterestBasedStreamPreAck.
    /// 3-server cluster with asymmetric RTT (S1 ↔ S2 proxied with a 10ms delay).
    /// Interest-policy stream EVENTS (R3) with stream leader on S2 and consumer leader
    /// on S3; 1000 messages published. Verifies S1 (slow path) receives pre-acks,
    /// messages are removed once all consumers ack (state.Msgs == 0), and no pre-acks
    /// remain pending afterwards.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamClusterDifferentRTTInterestBasedStreamPreAck_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceCheckAckFloorWithVeryLargeFirstSeqAndNewConsumers.
    /// Work-queue stream purged to firstSeq=1,200,000,000; one message published,
    /// pull consumer fetches and AckSyncs. Verifies checkAckFloor finishes in under
    /// 1 second (O(gap), not O(firstSeq)), then purges to 2,400,000,000 to exercise
    /// the slower walk path.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void CheckAckFloorWithVeryLargeFirstSeqAndNewConsumers_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceReplicatedMirrorWithLargeStartingSequenceOverLeafnode.
    /// Hub cluster B (3 servers) plus leaf cluster A (3 servers). Stream created on B,
    /// purged to firstSeq=1,000,000,000, then 1000 messages sent. A cross-domain mirror
    /// on leaf cluster A must sync to 1000 msgs with firstSeq=1,000,000,000 in under 1s.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void ReplicatedMirrorWithLargeStartingSequenceOverLeafnode_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceBinaryStreamSnapshotEncodingBasic.
    /// Stream TEST with MaxMsgsPerSubject=1, published in a "swiss cheese" pattern:
    /// 1000 updates to key:2 (laggard), then 998 keys updated twice each to create
    /// interior deletes. Expects firstSeq=1, lastSeq=3000, msgs=1000, numDeleted=2000,
    /// and a correct encode/decode round-trip of the binary stream-state snapshot.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void BinaryStreamSnapshotEncodingBasic_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceFilestoreBinaryStreamSnapshotEncodingLargeGaps.
    /// File store with a small (512-byte) block size: 20,000 messages stored, all
    /// removed except the first and last, blocks synced to clean tombstones. Expects
    /// the encoded snapshot to be under 512 bytes and ss.Deleted.NumDeleted() == 19,998.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void FilestoreBinaryStreamSnapshotEncodingLargeGaps_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamClusterStreamSnapshotCatchup.
    /// 3-server cluster, stream TEST (MaxMsgsPerSubject=1, R3). A non-leader is shut
    /// down, a 50k gap of interior deletes is created, and the stream is snapshotted.
    /// The restarted server must catch up via the snapshot; a second publish + snapshot
    /// round then verifies state (msgs=3).
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamClusterStreamSnapshotCatchup_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceStoreStreamEncoderDecoder.
    /// Two parallel 10-second stress loops (MemStore + FileStore), each storing
    /// messages to random keys (0–256000) and encoding/decoding a snapshot every
    /// second. Asserts encode time &lt; 2s, encoded size &lt; 700KB, decoded state valid.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void StoreStreamEncoderDecoder_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamClusterKVWithServerKill.
    /// 3-server cluster, KV bucket TEST (R3, history=10). Three workers (one per
    /// server) issue random KV get/create/update/delete at 100/s while each server
    /// is randomly killed and restarted 7 times. After the workload stops, all
    /// servers must hold identical stream state.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamClusterKVWithServerKill_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceFileStoreLargeMsgsAndFirstMatching.
    /// File store with 8MB blocks: 150k messages to "foo.bar.N" and 150k to
    /// "foo.baz.N", then block 2 is emptied except for the last 40 messages.
    /// LoadNextMsg("*.baz.*") must complete in under 200 microseconds; the remaining
    /// 40 are removed and the check repeated (non-linear path).
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void FileStoreLargeMsgsAndFirstMatching_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceWSNoCorruptionWithFrameSizeLimit.
    /// Runs testWSNoCorruptionWithFrameSizeLimit with frameSize=50000 and verifies
    /// that WebSocket connections constrained by a frame-size limit never deliver
    /// corrupted messages.
    /// </summary>
    [Fact(Skip = "deferred: requires running NATS server")]
    public void WSNoCorruptionWithFrameSizeLimit_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamAPIDispatchQueuePending.
    /// 3-server cluster, stream TEST with 500k messages on distinct subjects.
    /// 1000 filtered consumers are created concurrently (100 goroutines × 10
    /// wildcard-filtered consumers). The inflight API count must be non-zero at
    /// peak and every consumer create must succeed.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamAPIDispatchQueuePending_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamMirrorAndSourceConsumerFailBackoff.
    /// Verifies backoff math (attempts 1–11 = N*10s, attempts 12+ = max). Mirror and
    /// source streams are created in a 3-server cluster, the source stream leader is
    /// killed, and after a 6-second wait only one consumer-create request may have
    /// been issued per mirror/source, with the fails counter exactly 1 for both.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamMirrorAndSourceConsumerFailBackoff_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamClusterStreamCatchupLargeInteriorDeletes.
    /// 3-server cluster, R1 stream with MaxMsgsPerSubject=100. Interior deletes are
    /// produced (50k random + 100k to one subject + 50k random), then the stream is
    /// scaled to R2. The new replica must catch up to the leader's message count
    /// within 10 seconds.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamClusterStreamCatchupLargeInteriorDeletes_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamClusterBadRestartsWithHealthzPolling.
    /// 3-server cluster with stream TEST (R3) while healthz is polled every 50ms in
    /// the background. 500 pull consumers are created concurrently, then 200 extra
    /// streams; consumer and stream counts must be correct on all servers, and must
    /// return to 0 after everything is deleted.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamClusterBadRestartsWithHealthzPolling_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamKVReplaceWithServerRestart.
    /// 3-server cluster, KV bucket TEST (R3) with AllowDirect disabled and key "foo"
    /// created. A concurrent KV update loop runs while the stream leader is killed
    /// and restarted; the value must never change unexpectedly between a get and the
    /// following update (no data loss).
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamKVReplaceWithServerRestart_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceMemStoreCompactPerformance.
    /// Memory-store stream with MaxMsgsPerSubject=1; 200k messages published to
    /// 100k unique subjects (laggard pattern). The compact operation must finish
    /// in under 100ms.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void MemStoreCompactPerformance_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamSnapshotsWithSlowAckDontSlowConsumer.
    /// Stream with a push consumer; one goroutine publishes while another calls
    /// JetStreamSnapshotStream in parallel. The snapshot must not block message
    /// delivery to the consumer.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void JetStreamSnapshotsWithSlowAckDontSlowConsumer_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceJetStreamWQSkippedMsgsOnScaleUp.
    /// R1 work-queue stream with AckPolicy=Explicit and a durable consumer; 100
    /// messages published and acked, stream scaled to R3, then 100 more published
    /// and acked. No messages may be skipped after scale-up (no ghost sequences).
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream cluster server")]
    public void JetStreamWQSkippedMsgsOnScaleUp_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceConnectionObjectReleased.
    /// After a client connection closes, the server-side client object must be
    /// eventually garbage-collected (no lingering strong references) — a memory-leak
    /// check on the connection-object lifecycle.
    /// </summary>
    [Fact(Skip = "deferred: requires running NATS server")]
    public void ConnectionObjectReleased_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceFileStoreMsgLoadNextMsgMultiPerf.
    /// File store holding 1 million messages across 1000 subjects; LoadNextMsg with
    /// a multi-subject filter must sustain an acceptable rate (timing assertions).
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void FileStoreMsgLoadNextMsgMultiPerf_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceWQAndMultiSubjectFilters.
    /// Work-queue stream whose consumers each carry multiple filter subjects.
    /// Messages published to various subjects must each be delivered to exactly
    /// one consumer with correct filter matching (WQ semantics).
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void WQAndMultiSubjectFilters_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceWQAndMultiSubjectFiltersRace.
    /// Same scenario as WQAndMultiSubjectFilters, plus concurrent publisher
    /// goroutines to stress multi-filter WQ delivery for race conditions.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void WQAndMultiSubjectFiltersRace_ShouldSucceed()
    {
    }

    /// <summary>
    /// Go: TestNoRaceFileStoreWriteFullStateUniqueSubjects.
    /// File store with MaxMsgsPerSubject=1 and 100k messages on 100k unique
    /// subjects; forces a full-state write (recovery path). The written state must
    /// be correct, the read-back after restart must match, and the write must
    /// complete in a reasonable time.
    /// </summary>
    [Fact(Skip = "deferred: requires running JetStream server")]
    public void FileStoreWriteFullStateUniqueSubjects_ShouldSucceed()
    {
    }
}