From 3d0e8a6ac2a61e111ac01b4a9dcd099121ae5515 Mon Sep 17 00:00:00 2001 From: bailey Date: Wed, 19 Nov 2025 11:02:55 -0700 Subject: [PATCH 1/5] rename isSDAMUnrecoverableError to match spec's name --- src/error.ts | 8 +------- src/sdam/server.ts | 12 ++++++++++-- test/unit/error.test.ts | 29 ++++++++--------------------- 3 files changed, 19 insertions(+), 30 deletions(-) diff --git a/src/error.ts b/src/error.ts index 9822361e72..eda39b7dc3 100644 --- a/src/error.ts +++ b/src/error.ts @@ -1528,13 +1528,7 @@ export function isNodeShuttingDownError(err: MongoError): boolean { * * @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.md#not-writable-primary-and-node-is-recovering */ -export function isSDAMUnrecoverableError(error: MongoError): boolean { - // NOTE: null check is here for a strictly pre-CMAP world, a timeout or - // close event are considered unrecoverable - if (error instanceof MongoParseError || error == null) { - return true; - } - +export function isStateChangeError(error: MongoError): boolean { return isRecoveringError(error) || isNotWritablePrimaryError(error); } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index f14188a0d5..24a5373a7b 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -22,12 +22,13 @@ import { import { type AnyError, isNodeShuttingDownError, - isSDAMUnrecoverableError, + isStateChangeError, MONGODB_ERROR_CODES, MongoError, MongoErrorLabel, MongoNetworkError, MongoNetworkTimeoutError, + MongoParseError, MongoRuntimeError, MongoServerClosedError, type MongoServerError, @@ -412,7 +413,14 @@ export class Server extends TypedEventEmitter { this.pool.clear({ serviceId: connection.serviceId }); } } else { - if (isSDAMUnrecoverableError(error)) { + // TODO: considering parse errors as SDAM unrecoverable errors seem + // questionable. 
What if the parse error only comes from an application connection, + // indicating some bytes were lost in transmission? It seems overkill to completely + // kill the server. + // Parse errors from monitoring connections are already handled because the + // error would be wrapped in a ServerHeartbeatFailedEvent, which would mark the + // server unknown and clear the pool. Can we remove this? + if (isStateChangeError(error) || error instanceof MongoParseError) { if (shouldHandleStateChangeError(this, error)) { const shouldClearPool = isNodeShuttingDownError(error); if (this.loadBalanced && connection && shouldClearPool) { diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 32e90a0785..c5124c60f9 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -10,7 +10,7 @@ import * as importsFromErrorSrc from '../../src/error'; import { isResumableError, isRetryableReadError, - isSDAMUnrecoverableError, + isStateChangeError, LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE, LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE, MONGODB_ERROR_CODES, @@ -211,26 +211,13 @@ describe('MongoErrors', () => { }); }); - describe('#isSDAMUnrecoverableError', function () { - context('when the error is a MongoParseError', function () { - it('returns true', function () { - const error = new MongoParseError(''); - expect(isSDAMUnrecoverableError(error)).to.be.true; - }); - }); - - context('when the error is null', function () { - it('returns true', function () { - expect(isSDAMUnrecoverableError(null)).to.be.true; - }); - }); - + describe('#isStateChangeError', function () { context('when the error has a "node is recovering" error code', function () { it('returns true', function () { const error = new MongoError(''); // Code for NotPrimaryOrSecondary error.code = 13436; - expect(isSDAMUnrecoverableError(error)).to.be.true; + expect(isStateChangeError(error)).to.be.true; }); }); @@ -239,7 +226,7 @@ describe('MongoErrors', () => { const error = new MongoError(''); // Code 
for NotWritablePrimary error.code = 10107; - expect(isSDAMUnrecoverableError(error)).to.be.true; + expect(isStateChangeError(error)).to.be.true; }); }); @@ -250,7 +237,7 @@ describe('MongoErrors', () => { // If the response includes an error code, it MUST be solely used to determine if error is a "node is recovering" or "not writable primary" error. const error = new MongoError(NODE_IS_RECOVERING_ERROR_MESSAGE.source); error.code = 555; - expect(isSDAMUnrecoverableError(error)).to.be.false; + expect(isStateChangeError(error)).to.be.false; }); } ); @@ -262,7 +249,7 @@ describe('MongoErrors', () => { const error = new MongoError( `this is ${LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.source}.` ); - expect(isSDAMUnrecoverableError(error)).to.be.true; + expect(isStateChangeError(error)).to.be.true; }); } ); @@ -272,7 +259,7 @@ describe('MongoErrors', () => { function () { it('returns true', function () { const error = new MongoError(`the ${NODE_IS_RECOVERING_ERROR_MESSAGE} from an error`); - expect(isSDAMUnrecoverableError(error)).to.be.true; + expect(isStateChangeError(error)).to.be.true; }); } ); @@ -284,7 +271,7 @@ describe('MongoErrors', () => { const error = new MongoError( `this is ${LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE}, so we have a problem ` ); - expect(isSDAMUnrecoverableError(error)).to.be.true; + expect(isStateChangeError(error)).to.be.true; }); } ); From 47c957f24931dbe539c782a2dffc397cc0d41f68 Mon Sep 17 00:00:00 2001 From: bailey Date: Wed, 19 Nov 2025 11:11:17 -0700 Subject: [PATCH 2/5] remove shouldHandleStateChangeError --- src/sdam/server.ts | 42 +++++++++++++++++++++-------------------- test/unit/error.test.ts | 1 - 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 24a5373a7b..e608054736 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -392,9 +392,7 @@ export class Server extends TypedEventEmitter { return; } - const isStaleError = - error.connectionGeneration && 
error.connectionGeneration < this.pool.generation; - if (isStaleError) { + if (isStaleError(this, error)) { return; } @@ -421,19 +419,17 @@ export class Server extends TypedEventEmitter { // error would be wrapped in a ServerHeartbeatFailedEvent, which would mark the // server unknown and clear the pool. Can we remove this? if (isStateChangeError(error) || error instanceof MongoParseError) { - if (shouldHandleStateChangeError(this, error)) { - const shouldClearPool = isNodeShuttingDownError(error); - if (this.loadBalanced && connection && shouldClearPool) { - this.pool.clear({ serviceId: connection.serviceId }); - } + const shouldClearPool = isNodeShuttingDownError(error); + if (this.loadBalanced && connection && shouldClearPool) { + this.pool.clear({ serviceId: connection.serviceId }); + } - if (!this.loadBalanced) { - if (shouldClearPool) { - error.addErrorLabel(MongoErrorLabel.ResetPool); - } - markServerUnknown(this, error); - process.nextTick(() => this.requestCheck()); + if (!this.loadBalanced) { + if (shouldClearPool) { + error.addErrorLabel(MongoErrorLabel.ResetPool); } + markServerUnknown(this, error); + process.nextTick(() => this.requestCheck()); } } } @@ -568,12 +564,6 @@ function connectionIsStale(pool: ConnectionPool, connection: Connection) { return connection.generation !== pool.generation; } -function shouldHandleStateChangeError(server: Server, err: MongoError) { - const etv = err.topologyVersion; - const stv = server.description.topologyVersion; - return compareTopologyVersion(stv, etv) < 0; -} - function inActiveTransaction(session: ClientSession | undefined, cmd: Document) { return session && session.inTransaction() && !isTransactionCommand(cmd); } @@ -583,3 +573,15 @@ function inActiveTransaction(session: ClientSession | undefined, cmd: Document) function isRetryableWritesEnabled(topology: Topology) { return topology.s.options.retryWrites !== false; } + +function isStaleError(server: Server, error: MongoError): boolean { + const 
currentGeneration = server.pool.generation; + const generation = error.connectionGeneration; + + if (generation && generation < currentGeneration) { + return true; + } + + const currentTopologyVersion = server.description.topologyVersion; + return compareTopologyVersion(currentTopologyVersion, error.topologyVersion) >= 0; +} diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index c5124c60f9..34428b0666 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -26,7 +26,6 @@ import { MongoNetworkError, MongoNetworkTimeoutError, MongoOperationTimeoutError, - MongoParseError, MongoRuntimeError, MongoServerError, MongoSystemError, From 064a83c5e346d979ead19a52ceb7410111c174eb Mon Sep 17 00:00:00 2001 From: bailey Date: Wed, 19 Nov 2025 13:15:56 -0700 Subject: [PATCH 3/5] swap ordering of logic in server --- src/sdam/server.ts | 59 ++++++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index e608054736..44a2727cc1 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -401,37 +401,46 @@ export class Server extends TypedEventEmitter { const isNetworkTimeoutBeforeHandshakeError = error instanceof MongoNetworkError && error.beforeHandshake; const isAuthHandshakeError = error.hasErrorLabel(MongoErrorLabel.HandshakeError); - if (isNetworkNonTimeoutError || isNetworkTimeoutBeforeHandshakeError || isAuthHandshakeError) { - // In load balanced mode we never mark the server as unknown and always - // clear for the specific service id. + + // TODO: considering parse errors as SDAM unrecoverable errors seem + // questionable. What if the parse error only comes from an application connection, + // indicating some bytes were lost in transmission? It seems overkill to completely + // kill the server. 
+ // Parse errors from monitoring connections are already handled because the + // error would be wrapped in a ServerHeartbeatFailedEvent, which would mark the + // server unknown and clear the pool. Can we remove this? + if (isStateChangeError(error) || error instanceof MongoParseError) { + const shouldClearPool = isNodeShuttingDownError(error); + + // from the SDAM spec: The driver MUST synchronize clearing the pool with updating the topology. + // In load balanced mode: there is no monitoring, so there is no topology to update. We simply clear the pool. + // For other topologies: the `ResetPool` label instructs the topology to clear the server's pool in `updateServer()`. + if (!this.loadBalanced) { + if (shouldClearPool) { + error.addErrorLabel(MongoErrorLabel.ResetPool); + } + markServerUnknown(this, error); + process.nextTick(() => this.requestCheck()); + return; + } + + if (connection && shouldClearPool) { + this.pool.clear({ serviceId: connection.serviceId }); + } + } else if ( + isNetworkNonTimeoutError || + isNetworkTimeoutBeforeHandshakeError || + isAuthHandshakeError + ) { + // from the SDAM spec: The driver MUST synchronize clearing the pool with updating the topology. + // In load balanced mode: there is no monitoring, so there is no topology to update. We simply clear the pool. + // For other topologies: the `ResetPool` label instructs the topology to clear the server's pool in `updateServer()`. if (!this.loadBalanced) { error.addErrorLabel(MongoErrorLabel.ResetPool); markServerUnknown(this, error); } else if (connection) { this.pool.clear({ serviceId: connection.serviceId }); } - } else { - // TODO: considering parse errors as SDAM unrecoverable errors seem - // questionable. What if the parse error only comes from an application connection, - // indicating some bytes were lost in transmission? It seems overkill to completely - // kill the server. 
- // Parse errors from monitoring connections are already handled because the - // error would be wrapped in a ServerHeartbeatFailedEvent, which would mark the - // server unknown and clear the pool. Can we remove this? - if (isStateChangeError(error) || error instanceof MongoParseError) { - const shouldClearPool = isNodeShuttingDownError(error); - if (this.loadBalanced && connection && shouldClearPool) { - this.pool.clear({ serviceId: connection.serviceId }); - } - - if (!this.loadBalanced) { - if (shouldClearPool) { - error.addErrorLabel(MongoErrorLabel.ResetPool); - } - markServerUnknown(this, error); - process.nextTick(() => this.requestCheck()); - } - } } } From 151b9867460b6ec21f5e6e7bf65f8080778cd55f Mon Sep 17 00:00:00 2001 From: bailey Date: Fri, 21 Nov 2025 14:36:19 -0700 Subject: [PATCH 4/5] everything else --- src/cmap/connect.ts | 15 ++ src/error.ts | 4 +- src/sdam/server.ts | 9 +- ...ver_discovery_and_monitoring.prose.test.ts | 77 ++++++++- .../pool-create-min-size-error.json | 2 +- .../pool-create-min-size-error.yml | 2 +- .../load-balancers/sdam-error-handling.json | 8 +- .../load-balancers/sdam-error-handling.yml | 11 +- .../backpressure-network-error-fail.json | 140 +++++++++++++++++ .../backpressure-network-error-fail.yml | 80 ++++++++++ .../backpressure-network-timeout-fail.json | 143 +++++++++++++++++ .../backpressure-network-timeout-fail.yml | 83 ++++++++++ ...ged-on-min-pool-size-population-error.json | 106 +++++++++++++ ...nged-on-min-pool-size-population-error.yml | 62 ++++++++ .../unified/pool-clear-checkout-error.json | 148 ------------------ .../unified/pool-clear-checkout-error.yml | 90 ----------- .../pool-clear-min-pool-size-error.json | 116 -------------- .../pool-clear-min-pool-size-error.yml | 11 +- 18 files changed, 727 insertions(+), 380 deletions(-) create mode 100644 test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.json create mode 100644 
test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.yml create mode 100644 test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.json create mode 100644 test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.yml create mode 100644 test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.json create mode 100644 test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.yml diff --git a/src/cmap/connect.ts b/src/cmap/connect.ts index 62db2f98d4..e9fecb26bf 100644 --- a/src/cmap/connect.ts +++ b/src/cmap/connect.ts @@ -35,6 +35,11 @@ import { /** @public */ export type Stream = Socket | TLSSocket; +function applyBackpressureLabels(error: MongoError) { + error.addErrorLabel(MongoErrorLabel.SystemOverloadedError); + error.addErrorLabel(MongoErrorLabel.RetryableError); +} + export async function connect(options: ConnectionOptions): Promise { let connection: Connection | null = null; try { @@ -103,6 +108,8 @@ export async function performInitialHandshake( const authContext = new AuthContext(conn, credentials, options); conn.authContext = authContext; + // If we encounter an error preparing the handshake document, do NOT apply backpressure labels. Errors + // encountered building the handshake document are all client-side, and do not indicate an overloaded server. const handshakeDoc = await prepareHandshakeDocument(authContext); // @ts-expect-error: TODO(NODE-5141): The options need to be filtered properly, Connection options differ from Command options @@ -163,12 +170,15 @@ export async function performInitialHandshake( try { await provider.auth(authContext); } catch (error) { + // NOTE: If we encounter an error authenticating a connection, do NOT apply backpressure labels. 
+ if (error instanceof MongoError) { error.addErrorLabel(MongoErrorLabel.HandshakeError); if (needsRetryableWriteLabel(error, response.maxWireVersion, conn.description.type)) { error.addErrorLabel(MongoErrorLabel.RetryableWriteError); } } + throw error; } } @@ -189,6 +199,9 @@ export async function performInitialHandshake( if (error instanceof MongoError) { error.addErrorLabel(MongoErrorLabel.HandshakeError); } + // If we encounter an error executing the initial handshake, apply backpressure labels. + applyBackpressureLabels(error); + throw error; } } @@ -424,6 +437,8 @@ export async function makeSocket(options: MakeConnectionOptions): Promise { error instanceof MongoNetworkError && !(error instanceof MongoNetworkTimeoutError); const isNetworkTimeoutBeforeHandshakeError = error instanceof MongoNetworkError && error.beforeHandshake; - const isAuthHandshakeError = error.hasErrorLabel(MongoErrorLabel.HandshakeError); + const isAuthOrEstablishmentHandshakeError = error.hasErrorLabel(MongoErrorLabel.HandshakeError); + const isSystemOverloadError = error.hasErrorLabel(MongoErrorLabel.SystemOverloadedError); // TODO: considering parse errors as SDAM unrecoverable errors seem // questionable. What if the parse error only comes from an application connection, @@ -430,8 +431,12 @@ export class Server extends TypedEventEmitter { } else if ( isNetworkNonTimeoutError || isNetworkTimeoutBeforeHandshakeError || - isAuthHandshakeError + isAuthOrEstablishmentHandshakeError ) { + // Do NOT clear the pool if we encounter a system overloaded error. + if (isSystemOverloadError) { + return; + } // from the SDAM spec: The driver MUST synchronize clearing the pool with updating the topology. // In load balanced mode: there is no monitoring, so there is no topology to update. We simply clear the pool. // For other topologies: the `ResetPool` label instructs the topology to clear the server's pool in `updateServer()`. 
diff --git a/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts b/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts index b0ed7adb07..718e69d0fd 100644 --- a/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts +++ b/test/integration/server-discovery-and-monitoring/server_discovery_and_monitoring.prose.test.ts @@ -1,13 +1,18 @@ import { expect } from 'chai'; import { once } from 'events'; -import { type MongoClient } from '../../../src'; +import { + type ConnectionCheckOutFailedEvent, + type ConnectionPoolClearedEvent, + type MongoClient +} from '../../../src'; import { CONNECTION_POOL_CLEARED, CONNECTION_POOL_READY, SERVER_HEARTBEAT_FAILED, SERVER_HEARTBEAT_SUCCEEDED } from '../../../src/constants'; +import { sleep } from '../../tools/utils'; describe('Server Discovery and Monitoring Prose Tests', function () { context('Monitors sleep at least minHeartbeatFrequencyMS between checks', function () { @@ -187,4 +192,74 @@ describe('Server Discovery and Monitoring Prose Tests', function () { } }); }); + + context('Connection Pool Backpressure', function () { + let client: MongoClient; + const checkoutFailedEvents: Array = []; + const poolClearedEvents: Array = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { maxConnecting: 100 }); + + client.on('connectionCheckOutFailed', e => checkoutFailedEvents.push(e)); + client.on('connectionPoolCleared', e => poolClearedEvents.push(e)); + + await client.connect(); + + const admin = client.db('admin').admin(); + await admin.command({ + setParameter: 1, + ingressConnectionEstablishmentRateLimiterEnabled: true + }); + await admin.command({ + setParameter: 1, + ingressConnectionEstablishmentRatePerSec: 20 + }); + await admin.command({ + setParameter: 1, + ingressConnectionEstablishmentBurstCapacitySecs: 1 + }); + await admin.command({ + setParameter: 1, + 
ingressConnectionEstablishmentMaxQueueDepth: 1 + }); + + await client.db('test').collection('test').insertOne({}); + }); + + afterEach(async function () { + // give the time to recover from the connection storm before cleaning up. + await sleep(1000); + + const admin = client.db('admin').admin(); + await admin.command({ + setParameter: 1, + ingressConnectionEstablishmentRateLimiterEnabled: false + }); + + await client.close(); + }); + + it( + 'does not clear the pool when connections are closed due to connection storms', + { + requires: { + mongodb: '>=7.0' // rate limiting added in 7.0 + } + }, + async function () { + await Promise.allSettled( + Array.from({ length: 100 }).map(() => + client + .db('test') + .collection('test') + .findOne({ $where: 'function() { sleep(2000); return true; }' }) + ) + ); + + expect(poolClearedEvents).to.be.empty; + expect(checkoutFailedEvents.length).to.be.greaterThan(10); + } + ); + }); }); diff --git a/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.json b/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.json index 509b2a2356..fe7489f401 100644 --- a/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.json +++ b/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.json @@ -17,7 +17,7 @@ "isMaster", "hello" ], - "closeConnection": true, + "errorCode": 91, "appName": "poolCreateMinSizeErrorTest" } }, diff --git a/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.yml b/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.yml index f43c4ee154..42cf6e32a3 100644 --- a/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.yml +++ b/test/spec/connection-monitoring-and-pooling/cmap-format/pool-create-min-size-error.yml @@ -11,7 +11,7 @@ failPoint: mode: { times: 50 } data: failCommands: ["isMaster","hello"] - 
closeConnection: true + errorCode: 91 appName: "poolCreateMinSizeErrorTest" poolOptions: minPoolSize: 1 diff --git a/test/spec/load-balancers/sdam-error-handling.json b/test/spec/load-balancers/sdam-error-handling.json index 4ab34b1fed..2107afe5b3 100644 --- a/test/spec/load-balancers/sdam-error-handling.json +++ b/test/spec/load-balancers/sdam-error-handling.json @@ -1,6 +1,6 @@ { "description": "state change errors are correctly handled", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ @@ -263,7 +263,7 @@ "description": "errors during the initial connection hello are ignored", "runOnRequirements": [ { - "minServerVersion": "4.9" + "minServerVersion": "4.4.7" } ], "operations": [ @@ -282,7 +282,7 @@ "isMaster", "hello" ], - "closeConnection": true, + "errorCode": 11600, "appName": "lbSDAMErrorTestClient" } } @@ -297,7 +297,7 @@ } }, "expectError": { - "isClientError": true + "isError": true } } ], diff --git a/test/spec/load-balancers/sdam-error-handling.yml b/test/spec/load-balancers/sdam-error-handling.yml index e3d6d6a251..c5a69339e3 100644 --- a/test/spec/load-balancers/sdam-error-handling.yml +++ b/test/spec/load-balancers/sdam-error-handling.yml @@ -1,6 +1,6 @@ description: state change errors are correctly handled -schemaVersion: '1.3' +schemaVersion: '1.4' runOnRequirements: - topologies: [ load-balanced ] @@ -141,9 +141,8 @@ tests: # to the same mongos on which the failpoint is set. - description: errors during the initial connection hello are ignored runOnRequirements: - # Server version 4.9+ is needed to set a fail point on the initial - # connection handshake with the appName filter due to SERVER-49336. - - minServerVersion: '4.9' + # Require SERVER-49336 for failCommand + appName on the initial handshake. 
+ - minServerVersion: '4.4.7' operations: - name: failPoint object: testRunner @@ -154,14 +153,14 @@ tests: mode: { times: 1 } data: failCommands: [isMaster, hello] - closeConnection: true + errorCode: 11600 appName: *singleClientAppName - name: insertOne object: *singleColl arguments: document: { x: 1 } expectError: - isClientError: true + isError: true expectEvents: - client: *singleClient eventType: cmap diff --git a/test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.json b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.json new file mode 100644 index 0000000000..f41b76459c --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.json @@ -0,0 +1,140 @@ +{ + "description": "backpressure-network-error-fail", + "schemaVersion": "1.17", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "backpressure-network-error-fail", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "apply backpressure on network connection errors during connection establishment", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatSucceededEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 1000000, + "serverMonitoringMode": "poll", + "appname": "backpressureNetworkErrorFailTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "backpressure-network-error-fail" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "appName": "backpressureNetworkErrorFailTest", + "closeConnection": true + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [] + } + ] + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.yml b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.yml new file mode 100644 index 0000000000..54e3030211 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-error-fail.yml @@ -0,0 +1,80 @@ +description: backpressure-network-error-fail +schemaVersion: "1.17" +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid + topologies: + - single + - replicaset + - sharded +createEntities: + - client: + id: setupClient + useMultipleMongoses: false +initialData: + - collectionName: backpressure-network-error-fail + databaseName: sdam-tests + documents: + - _id: 1 + - _id: 2 +tests: + - description: apply backpressure on network connection errors during connection establishment + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: client + useMultipleMongoses: false + observeEvents: + - 
serverHeartbeatSucceededEvent + - poolClearedEvent + uriOptions: + retryWrites: false + heartbeatFrequencyMS: 1000000 + serverMonitoringMode: poll + appname: backpressureNetworkErrorFailTest + - database: + id: database + client: client + databaseName: sdam-tests + - collection: + id: collection + database: database + collectionName: backpressure-network-error-fail + - name: waitForEvent + object: testRunner + arguments: + client: client + event: + serverHeartbeatSucceededEvent: {} + count: 1 + - name: failPoint + object: testRunner + arguments: + client: setupClient + failPoint: + configureFailPoint: failCommand + mode: alwaysOn + data: + failCommands: + - isMaster + - hello + appName: backpressureNetworkErrorFailTest + closeConnection: true + - name: insertMany + object: collection + arguments: + documents: + - _id: 3 + - _id: 4 + expectError: + isError: true + errorLabelsContain: + - SystemOverloadedError + - RetryableError + expectEvents: + - client: client + eventType: cmap + events: [] diff --git a/test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.json b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.json new file mode 100644 index 0000000000..a97c7a329f --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.json @@ -0,0 +1,143 @@ +{ + "description": "backpressure-network-timeout-error", + "schemaVersion": "1.17", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "backpressure-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "apply backpressure on network timeout error during 
connection establishment", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 1000000, + "appname": "backpressureNetworkTimeoutErrorTest", + "serverMonitoringMode": "poll", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "backpressure-network-timeout-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": {} + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "backpressureNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "SystemOverloadedError", + "RetryableError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [] + } + ] + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.yml b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.yml new file mode 100644 index 0000000000..6a61eba3ad --- /dev/null +++ 
b/test/spec/server-discovery-and-monitoring/unified/backpressure-network-timeout-fail.yml @@ -0,0 +1,83 @@ +description: backpressure-network-timeout-error +schemaVersion: "1.17" +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid + topologies: + - single + - replicaset + - sharded +createEntities: + - client: + id: setupClient + useMultipleMongoses: false +initialData: + - collectionName: backpressure-network-timeout-error + databaseName: sdam-tests + documents: + - _id: 1 + - _id: 2 +tests: + - description: apply backpressure on network timeout error during connection establishment + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: client + useMultipleMongoses: false + observeEvents: + - serverDescriptionChangedEvent + - poolClearedEvent + uriOptions: + retryWrites: false + heartbeatFrequencyMS: 1000000 + appname: backpressureNetworkTimeoutErrorTest + serverMonitoringMode: poll + connectTimeoutMS: 250 + socketTimeoutMS: 250 + - database: + id: database + client: client + databaseName: sdam-tests + - collection: + id: collection + database: database + collectionName: backpressure-network-timeout-error + - name: waitForEvent + object: testRunner + arguments: + client: client + event: + serverDescriptionChangedEvent: {} + count: 1 + - name: failPoint + object: testRunner + arguments: + client: setupClient + failPoint: + configureFailPoint: failCommand + mode: alwaysOn + data: + failCommands: + - isMaster + - hello + blockConnection: true + blockTimeMS: 500 + appName: backpressureNetworkTimeoutErrorTest + - name: insertMany + object: collection + arguments: + documents: + - _id: 3 + - _id: 4 + expectError: + isError: true + errorLabelsContain: + - SystemOverloadedError + - RetryableError + expectEvents: + - client: client + eventType: cmap + events: [] diff --git a/test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.json 
b/test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.json new file mode 100644 index 0000000000..35a49c1323 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.json @@ -0,0 +1,106 @@ +{ + "description": "backpressure-server-description-unchanged-on-min-pool-size-population-error", + "schemaVersion": "1.17", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "the server description is not changed on handshake error during minPoolSize population", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "serverDescriptionChangedEvent", + "connectionClosedEvent" + ], + "uriOptions": { + "appname": "authErrorTest", + "minPoolSize": 5, + "maxConnecting": 1, + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 1000000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 1 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "authErrorTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionClosedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "events": [ + { + 
"serverDescriptionChangedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.yml b/test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.yml new file mode 100644 index 0000000000..dd5029097d --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/unified/backpressure-server-description-unchanged-on-min-pool-size-population-error.yml @@ -0,0 +1,62 @@ +description: backpressure-server-description-unchanged-on-min-pool-size-population-error +schemaVersion: "1.17" +runOnRequirements: + - minServerVersion: "4.4" + serverless: forbid + topologies: + - single +createEntities: + - client: + id: setupClient + useMultipleMongoses: false +tests: + - description: the server description is not changed on handshake error during minPoolSize population + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: client + observeEvents: + - serverDescriptionChangedEvent + - connectionClosedEvent + uriOptions: + appname: authErrorTest + minPoolSize: 5 + maxConnecting: 1 + serverMonitoringMode: poll + heartbeatFrequencyMS: 1000000 + - name: failPoint + object: testRunner + arguments: + client: setupClient + failPoint: + configureFailPoint: failCommand + mode: + skip: 1 + data: + failCommands: + - hello + - isMaster + appName: authErrorTest + closeConnection: true + - name: waitForEvent + object: testRunner + arguments: + client: client + event: + serverDescriptionChangedEvent: {} + count: 1 + - name: waitForEvent + object: testRunner + arguments: + client: client + event: + connectionClosedEvent: {} + count: 1 + expectEvents: + - client: client + eventType: sdam + events: + - serverDescriptionChangedEvent: {} diff --git a/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.json 
b/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.json index 126ee54533..7e6c7c8df4 100644 --- a/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.json +++ b/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.json @@ -143,154 +143,6 @@ ] } ] - }, - { - "description": "Pool is cleared before connection is closed (handshake error)", - "runOnRequirements": [ - { - "topologies": [ - "single" - ] - } - ], - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "client": { - "id": "client", - "useMultipleMongoses": false, - "observeEvents": [ - "connectionCheckOutStartedEvent", - "poolClearedEvent", - "connectionClosedEvent", - "topologyDescriptionChangedEvent" - ], - "uriOptions": { - "retryWrites": false, - "appname": "authErrorTest", - "minPoolSize": 0, - "serverMonitoringMode": "poll", - "heartbeatFrequencyMS": 1000000 - } - } - }, - { - "database": { - "id": "database", - "client": "client", - "databaseName": "foo" - } - }, - { - "collection": { - "id": "collection", - "database": "database", - "collectionName": "bar" - } - } - ] - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "topologyDescriptionChangedEvent": { - "previousDescription": { - "type": "Unknown" - }, - "newDescription": { - "type": "Single" - } - } - }, - "count": 1 - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "authErrorTest", - "closeConnection": true - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "expectError": { - "isError": true - } - }, - { - "name": "waitForEvent", - 
"object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolClearedEvent": {} - }, - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "connectionClosedEvent": {} - }, - "count": 1 - } - } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "cmap", - "events": [ - { - "connectionCheckOutStartedEvent": {} - }, - { - "poolClearedEvent": {} - }, - { - "connectionClosedEvent": {} - } - ] - } - ] } ] } diff --git a/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.yml b/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.yml index 8df74b6a6f..5f7c48b521 100644 --- a/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.yml +++ b/test/spec/server-discovery-and-monitoring/unified/pool-clear-checkout-error.yml @@ -84,93 +84,3 @@ tests: - connectionCheckOutStartedEvent: {} - poolClearedEvent: {} - connectionClosedEvent: {} - - - description: Pool is cleared before connection is closed (handshake error) - runOnRequirements: - - topologies: [ single ] - operations: - - name: createEntities - object: testRunner - arguments: - entities: - - client: - id: &client client - useMultipleMongoses: false - observeEvents: - - connectionCheckOutStartedEvent - - poolClearedEvent - - connectionClosedEvent - - topologyDescriptionChangedEvent - uriOptions: - retryWrites: false - appname: authErrorTest - minPoolSize: 0 - # ensure that once we've connected to the server, the failCommand won't - # be triggered by monitors and will only be triggered by handshakes - serverMonitoringMode: poll - heartbeatFrequencyMS: 1000000 - - database: - id: &database database - client: *client - databaseName: foo - - collection: - id: &collection collection - database: *database - collectionName: bar - - name: waitForEvent - object: testRunner - arguments: - client: *client - event: - topologyDescriptionChangedEvent: - 
previousDescription: - type: "Unknown" - newDescription: - type: "Single" - count: 1 - - - name: failPoint - object: testRunner - arguments: - client: *setupClient - failPoint: - configureFailPoint: failCommand - mode: - times: 1 - data: - failCommands: - - hello - - isMaster - appName: authErrorTest - closeConnection: true - - - name: insertMany - object: *collection - arguments: - documents: - - _id: 3 - - _id: 4 - expectError: - isError: true - - name: waitForEvent - object: testRunner - arguments: - client: *client - event: - poolClearedEvent: {} - count: 1 - - name: waitForEvent - object: testRunner - arguments: - client: *client - event: - connectionClosedEvent: {} - count: 1 - expectEvents: - - client: *client - eventType: cmap - events: - - connectionCheckOutStartedEvent: {} - - poolClearedEvent: {} - - connectionClosedEvent: {} - diff --git a/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.json b/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.json index 11c6be5bc1..e36dd7aa61 100644 --- a/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.json +++ b/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.json @@ -109,122 +109,6 @@ ] } ] - }, - { - "description": "Pool is cleared on handshake error during minPoolSize population", - "operations": [ - { - "name": "createEntities", - "object": "testRunner", - "arguments": { - "entities": [ - { - "client": { - "id": "client", - "observeEvents": [ - "topologyDescriptionChangedEvent", - "connectionCreatedEvent", - "poolClearedEvent", - "connectionClosedEvent", - "connectionReadyEvent" - ], - "uriOptions": { - "appname": "authErrorTest", - "minPoolSize": 5, - "maxConnecting": 1, - "serverMonitoringMode": "poll", - "heartbeatFrequencyMS": 1000000 - } - } - } - ] - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - 
"topologyDescriptionChangedEvent": { - "previousDescription": { - "type": "Unknown" - }, - "newDescription": { - "type": "Single" - } - } - }, - "count": 1 - } - }, - { - "name": "failPoint", - "object": "testRunner", - "arguments": { - "client": "setupClient", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "authErrorTest", - "closeConnection": true - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "poolClearedEvent": {} - }, - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "client": "client", - "event": { - "connectionClosedEvent": {} - }, - "count": 1 - } - } - ], - "expectEvents": [ - { - "client": "client", - "eventType": "cmap", - "events": [ - { - "connectionCreatedEvent": {} - }, - { - "connectionReadyEvent": {} - }, - { - "connectionCreatedEvent": {} - }, - { - "poolClearedEvent": {} - }, - { - "connectionClosedEvent": {} - } - ] - } - ] } ] } diff --git a/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.yml b/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.yml index 7e7ef0c590..8b58f672d3 100644 --- a/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.yml +++ b/test/spec/server-discovery-and-monitoring/unified/pool-clear-min-pool-size-error.yml @@ -68,7 +68,7 @@ tests: - poolClearedEvent: {} - connectionClosedEvent: {} - - description: Pool is cleared on handshake error during minPoolSize population + - description: Pool is not cleared on handshake error during minPoolSize population operations: - name: createEntities object: testRunner @@ -118,13 +118,6 @@ tests: appName: authErrorTest closeConnection: true - - name: waitForEvent - object: testRunner - arguments: - client: *client - event: - poolClearedEvent: {} - count: 1 - 
name: waitForEvent object: testRunner arguments: @@ -139,6 +132,4 @@ tests: - connectionCreatedEvent: {} - connectionReadyEvent: {} - connectionCreatedEvent: {} - - poolClearedEvent: {} - connectionClosedEvent: {} - From 541a8e0280851d83bff6ea89831b707c3da0194a Mon Sep 17 00:00:00 2001 From: bailey Date: Wed, 26 Nov 2025 12:04:50 -0700 Subject: [PATCH 5/5] fix SDAM unit test behavior --- .../errors/error_handling_handshake.json | 16 ++- ...rver_discovery_and_monitoring.spec.test.ts | 125 ++++++++++++++---- 2 files changed, 108 insertions(+), 33 deletions(-) diff --git a/test/spec/server-discovery-and-monitoring/errors/error_handling_handshake.json b/test/spec/server-discovery-and-monitoring/errors/error_handling_handshake.json index 56ca7d1132..bf83f46f6a 100644 --- a/test/spec/server-discovery-and-monitoring/errors/error_handling_handshake.json +++ b/test/spec/server-discovery-and-monitoring/errors/error_handling_handshake.json @@ -97,14 +97,22 @@ "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "topologyVersion": null, + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, "pool": { - "generation": 1 + "generation": 0 } } }, - "topologyType": "ReplicaSetNoPrimary", + "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs" } diff --git a/test/unit/assorted/server_discovery_and_monitoring.spec.test.ts b/test/unit/assorted/server_discovery_and_monitoring.spec.test.ts index e84e70f711..c4c0e7ee05 100644 --- a/test/unit/assorted/server_discovery_and_monitoring.spec.test.ts +++ b/test/unit/assorted/server_discovery_and_monitoring.spec.test.ts @@ -4,9 +4,11 @@ import * as fs from 'fs'; import * as path from 'path'; import * as sinon from 'sinon'; +import { Connection } from '../../../src/cmap/connection'; import { ConnectionPool } from '../../../src/cmap/connection_pool'; import { HEARTBEAT_EVENTS, + 
LEGACY_HELLO_COMMAND, SERVER_CLOSED, SERVER_DESCRIPTION_CHANGED, SERVER_OPENING, @@ -37,6 +39,7 @@ import { import { Server } from '../../../src/sdam/server'; import { ServerDescription, type TopologyVersion } from '../../../src/sdam/server_description'; import { Topology } from '../../../src/sdam/topology'; +import { TimeoutContext } from '../../../src/timeout'; import { isRecord, ns, squashError } from '../../../src/utils'; import { ejson, fakeServer } from '../../tools/utils'; @@ -188,7 +191,7 @@ function assertMonitoringOutcome(outcome: any): asserts outcome is MonitoringOut expect(outcome).to.have.property('events').that.is.an('array'); } -describe('Server Discovery and Monitoring (spec)', function () { +describe('Server Discovery and Monitoring (spec)', function () { let serverConnect: sinon.SinonStub; before(() => { @@ -311,6 +314,89 @@ const SDAM_EVENTS = [ ...HEARTBEAT_EVENTS ]; +function checkoutStubImpl(appError: ApplicationError) { + return async function () { + const connectionPoolGeneration = this.generation; + const fakeConnection = { + generation: + typeof appError.generation === 'number' ? appError.generation : connectionPoolGeneration, + async command(_, __, ___) { + switch (appError.type) { + case 'network': + throw new MongoNetworkError('test generated'); + case 'timeout': + throw new MongoNetworkTimeoutError('xxx timed out'); + case 'command': + throw new MongoServerError(appError.response); + default: + throw new Error( + // @ts-expect-error `.type` is never, but we want to access it in this unreachable code to + // throw an error message. 
+ `SDAM unit test runner error: unexpected appError.type field: ${appError.type}` + ); + } + } + }; + return fakeConnection as any as Connection; + }; +} + +function stubConnectionEstablishment(appError: ApplicationError) { + const stubs = []; + if (appError.when === 'afterHandshakeCompletes') { + const checkOutStub = sinon + .stub(ConnectionPool.prototype, 'checkOut') + .callsFake(checkoutStubImpl(appError)); + stubs.push(checkOutStub); + return stubs; + } + + // eslint-disable-next-line @typescript-eslint/no-require-imports + const net: typeof import('net') = require('net'); + + const netStub = sinon.stub(net, 'createConnection'); + + netStub.callsFake(function createConnectionStub() { + const socket = new net.Socket(); + process.nextTick(() => socket.emit('connect')); + return socket; + }); + + stubs.push(netStub); + + class StubbedConnection extends Connection { + override command( + _ns: unknown, + command: Document, + _options?: unknown, + _responseType?: unknown + ): Promise { + if (command.hello || command[LEGACY_HELLO_COMMAND]) { + throw new MongoNetworkError(`error executing command`, { beforeHandshake: true }); + } + + throw new Error(`unexpected command: ${JSON.stringify(command)}`); + } + } + + // eslint-disable-next-line @typescript-eslint/no-require-imports + const connectionUtils: typeof import('../../../src/cmap/connect') = require('../../../src/cmap/connect'); + + const wrapped = sinon.stub(connectionUtils, 'connect').callsFake(async function connect(options) { + const generation = + typeof appError.generation === 'number' ? 
appError.generation : options.generation; + return wrapped.wrappedMethod({ + ...options, + generation, + connectionType: StubbedConnection + }); + }); + + stubs.push(wrapped); + + return stubs; +} + async function executeSDAMTest(testData: SDAMTest) { const client = new MongoClient(testData.uri); // listen for SDAM monitoring events @@ -337,20 +423,23 @@ async function executeSDAMTest(testData: SDAMTest) { // phase with applicationErrors simulating error's from network, timeouts, server for (const appError of phase.applicationErrors) { // Stub will return appError to SDAM machinery - const checkOutStub = sinon - .stub(ConnectionPool.prototype, 'checkOut') - .callsFake(checkoutStubImpl(appError)); + + const stubs = stubConnectionEstablishment(appError); const server = client.topology.s.servers.get(appError.address); // Run a dummy command to encounter the error - const res = server.command.bind(server)( - new RunCommandOperation(ns('admin.$cmd'), { ping: 1 }, {}) + const res = server.command( + new RunCommandOperation(ns('admin.$cmd'), { ping: 1 }, {}), + TimeoutContext.create({ + serverSelectionTimeoutMS: 30_000, + waitQueueTimeoutMS: 10_000 + }) ); const thrownError = await res.catch(error => error); // Restore the stub before asserting anything in case of errors - checkOutStub.restore(); + stubs.forEach(stub => stub.restore()); const isApplicationError = error => { // These errors all come from the withConnection stub @@ -412,28 +501,6 @@ async function executeSDAMTest(testData: SDAMTest) { } } -function checkoutStubImpl(appError) { - return async function () { - const connectionPoolGeneration = this.generation; - const fakeConnection = { - generation: - typeof appError.generation === 'number' ? 
appError.generation : connectionPoolGeneration, - async command(_, __, ___) { - if (appError.type === 'network') { - throw new MongoNetworkError('test generated'); - } else if (appError.type === 'timeout') { - throw new MongoNetworkTimeoutError('xxx timed out', { - beforeHandshake: appError.when === 'beforeHandshakeCompletes' - }); - } else { - throw new MongoServerError(appError.response); - } - } - }; - return fakeConnection; - }; -} - function assertTopologyDescriptionOutcomeExpectations( topology: Topology, outcome: TopologyDescriptionOutcome