diff --git a/.evergreen/run-resource-management-feature-integration.sh b/.evergreen/run-resource-management-feature-integration.sh index 093a4749d78..3c6918b8d81 100644 --- a/.evergreen/run-resource-management-feature-integration.sh +++ b/.evergreen/run-resource-management-feature-integration.sh @@ -2,6 +2,9 @@ source $DRIVERS_TOOLS/.evergreen/init-node-and-npm-env.sh +echo "node: $(node --version)" +echo "npm: $(npm --version)" + echo "Building driver..." npm pack echo "Building driver...finished." diff --git a/etc/notes/errors.md b/etc/notes/errors.md index d0f8e6b6e95..9cebe037e89 100644 --- a/etc/notes/errors.md +++ b/etc/notes/errors.md @@ -67,7 +67,7 @@ Children of `MongoError` include: ### `MongoDriverError` This class represents errors which originate in the driver itself or when the user incorrectly uses the driver. This class should **never** be directly instantiated. -Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError) and [**`MongoRuntimeError`**](#MongoRuntimeError). +Its children are the main classes of errors that most users will interact with: [**`MongoAPIError`**](#MongoAPIError), [**`MongoRuntimeError`**](#MongoRuntimeError) and [**`MongoOperationTimeoutError`**](#MongoOperationTimeoutError). ### `MongoAPIError` @@ -109,6 +109,12 @@ This class should **never** be directly instantiated. | **MongoGridFSChunkError** | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. | | **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. | +### `MongoOperationTimeoutError` + +The `MongoOperationTimeoutError` class represents an error that occurs when an operation could not be completed within the specified `timeoutMS`. +It is generated by the driver in support of the "client side operation timeout" feature and inherits from `MongoDriverError`. 
+When `timeoutMS` is enabled, `MongoServerErrors` relating to `MaxTimeExpired` errors will be converted to `MongoOperationTimeoutError`. + ### MongoUnexpectedServerResponseError Intended for the scenario where the MongoDB returns an unexpected response in relation to some state the driver is in. diff --git a/package-lock.json b/package-lock.json index 2cf1d4cb671..c67887fe6cd 100644 --- a/package-lock.json +++ b/package-lock.json @@ -49,7 +49,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.3", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", @@ -6415,10 +6415,46 @@ "node": ">=10" } }, - "node_modules/mongodb": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.8.0.tgz", - "integrity": "sha512-HGQ9NWDle5WvwMnrvUxsFYPd3JEbqD3RgABHBQRuoCEND0qzhsd0iH5ypHsf1eJ+sXmvmyKpP+FLOKY8Il7jMw==", + "node_modules/mongodb-client-encryption": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", + "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", + "dev": true, + "hasInstallScript": true, + "dependencies": { + "bindings": "^1.5.0", + "node-addon-api": "^4.3.0", + "prebuild-install": "^7.1.2" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-connection-string-url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", + "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", + "dependencies": { + "@types/whatwg-url": "^11.0.2", + "whatwg-url": "^13.0.0" + } + }, + "node_modules/mongodb-legacy": { + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.3.tgz", + "integrity": 
"sha512-XJ2PIbVEHUUF4/SyH00dfeprfeLOdWiHcKq8At+JoEZeTue+IAG39G2ixRwClnI7roPb/46K8IF713v9dgQ8rg==", + "dev": true, + "dependencies": { + "mongodb": "^6.0.0" + }, + "engines": { + "node": ">=16.20.1" + } + }, + "node_modules/mongodb-legacy/node_modules/mongodb": { + "version": "6.7.0", + "resolved": "https://registry.npmjs.org/mongodb/-/mongodb-6.7.0.tgz", + "integrity": "sha512-TMKyHdtMcO0fYBNORiYdmM25ijsHs+Njs963r4Tro4OQZzqYigAzYQouwWRg4OIaiLRUEGUh/1UAcH5lxdSLIA==", "dev": true, "dependencies": { "@mongodb-js/saslprep": "^1.1.5", @@ -6461,42 +6497,6 @@ } } }, - "node_modules/mongodb-client-encryption": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/mongodb-client-encryption/-/mongodb-client-encryption-6.1.0.tgz", - "integrity": "sha512-Y3Hakre82nXD/pNDUzBjxfgwWSj5E1ar9ZLkqyXDfvirv4huHMbg8Q2qVO/TXlNJuf1B2bzrEDXsTqHKQSQLtw==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "bindings": "^1.5.0", - "node-addon-api": "^4.3.0", - "prebuild-install": "^7.1.2" - }, - "engines": { - "node": ">=16.20.1" - } - }, - "node_modules/mongodb-connection-string-url": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mongodb-connection-string-url/-/mongodb-connection-string-url-3.0.1.tgz", - "integrity": "sha512-XqMGwRX0Lgn05TDB4PyG2h2kKO/FfWJyCzYQbIhXUxz7ETt0I/FqHjUeqj37irJ+Dl1ZtU82uYyj14u2XsZKfg==", - "dependencies": { - "@types/whatwg-url": "^11.0.2", - "whatwg-url": "^13.0.0" - } - }, - "node_modules/mongodb-legacy": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mongodb-legacy/-/mongodb-legacy-6.1.1.tgz", - "integrity": "sha512-u9Cl8UEzdtf7mhWrAEHHhfU0OCqahaOB5midwtyudWIuEz5t18DJFXfqJq3cbEypVfLkfF3zi6rkolKMU9uPjQ==", - "dev": true, - "dependencies": { - "mongodb": "^6.0.0" - }, - "engines": { - "node": ">=16.20.1" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", diff --git a/package.json b/package.json index d8b9f5945bf..5e76162eb81 100644 --- 
a/package.json +++ b/package.json @@ -97,7 +97,7 @@ "mocha": "^10.4.0", "mocha-sinon": "^2.1.2", "mongodb-client-encryption": "^6.1.0", - "mongodb-legacy": "^6.1.0", + "mongodb-legacy": "^6.1.3", "nyc": "^15.1.0", "prettier": "^3.3.3", "semver": "^7.6.3", diff --git a/src/admin.ts b/src/admin.ts index a71ac4be1dc..0f03023a95c 100644 --- a/src/admin.ts +++ b/src/admin.ts @@ -78,7 +78,8 @@ export class Admin { new RunAdminCommandOperation(command, { ...resolveBSONOptions(options), session: options?.session, - readPreference: options?.readPreference + readPreference: options?.readPreference, + timeoutMS: options?.timeoutMS ?? this.s.db.timeoutMS }) ); } @@ -154,7 +155,10 @@ export class Admin { * @param options - Optional settings for the command */ async listDatabases(options?: ListDatabasesOptions): Promise { - return await executeOperation(this.s.db.client, new ListDatabasesOperation(this.s.db, options)); + return await executeOperation( + this.s.db.client, + new ListDatabasesOperation(this.s.db, { timeoutMS: this.s.db.timeoutMS, ...options }) + ); } /** diff --git a/src/bulk/common.ts b/src/bulk/common.ts index a62d62a4a5c..22012207a09 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -19,6 +19,7 @@ import { makeUpdateStatement, UpdateOperation, type UpdateStatement } from '../o import type { Server } from '../sdam/server'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { applyRetryableWrites, getTopology, @@ -500,7 +501,7 @@ export function mergeBatchResults( async function executeCommands( bulkOperation: BulkOperationBase, - options: BulkWriteOptions + options: BulkWriteOptions & { timeoutContext?: TimeoutContext | null } ): Promise { if (bulkOperation.s.batches.length === 0) { return new BulkWriteResult(bulkOperation.s.bulkResult, bulkOperation.isOrdered); @@ -551,7 +552,11 @@ async function executeCommands( let thrownError = null; let 
result; try { - result = await executeOperation(bulkOperation.s.collection.client, operation); + result = await executeOperation( + bulkOperation.s.collection.client, + operation, + finalOptions.timeoutContext + ); } catch (error) { thrownError = error; } @@ -842,6 +847,9 @@ export interface BulkWriteOptions extends CommandOperationOptions { forceServerObjectId?: boolean; /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */ let?: Document; + + /** @internal */ + timeoutContext?: TimeoutContext; } /** @@ -862,7 +870,11 @@ export class BulkWriteShimOperation extends AbstractOperation { return 'bulkWrite' as const; } - async execute(_server: Server, session: ClientSession | undefined): Promise { + async execute( + _server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (this.options.session == null) { // An implicit session could have been created by 'executeOperation' // So if we stick it on finalOptions here, each bulk operation @@ -870,7 +882,7 @@ export class BulkWriteShimOperation extends AbstractOperation { // an explicit session would be this.options.session = session; } - return await executeCommands(this.bulkOperation, this.options); + return await executeCommands(this.bulkOperation, { ...this.options, timeoutContext }); } } @@ -1199,7 +1211,7 @@ export abstract class BulkOperationBase { const finalOptions = { ...this.s.options, ...options }; const operation = new BulkWriteShimOperation(this, finalOptions); - return await executeOperation(this.s.collection.client, operation); + return await executeOperation(this.s.collection.client, operation, finalOptions.timeoutContext); } /** diff --git a/src/change_stream.ts b/src/change_stream.ts index 34f92a4477c..c7b21b7a202 100644 --- a/src/change_stream.ts +++ b/src/change_stream.ts @@ -3,7 +3,7 @@ import type { Readable } from 'stream'; import type { Binary, Document, Timestamp } from './bson'; import { Collection } from 
'./collection'; import { CHANGE, CLOSE, END, ERROR, INIT, MORE, RESPONSE, RESUME_TOKEN_CHANGED } from './constants'; -import type { AbstractCursorEvents, CursorStreamOptions } from './cursor/abstract_cursor'; +import { type CursorStreamOptions, CursorTimeoutContext } from './cursor/abstract_cursor'; import { ChangeStreamCursor, type ChangeStreamCursorOptions } from './cursor/change_stream_cursor'; import { Db } from './db'; import { @@ -11,6 +11,7 @@ import { isResumableError, MongoAPIError, MongoChangeStreamError, + MongoOperationTimeoutError, MongoRuntimeError } from './error'; import { MongoClient } from './mongo_client'; @@ -20,6 +21,7 @@ import type { CollationOptions, OperationParent } from './operations/command'; import type { ReadPreference } from './read_preference'; import { type AsyncDisposable, configureResourceManagement } from './resource_management'; import type { ServerSessionId } from './sessions'; +import { CSOTTimeoutContext, type TimeoutContext } from './timeout'; import { filterOptions, getTopology, type MongoDBNamespace, squashError } from './utils'; /** @internal */ @@ -538,7 +540,14 @@ export type ChangeStreamEvents< end(): void; error(error: Error): void; change(change: TChange): void; -} & AbstractCursorEvents; + /** + * @remarks Note that the `close` event is currently emitted whenever the internal `ChangeStreamCursor` + * instance is closed, which can occur multiple times for a given `ChangeStream` instance. + * + * TODO(NODE-6434): address this issue in NODE-6434 + */ + close(): void; +}; /** * Creates a new Change Stream instance. Normally created using {@link Collection#watch|Collection.watch()}. 
@@ -609,6 +618,13 @@ export class ChangeStream< */ static readonly RESUME_TOKEN_CHANGED = RESUME_TOKEN_CHANGED; + private timeoutContext?: TimeoutContext; + /** + * Note that this property is here to uniquely identify a ChangeStream instance as the owner of + * the {@link CursorTimeoutContext} instance (see {@link ChangeStream._createChangeStreamCursor}) to ensure + * that {@link AbstractCursor.close} does not mutate the timeoutContext. + */ + private contextOwner: symbol; /** * @internal * @@ -624,20 +640,25 @@ export class ChangeStream< this.pipeline = pipeline; this.options = { ...options }; + let serverSelectionTimeoutMS: number; delete this.options.writeConcern; if (parent instanceof Collection) { this.type = CHANGE_DOMAIN_TYPES.COLLECTION; + serverSelectionTimeoutMS = parent.s.db.client.options.serverSelectionTimeoutMS; } else if (parent instanceof Db) { this.type = CHANGE_DOMAIN_TYPES.DATABASE; + serverSelectionTimeoutMS = parent.client.options.serverSelectionTimeoutMS; } else if (parent instanceof MongoClient) { this.type = CHANGE_DOMAIN_TYPES.CLUSTER; + serverSelectionTimeoutMS = parent.options.serverSelectionTimeoutMS; } else { throw new MongoChangeStreamError( 'Parent provided to ChangeStream constructor must be an instance of Collection, Db, or MongoClient' ); } + this.contextOwner = Symbol(); this.parent = parent; this.namespace = parent.s.namespace; if (!this.options.readPreference && parent.readPreference) { @@ -662,6 +683,13 @@ export class ChangeStream< this[kCursorStream]?.removeAllListeners('data'); } }); + + if (this.options.timeoutMS != null) { + this.timeoutContext = new CSOTTimeoutContext({ + timeoutMS: this.options.timeoutMS, + serverSelectionTimeoutMS + }); + } } /** @internal */ @@ -681,22 +709,30 @@ export class ChangeStream< // This loop continues until either a change event is received or until a resume attempt // fails. 
- while (true) { - try { - const hasNext = await this.cursor.hasNext(); - return hasNext; - } catch (error) { + this.timeoutContext?.refresh(); + try { + while (true) { try { - await this._processErrorIteratorMode(error); + const hasNext = await this.cursor.hasNext(); + return hasNext; } catch (error) { try { - await this.close(); + await this._processErrorIteratorMode(error, this.cursor.id != null); } catch (error) { - squashError(error); + if (error instanceof MongoOperationTimeoutError && this.cursor.id == null) { + throw error; + } + try { + await this.close(); + } catch (error) { + squashError(error); + } + throw error; } - throw error; } } + } finally { + this.timeoutContext?.clear(); } } @@ -706,24 +742,32 @@ export class ChangeStream< // Change streams must resume indefinitely while each resume event succeeds. // This loop continues until either a change event is received or until a resume attempt // fails. + this.timeoutContext?.refresh(); - while (true) { - try { - const change = await this.cursor.next(); - const processedChange = this._processChange(change ?? null); - return processedChange; - } catch (error) { + try { + while (true) { try { - await this._processErrorIteratorMode(error); + const change = await this.cursor.next(); + const processedChange = this._processChange(change ?? null); + return processedChange; } catch (error) { try { - await this.close(); + await this._processErrorIteratorMode(error, this.cursor.id != null); } catch (error) { - squashError(error); + if (error instanceof MongoOperationTimeoutError && this.cursor.id == null) { + throw error; + } + try { + await this.close(); + } catch (error) { + squashError(error); + } + throw error; } - throw error; } } + } finally { + this.timeoutContext?.clear(); } } @@ -735,23 +779,29 @@ export class ChangeStream< // Change streams must resume indefinitely while each resume event succeeds. // This loop continues until either a change event is received or until a resume attempt // fails. 
+ this.timeoutContext?.refresh(); - while (true) { - try { - const change = await this.cursor.tryNext(); - return change ?? null; - } catch (error) { + try { + while (true) { try { - await this._processErrorIteratorMode(error); + const change = await this.cursor.tryNext(); + return change ?? null; } catch (error) { try { - await this.close(); + await this._processErrorIteratorMode(error, this.cursor.id != null); } catch (error) { - squashError(error); + if (error instanceof MongoOperationTimeoutError && this.cursor.id == null) throw error; + try { + await this.close(); + } catch (error) { + squashError(error); + } + throw error; } - throw error; } } + } finally { + this.timeoutContext?.clear(); } } @@ -784,6 +834,8 @@ export class ChangeStream< * Frees the internal resources used by the change stream. */ async close(): Promise { + this.timeoutContext?.clear(); + this.timeoutContext = undefined; this[kClosed] = true; const cursor = this.cursor; @@ -866,7 +918,12 @@ export class ChangeStream< client, this.namespace, pipeline, - options + { + ...options, + timeoutContext: this.timeoutContext + ? new CursorTimeoutContext(this.timeoutContext, this.contextOwner) + : undefined + } ); for (const event of CHANGE_STREAM_EVENTS) { @@ -899,8 +956,9 @@ export class ChangeStream< } catch (error) { this.emit(ChangeStream.ERROR, error); } + this.timeoutContext?.refresh(); }); - stream.on('error', error => this._processErrorStreamMode(error)); + stream.on('error', error => this._processErrorStreamMode(error, this.cursor.id != null)); } /** @internal */ @@ -942,24 +1000,30 @@ export class ChangeStream< } /** @internal */ - private _processErrorStreamMode(changeStreamError: AnyError) { + private _processErrorStreamMode(changeStreamError: AnyError, cursorInitialized: boolean) { // If the change stream has been closed explicitly, do not process error. 
if (this[kClosed]) return; - if (this.cursor.id != null && isResumableError(changeStreamError, this.cursor.maxWireVersion)) { + if ( + cursorInitialized && + (isResumableError(changeStreamError, this.cursor.maxWireVersion) || + changeStreamError instanceof MongoOperationTimeoutError) + ) { this._endStream(); - this.cursor.close().then(undefined, squashError); - - const topology = getTopology(this.parent); - topology - .selectServer(this.cursor.readPreference, { - operationName: 'reconnect topology in change stream' - }) - + this.cursor + .close() + .then( + () => this._resume(changeStreamError), + e => { + squashError(e); + return this._resume(changeStreamError); + } + ) .then( () => { - this.cursor = this._createChangeStreamCursor(this.cursor.resumeOptions); + if (changeStreamError instanceof MongoOperationTimeoutError) + this.emit(ChangeStream.ERROR, changeStreamError); }, () => this._closeEmitterModeWithError(changeStreamError) ); @@ -969,33 +1033,44 @@ export class ChangeStream< } /** @internal */ - private async _processErrorIteratorMode(changeStreamError: AnyError) { + private async _processErrorIteratorMode(changeStreamError: AnyError, cursorInitialized: boolean) { if (this[kClosed]) { // TODO(NODE-3485): Replace with MongoChangeStreamClosedError throw new MongoAPIError(CHANGESTREAM_CLOSED_ERROR); } if ( - this.cursor.id == null || - !isResumableError(changeStreamError, this.cursor.maxWireVersion) + cursorInitialized && + (isResumableError(changeStreamError, this.cursor.maxWireVersion) || + changeStreamError instanceof MongoOperationTimeoutError) ) { + try { + await this.cursor.close(); + } catch (error) { + squashError(error); + } + + await this._resume(changeStreamError); + + if (changeStreamError instanceof MongoOperationTimeoutError) throw changeStreamError; + } else { try { await this.close(); } catch (error) { squashError(error); } + throw changeStreamError; } + } - try { - await this.cursor.close(); - } catch (error) { - squashError(error); - } + 
private async _resume(changeStreamError: AnyError) { + this.timeoutContext?.refresh(); const topology = getTopology(this.parent); try { await topology.selectServer(this.cursor.readPreference, { - operationName: 'reconnect topology in change stream' + operationName: 'reconnect topology in change stream', + timeoutContext: this.timeoutContext }); this.cursor = this._createChangeStreamCursor(this.cursor.resumeOptions); } catch { diff --git a/src/client-side-encryption/auto_encrypter.ts b/src/client-side-encryption/auto_encrypter.ts index 5ac3945f5e4..edf731b92ac 100644 --- a/src/client-side-encryption/auto_encrypter.ts +++ b/src/client-side-encryption/auto_encrypter.ts @@ -310,7 +310,10 @@ export class AutoEncrypter { // eslint-disable-next-line @typescript-eslint/ban-ts-comment // @ts-ignore: TS complains as this always returns true on versions where it is present. if (net.getDefaultAutoSelectFamily) { - Object.assign(clientOptions, autoSelectSocketOptions(this._client.options)); + // AutoEncrypter is made inside of MongoClient constructor while options are being parsed, + // we do not have access to the options that are in progress. + // TODO(NODE-6449): AutoEncrypter does not use client options for autoSelectFamily + Object.assign(clientOptions, autoSelectSocketOptions(this._client.s?.options ?? 
{})); } this._mongocryptdClient = new MongoClient(this._mongocryptdManager.uri, clientOptions); @@ -392,10 +395,10 @@ export class AutoEncrypter { promoteLongs: false, proxyOptions: this._proxyOptions, tlsOptions: this._tlsOptions, - socketOptions: autoSelectSocketOptions(this._client.options) + socketOptions: autoSelectSocketOptions(this._client.s.options) }); - return deserialize(await stateMachine.execute(this, context), { + return deserialize(await stateMachine.execute(this, context, options.timeoutContext), { promoteValues: false, promoteLongs: false }); @@ -413,10 +416,14 @@ export class AutoEncrypter { ...options, proxyOptions: this._proxyOptions, tlsOptions: this._tlsOptions, - socketOptions: autoSelectSocketOptions(this._client.options) + socketOptions: autoSelectSocketOptions(this._client.s.options) }); - return await stateMachine.execute(this, context); + return await stateMachine.execute( + this, + context, + options.timeoutContext?.csotEnabled() ? options.timeoutContext : undefined + ); } /** diff --git a/src/client-side-encryption/client_encryption.ts b/src/client-side-encryption/client_encryption.ts index ca62b5d2393..7482c513d37 100644 --- a/src/client-side-encryption/client_encryption.ts +++ b/src/client-side-encryption/client_encryption.ts @@ -24,7 +24,8 @@ import { type MongoClient, type MongoClientOptions } from '../mongo_client'; import { type Filter, type WithId } from '../mongo_types'; import { type CreateCollectionOptions } from '../operations/create_collection'; import { type DeleteResult } from '../operations/delete'; -import { MongoDBCollectionNamespace } from '../utils'; +import { type CSOTTimeoutContext, TimeoutContext } from '../timeout'; +import { MongoDBCollectionNamespace, resolveTimeoutOptions } from '../utils'; import * as cryptoCallbacks from './crypto_callbacks'; import { MongoCryptCreateDataKeyError, @@ -74,6 +75,8 @@ export class ClientEncryption { _tlsOptions: CSFLEKMSTlsOptions; /** @internal */ _kmsProviders: KMSProviders; 
+ /** @internal */ + _timeoutMS?: number; /** @internal */ _mongoCrypt: MongoCrypt; @@ -120,6 +123,8 @@ export class ClientEncryption { this._proxyOptions = options.proxyOptions ?? {}; this._tlsOptions = options.tlsOptions ?? {}; this._kmsProviders = options.kmsProviders || {}; + const { timeoutMS } = resolveTimeoutOptions(client, options); + this._timeoutMS = timeoutMS; if (options.keyVaultNamespace == null) { throw new MongoCryptInvalidArgumentError('Missing required option `keyVaultNamespace`'); @@ -212,10 +217,16 @@ export class ClientEncryption { const stateMachine = new StateMachine({ proxyOptions: this._proxyOptions, tlsOptions: this._tlsOptions, - socketOptions: autoSelectSocketOptions(this._client.options) + socketOptions: autoSelectSocketOptions(this._client.s.options) }); - const dataKey = deserialize(await stateMachine.execute(this, context)) as DataKey; + const timeoutContext = + options?.timeoutContext ?? + TimeoutContext.create(resolveTimeoutOptions(this._client, { timeoutMS: this._timeoutMS })); + + const dataKey = deserialize( + await stateMachine.execute(this, context, timeoutContext) + ) as DataKey; const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString( this._keyVaultNamespace @@ -224,7 +235,12 @@ export class ClientEncryption { const { insertedId } = await this._keyVaultClient .db(dbName) .collection(collectionName) - .insertOne(dataKey, { writeConcern: { w: 'majority' } }); + .insertOne(dataKey, { + writeConcern: { w: 'majority' }, + timeoutMS: timeoutContext?.csotEnabled() + ? 
timeoutContext?.getRemainingTimeMSOrThrow() + : undefined + }); return insertedId; } @@ -270,10 +286,14 @@ export class ClientEncryption { const stateMachine = new StateMachine({ proxyOptions: this._proxyOptions, tlsOptions: this._tlsOptions, - socketOptions: autoSelectSocketOptions(this._client.options) + socketOptions: autoSelectSocketOptions(this._client.s.options) }); - const { v: dataKeys } = deserialize(await stateMachine.execute(this, context)); + const timeoutContext = TimeoutContext.create( + resolveTimeoutOptions(this._client, { timeoutMS: this._timeoutMS }) + ); + + const { v: dataKeys } = deserialize(await stateMachine.execute(this, context, timeoutContext)); if (dataKeys.length === 0) { return {}; } @@ -303,7 +323,8 @@ export class ClientEncryption { .db(dbName) .collection(collectionName) .bulkWrite(replacements, { - writeConcern: { w: 'majority' } + writeConcern: { w: 'majority' }, + timeoutMS: timeoutContext.csotEnabled() ? timeoutContext?.remainingTimeMS : undefined }); return { bulkWriteResult: result }; @@ -332,7 +353,7 @@ export class ClientEncryption { return await this._keyVaultClient .db(dbName) .collection(collectionName) - .deleteOne({ _id }, { writeConcern: { w: 'majority' } }); + .deleteOne({ _id }, { writeConcern: { w: 'majority' }, timeoutMS: this._timeoutMS }); } /** @@ -355,7 +376,7 @@ export class ClientEncryption { return this._keyVaultClient .db(dbName) .collection(collectionName) - .find({}, { readConcern: { level: 'majority' } }); + .find({}, { readConcern: { level: 'majority' }, timeoutMS: this._timeoutMS }); } /** @@ -381,7 +402,7 @@ export class ClientEncryption { return await this._keyVaultClient .db(dbName) .collection(collectionName) - .findOne({ _id }, { readConcern: { level: 'majority' } }); + .findOne({ _id }, { readConcern: { level: 'majority' }, timeoutMS: this._timeoutMS }); } /** @@ -408,7 +429,10 @@ export class ClientEncryption { return await this._keyVaultClient .db(dbName) .collection(collectionName) - .findOne({ 
keyAltNames: keyAltName }, { readConcern: { level: 'majority' } }); + .findOne( + { keyAltNames: keyAltName }, + { readConcern: { level: 'majority' }, timeoutMS: this._timeoutMS } + ); } /** @@ -442,7 +466,7 @@ export class ClientEncryption { .findOneAndUpdate( { _id }, { $addToSet: { keyAltNames: keyAltName } }, - { writeConcern: { w: 'majority' }, returnDocument: 'before' } + { writeConcern: { w: 'majority' }, returnDocument: 'before', timeoutMS: this._timeoutMS } ); return value; @@ -498,12 +522,14 @@ export class ClientEncryption { } } ]; + const value = await this._keyVaultClient .db(dbName) .collection(collectionName) .findOneAndUpdate({ _id }, pipeline, { writeConcern: { w: 'majority' }, - returnDocument: 'before' + returnDocument: 'before', + timeoutMS: this._timeoutMS }); return value; @@ -541,16 +567,25 @@ export class ClientEncryption { } } = options; + const timeoutContext = + this._timeoutMS != null + ? TimeoutContext.create(resolveTimeoutOptions(this._client, { timeoutMS: this._timeoutMS })) + : undefined; + if (Array.isArray(encryptedFields.fields)) { const createDataKeyPromises = encryptedFields.fields.map(async field => field == null || typeof field !== 'object' || field.keyId != null ? field : { ...field, - keyId: await this.createDataKey(provider, { masterKey }) + keyId: await this.createDataKey(provider, { + masterKey, + // clone the timeoutContext + // in order to avoid sharing the same timeout for server selection and connection checkout across different concurrent operations + timeoutContext: timeoutContext?.csotEnabled() ? 
timeoutContext?.clone() : undefined + }) } ); - const createDataKeyResolutions = await Promise.allSettled(createDataKeyPromises); encryptedFields.fields = createDataKeyResolutions.map((resolution, index) => @@ -568,7 +603,10 @@ export class ClientEncryption { try { const collection = await db.createCollection(name, { ...createCollectionOptions, - encryptedFields + encryptedFields, + timeoutMS: timeoutContext?.csotEnabled() + ? timeoutContext?.getRemainingTimeMSOrThrow() + : undefined }); return { collection, encryptedFields }; } catch (cause) { @@ -650,10 +688,15 @@ export class ClientEncryption { const stateMachine = new StateMachine({ proxyOptions: this._proxyOptions, tlsOptions: this._tlsOptions, - socketOptions: autoSelectSocketOptions(this._client.options) + socketOptions: autoSelectSocketOptions(this._client.s.options) }); - const { v } = deserialize(await stateMachine.execute(this, context)); + const timeoutContext = + this._timeoutMS != null + ? TimeoutContext.create(resolveTimeoutOptions(this._client, { timeoutMS: this._timeoutMS })) + : undefined; + + const { v } = deserialize(await stateMachine.execute(this, context, timeoutContext)); return v; } @@ -729,11 +772,15 @@ export class ClientEncryption { const stateMachine = new StateMachine({ proxyOptions: this._proxyOptions, tlsOptions: this._tlsOptions, - socketOptions: autoSelectSocketOptions(this._client.options) + socketOptions: autoSelectSocketOptions(this._client.s.options) }); const context = this._mongoCrypt.makeExplicitEncryptionContext(valueBuffer, contextOptions); - const { v } = deserialize(await stateMachine.execute(this, context)); + const timeoutContext = + this._timeoutMS != null + ? TimeoutContext.create(resolveTimeoutOptions(this._client, { timeoutMS: this._timeoutMS })) + : undefined; + const { v } = deserialize(await stateMachine.execute(this, context, timeoutContext)); return v; } } @@ -818,6 +865,39 @@ export interface ClientEncryptionOptions { * TLS options for kms providers to use. 
*/ tlsOptions?: CSFLEKMSTlsOptions; + + /** + * @experimental + * + * The timeout setting to be used for all the operations on ClientEncryption. + * + * When provided, `timeoutMS` is used as the timeout for each operation executed on + * the ClientEncryption object. For example: + * + * ```typescript + * const clientEncryption = new ClientEncryption(client, { + * timeoutMS: 1_000 + * kmsProviders: { local: { key: '' } } + * }); + * + * // `1_000` is used as the timeout for createDataKey call + * await clientEncryption.createDataKey('local'); + * ``` + * + * If `timeoutMS` is configured on the provided client, the client's `timeoutMS` value + * will be used unless `timeoutMS` is also provided as a client encryption option. + * + * ```typescript + * const client = new MongoClient('', { timeoutMS: 2_000 }); + * + * // timeoutMS is set to 1_000 on clientEncryption + * const clientEncryption = new ClientEncryption(client, { + * timeoutMS: 1_000 + * kmsProviders: { local: { key: '' } } + * }); + * ``` + */ + timeoutMS?: number; } /** @@ -946,6 +1026,9 @@ export interface ClientEncryptionCreateDataKeyProviderOptions { /** @experimental */ keyMaterial?: Buffer | Binary; + + /** @internal */ + timeoutContext?: CSOTTimeoutContext; } /** diff --git a/src/client-side-encryption/state_machine.ts b/src/client-side-encryption/state_machine.ts index af3ea4c215d..d10776abe73 100644 --- a/src/client-side-encryption/state_machine.ts +++ b/src/client-side-encryption/state_machine.ts @@ -11,8 +11,11 @@ import { serialize } from '../bson'; import { type ProxyOptions } from '../cmap/connection'; +import { CursorTimeoutContext } from '../cursor/abstract_cursor'; import { getSocks, type SocksLib } from '../deps'; +import { MongoOperationTimeoutError } from '../error'; import { type MongoClient, type MongoClientOptions } from '../mongo_client'; +import { Timeout, type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, MongoDBCollectionNamespace, promiseWithResolvers } 
from '../utils'; import { autoSelectSocketOptions, type DataKey } from './client_encryption'; import { MongoCryptError } from './errors'; @@ -173,6 +176,7 @@ export type StateMachineOptions = { * An internal class that executes across a MongoCryptContext until either * a finishing state or an error is reached. Do not instantiate directly. */ +// TODO(DRIVERS-2671): clarify CSOT behavior for FLE APIs export class StateMachine { constructor( private options: StateMachineOptions, @@ -182,7 +186,11 @@ export class StateMachine { /** * Executes the state machine according to the specification */ - async execute(executor: StateMachineExecutable, context: MongoCryptContext): Promise { + async execute( + executor: StateMachineExecutable, + context: MongoCryptContext, + timeoutContext?: TimeoutContext + ): Promise { const keyVaultNamespace = executor._keyVaultNamespace; const keyVaultClient = executor._keyVaultClient; const metaDataClient = executor._metaDataClient; @@ -201,8 +209,13 @@ export class StateMachine { 'unreachable state machine state: entered MONGOCRYPT_CTX_NEED_MONGO_COLLINFO but metadata client is undefined' ); } - const collInfo = await this.fetchCollectionInfo(metaDataClient, context.ns, filter); + const collInfo = await this.fetchCollectionInfo( + metaDataClient, + context.ns, + filter, + timeoutContext + ); if (collInfo) { context.addMongoOperationResponse(collInfo); } @@ -222,9 +235,9 @@ export class StateMachine { // When we are using the shared library, we don't have a mongocryptd manager. const markedCommand: Uint8Array = mongocryptdManager ? 
await mongocryptdManager.withRespawn( - this.markCommand.bind(this, mongocryptdClient, context.ns, command) + this.markCommand.bind(this, mongocryptdClient, context.ns, command, timeoutContext) ) - : await this.markCommand(mongocryptdClient, context.ns, command); + : await this.markCommand(mongocryptdClient, context.ns, command, timeoutContext); context.addMongoOperationResponse(markedCommand); context.finishMongoOperation(); @@ -233,7 +246,12 @@ export class StateMachine { case MONGOCRYPT_CTX_NEED_MONGO_KEYS: { const filter = context.nextMongoOperation(); - const keys = await this.fetchKeys(keyVaultClient, keyVaultNamespace, filter); + const keys = await this.fetchKeys( + keyVaultClient, + keyVaultNamespace, + filter, + timeoutContext + ); if (keys.length === 0) { // See docs on EMPTY_V @@ -255,9 +273,7 @@ export class StateMachine { } case MONGOCRYPT_CTX_NEED_KMS: { - const requests = Array.from(this.requests(context)); - await Promise.all(requests); - + await Promise.all(this.requests(context, timeoutContext)); context.finishKMSRequests(); break; } @@ -299,7 +315,7 @@ export class StateMachine { * @param kmsContext - A C++ KMS context returned from the bindings * @returns A promise that resolves when the KMS reply has be fully parsed */ - async kmsRequest(request: MongoCryptKMSRequest): Promise { + async kmsRequest(request: MongoCryptKMSRequest, timeoutContext?: TimeoutContext): Promise { const parsedUrl = request.endpoint.split(':'); const port = parsedUrl[1] != null ? 
Number.parseInt(parsedUrl[1], 10) : HTTPS_PORT; const socketOptions = autoSelectSocketOptions(this.options.socketOptions || {}); @@ -329,10 +345,6 @@ export class StateMachine { } } - function ontimeout() { - return new MongoCryptError('KMS request timed out'); - } - function onerror(cause: Error) { return new MongoCryptError('KMS request failed', { cause }); } @@ -364,7 +376,6 @@ export class StateMachine { resolve: resolveOnNetSocketConnect } = promiseWithResolvers(); netSocket - .once('timeout', () => rejectOnNetSocketError(ontimeout())) .once('error', err => rejectOnNetSocketError(onerror(err))) .once('close', () => rejectOnNetSocketError(onclose())) .once('connect', () => resolveOnNetSocketConnect()); @@ -410,8 +421,8 @@ export class StateMachine { reject: rejectOnTlsSocketError, resolve } = promiseWithResolvers(); + socket - .once('timeout', () => rejectOnTlsSocketError(ontimeout())) .once('error', err => rejectOnTlsSocketError(onerror(err))) .once('close', () => rejectOnTlsSocketError(onclose())) .on('data', data => { @@ -425,20 +436,26 @@ export class StateMachine { resolve(); } }); - await willResolveKmsRequest; + await (timeoutContext?.csotEnabled() + ? Promise.all([willResolveKmsRequest, Timeout.expires(timeoutContext?.remainingTimeMS)]) + : willResolveKmsRequest); + } catch (error) { + if (error instanceof TimeoutError) + throw new MongoOperationTimeoutError('KMS request timed out'); + throw error; } finally { // There's no need for any more activity on this socket at this point. 
destroySockets(); } } - *requests(context: MongoCryptContext) { + *requests(context: MongoCryptContext, timeoutContext?: TimeoutContext) { for ( let request = context.nextKMSRequest(); request != null; request = context.nextKMSRequest() ) { - yield this.kmsRequest(request); + yield this.kmsRequest(request, timeoutContext); } } @@ -498,17 +515,21 @@ export class StateMachine { async fetchCollectionInfo( client: MongoClient, ns: string, - filter: Document + filter: Document, + timeoutContext?: TimeoutContext ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - const collections = await client - .db(db) - .listCollections(filter, { - promoteLongs: false, - promoteValues: false - }) - .toArray(); + const cursor = client.db(db).listCollections(filter, { + promoteLongs: false, + promoteValues: false, + timeoutContext: timeoutContext && new CursorTimeoutContext(timeoutContext, Symbol()) + }); + + // There is always exactly zero or one matching documents, so this should always exhaust the cursor + // in a single batch. We call `toArray()` just to be safe and ensure that the cursor is always + // exhausted and closed. + const collections = await cursor.toArray(); const info = collections.length > 0 ? serialize(collections[0]) : null; return info; @@ -522,12 +543,22 @@ export class StateMachine { * @param command - The command to execute. 
* @param callback - Invoked with the serialized and marked bson command, or with an error */ - async markCommand(client: MongoClient, ns: string, command: Uint8Array): Promise { - const options = { promoteLongs: false, promoteValues: false }; + async markCommand( + client: MongoClient, + ns: string, + command: Uint8Array, + timeoutContext?: TimeoutContext + ): Promise { const { db } = MongoDBCollectionNamespace.fromString(ns); - const rawCommand = deserialize(command, options); + const bsonOptions = { promoteLongs: false, promoteValues: false }; + const rawCommand = deserialize(command, bsonOptions); - const response = await client.db(db).command(rawCommand, options); + const response = await client.db(db).command(rawCommand, { + ...bsonOptions, + ...(timeoutContext?.csotEnabled() + ? { timeoutMS: timeoutContext?.remainingTimeMS } + : undefined) + }); return serialize(response, this.bsonOptions); } @@ -543,7 +574,8 @@ export class StateMachine { fetchKeys( client: MongoClient, keyVaultNamespace: string, - filter: Uint8Array + filter: Uint8Array, + timeoutContext?: TimeoutContext ): Promise> { const { db: dbName, collection: collectionName } = MongoDBCollectionNamespace.fromString(keyVaultNamespace); @@ -551,7 +583,9 @@ export class StateMachine { return client .db(dbName) .collection(collectionName, { readConcern: { level: 'majority' } }) - .find(deserialize(filter)) + .find(deserialize(filter), { + timeoutContext: timeoutContext && new CursorTimeoutContext(timeoutContext, Symbol()) + }) .toArray(); } } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 1ed62647a8a..ca7c86a0bad 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -21,9 +21,11 @@ import { } from '../constants'; import { MongoCompatibilityError, + MONGODB_ERROR_CODES, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, MongoServerError, MongoUnexpectedServerResponseError @@ -35,6 +37,7 @@ import { type 
CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { ServerType } from '../sdam/common'; import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions'; +import { type TimeoutContext, TimeoutError } from '../timeout'; import { BufferPool, calculateDurationInMs, @@ -88,6 +91,7 @@ export interface CommandOptions extends BSONSerializeOptions { documentsReturnedIn?: string; noResponse?: boolean; omitReadPreference?: boolean; + omitMaxTimeMS?: boolean; // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint // from executeOperation that the txnNum should be applied to this command. @@ -99,6 +103,9 @@ export interface CommandOptions extends BSONSerializeOptions { writeConcern?: WriteConcern; directConnection?: boolean; + + /** @internal */ + timeoutContext?: TimeoutContext; } /** @public */ @@ -420,6 +427,11 @@ export class Connection extends TypedEventEmitter { ...options }; + if (!options.omitMaxTimeMS) { + const maxTimeMS = options.timeoutContext?.maxTimeMS; + if (maxTimeMS && maxTimeMS > 0 && Number.isFinite(maxTimeMS)) cmd.maxTimeMS = maxTimeMS; + } + const message = this.supportsOpMsg ? new OpMsgRequest(db, cmd, commandOptions) : new OpQueryRequest(db, cmd, commandOptions); @@ -434,7 +446,9 @@ export class Connection extends TypedEventEmitter { ): AsyncGenerator { this.throwIfAborted(); - if (typeof options.socketTimeoutMS === 'number') { + if (options.timeoutContext?.csotEnabled()) { + this.socket.setTimeout(0); + } else if (typeof options.socketTimeoutMS === 'number') { this.socket.setTimeout(options.socketTimeoutMS); } else if (this.socketTimeoutMS !== 0) { this.socket.setTimeout(this.socketTimeoutMS); @@ -443,7 +457,8 @@ export class Connection extends TypedEventEmitter { try { await this.writeCommand(message, { agreedCompressor: this.description.compressor ?? 
'none', - zlibCompressionLevel: this.description.zlibCompressionLevel + zlibCompressionLevel: this.description.zlibCompressionLevel, + timeoutContext: options.timeoutContext }); if (options.noResponse || message.moreToCome) { @@ -453,7 +468,17 @@ export class Connection extends TypedEventEmitter { this.throwIfAborted(); - for await (const response of this.readMany()) { + if ( + options.timeoutContext?.csotEnabled() && + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + + for await (const response of this.readMany({ timeoutContext: options.timeoutContext })) { this.socket.setTimeout(0); const bson = response.parse(); @@ -480,7 +505,6 @@ export class Connection extends TypedEventEmitter { responseType?: MongoDBResponseConstructor ) { const message = this.prepareCommand(ns.db, command, options); - let started = 0; if (this.shouldEmitAndLogCommand) { started = now(); @@ -522,6 +546,11 @@ export class Connection extends TypedEventEmitter { } if (document.ok === 0) { + if (options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError((object ??= document.toObject(bsonOptions))) + }); + } throw new MongoServerError((object ??= document.toObject(bsonOptions))); } @@ -595,6 +624,28 @@ export class Connection extends TypedEventEmitter { ): Promise { this.throwIfAborted(); for await (const document of this.sendCommand(ns, command, options, responseType)) { + if (options.timeoutContext?.csotEnabled()) { + if (MongoDBResponse.is(document)) { + if (document.isMaxTimeExpiredError) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document.toObject()) + }); + } + } else { + if ( + 
(Array.isArray(document?.writeErrors) && + document.writeErrors.some( + error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + )) || + document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired + ) { + throw new MongoOperationTimeoutError('Server reported a timeout error', { + cause: new MongoServerError(document) + }); + } + } + } + return document; } throw new MongoUnexpectedServerResponseError('Unable to get response from server'); @@ -630,7 +681,11 @@ export class Connection extends TypedEventEmitter { */ private async writeCommand( command: WriteProtocolMessageType, - options: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number } + options: { + agreedCompressor?: CompressorName; + zlibCompressionLevel?: number; + timeoutContext?: TimeoutContext; + } ): Promise { const finalCommand = options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command) @@ -642,8 +697,36 @@ export class Connection extends TypedEventEmitter { const buffer = Buffer.concat(await finalCommand.toBin()); + if (options.timeoutContext?.csotEnabled()) { + if ( + options.timeoutContext.minRoundTripTime != null && + options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime + ) { + throw new MongoOperationTimeoutError( + 'Server roundtrip time is greater than the time remaining' + ); + } + } + if (this.socket.write(buffer)) return; - return await once(this.socket, 'drain'); + + const drainEvent = once(this.socket, 'drain'); + const timeout = options?.timeoutContext?.timeoutForSocketWrite; + if (timeout) { + try { + return await Promise.race([drainEvent, timeout]); + } catch (error) { + let err = error; + if (TimeoutError.is(error)) { + err = new MongoOperationTimeoutError('Timed out at socket write'); + this.cleanup(err); + } + throw error; + } finally { + timeout.clear(); + } + } + return await drainEvent; } /** @@ -655,10 +738,13 @@ export class Connection extends TypedEventEmitter { * * Note that `for-await` loops 
call `return` automatically when the loop is exited. */ - private async *readMany(): AsyncGenerator { + private async *readMany(options: { + timeoutContext?: TimeoutContext; + }): AsyncGenerator { try { - this.dataEvents = onData(this.messageStream); + this.dataEvents = onData(this.messageStream, options); this.messageStream.resume(); + for await (const message of this.dataEvents) { const response = await decompressResponse(message); yield response; @@ -667,6 +753,17 @@ export class Connection extends TypedEventEmitter { return; } } + } catch (readError) { + const err = readError; + if (TimeoutError.is(readError)) { + const error = new MongoOperationTimeoutError( + `Timed out during socket read (${readError.duration}ms)` + ); + this.dataEvents = null; + this.onError(error); + throw error; + } + throw err; } finally { this.dataEvents = null; this.messageStream.pause(); diff --git a/src/cmap/connection_pool.ts b/src/cmap/connection_pool.ts index 5a858a5121e..2cd2bcc2c19 100644 --- a/src/cmap/connection_pool.ts +++ b/src/cmap/connection_pool.ts @@ -21,12 +21,13 @@ import { MongoInvalidArgumentError, MongoMissingCredentialsError, MongoNetworkError, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Server } from '../sdam/server'; -import { Timeout, TimeoutError } from '../timeout'; +import { type TimeoutContext, TimeoutError } from '../timeout'; import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils'; import { connect } from './connect'; import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection'; @@ -102,7 +103,6 @@ export interface ConnectionPoolOptions extends Omit void; reject: (err: AnyError) => void; - timeout: Timeout; [kCancelled]?: boolean; checkoutTime: number; } @@ -355,23 +355,20 @@ export class ConnectionPool extends TypedEventEmitter { * will be held by the pool. 
This means that if a connection is checked out it MUST be checked back in or * explicitly destroyed by the new owner. */ - async checkOut(): Promise { + async checkOut(options: { timeoutContext: TimeoutContext }): Promise { const checkoutTime = now(); this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_STARTED, new ConnectionCheckOutStartedEvent(this) ); - const waitQueueTimeoutMS = this.options.waitQueueTimeoutMS; - const { promise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(waitQueueTimeoutMS); + const timeout = options.timeoutContext.connectionCheckoutTimeout; const waitQueueMember: WaitQueueMember = { resolve, reject, - timeout, checkoutTime }; @@ -379,13 +376,13 @@ export class ConnectionPool extends TypedEventEmitter { process.nextTick(() => this.processWaitQueue()); try { - return await Promise.race([promise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + return await (timeout ? Promise.race([promise, timeout]) : promise); } catch (error) { if (TimeoutError.is(error)) { + timeout?.clear(); waitQueueMember[kCancelled] = true; - waitQueueMember.timeout.clear(); - this.emitAndLog( ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime) @@ -396,9 +393,16 @@ export class ConnectionPool extends TypedEventEmitter { : 'Timed out while checking out a connection from connection pool', this.address ); + if (options.timeoutContext.csotEnabled()) { + throw new MongoOperationTimeoutError('Timed out during connection checkout', { + cause: timeoutError + }); + } throw timeoutError; } throw error; + } finally { + if (options.timeoutContext.clearConnectionCheckoutTimeout) timeout?.clear(); } } @@ -764,7 +768,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECK_OUT_FAILED, new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); 
waitQueueMember.reject(error); continue; @@ -785,7 +788,6 @@ export class ConnectionPool extends TypedEventEmitter { ConnectionPool.CONNECTION_CHECKED_OUT, new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime) ); - waitQueueMember.timeout.clear(); this[kWaitQueue].shift(); waitQueueMember.resolve(connection); @@ -828,8 +830,6 @@ export class ConnectionPool extends TypedEventEmitter { ); waitQueueMember.resolve(connection); } - - waitQueueMember.timeout.clear(); } process.nextTick(() => this.processWaitQueue()); }); diff --git a/src/cmap/wire_protocol/on_data.ts b/src/cmap/wire_protocol/on_data.ts index b99c950d96f..f6732618330 100644 --- a/src/cmap/wire_protocol/on_data.ts +++ b/src/cmap/wire_protocol/on_data.ts @@ -1,5 +1,6 @@ import { type EventEmitter } from 'events'; +import { type TimeoutContext } from '../../timeout'; import { List, promiseWithResolvers } from '../../utils'; /** @@ -18,7 +19,10 @@ type PendingPromises = Omit< * Returns an AsyncIterator that iterates each 'data' event emitted from emitter. * It will reject upon an error event. 
*/ -export function onData(emitter: EventEmitter) { +export function onData( + emitter: EventEmitter, + { timeoutContext }: { timeoutContext?: TimeoutContext } +) { // Setup pending events and pending promise lists /** * When the caller has not yet called .next(), we store the @@ -87,6 +91,10 @@ export function onData(emitter: EventEmitter) { emitter.on('data', eventHandler); emitter.on('error', errorHandler); + const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead; + timeoutForSocketRead?.throwIfExpired(); + timeoutForSocketRead?.then(undefined, errorHandler); + return iterator; function eventHandler(value: Buffer) { @@ -97,6 +105,7 @@ export function onData(emitter: EventEmitter) { function errorHandler(err: Error) { const promise = unconsumedPromises.shift(); + if (promise != null) promise.reject(err); else error = err; void closeHandler(); @@ -107,6 +116,7 @@ export function onData(emitter: EventEmitter) { emitter.off('data', eventHandler); emitter.off('error', errorHandler); finished = true; + timeoutForSocketRead?.clear(); const doneResult = { value: undefined, done: finished } as const; for (const promise of unconsumedPromises) { diff --git a/src/cmap/wire_protocol/responses.ts b/src/cmap/wire_protocol/responses.ts index a968bcb2061..1d20566e2d5 100644 --- a/src/cmap/wire_protocol/responses.ts +++ b/src/cmap/wire_protocol/responses.ts @@ -10,7 +10,7 @@ import { pluckBSONSerializeOptions, type Timestamp } from '../../bson'; -import { MongoUnexpectedServerResponseError } from '../../error'; +import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error'; import { type ClusterTime } from '../../sdam/common'; import { decorateDecryptionResult, ns } from '../../utils'; import { @@ -110,6 +110,40 @@ export class MongoDBResponse extends OnDemandDocument { // {ok:1} static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0])); + /** + * Returns true iff: + * - ok is 0 and the top-level code === 
50 + * - ok is 1 and the writeErrors array contains a code === 50 + * - ok is 1 and the writeConcern object contains a code === 50 + */ + get isMaxTimeExpiredError() { + // {ok: 0, code: 50 ... } + const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isTopLevel) return true; + + if (this.ok === 0) return false; + + // {ok: 1, writeConcernError: {code: 50 ... }} + const isWriteConcern = + this.get('writeConcernError', BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + if (isWriteConcern) return true; + + const writeErrors = this.get('writeErrors', BSONType.array); + if (writeErrors?.size()) { + for (let i = 0; i < writeErrors.size(); i++) { + const isWriteError = + writeErrors.get(i, BSONType.object)?.getNumber('code') === + MONGODB_ERROR_CODES.MaxTimeMSExpired; + + // {ok: 1, writeErrors: [{code: 50 ... }]} + if (isWriteError) return true; + } + } + + return false; + } + /** * Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the * contents of the document. diff --git a/src/collection.ts b/src/collection.ts index ccc6fe2da65..50adfbca76c 100644 --- a/src/collection.ts +++ b/src/collection.ts @@ -11,7 +11,7 @@ import { type ListSearchIndexesOptions } from './cursor/list_search_indexes_cursor'; import type { Db } from './db'; -import { MongoInvalidArgumentError } from './error'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError } from './error'; import type { MongoClient, PkFactory } from './mongo_client'; import type { Filter, @@ -115,7 +115,10 @@ export interface CollectionOptions extends BSONSerializeOptions, WriteConcernOpt readConcern?: ReadConcernLike; /** The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST). 
*/ readPreference?: ReadPreferenceLike; - /** @internal TODO(NODE-5688): make this public */ + /** + * @experimental + * Specifies the time an operation will run until it throws a timeout error + */ timeoutMS?: number; } @@ -262,6 +265,10 @@ export class Collection { this.s.collectionHint = normalizeHintField(v); } + public get timeoutMS(): number | undefined { + return this.s.options.timeoutMS; + } + /** * Inserts a single document into MongoDB. If documents passed in do not contain the **_id** field, * one will be added to each of the documents missing it by the driver, mutating the document. This behavior @@ -465,10 +472,14 @@ export class Collection { // Intentionally, we do not inherit options from parent for this operation. return await executeOperation( this.client, - new RenameOperation(this as TODO_NODE_3286, newName, { - ...options, - readPreference: ReadPreference.PRIMARY - }) as TODO_NODE_3286 + new RenameOperation( + this as TODO_NODE_3286, + newName, + resolveOptions(undefined, { + ...options, + readPreference: ReadPreference.PRIMARY + }) + ) as TODO_NODE_3286 ); } @@ -492,12 +503,18 @@ export class Collection { */ async findOne(): Promise | null>; async findOne(filter: Filter): Promise | null>; - async findOne(filter: Filter, options: FindOptions): Promise | null>; + async findOne( + filter: Filter, + options: Omit + ): Promise | null>; // allow an override of the schema. async findOne(): Promise; async findOne(filter: Filter): Promise; - async findOne(filter: Filter, options?: FindOptions): Promise; + async findOne( + filter: Filter, + options?: Omit + ): Promise; async findOne( filter: Filter = {}, @@ -669,7 +686,9 @@ export class Collection { new DropIndexOperation(this as TODO_NODE_3286, '*', resolveOptions(this, options)) ); return true; - } catch { + } catch (error) { + // TODO(NODE-6517): Driver should only filter for namespace not found error. Other errors should be thrown. 
+ if (error instanceof MongoOperationTimeoutError) throw error; return false; } } @@ -1033,6 +1052,59 @@ export class Collection { * }); * ``` * + * @remarks + * When `timeoutMS` is configured for a change stream, it will have different behaviour depending + * on whether the change stream is in iterator mode or emitter mode. In both cases, a change + * stream will time out if it does not receive a change event within `timeoutMS` of the last change + * event. + * + * Note that if a change stream is consistently timing out when watching a collection, database or + * client that is being changed, then this may be due to the server timing out before it can finish + * processing the existing oplog. To address this, restart the change stream with a higher + * `timeoutMS`. + * + * If the change stream times out the initial aggregate operation to establish the change stream on + * the server, then the client will close the change stream. If the getMore calls to the server + * time out, then the change stream will be left open, but will throw a MongoOperationTimeoutError + * when in iterator mode and emit an error event that returns a MongoOperationTimeoutError in + * emitter mode. + * + * To determine whether or not the change stream is still open following a timeout, check the + * {@link ChangeStream.closed} getter. + * + * @example + * In iterator mode, if a next() call throws a timeout error, it will attempt to resume the change stream. + * The next call can just be retried after this succeeds. 
+ * ```ts + * const changeStream = collection.watch([], { timeoutMS: 100 }); + * try { + * await changeStream.next(); + * } catch (e) { + * if (e instanceof MongoOperationTimeoutError && !changeStream.closed) { + * await changeStream.next(); + * } + * throw e; + * } + * ``` + * + * @example + * In emitter mode, if the change stream goes `timeoutMS` without emitting a change event, it will + * emit an error event that returns a MongoOperationTimeoutError, but will not close the change + * stream unless the resume attempt fails. There is no need to re-establish change listeners as + * this will automatically continue emitting change events once the resume attempt completes. + * + * ```ts + * const changeStream = collection.watch([], { timeoutMS: 100 }); + * changeStream.on('change', console.log); + * changeStream.on('error', e => { + * if (e instanceof MongoOperationTimeoutError && !changeStream.closed) { + * // do nothing + * } else { + * changeStream.close(); + * } + * }); + * ``` + * * @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents. 
* @param options - Optional settings for the command * @typeParam TLocal - Type of the data being detected by the change stream diff --git a/src/connection_string.ts b/src/connection_string.ts index f0b497ddf40..ce20b9e90a9 100644 --- a/src/connection_string.ts +++ b/src/connection_string.ts @@ -1092,6 +1092,7 @@ export const OPTIONS = { type: 'string' }, socketTimeoutMS: { + // TODO(NODE-6491): deprecated: 'Please use timeoutMS instead', default: 0, type: 'uint' }, @@ -1162,6 +1163,7 @@ export const OPTIONS = { } }, waitQueueTimeoutMS: { + // TODO(NODE-6491): deprecated: 'Please use timeoutMS instead', default: 0, type: 'uint' }, diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index 51206b51a27..66bfbed0078 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -21,6 +21,7 @@ import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import { type AsyncDisposable, configureResourceManagement } from '../resource_management'; import type { Server } from '../sdam/server'; import { ClientSession, maybeClearPinnedConnection } from '../sessions'; +import { type CSOTTimeoutContext, type Timeout, TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; /** @@ -60,6 +61,46 @@ export interface CursorStreamOptions { /** @public */ export type CursorFlag = (typeof CURSOR_FLAGS)[number]; +/** + * @public + * @experimental + * Specifies how `timeoutMS` is applied to the cursor. Can be either `'cursorLifeTime'` or `'iteration'` + * When set to `'iteration'`, the deadline specified by `timeoutMS` applies to each call of + * `cursor.next()`. + * When set to `'cursorLifetime'`, the deadline applies to the life of the entire cursor. + * + * Depending on the type of cursor being used, this option has different default values. 
+ * For non-tailable cursors, this value defaults to `'cursorLifetime'` + * For tailable cursors, this value defaults to `'iteration'` since tailable cursors, by + * definition can have an arbitrarily long lifetime. + * + * @example + * ```ts + * const cursor = collection.find({}, {timeoutMS: 100, timeoutMode: 'iteration'}); + * for await (const doc of cursor) { + * // process doc + * // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms, but + * // will continue to iterate successfully otherwise, regardless of the number of batches. + * } + * ``` + * + * @example + * ```ts + * const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' }); + * const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms. + * ``` + */ +export const CursorTimeoutMode = Object.freeze({ + ITERATION: 'iteration', + LIFETIME: 'cursorLifetime' +} as const); + +/** + * @public + * @experimental + */ +export type CursorTimeoutMode = (typeof CursorTimeoutMode)[keyof typeof CursorTimeoutMode]; + /** @public */ export interface AbstractCursorOptions extends BSONSerializeOptions { session?: ClientSession; @@ -103,8 +144,46 @@ export interface AbstractCursorOptions extends BSONSerializeOptions { */ awaitData?: boolean; noCursorTimeout?: boolean; - /** @internal TODO(NODE-5688): make this public */ + /** Specifies the time an operation will run until it throws a timeout error. See {@link AbstractCursorOptions.timeoutMode} for more details on how this option applies to cursors. */ timeoutMS?: number; + /** + * @public + * @experimental + * Specifies how `timeoutMS` is applied to the cursor. Can be either `'cursorLifeTime'` or `'iteration'` + * When set to `'iteration'`, the deadline specified by `timeoutMS` applies to each call of + * `cursor.next()`. + * When set to `'cursorLifetime'`, the deadline applies to the life of the entire cursor. 
+ * + * Depending on the type of cursor being used, this option has different default values. + * For non-tailable cursors, this value defaults to `'cursorLifetime'` + * For tailable cursors, this value defaults to `'iteration'` since tailable cursors, by + * definition can have an arbitrarily long lifetime. + * + * @example + * ```ts + * const cursor = collection.find({}, {timeoutMS: 100, timeoutMode: 'iteration'}); + * for await (const doc of cursor) { + * // process doc + * // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms, but + * // will continue to iterate successfully otherwise, regardless of the number of batches. + * } + * ``` + * + * @example + * ```ts + * const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' }); + * const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms. + * ``` + */ + timeoutMode?: CursorTimeoutMode; + + /** + * @internal + * + * A timeout context to govern the total time the cursor can live. If provided, the cursor + * cannot be used in ITERATION mode. + */ + timeoutContext?: CursorTimeoutContext; } /** @internal */ @@ -117,6 +196,8 @@ export type InternalAbstractCursorOptions = Omit any; - /** @internal */ + /** + * @internal + * This is true whether or not the first command fails. It only indicates whether or not the first + * command has been run. + */ private initialized: boolean; /** @internal */ private isClosed: boolean; @@ -154,6 +239,8 @@ export abstract class AbstractCursor< private isKilled: boolean; /** @internal */ protected readonly cursorOptions: InternalAbstractCursorOptions; + /** @internal */ + protected timeoutContext?: CursorTimeoutContext; /** @event */ static readonly CLOSE = 'close' as const; @@ -183,9 +270,50 @@ export abstract class AbstractCursor< options.readPreference && options.readPreference instanceof ReadPreference ? 
options.readPreference : ReadPreference.primary, - ...pluckBSONSerializeOptions(options) + ...pluckBSONSerializeOptions(options), + timeoutMS: options?.timeoutContext?.csotEnabled() + ? options.timeoutContext.timeoutMS + : options.timeoutMS, + tailable: options.tailable, + awaitData: options.awaitData }; - this.cursorOptions.timeoutMS = options.timeoutMS; + + if (this.cursorOptions.timeoutMS != null) { + if (options.timeoutMode == null) { + if (options.tailable) { + if (options.awaitData) { + if ( + options.maxAwaitTimeMS != null && + options.maxAwaitTimeMS >= this.cursorOptions.timeoutMS + ) + throw new MongoInvalidArgumentError( + 'Cannot specify maxAwaitTimeMS >= timeoutMS for a tailable awaitData cursor' + ); + } + + this.cursorOptions.timeoutMode = CursorTimeoutMode.ITERATION; + } else { + this.cursorOptions.timeoutMode = CursorTimeoutMode.LIFETIME; + } + } else { + if (options.tailable && options.timeoutMode === CursorTimeoutMode.LIFETIME) { + throw new MongoInvalidArgumentError( + "Cannot set tailable cursor's timeoutMode to LIFETIME" + ); + } + this.cursorOptions.timeoutMode = options.timeoutMode; + } + } else { + if (options.timeoutMode != null) + throw new MongoInvalidArgumentError('Cannot set timeoutMode without setting timeoutMS'); + } + + // Set for initial command + this.cursorOptions.omitMaxTimeMS = + this.cursorOptions.timeoutMS != null && + ((this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + !this.cursorOptions.tailable) || + (this.cursorOptions.tailable && !this.cursorOptions.awaitData)); const readConcern = ReadConcern.fromOptions(options); if (readConcern) { @@ -222,6 +350,8 @@ export abstract class AbstractCursor< utf8: options?.enableUtf8Validation === false ? false : true } }; + + this.timeoutContext = options.timeoutContext; } /** @@ -400,12 +530,21 @@ export abstract class AbstractCursor< return false; } - do { - if ((this.documents?.length ?? 
0) !== 0) { - return true; + if (this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && this.cursorId != null) { + this.timeoutContext?.refresh(); + } + try { + do { + if ((this.documents?.length ?? 0) !== 0) { + return true; + } + await this.fetchBatch(); + } while (!this.isDead || (this.documents?.length ?? 0) !== 0); + } finally { + if (this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION) { + this.timeoutContext?.clear(); } - await this.fetchBatch(); - } while (!this.isDead || (this.documents?.length ?? 0) !== 0); + } return false; } @@ -416,14 +555,24 @@ export abstract class AbstractCursor< throw new MongoCursorExhaustedError(); } - do { - const doc = this.documents?.shift(this.deserializationOptions); - if (doc != null) { - if (this.transform != null) return await this.transformDocument(doc); - return doc; + if (this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && this.cursorId != null) { + this.timeoutContext?.refresh(); + } + + try { + do { + const doc = this.documents?.shift(this.deserializationOptions); + if (doc != null) { + if (this.transform != null) return await this.transformDocument(doc); + return doc; + } + await this.fetchBatch(); + } while (!this.isDead || (this.documents?.length ?? 0) !== 0); + } finally { + if (this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION) { + this.timeoutContext?.clear(); } - await this.fetchBatch(); - } while (!this.isDead || (this.documents?.length ?? 
0) !== 0); + } return null; } @@ -436,18 +585,27 @@ export abstract class AbstractCursor< throw new MongoCursorExhaustedError(); } - let doc = this.documents?.shift(this.deserializationOptions); - if (doc != null) { - if (this.transform != null) return await this.transformDocument(doc); - return doc; + if (this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && this.cursorId != null) { + this.timeoutContext?.refresh(); } + try { + let doc = this.documents?.shift(this.deserializationOptions); + if (doc != null) { + if (this.transform != null) return await this.transformDocument(doc); + return doc; + } - await this.fetchBatch(); + await this.fetchBatch(); - doc = this.documents?.shift(this.deserializationOptions); - if (doc != null) { - if (this.transform != null) return await this.transformDocument(doc); - return doc; + doc = this.documents?.shift(this.deserializationOptions); + if (doc != null) { + if (this.transform != null) return await this.transformDocument(doc); + return doc; + } + } finally { + if (this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION) { + this.timeoutContext?.clear(); + } } return null; @@ -476,8 +634,8 @@ export abstract class AbstractCursor< /** * Frees any client-side resources used by the cursor. */ - async close(): Promise { - await this.cleanup(); + async close(options?: { timeoutMS?: number }): Promise { + await this.cleanup(options?.timeoutMS); } /** @@ -652,12 +810,17 @@ export abstract class AbstractCursor< * if the resultant data has already been retrieved by this cursor. 
*/ rewind(): void { + if (this.timeoutContext && this.timeoutContext.owner !== this) { + throw new MongoAPIError(`Cannot rewind cursor that does not own its timeout context.`); + } if (!this.initialized) { return; } this.cursorId = null; this.documents?.clear(); + this.timeoutContext?.clear(); + this.timeoutContext = undefined; this.isClosed = false; this.isKilled = false; this.initialized = false; @@ -696,18 +859,20 @@ export abstract class AbstractCursor< 'Unexpected null selectedServer. A cursor creating command should have set this' ); } + const getMoreOptions = { + ...this.cursorOptions, + session: this.cursorSession, + batchSize + }; + const getMoreOperation = new GetMoreOperation( this.cursorNamespace, this.cursorId, this.selectedServer, - { - ...this.cursorOptions, - session: this.cursorSession, - batchSize - } + getMoreOptions ); - return await executeOperation(this.cursorClient, getMoreOperation); + return await executeOperation(this.cursorClient, getMoreOperation, this.timeoutContext); } /** @@ -718,8 +883,19 @@ export abstract class AbstractCursor< * a significant refactor. 
*/ private async cursorInit(): Promise { + if (this.cursorOptions.timeoutMS != null) { + this.timeoutContext ??= new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.s.options.serverSelectionTimeoutMS, + timeoutMS: this.cursorOptions.timeoutMS + }), + this + ); + } try { const state = await this._initialize(this.cursorSession); + // Set omitMaxTimeMS to the value needed for subsequent getMore calls + this.cursorOptions.omitMaxTimeMS = this.cursorOptions.timeoutMS != null; const response = state.response; this.selectedServer = state.server; this.cursorId = response.id; @@ -729,7 +905,7 @@ export abstract class AbstractCursor< } catch (error) { // the cursor is now initialized, even if an error occurred this.initialized = true; - await this.cleanup(error); + await this.cleanup(undefined, error); throw error; } @@ -770,10 +946,10 @@ export abstract class AbstractCursor< this.documents = response; } catch (error) { try { - await this.cleanup(error); - } catch (error) { + await this.cleanup(undefined, error); + } catch (cleanupError) { // `cleanupCursor` should never throw, squash and throw the original error - squashError(error); + squashError(cleanupError); } throw error; } @@ -791,9 +967,23 @@ export abstract class AbstractCursor< } /** @internal */ - private async cleanup(error?: Error) { + private async cleanup(timeoutMS?: number, error?: Error) { this.isClosed = true; const session = this.cursorSession; + const timeoutContextForKillCursors = (): CursorTimeoutContext | undefined => { + if (timeoutMS != null) { + this.timeoutContext?.clear(); + return new CursorTimeoutContext( + TimeoutContext.create({ + serverSelectionTimeoutMS: this.client.s.options.serverSelectionTimeoutMS, + timeoutMS + }), + this + ); + } else { + return this.timeoutContext?.refreshed(); + } + }; try { if ( !this.isKilled && @@ -806,11 +996,13 @@ export abstract class AbstractCursor< this.isKilled = true; const cursorId = this.cursorId; this.cursorId = 
Long.ZERO; + await executeOperation( this.cursorClient, new KillCursorsOperation(cursorId, this.cursorNamespace, this.selectedServer, { session - }) + }), + timeoutContextForKillCursors() ); } } catch (error) { @@ -952,3 +1144,58 @@ class ReadableCursorStream extends Readable { } configureResourceManagement(AbstractCursor.prototype); + +/** + * @internal + * The cursor timeout context is a wrapper around a timeout context + * that keeps track of the "owner" of the cursor. For timeout contexts + * instantiated inside a cursor, the owner will be the cursor. + * + * All timeout behavior is exactly the same as the wrapped timeout context's. + */ +export class CursorTimeoutContext extends TimeoutContext { + constructor( + public timeoutContext: TimeoutContext, + public owner: symbol | AbstractCursor + ) { + super(); + } + override get serverSelectionTimeout(): Timeout | null { + return this.timeoutContext.serverSelectionTimeout; + } + override get connectionCheckoutTimeout(): Timeout | null { + return this.timeoutContext.connectionCheckoutTimeout; + } + override get clearServerSelectionTimeout(): boolean { + return this.timeoutContext.clearServerSelectionTimeout; + } + override get clearConnectionCheckoutTimeout(): boolean { + return this.timeoutContext.clearConnectionCheckoutTimeout; + } + override get timeoutForSocketWrite(): Timeout | null { + return this.timeoutContext.timeoutForSocketWrite; + } + override get timeoutForSocketRead(): Timeout | null { + return this.timeoutContext.timeoutForSocketRead; + } + override csotEnabled(): this is CSOTTimeoutContext { + return this.timeoutContext.csotEnabled(); + } + override refresh(): void { + if (typeof this.owner !== 'symbol') return this.timeoutContext.refresh(); + } + override clear(): void { + if (typeof this.owner !== 'symbol') return this.timeoutContext.clear(); + } + override get maxTimeMS(): number | null { + return this.timeoutContext.maxTimeMS; + } + + get timeoutMS(): number | null { + return 
this.timeoutContext.csotEnabled() ? this.timeoutContext.timeoutMS : null; + } + + override refreshed(): CursorTimeoutContext { + return new CursorTimeoutContext(this.timeoutContext.refreshed(), this.owner); + } +} diff --git a/src/cursor/aggregation_cursor.ts b/src/cursor/aggregation_cursor.ts index 9762c8a03bf..cace0a4b6a2 100644 --- a/src/cursor/aggregation_cursor.ts +++ b/src/cursor/aggregation_cursor.ts @@ -1,5 +1,12 @@ import type { Document } from '../bson'; -import type { ExplainCommandOptions, ExplainVerbosityLike } from '../explain'; +import { MongoAPIError } from '../error'; +import { + Explain, + ExplainableCursor, + type ExplainCommandOptions, + type ExplainVerbosityLike, + validateExplainTimeoutOptions +} from '../explain'; import type { MongoClient } from '../mongo_client'; import { AggregateOperation, type AggregateOptions } from '../operations/aggregate'; import { executeOperation } from '../operations/execute_operation'; @@ -7,8 +14,8 @@ import type { ClientSession } from '../sessions'; import type { Sort } from '../sort'; import { mergeOptions, type MongoDBNamespace } from '../utils'; import { - AbstractCursor, type AbstractCursorOptions, + CursorTimeoutMode, type InitialCursorResponse } from './abstract_cursor'; @@ -22,7 +29,7 @@ export interface AggregationCursorOptions extends AbstractCursorOptions, Aggrega * or higher stream * @public */ -export class AggregationCursor extends AbstractCursor { +export class AggregationCursor extends ExplainableCursor { public readonly pipeline: Document[]; /** @internal */ private aggregateOptions: AggregateOptions; @@ -38,6 +45,15 @@ export class AggregationCursor extends AbstractCursor { this.pipeline = pipeline; this.aggregateOptions = options; + + const lastStage: Document | undefined = this.pipeline[this.pipeline.length - 1]; + + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (lastStage?.$merge != null || lastStage?.$out != null) + ) + 
throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); } clone(): AggregationCursor { @@ -54,26 +70,49 @@ export class AggregationCursor extends AbstractCursor { /** @internal */ async _initialize(session: ClientSession): Promise { - const aggregateOperation = new AggregateOperation(this.namespace, this.pipeline, { + const options = { ...this.aggregateOptions, ...this.cursorOptions, session - }); + }; + if (options.explain) { + try { + validateExplainTimeoutOptions(options, Explain.fromOptions(options)); + } catch { + throw new MongoAPIError( + 'timeoutMS cannot be used with explain when explain is specified in aggregateOptions' + ); + } + } + + const aggregateOperation = new AggregateOperation(this.namespace, this.pipeline, options); - const response = await executeOperation(this.client, aggregateOperation); + const response = await executeOperation(this.client, aggregateOperation, this.timeoutContext); return { server: aggregateOperation.server, session, response }; } /** Execute the explain for the cursor */ - async explain(verbosity?: ExplainVerbosityLike | ExplainCommandOptions): Promise { + async explain(): Promise; + async explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise; + async explain(options: { timeoutMS?: number }): Promise; + async explain( + verbosity: ExplainVerbosityLike | ExplainCommandOptions, + options: { timeoutMS?: number } + ): Promise; + async explain( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): Promise { + const { explain, timeout } = this.resolveExplainTimeoutOptions(verbosity, options); return ( await executeOperation( this.client, new AggregateOperation(this.namespace, this.pipeline, { ...this.aggregateOptions, // NOTE: order matters here, we may need to refine this ...this.cursorOptions, - explain: verbosity ?? true + ...timeout, + explain: explain ?? 
true }) ) ).shift(this.deserializationOptions); @@ -95,6 +134,13 @@ export class AggregationCursor extends AbstractCursor { addStage(stage: Document): AggregationCursor; addStage(stage: Document): AggregationCursor { this.throwIfInitialized(); + if ( + this.cursorOptions.timeoutMS != null && + this.cursorOptions.timeoutMode === CursorTimeoutMode.ITERATION && + (stage.$out != null || stage.$merge != null) + ) { + throw new MongoAPIError('Cannot use $out or $merge stage with ITERATION timeoutMode'); + } this.pipeline.push(stage); return this as unknown as AggregationCursor; } diff --git a/src/cursor/change_stream_cursor.ts b/src/cursor/change_stream_cursor.ts index b42ce3e1302..73a256cdeea 100644 --- a/src/cursor/change_stream_cursor.ts +++ b/src/cursor/change_stream_cursor.ts @@ -55,7 +55,7 @@ export class ChangeStreamCursor< pipeline: Document[] = [], options: ChangeStreamCursorOptions = {} ) { - super(client, namespace, options); + super(client, namespace, { ...options, tailable: true, awaitData: true }); this.pipeline = pipeline; this.changeStreamCursorOptions = options; @@ -133,7 +133,11 @@ export class ChangeStreamCursor< session }); - const response = await executeOperation(session.client, aggregateOperation); + const response = await executeOperation( + session.client, + aggregateOperation, + this.timeoutContext + ); const server = aggregateOperation.server; this.maxWireVersion = maxWireVersion(server); diff --git a/src/cursor/client_bulk_write_cursor.ts b/src/cursor/client_bulk_write_cursor.ts index 69e166effca..d9da82d367b 100644 --- a/src/cursor/client_bulk_write_cursor.ts +++ b/src/cursor/client_bulk_write_cursor.ts @@ -34,7 +34,7 @@ export class ClientBulkWriteCursor extends AbstractCursor { constructor( client: MongoClient, commandBuilder: ClientBulkWriteCommandBuilder, - options: ClientBulkWriteOptions = {} + options: ClientBulkWriteCursorOptions = {} ) { super(client, new MongoDBNamespace('admin', '$cmd'), options); @@ -71,7 +71,11 @@ export class 
ClientBulkWriteCursor extends AbstractCursor { session }); - const response = await executeOperation(this.client, clientBulkWriteOperation); + const response = await executeOperation( + this.client, + clientBulkWriteOperation, + this.timeoutContext + ); this.cursorResponse = response; return { server: clientBulkWriteOperation.server, session, response }; diff --git a/src/cursor/find_cursor.ts b/src/cursor/find_cursor.ts index 83a12818bd0..28cb373614d 100644 --- a/src/cursor/find_cursor.ts +++ b/src/cursor/find_cursor.ts @@ -1,7 +1,13 @@ import { type Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; -import { MongoInvalidArgumentError, MongoTailableCursorError } from '../error'; -import { type ExplainCommandOptions, type ExplainVerbosityLike } from '../explain'; +import { MongoAPIError, MongoInvalidArgumentError, MongoTailableCursorError } from '../error'; +import { + Explain, + ExplainableCursor, + type ExplainCommandOptions, + type ExplainVerbosityLike, + validateExplainTimeoutOptions +} from '../explain'; import type { MongoClient } from '../mongo_client'; import type { CollationOptions } from '../operations/command'; import { CountOperation, type CountOptions } from '../operations/count'; @@ -11,7 +17,7 @@ import type { Hint } from '../operations/operation'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortDirection } from '../sort'; import { emitWarningOnce, mergeOptions, type MongoDBNamespace, squashError } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { type InitialCursorResponse } from './abstract_cursor'; /** @public Flags allowed for cursor */ export const FLAGS = [ @@ -24,7 +30,7 @@ export const FLAGS = [ ] as const; /** @public */ -export class FindCursor extends AbstractCursor { +export class FindCursor extends ExplainableCursor { /** @internal */ private cursorFilter: Document; /** @internal */ @@ -63,13 
+69,25 @@ export class FindCursor extends AbstractCursor { /** @internal */ async _initialize(session: ClientSession): Promise { - const findOperation = new FindOperation(this.namespace, this.cursorFilter, { + const options = { ...this.findOptions, // NOTE: order matters here, we may need to refine this ...this.cursorOptions, session - }); + }; + + if (options.explain) { + try { + validateExplainTimeoutOptions(options, Explain.fromOptions(options)); + } catch { + throw new MongoAPIError( + 'timeoutMS cannot be used with explain when explain is specified in findOptions' + ); + } + } - const response = await executeOperation(this.client, findOperation); + const findOperation = new FindOperation(this.namespace, this.cursorFilter, options); + + const response = await executeOperation(this.client, findOperation, this.timeoutContext); // the response is not a cursor when `explain` is enabled this.numReturned = response.batchSize; @@ -133,14 +151,27 @@ export class FindCursor extends AbstractCursor { } /** Execute the explain for the cursor */ - async explain(verbosity?: ExplainVerbosityLike | ExplainCommandOptions): Promise { + async explain(): Promise; + async explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise; + async explain(options: { timeoutMS?: number }): Promise; + async explain( + verbosity: ExplainVerbosityLike | ExplainCommandOptions, + options: { timeoutMS?: number } + ): Promise; + async explain( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): Promise { + const { explain, timeout } = this.resolveExplainTimeoutOptions(verbosity, options); + return ( await executeOperation( this.client, new FindOperation(this.namespace, this.cursorFilter, { ...this.findOptions, // NOTE: order matters here, we may need to refine this ...this.cursorOptions, - explain: verbosity ?? true + ...timeout, + explain: explain ?? 
true }) ) ).shift(this.deserializationOptions); diff --git a/src/cursor/list_collections_cursor.ts b/src/cursor/list_collections_cursor.ts index a529709556d..9b69de1b935 100644 --- a/src/cursor/list_collections_cursor.ts +++ b/src/cursor/list_collections_cursor.ts @@ -41,7 +41,7 @@ export class ListCollectionsCursor< session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/list_indexes_cursor.ts b/src/cursor/list_indexes_cursor.ts index 799ddf5bdb5..0f768f3b699 100644 --- a/src/cursor/list_indexes_cursor.ts +++ b/src/cursor/list_indexes_cursor.ts @@ -30,7 +30,7 @@ export class ListIndexesCursor extends AbstractCursor { session }); - const response = await executeOperation(this.parent.client, operation); + const response = await executeOperation(this.parent.client, operation, this.timeoutContext); return { server: operation.server, session, response }; } diff --git a/src/cursor/run_command_cursor.ts b/src/cursor/run_command_cursor.ts index 78b9826b9b1..d5b90eeda9d 100644 --- a/src/cursor/run_command_cursor.ts +++ b/src/cursor/run_command_cursor.ts @@ -9,12 +9,55 @@ import type { ReadConcernLike } from '../read_concern'; import type { ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; import { ns } from '../utils'; -import { AbstractCursor, type InitialCursorResponse } from './abstract_cursor'; +import { + AbstractCursor, + type CursorTimeoutMode, + type InitialCursorResponse +} from './abstract_cursor'; /** @public */ export type RunCursorCommandOptions = { readPreference?: ReadPreferenceLike; session?: ClientSession; + /** + * @experimental + * Specifies the time an operation will run until it throws a timeout error. 
Note that if
+   * `maxTimeMS` is provided in the command in addition to setting `timeoutMS` in the options, then
+   * the original value of `maxTimeMS` will be overwritten.
+   */
+  timeoutMS?: number;
+  /**
+   * @public
+   * @experimental
+   * Specifies how `timeoutMS` is applied to the cursor. Can be either `'cursorLifetime'` or `'iteration'`.
+   * When set to `'iteration'`, the deadline specified by `timeoutMS` applies to each call of
+   * `cursor.next()`.
+   * When set to `'cursorLifetime'`, the deadline applies to the life of the entire cursor.
+   *
+   * Depending on the type of cursor being used, this option has different default values.
+   * For non-tailable cursors, this value defaults to `'cursorLifetime'`
+   * For tailable cursors, this value defaults to `'iteration'` since tailable cursors, by
+   * definition can have an arbitrarily long lifetime.
+   *
+   * @example
+   * ```ts
+   * const cursor = collection.find({}, {timeoutMS: 100, timeoutMode: 'iteration'});
+   * for await (const doc of cursor) {
+   *  // process doc
+   *  // This will throw a timeout error if any of the iterator's `next()` calls takes more than 100ms, but
+   *  // will continue to iterate successfully otherwise, regardless of the number of batches.
+   * }
+   * ```
+   *
+   * @example
+   * ```ts
+   * const cursor = collection.find({}, { timeoutMS: 1000, timeoutMode: 'cursorLifetime' });
+   * const docs = await cursor.toArray(); // This entire line will throw a timeout error if all batches are not fetched and returned within 1000ms.
+   * ```
+   */
+  timeoutMode?: CursorTimeoutMode;
+  tailable?: boolean;
+  awaitData?: boolean;
 } & BSONSerializeOptions;
 
 /** @public */
@@ -46,7 +89,7 @@ export class RunCommandCursor extends AbstractCursor {
 
   /**
    * Controls the `getMore.batchSize` field
-   * @param maxTimeMS - the number documents to return in the `nextBatch`
+   * @param batchSize - the number of documents to return in the `nextBatch`
    */
   public setBatchSize(batchSize: number): this {
     this.getMoreOptions.batchSize = batchSize;
@@ -72,7 +115,9 @@ export class RunCommandCursor extends AbstractCursor {
     );
   }
 
-  /** Unsupported for RunCommandCursor: maxTimeMS must be configured directly on command document */
+  /**
+   * Unsupported for RunCommandCursor: maxTimeMS must be configured directly on command document
+   */
   public override maxTimeMS(_: number): never {
     throw new MongoAPIError(
       'maxTimeMS must be configured on the command document directly, to configure getMore.maxTimeMS use cursor.setMaxTimeMS()'
@@ -105,7 +150,7 @@ export class RunCommandCursor extends AbstractCursor {
       responseType: CursorResponse
     });
 
-    const response = await executeOperation(this.client, operation);
+    const response = await executeOperation(this.client, operation, this.timeoutContext);
 
     return {
       server: operation.server,
@@ -123,6 +168,6 @@ export class RunCommandCursor extends AbstractCursor {
       ...this.getMoreOptions
     });
 
-    return await executeOperation(this.client, getMoreOperation);
+    return await executeOperation(this.client, getMoreOperation, this.timeoutContext);
   }
 }
diff --git a/src/db.ts b/src/db.ts
index 53c18e44af6..121d6fc4f1e 100644
--- a/src/db.ts
+++ b/src/db.ts
@@ -97,7 +97,10 @@ export interface DbOptions extends BSONSerializeOptions, WriteConcernOptions {
   readConcern?: ReadConcern;
   /** Should retry failed writes */
   retryWrites?: boolean;
-  /** @internal TODO(NODE-5688): make this public */
+  /**
+   * @experimental
+   * Specifies the time an operation will run until it throws a timeout error
+   */
   timeoutMS?: number;
 }
 
@@ 
-222,6 +225,10 @@ export class Db { return this.s.namespace.toString(); } + public get timeoutMS(): number | undefined { + return this.s.options?.timeoutMS; + } + /** * Create a new collection on a server with the specified options. Use this to create capped collections. * More information about command options available at https://www.mongodb.com/docs/manual/reference/command/create/ @@ -270,11 +277,16 @@ export class Db { // Intentionally, we do not inherit options from parent for this operation. return await executeOperation( this.client, - new RunCommandOperation(this, command, { - ...resolveBSONOptions(options), - session: options?.session, - readPreference: options?.readPreference - }) + new RunCommandOperation( + this, + command, + resolveOptions(undefined, { + ...resolveBSONOptions(options), + timeoutMS: options?.timeoutMS ?? this.timeoutMS, + session: options?.session, + readPreference: options?.readPreference + }) + ) ); } @@ -379,7 +391,11 @@ export class Db { new RenameOperation( this.collection(fromCollection) as TODO_NODE_3286, toCollection, - { ...options, new_collection: true, readPreference: ReadPreference.primary } + resolveOptions(undefined, { + ...options, + new_collection: true, + readPreference: ReadPreference.primary + }) ) as TODO_NODE_3286 ); } @@ -517,6 +533,58 @@ export class Db { * - The first is to provide the schema that may be defined for all the collections within this database * - The second is to override the shape of the change stream document entirely, if it is not provided the type will default to ChangeStreamDocument of the first argument * + * @remarks + * When `timeoutMS` is configured for a change stream, it will have different behaviour depending + * on whether the change stream is in iterator mode or emitter mode. In both cases, a change + * stream will time out if it does not receive a change event within `timeoutMS` of the last change + * event. 
+ * + * Note that if a change stream is consistently timing out when watching a collection, database or + * client that is being changed, then this may be due to the server timing out before it can finish + * processing the existing oplog. To address this, restart the change stream with a higher + * `timeoutMS`. + * + * If the change stream times out the initial aggregate operation to establish the change stream on + * the server, then the client will close the change stream. If the getMore calls to the server + * time out, then the change stream will be left open, but will throw a MongoOperationTimeoutError + * when in iterator mode and emit an error event that returns a MongoOperationTimeoutError in + * emitter mode. + * + * To determine whether or not the change stream is still open following a timeout, check the + * {@link ChangeStream.closed} getter. + * + * @example + * In iterator mode, if a next() call throws a timeout error, it will attempt to resume the change stream. + * The next call can just be retried after this succeeds. + * ```ts + * const changeStream = collection.watch([], { timeoutMS: 100 }); + * try { + * await changeStream.next(); + * } catch (e) { + * if (e instanceof MongoOperationTimeoutError && !changeStream.closed) { + * await changeStream.next(); + * } + * throw e; + * } + * ``` + * + * @example + * In emitter mode, if the change stream goes `timeoutMS` without emitting a change event, it will + * emit an error event that returns a MongoOperationTimeoutError, but will not close the change + * stream unless the resume attempt fails. There is no need to re-establish change listeners as + * this will automatically continue emitting change events once the resume attempt completes. 
+ * + * ```ts + * const changeStream = collection.watch([], { timeoutMS: 100 }); + * changeStream.on('change', console.log); + * changeStream.on('error', e => { + * if (e instanceof MongoOperationTimeoutError && !changeStream.closed) { + * // do nothing + * } else { + * changeStream.close(); + * } + * }); + * ``` * @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents. * @param options - Optional settings for the command * @typeParam TSchema - Type of the data being detected by the change stream diff --git a/src/error.ts b/src/error.ts index a3120a93880..a3ae965b78d 100644 --- a/src/error.ts +++ b/src/error.ts @@ -311,7 +311,7 @@ export class MongoAPIError extends MongoDriverError { /** * An error generated when the driver encounters unexpected input - * or reaches an unexpected/invalid internal state + * or reaches an unexpected/invalid internal state. * * @privateRemarks * Should **never** be directly instantiated. @@ -857,6 +857,31 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { } } +/** + * @public + * @category Error + * + * The `MongoOperationTimeoutError` class represents an error that occurs when an operation could not be completed within the specified `timeoutMS`. + * It is generated by the driver in support of the "client side operation timeout" feature so inherits from `MongoDriverError`. + * When `timeoutMS` is enabled `MongoServerError`s relating to `MaxTimeExpired` errors will be converted to `MongoOperationTimeoutError` + * + * @example + * ```ts + * try { + * await blogs.insertOne(blogPost, { timeoutMS: 60_000 }) + * } catch (error) { + * if (error instanceof MongoOperationTimeoutError) { + * console.log(`Oh no! 
writer's block!`, error); + * } + * } + * ``` + */ +export class MongoOperationTimeoutError extends MongoDriverError { + override get name(): string { + return 'MongoOperationTimeoutError'; + } +} + /** * An error thrown when the user attempts to add options to a cursor that has already been * initialized diff --git a/src/explain.ts b/src/explain.ts index 51f591efd47..670bea53041 100644 --- a/src/explain.ts +++ b/src/explain.ts @@ -1,3 +1,7 @@ +import { type Document } from './bson'; +import { AbstractCursor } from './cursor/abstract_cursor'; +import { MongoAPIError } from './error'; + /** @public */ export const ExplainVerbosity = Object.freeze({ queryPlanner: 'queryPlanner', @@ -86,3 +90,84 @@ export class Explain { return new Explain(verbosity, maxTimeMS); } } + +export function validateExplainTimeoutOptions(options: Document, explain?: Explain) { + const { maxTimeMS, timeoutMS } = options; + if (timeoutMS != null && (maxTimeMS != null || explain?.maxTimeMS != null)) { + throw new MongoAPIError('Cannot use maxTimeMS with timeoutMS for explain commands.'); + } +} + +/** + * Applies an explain to a given command. + * @internal + * + * @param command - the command on which to apply the explain + * @param options - the options containing the explain verbosity + */ +export function decorateWithExplain( + command: Document, + explain: Explain +): { + explain: Document; + verbosity: ExplainVerbosity; + maxTimeMS?: number; +} { + type ExplainCommand = ReturnType; + const { verbosity, maxTimeMS } = explain; + const baseCommand: ExplainCommand = { explain: command, verbosity }; + + if (typeof maxTimeMS === 'number') { + baseCommand.maxTimeMS = maxTimeMS; + } + + return baseCommand; +} + +/** + * @public + * + * A base class for any cursors that have `explain()` methods. 
+ */ +export abstract class ExplainableCursor extends AbstractCursor { + /** Execute the explain for the cursor */ + abstract explain(): Promise; + abstract explain(verbosity: ExplainVerbosityLike | ExplainCommandOptions): Promise; + abstract explain(options: { timeoutMS?: number }): Promise; + abstract explain( + verbosity: ExplainVerbosityLike | ExplainCommandOptions, + options: { timeoutMS?: number } + ): Promise; + abstract explain( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): Promise; + + protected resolveExplainTimeoutOptions( + verbosity?: ExplainVerbosityLike | ExplainCommandOptions | { timeoutMS?: number }, + options?: { timeoutMS?: number } + ): { timeout?: { timeoutMS?: number }; explain?: ExplainVerbosityLike | ExplainCommandOptions } { + let explain: ExplainVerbosityLike | ExplainCommandOptions | undefined; + let timeout: { timeoutMS?: number } | undefined; + + if (verbosity == null && options == null) { + explain = undefined; + timeout = undefined; + } else if (verbosity != null && options == null) { + explain = + typeof verbosity !== 'object' + ? verbosity + : 'verbosity' in verbosity + ? verbosity + : undefined; + + timeout = typeof verbosity === 'object' && 'timeoutMS' in verbosity ? 
verbosity : undefined; + } else { + // @ts-expect-error TS isn't smart enough to determine that if both options are provided, the first is explain options + explain = verbosity; + timeout = options; + } + + return { timeout, explain }; + } +} diff --git a/src/gridfs/download.ts b/src/gridfs/download.ts index 06dda0a92ba..022bcf94449 100644 --- a/src/gridfs/download.ts +++ b/src/gridfs/download.ts @@ -2,6 +2,7 @@ import { Readable } from 'stream'; import type { Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { FindCursor } from '../cursor/find_cursor'; import { MongoGridFSChunkError, @@ -12,6 +13,7 @@ import { import type { FindOptions } from '../operations/find'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; import type { Callback } from '../utils'; import type { GridFSChunk } from './upload'; @@ -28,7 +30,10 @@ export interface GridFSBucketReadStreamOptions { * to be returned by the stream. `end` is non-inclusive */ end?: number; - /** @internal TODO(NODE-5688): make this public */ + /** + * @experimental + * Specifies the time an operation will run until it throws a timeout error + */ timeoutMS?: number; } @@ -98,8 +103,10 @@ export interface GridFSBucketReadStreamPrivate { skip?: number; start: number; end: number; + timeoutMS?: number; }; readPreference?: ReadPreference; + timeoutContext?: CSOTTimeoutContext; } /** @@ -148,7 +155,11 @@ export class GridFSBucketReadStream extends Readable { end: 0, ...options }, - readPreference + readPreference, + timeoutContext: + options?.timeoutMS != null + ? 
new CSOTTimeoutContext({ timeoutMS: options.timeoutMS, serverSelectionTimeoutMS: 0 }) + : undefined }; } @@ -196,7 +207,8 @@ export class GridFSBucketReadStream extends Readable { async abort(): Promise { this.push(null); this.destroy(); - await this.s.cursor?.close(); + const remainingTimeMS = this.s.timeoutContext?.getRemainingTimeMSOrThrow(); + await this.s.cursor?.close({ timeoutMS: remainingTimeMS }); } } @@ -352,7 +364,22 @@ function init(stream: GridFSBucketReadStream): void { filter['n'] = { $gte: skip }; } } - stream.s.cursor = stream.s.chunks.find(filter).sort({ n: 1 }); + + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + return stream.destroy(error); + } + + stream.s.cursor = stream.s.chunks + .find(filter, { + timeoutMode: stream.s.options.timeoutMS != null ? CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .sort({ n: 1 }); if (stream.s.readPreference) { stream.s.cursor.withReadPreference(stream.s.readPreference); @@ -371,6 +398,18 @@ function init(stream: GridFSBucketReadStream): void { return; }; + let remainingTimeMS: number | undefined; + try { + remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow( + `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms` + ); + } catch (error) { + if (!stream.destroyed) stream.destroy(error); + return; + } + + findOneOptions.timeoutMS = remainingTimeMS; + stream.s.files.findOne(stream.s.filter, findOneOptions).then(handleReadResult, error => { if (stream.destroyed) return; stream.destroy(error); diff --git a/src/gridfs/index.ts b/src/gridfs/index.ts index 51c32b7a01c..70f154431cf 100644 --- a/src/gridfs/index.ts +++ b/src/gridfs/index.ts @@ -2,10 +2,12 @@ import type { ObjectId } from '../bson'; import type { Collection } from '../collection'; import type { FindCursor } from 
'../cursor/find_cursor'; import type { Db } from '../db'; -import { MongoRuntimeError } from '../error'; +import { MongoOperationTimeoutError, MongoRuntimeError } from '../error'; import { type Filter, TypedEventEmitter } from '../mongo_types'; import type { ReadPreference } from '../read_preference'; import type { Sort } from '../sort'; +import { CSOTTimeoutContext } from '../timeout'; +import { resolveOptions } from '../utils'; import { WriteConcern, type WriteConcernOptions } from '../write_concern'; import type { FindOptions } from './../operations/find'; import { @@ -36,7 +38,11 @@ export interface GridFSBucketOptions extends WriteConcernOptions { chunkSizeBytes?: number; /** Read preference to be passed to read operations */ readPreference?: ReadPreference; - /** @internal TODO(NODE-5688): make this public */ + /** + * @experimental + * Specifies the lifetime duration of a gridFS stream. If any async operations are in progress + * when this timeout expires, the stream will throw a timeout error. 
+ */ timeoutMS?: number; } @@ -48,6 +54,7 @@ export interface GridFSBucketPrivate { chunkSizeBytes: number; readPreference?: ReadPreference; writeConcern: WriteConcern | undefined; + timeoutMS?: number; }; _chunksCollection: Collection; _filesCollection: Collection; @@ -81,11 +88,11 @@ export class GridFSBucket extends TypedEventEmitter { constructor(db: Db, options?: GridFSBucketOptions) { super(); this.setMaxListeners(0); - const privateOptions = { + const privateOptions = resolveOptions(db, { ...DEFAULT_GRIDFS_BUCKET_OPTIONS, ...options, writeConcern: WriteConcern.fromOptions(options) - }; + }); this.s = { db, options: privateOptions, @@ -109,7 +116,10 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, options); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options + }); } /** @@ -122,7 +132,11 @@ export class GridFSBucket extends TypedEventEmitter { filename: string, options?: GridFSBucketWriteStreamOptions ): GridFSBucketWriteStream { - return new GridFSBucketWriteStream(this, filename, { ...options, id }); + return new GridFSBucketWriteStream(this, filename, { + timeoutMS: this.s.options.timeoutMS, + ...options, + id + }); } /** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. 
*/ @@ -135,7 +149,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { _id: id }, - options + { timeoutMS: this.s.options.timeoutMS, ...options } ); } @@ -144,11 +158,27 @@ export class GridFSBucket extends TypedEventEmitter { * * @param id - The id of the file doc */ - async delete(id: ObjectId): Promise { - const { deletedCount } = await this.s._filesCollection.deleteOne({ _id: id }); + async delete(id: ObjectId, options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS + }); + } + + const { deletedCount } = await this.s._filesCollection.deleteOne( + { _id: id }, + { timeoutMS: timeoutContext?.remainingTimeMS } + ); + + const remainingTimeMS = timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(`Timed out after ${timeoutMS}ms`); // Delete orphaned chunks before returning FileNotFound - await this.s._chunksCollection.deleteMany({ files_id: id }); + await this.s._chunksCollection.deleteMany({ files_id: id }, { timeoutMS: remainingTimeMS }); if (deletedCount === 0) { // TODO(NODE-3483): Replace with more appropriate error @@ -188,7 +218,7 @@ export class GridFSBucket extends TypedEventEmitter { this.s._filesCollection, this.s.options.readPreference, { filename }, - { ...options, sort, skip } + { timeoutMS: this.s.options.timeoutMS, ...options, sort, skip } ); } @@ -198,18 +228,36 @@ export class GridFSBucket extends TypedEventEmitter { * @param id - the id of the file to rename * @param filename - new name for the file */ - async rename(id: ObjectId, filename: string): Promise { + async rename(id: ObjectId, filename: string, options?: { timeoutMS: number }): Promise 
{ const filter = { _id: id }; const update = { $set: { filename } }; - const { matchedCount } = await this.s._filesCollection.updateOne(filter, update); + const { matchedCount } = await this.s._filesCollection.updateOne(filter, update, options); if (matchedCount === 0) { throw new MongoRuntimeError(`File with id ${id} not found`); } } /** Removes this bucket's files collection, followed by its chunks collection. */ - async drop(): Promise { - await this.s._filesCollection.drop(); - await this.s._chunksCollection.drop(); + async drop(options?: { timeoutMS: number }): Promise { + const { timeoutMS } = resolveOptions(this.s.db, options); + let timeoutContext: CSOTTimeoutContext | undefined = undefined; + + if (timeoutMS) { + timeoutContext = new CSOTTimeoutContext({ + timeoutMS, + serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS + }); + } + + if (timeoutContext) { + await this.s._filesCollection.drop({ timeoutMS: timeoutContext.remainingTimeMS }); + const remainingTimeMS = timeoutContext.getRemainingTimeMSOrThrow( + `Timed out after ${timeoutMS}ms` + ); + await this.s._chunksCollection.drop({ timeoutMS: remainingTimeMS }); + } else { + await this.s._filesCollection.drop(); + await this.s._chunksCollection.drop(); + } } } diff --git a/src/gridfs/upload.ts b/src/gridfs/upload.ts index f54d5131f66..6191e457bef 100644 --- a/src/gridfs/upload.ts +++ b/src/gridfs/upload.ts @@ -2,8 +2,15 @@ import { Writable } from 'stream'; import { type Document, ObjectId } from '../bson'; import type { Collection } from '../collection'; -import { MongoAPIError, MONGODB_ERROR_CODES, MongoError } from '../error'; -import { type Callback, squashError } from '../utils'; +import { CursorTimeoutMode } from '../cursor/abstract_cursor'; +import { + MongoAPIError, + MONGODB_ERROR_CODES, + MongoError, + MongoOperationTimeoutError +} from '../error'; +import { CSOTTimeoutContext } from '../timeout'; +import { type Callback, resolveTimeoutOptions, squashError } from 
'../utils'; import type { WriteConcernOptions } from '../write_concern'; import { WriteConcern } from './../write_concern'; import type { GridFSFile } from './download'; @@ -35,7 +42,10 @@ export interface GridFSBucketWriteStreamOptions extends WriteConcernOptions { * @deprecated Will be removed in the next major version. Add an aliases field to the metadata document instead. */ aliases?: string[]; - /** @internal TODO(NODE-5688): make this public */ + /** + * @experimental + * Specifies the time an operation will run until it throws a timeout error + */ timeoutMS?: number; } @@ -97,6 +107,8 @@ export class GridFSBucketWriteStream extends Writable { * ``` */ gridFSFile: GridFSFile | null = null; + /** @internal */ + timeoutContext?: CSOTTimeoutContext; /** * @param bucket - Handle for this stream's corresponding bucket @@ -131,14 +143,12 @@ export class GridFSBucketWriteStream extends Writable { aborted: false }; - if (!this.bucket.s.calledOpenUploadStream) { - this.bucket.s.calledOpenUploadStream = true; - - checkIndexes(this).then(() => { - this.bucket.s.checkedIndexes = true; - this.bucket.emit('index'); - }, squashError); - } + if (options.timeoutMS != null) + this.timeoutContext = new CSOTTimeoutContext({ + timeoutMS: options.timeoutMS, + serverSelectionTimeoutMS: resolveTimeoutOptions(this.bucket.s.db.client, {}) + .serverSelectionTimeoutMS + }); } /** @@ -147,10 +157,26 @@ export class GridFSBucketWriteStream extends Writable { * The stream is considered constructed when the indexes are done being created */ override _construct(callback: (error?: Error | null) => void): void { - if (this.bucket.s.checkedIndexes) { + if (!this.bucket.s.calledOpenUploadStream) { + this.bucket.s.calledOpenUploadStream = true; + + checkIndexes(this).then( + () => { + this.bucket.s.checkedIndexes = true; + this.bucket.emit('index'); + callback(); + }, + error => { + if (error instanceof MongoOperationTimeoutError) { + return handleError(this, error, callback); + } + 
squashError(error); + callback(); + } + ); + } else { return process.nextTick(callback); } - this.bucket.once('index', callback); } /** @@ -194,7 +220,10 @@ export class GridFSBucketWriteStream extends Writable { } this.state.aborted = true; - await this.chunks.deleteMany({ files_id: this.id }); + const remainingTimeMS = this.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${this.timeoutContext?.timeoutMS}ms` + ); + await this.chunks.deleteMany({ files_id: this.id, timeoutMS: remainingTimeMS }); } } @@ -219,9 +248,19 @@ function createChunkDoc(filesId: ObjectId, n: number, data: Buffer): GridFSChunk async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise { const index = { files_id: 1, n: 1 }; + let remainingTimeMS; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + let indexes; try { - indexes = await stream.chunks.listIndexes().toArray(); + indexes = await stream.chunks + .listIndexes({ + timeoutMode: remainingTimeMS != null ? 
CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }) + .toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -239,10 +278,14 @@ async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise }); if (!hasChunksIndex) { + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); await stream.chunks.createIndex(index, { ...stream.writeConcern, background: true, - unique: true + unique: true, + timeoutMS: remainingTimeMS }); } } @@ -270,13 +313,28 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { return; } - stream.files.insertOne(gridFSFile, { writeConcern: stream.writeConcern }).then( - () => { - stream.gridFSFile = gridFSFile; - callback(); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + + stream.files + .insertOne(gridFSFile, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + stream.gridFSFile = gridFSFile; + callback(); + }, + error => { + return handleError(stream, error, callback); + } + ); return; } @@ -284,7 +342,16 @@ function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void { } async function checkIndexes(stream: GridFSBucketWriteStream): Promise { - const doc = await stream.files.findOne({}, { projection: { _id: 1 } }); + let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const doc = await stream.files.findOne( + {}, + { + projection: { _id: 1 }, + timeoutMS: remainingTimeMS + } + ); if (doc != null) 
{ // If at least one document exists assume the collection has the required index return; @@ -293,8 +360,15 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { const index = { filename: 1, uploadDate: 1 }; let indexes; + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + const listIndexesOptions = { + timeoutMode: remainingTimeMS != null ? CursorTimeoutMode.LIFETIME : undefined, + timeoutMS: remainingTimeMS + }; try { - indexes = await stream.files.listIndexes().toArray(); + indexes = await stream.files.listIndexes(listIndexesOptions).toArray(); } catch (error) { if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { indexes = []; @@ -312,7 +386,11 @@ async function checkIndexes(stream: GridFSBucketWriteStream): Promise { }); if (!hasFileIndex) { - await stream.files.createIndex(index, { background: false }); + remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ); + + await stream.files.createIndex(index, { background: false, timeoutMS: remainingTimeMS }); } await checkChunksIndex(stream); @@ -386,6 +464,18 @@ function doWrite( let doc: GridFSChunk; if (spaceRemaining === 0) { doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore)); + + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; ++outstandingRequests; @@ -393,17 +483,21 @@ function doWrite( return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - --outstandingRequests; - - if (!outstandingRequests) { - checkDone(stream, 
callback); + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + --outstandingRequests; + + if (!outstandingRequests) { + checkDone(stream, callback); + } + }, + error => { + return handleError(stream, error, callback); } - }, - error => handleError(stream, error, callback) - ); + ); spaceRemaining = stream.chunkSizeBytes; stream.pos = 0; @@ -420,8 +514,6 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return checkDone(stream, callback); } - ++stream.state.outstandingRequests; - // Create a new buffer to make sure the buffer isn't bigger than it needs // to be. const remnant = Buffer.alloc(stream.pos); @@ -433,13 +525,28 @@ function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void return; } - stream.chunks.insertOne(doc, { writeConcern: stream.writeConcern }).then( - () => { - --stream.state.outstandingRequests; - checkDone(stream, callback); - }, - error => handleError(stream, error, callback) - ); + const remainingTimeMS = stream.timeoutContext?.remainingTimeMS; + if (remainingTimeMS != null && remainingTimeMS <= 0) { + return handleError( + stream, + new MongoOperationTimeoutError( + `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms` + ), + callback + ); + } + ++stream.state.outstandingRequests; + stream.chunks + .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS }) + .then( + () => { + --stream.state.outstandingRequests; + checkDone(stream, callback); + }, + error => { + return handleError(stream, error, callback); + } + ); } function isAborted(stream: GridFSBucketWriteStream, callback: Callback): boolean { diff --git a/src/index.ts b/src/index.ts index 39d4df719de..dd4d8a21d95 100644 --- a/src/index.ts +++ b/src/index.ts @@ -10,6 +10,7 @@ import { ListCollectionsCursor } from './cursor/list_collections_cursor'; import { ListIndexesCursor } from 
'./cursor/list_indexes_cursor'; import type { RunCommandCursor } from './cursor/run_command_cursor'; import { Db } from './db'; +import { ExplainableCursor } from './explain'; import { GridFSBucket } from './gridfs'; import { GridFSBucketReadStream } from './gridfs/download'; import { GridFSBucketWriteStream } from './gridfs/upload'; @@ -36,7 +37,11 @@ export { Timestamp, UUID } from './bson'; -export { AnyBulkWriteOperation, BulkWriteOptions, MongoBulkWriteError } from './bulk/common'; +export { + type AnyBulkWriteOperation, + type BulkWriteOptions, + MongoBulkWriteError +} from './bulk/common'; export { ClientEncryption } from './client-side-encryption/client_encryption'; export { ChangeStreamCursor } from './cursor/change_stream_cursor'; export { @@ -66,6 +71,7 @@ export { MongoNetworkTimeoutError, MongoNotConnectedError, MongoOIDCError, + MongoOperationTimeoutError, MongoParseError, MongoRuntimeError, MongoServerClosedError, @@ -90,6 +96,7 @@ export { ClientSession, Collection, Db, + ExplainableCursor, FindCursor, GridFSBucket, GridFSBucketReadStream, @@ -108,7 +115,7 @@ export { AutoEncryptionLoggerLevel } from './client-side-encryption/auto_encrypt export { GSSAPICanonicalizationValue } from './cmap/auth/gssapi'; export { AuthMechanism } from './cmap/auth/providers'; export { Compressor } from './cmap/wire_protocol/compression'; -export { CURSOR_FLAGS } from './cursor/abstract_cursor'; +export { CURSOR_FLAGS, CursorTimeoutMode } from './cursor/abstract_cursor'; export { MongoErrorLabel } from './error'; export { ExplainVerbosity } from './explain'; export { ServerApiVersion } from './mongo_client'; @@ -358,6 +365,7 @@ export type { CursorStreamOptions } from './cursor/abstract_cursor'; export type { + CursorTimeoutContext, InitialCursorResponse, InternalAbstractCursorOptions } from './cursor/abstract_cursor'; @@ -566,7 +574,13 @@ export type { RTTSampler, ServerMonitoringMode } from './sdam/monitor'; -export type { Server, ServerEvents, ServerOptions, 
ServerPrivate } from './sdam/server'; +export type { + Server, + ServerCommandOptions, + ServerEvents, + ServerOptions, + ServerPrivate +} from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, @@ -597,7 +611,15 @@ export type { WithTransactionCallback } from './sessions'; export type { Sort, SortDirection, SortDirectionForCmd, SortForCmd } from './sort'; -export type { Timeout } from './timeout'; +export type { + CSOTTimeoutContext, + CSOTTimeoutContextOptions, + LegacyTimeoutContext, + LegacyTimeoutContextOptions, + Timeout, + TimeoutContext, + TimeoutContextOptions +} from './timeout'; export type { Transaction, TransactionOptions, TxnState } from './transactions'; export type { BufferPool, diff --git a/src/mongo_client.ts b/src/mongo_client.ts index 0bc9165deee..bab3d2c0f4d 100644 --- a/src/mongo_client.ts +++ b/src/mongo_client.ts @@ -130,7 +130,10 @@ export type SupportedNodeConnectionOptions = SupportedTLSConnectionOptions & export interface MongoClientOptions extends BSONSerializeOptions, SupportedNodeConnectionOptions { /** Specifies the name of the replica set, if the mongod is a member of a replica set. */ replicaSet?: string; - /** @internal TODO(NODE-5688): This option is in development and currently has no behaviour. */ + /** + * @experimental + * Specifies the time an operation will run until it throws a timeout error + */ timeoutMS?: number; /** Enables or disables TLS/SSL for the connection. */ tls?: boolean; @@ -482,6 +485,10 @@ export class MongoClient extends TypedEventEmitter implements return this.s.bsonOptions; } + get timeoutMS(): number | undefined { + return this.s.options.timeoutMS; + } + /** * Executes a client bulk write operation, available on server 8.0+. * @param models - The client bulk write models. 
@@ -508,6 +515,13 @@ export class MongoClient extends TypedEventEmitter implements /** * Connect to MongoDB using a url * + * @remarks + * Calling `connect` is optional since the first operation you perform will call `connect` if it's needed. + * `timeoutMS` will bound the time any operation can take before throwing a timeout error. + * However, when the operation being run is automatically connecting your `MongoClient` the `timeoutMS` will not apply to the time taken to connect the MongoClient. + * This means the time to setup the `MongoClient` does not count against `timeoutMS`. + * If you are using `timeoutMS` we recommend connecting your client explicitly in advance of any operation to avoid this inconsistent execution time. + * * @see docs.mongodb.org/manual/reference/connection-string/ */ async connect(): Promise { @@ -688,7 +702,7 @@ export class MongoClient extends TypedEventEmitter implements // Default to db from connection string if not provided if (!dbName) { - dbName = this.options.dbName; + dbName = this.s.options.dbName; } // Copy the options and add out internal override of the not shared flag @@ -705,6 +719,13 @@ export class MongoClient extends TypedEventEmitter implements * Connect to MongoDB using a url * * @remarks + * Calling `connect` is optional since the first operation you perform will call `connect` if it's needed. + * `timeoutMS` will bound the time any operation can take before throwing a timeout error. + * However, when the operation being run is automatically connecting your `MongoClient` the `timeoutMS` will not apply to the time taken to connect the MongoClient. + * This means the time to setup the `MongoClient` does not count against `timeoutMS`. + * If you are using `timeoutMS` we recommend connecting your client explicitly in advance of any operation to avoid this inconsistent execution time. + * + * @remarks * The programmatically provided options take precedence over the URI options. 
* * @see https://www.mongodb.com/docs/manual/reference/connection-string/ @@ -789,6 +810,58 @@ export class MongoClient extends TypedEventEmitter implements * - The first is to provide the schema that may be defined for all the data within the current cluster * - The second is to override the shape of the change stream document entirely, if it is not provided the type will default to ChangeStreamDocument of the first argument * + * @remarks + * When `timeoutMS` is configured for a change stream, it will have different behaviour depending + * on whether the change stream is in iterator mode or emitter mode. In both cases, a change + * stream will time out if it does not receive a change event within `timeoutMS` of the last change + * event. + * + * Note that if a change stream is consistently timing out when watching a collection, database or + * client that is being changed, then this may be due to the server timing out before it can finish + * processing the existing oplog. To address this, restart the change stream with a higher + * `timeoutMS`. + * + * If the change stream times out the initial aggregate operation to establish the change stream on + * the server, then the client will close the change stream. If the getMore calls to the server + * time out, then the change stream will be left open, but will throw a MongoOperationTimeoutError + * when in iterator mode and emit an error event that returns a MongoOperationTimeoutError in + * emitter mode. + * + * To determine whether or not the change stream is still open following a timeout, check the + * {@link ChangeStream.closed} getter. + * + * @example + * In iterator mode, if a next() call throws a timeout error, it will attempt to resume the change stream. + * The next call can just be retried after this succeeds. 
+ * ```ts + * const changeStream = collection.watch([], { timeoutMS: 100 }); + * try { + * await changeStream.next(); + * } catch (e) { + * if (e instanceof MongoOperationTimeoutError && !changeStream.closed) { + * await changeStream.next(); + * } + * throw e; + * } + * ``` + * + * @example + * In emitter mode, if the change stream goes `timeoutMS` without emitting a change event, it will + * emit an error event that returns a MongoOperationTimeoutError, but will not close the change + * stream unless the resume attempt fails. There is no need to re-establish change listeners as + * this will automatically continue emitting change events once the resume attempt completes. + * + * ```ts + * const changeStream = collection.watch([], { timeoutMS: 100 }); + * changeStream.on('change', console.log); + * changeStream.on('error', e => { + * if (e instanceof MongoOperationTimeoutError && !changeStream.closed) { + * // do nothing + * } else { + * changeStream.close(); + * } + * }); + * ``` * @param pipeline - An array of {@link https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/|aggregation pipeline stages} through which to pass change stream documents. This allows for filtering (using $match) and manipulating the change stream documents. 
* @param options - Optional settings for the command * @typeParam TSchema - Type of the data being detected by the change stream @@ -952,6 +1025,5 @@ export interface MongoOptions * TODO: NODE-5671 - remove internal flag */ mongodbLogPath?: 'stderr' | 'stdout' | MongoDBLogWritable; - /** @internal TODO(NODE-5688): make this public */ timeoutMS?: number; } diff --git a/src/operations/aggregate.ts b/src/operations/aggregate.ts index 7b67fd0422d..ab367a16a94 100644 --- a/src/operations/aggregate.ts +++ b/src/operations/aggregate.ts @@ -1,9 +1,11 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; import { type ExplainOptions } from '../explain'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -24,7 +26,9 @@ export interface AggregateOptions extends Omit it returns as a real cursor on pre 2.6 it returns as an emulated cursor. */ cursor?: Document; - /** specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point. */ + /** + * Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point. + */ maxTimeMS?: number; /** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. 
*/ maxAwaitTimeMS?: number; @@ -43,6 +47,8 @@ export interface AggregateOptions extends Omit { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options: AggregateOptions = this.options; const serverWireVersion = maxWireVersion(server); @@ -150,6 +157,7 @@ export class AggregateOperation extends CommandOperation { server, session, command, + timeoutContext, this.explain ? ExplainedCursorResponse : CursorResponse ); } diff --git a/src/operations/bulk_write.ts b/src/operations/bulk_write.ts index 0a855644f06..55b61ef73b0 100644 --- a/src/operations/bulk_write.ts +++ b/src/operations/bulk_write.ts @@ -7,6 +7,7 @@ import type { import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { AbstractOperation, Aspect, defineAspects } from './operation'; /** @internal */ @@ -32,11 +33,17 @@ export class BulkWriteOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const coll = this.collection; const operations = this.operations; - const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; + const options = { + ...this.options, + ...this.bsonOptions, + readPreference: this.readPreference, + timeoutContext + }; // Create the bulk operation const bulk: BulkOperationBase = diff --git a/src/operations/client_bulk_write/client_bulk_write.ts b/src/operations/client_bulk_write/client_bulk_write.ts index e901407cd78..26d1e7bb60f 100644 --- a/src/operations/client_bulk_write/client_bulk_write.ts +++ b/src/operations/client_bulk_write/client_bulk_write.ts @@ -2,6 +2,7 @@ import { MongoClientBulkWriteExecutionError, ServerType } from '../../beta'; 
import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { MongoDBNamespace } from '../../utils'; import { CommandOperation } from '../command'; import { Aspect, defineAspects } from '../operation'; @@ -43,7 +44,8 @@ export class ClientBulkWriteOperation extends CommandOperation { let command; @@ -52,7 +54,7 @@ export class ClientBulkWriteOperation extends CommandOperation extends AbstractOperation { if (this.hasAspect(Aspect.EXPLAINABLE)) { this.explain = Explain.fromOptions(options); + if (this.explain) validateExplainTimeoutOptions(this.options, this.explain); } else if (options?.explain != null) { throw new MongoInvalidArgumentError(`Option "explain" is not supported on this command`); } @@ -111,19 +117,22 @@ export abstract class CommandOperation extends AbstractOperation { server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType: T | undefined ): Promise>; public async executeCommand( server: Server, session: ClientSession | undefined, - cmd: Document + cmd: Document, + timeoutContext: TimeoutContext ): Promise; async executeCommand( server: Server, session: ClientSession | undefined, cmd: Document, + timeoutContext: TimeoutContext, responseType?: MongoDBResponseConstructor ): Promise { this.server = server; @@ -131,6 +140,7 @@ export abstract class CommandOperation extends AbstractOperation { const options = { ...this.options, ...this.bsonOptions, + timeoutContext, readPreference: this.readPreference, session }; diff --git a/src/operations/count.ts b/src/operations/count.ts index 00aae501728..e3f9800d0e5 100644 --- a/src/operations/count.ts +++ b/src/operations/count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from 
'../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -12,7 +13,9 @@ export interface CountOptions extends CommandOperationOptions { skip?: number; /** The maximum amounts to count before aborting. */ limit?: number; - /** Number of milliseconds to wait before aborting the query. */ + /** + * Number of milliseconds to wait before aborting the query. + */ maxTimeMS?: number; /** An index name hint for the query. */ hint?: string | Document; @@ -36,7 +39,11 @@ export class CountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options; const cmd: Document = { count: this.collectionName, @@ -59,7 +66,7 @@ export class CountOperation extends CommandOperation { cmd.maxTimeMS = options.maxTimeMS; } - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return result ? 
result.n : 0; } } diff --git a/src/operations/create_collection.ts b/src/operations/create_collection.ts index 8edc7e9a1c4..da278f88c11 100644 --- a/src/operations/create_collection.ts +++ b/src/operations/create_collection.ts @@ -9,6 +9,7 @@ import { MongoCompatibilityError } from '../error'; import type { PkFactory } from '../mongo_client'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { CreateIndexesOperation } from './indexes'; import { Aspect, defineAspects } from './operation'; @@ -16,6 +17,7 @@ import { Aspect, defineAspects } from './operation'; const ILLEGAL_COMMAND_FIELDS = new Set([ 'w', 'wtimeout', + 'timeoutMS', 'j', 'fsync', 'autoIndexId', @@ -124,14 +126,18 @@ export class CreateCollectionOperation extends CommandOperation { return 'create' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const name = this.name; const options = this.options; const encryptedFields: Document | undefined = options.encryptedFields ?? 
- db.client.options.autoEncryption?.encryptedFieldsMap?.[`${db.databaseName}.${name}`]; + db.client.s.options.autoEncryption?.encryptedFieldsMap?.[`${db.databaseName}.${name}`]; if (encryptedFields) { // Creating a QE collection required min server of 7.0.0 @@ -155,7 +161,7 @@ export class CreateCollectionOperation extends CommandOperation { unique: true } }); - await createOp.executeWithoutEncryptedFieldsCheck(server, session); + await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } if (!options.encryptedFields) { @@ -163,7 +169,7 @@ export class CreateCollectionOperation extends CommandOperation { } } - const coll = await this.executeWithoutEncryptedFieldsCheck(server, session); + const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); if (encryptedFields) { // Create the required index for queryable encryption support. @@ -173,7 +179,7 @@ export class CreateCollectionOperation extends CommandOperation { { __safeContent__: 1 }, {} ); - await createIndexOp.execute(server, session); + await createIndexOp.execute(server, session, timeoutContext); } return coll; @@ -181,7 +187,8 @@ export class CreateCollectionOperation extends CommandOperation { private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const db = this.db; const name = this.name; @@ -198,7 +205,7 @@ export class CreateCollectionOperation extends CommandOperation { } } // otherwise just execute the command - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); return new Collection(db, name, options); } } diff --git a/src/operations/delete.ts b/src/operations/delete.ts index f0ef61cb7b1..0e93ead36a2 100644 --- a/src/operations/delete.ts +++ b/src/operations/delete.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoServerError } from 
'../error'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace } from '../utils'; import { type WriteConcernOptions } from '../write_concern'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; @@ -67,7 +68,8 @@ export class DeleteOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? options.ordered : true; @@ -95,7 +97,12 @@ export class DeleteOperation extends CommandOperation { } } - const res: TODO_NODE_3286 = await super.executeCommand(server, session, command); + const res: TODO_NODE_3286 = await super.executeCommand( + server, + session, + command, + timeoutContext + ); return res; } } @@ -107,9 +114,10 @@ export class DeleteOneOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -127,9 +135,10 @@ export class DeleteManyOperation extends DeleteOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if 
(this.explain) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/distinct.ts b/src/operations/distinct.ts index 4fda285d880..51f2a362d8c 100644 --- a/src/operations/distinct.ts +++ b/src/operations/distinct.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, decorateWithReadConcern } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -42,7 +43,11 @@ export class DistinctOperation extends CommandOperation { return 'distinct' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const key = this.key; const query = this.query; @@ -72,7 +77,7 @@ export class DistinctOperation extends CommandOperation { // Have we specified collation decorateWithCollation(cmd, coll, options); - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return this.explain ? 
result : result.values; } diff --git a/src/operations/drop.ts b/src/operations/drop.ts index 15624d4c07b..0ead5a4927a 100644 --- a/src/operations/drop.ts +++ b/src/operations/drop.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { MONGODB_ERROR_CODES, MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,12 +30,16 @@ export class DropCollectionOperation extends CommandOperation { return 'drop' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const db = this.db; const options = this.options; const name = this.name; - const encryptedFieldsMap = db.client.options.autoEncryption?.encryptedFieldsMap; + const encryptedFieldsMap = db.client.s.options.autoEncryption?.encryptedFieldsMap; let encryptedFields: Document | undefined = options.encryptedFields ?? encryptedFieldsMap?.[`${db.databaseName}.${name}`]; @@ -57,7 +62,7 @@ export class DropCollectionOperation extends CommandOperation { // Drop auxilliary collections, ignoring potential NamespaceNotFound errors. 
const dropOp = new DropCollectionOperation(db, collectionName); try { - await dropOp.executeWithoutEncryptedFieldsCheck(server, session); + await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } catch (err) { if ( !(err instanceof MongoServerError) || @@ -69,14 +74,15 @@ export class DropCollectionOperation extends CommandOperation { } } - return await this.executeWithoutEncryptedFieldsCheck(server, session); + return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext); } private async executeWithoutEncryptedFieldsCheck( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - await super.executeCommand(server, session, { drop: this.name }); + await super.executeCommand(server, session, { drop: this.name }, timeoutContext); return true; } } @@ -96,8 +102,12 @@ export class DropDatabaseOperation extends CommandOperation { return 'dropDatabase' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropDatabase: 1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext); return true; } } diff --git a/src/operations/estimated_document_count.ts b/src/operations/estimated_document_count.ts index c1d6c381998..5ab5aa4c305 100644 --- a/src/operations/estimated_document_count.ts +++ b/src/operations/estimated_document_count.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from 
'./operation'; @@ -30,7 +31,11 @@ export class EstimatedDocumentCountOperation extends CommandOperation { return 'count' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd: Document = { count: this.collectionName }; if (typeof this.options.maxTimeMS === 'number') { @@ -43,7 +48,7 @@ export class EstimatedDocumentCountOperation extends CommandOperation { cmd.comment = this.options.comment; } - const response = await super.executeCommand(server, session, cmd); + const response = await super.executeCommand(server, session, cmd, timeoutContext); return response?.n || 0; } diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index ec7c233eeca..f59df27569f 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -24,6 +24,7 @@ import { } from '../sdam/server_selection'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; +import { TimeoutContext } from '../timeout'; import { supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; @@ -57,7 +58,7 @@ type ResultTypeFromOperation = export async function executeOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(client: MongoClient, operation: T): Promise { +>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise { if (!(operation instanceof AbstractOperation)) { // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); @@ -80,11 +81,6 @@ export async function executeOperation< } else if (session.client !== client) { throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient'); } - if (session.explicit && session?.timeoutMS != null && 
operation.options.timeoutMS != null) { - throw new MongoInvalidArgumentError( - 'Do not specify timeoutMS on operation if already specified on an explicit session' - ); - } const readPreference = operation.readPreference ?? ReadPreference.primary; const inTransaction = !!session?.inTransaction(); @@ -105,9 +101,17 @@ export async function executeOperation< session.unpin(); } + timeoutContext ??= TimeoutContext.create({ + session, + serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS, + waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS, + timeoutMS: operation.options.timeoutMS + }); + try { return await tryOperation(operation, { topology, + timeoutContext, session, readPreference }); @@ -148,6 +152,7 @@ type RetryOptions = { session: ClientSession | undefined; readPreference: ReadPreference; topology: Topology; + timeoutContext: TimeoutContext; }; /** @@ -171,7 +176,10 @@ type RetryOptions = { async function tryOperation< T extends AbstractOperation, TResult = ResultTypeFromOperation ->(operation: T, { topology, session, readPreference }: RetryOptions): Promise { +>( + operation: T, + { topology, timeoutContext, session, readPreference }: RetryOptions +): Promise { let selector: ReadPreference | ServerSelector; if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) { @@ -189,7 +197,8 @@ async function tryOperation< let server = await topology.selectServer(selector, { session, - operationName: operation.commandName + operationName: operation.commandName, + timeoutContext }); const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION); @@ -214,12 +223,10 @@ async function tryOperation< session.incrementTransactionNumber(); } - // TODO(NODE-6231): implement infinite retry within CSOT timeout here - const maxTries = willRetry ? 2 : 1; + const maxTries = willRetry ? (timeoutContext.csotEnabled() ? 
Infinity : 2) : 1; let previousOperationError: MongoError | undefined; let previousServer: ServerDescription | undefined; - // TODO(NODE-6231): implement infinite retry within CSOT timeout here for (let tries = 0; tries < maxTries; tries++) { if (previousOperationError) { if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) { @@ -268,10 +275,9 @@ async function tryOperation< if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) { operation.resetBatch(); } - return await operation.execute(server, session); + return await operation.execute(server, session, timeoutContext); } catch (operationError) { if (!(operationError instanceof MongoError)) throw operationError; - if ( previousOperationError != null && operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed) @@ -280,6 +286,9 @@ async function tryOperation< } previousServer = server.description; previousOperationError = operationError; + + // Reset timeouts + timeoutContext.clear(); } } diff --git a/src/operations/find.ts b/src/operations/find.ts index 55abe00a923..1775ea6e07f 100644 --- a/src/operations/find.ts +++ b/src/operations/find.ts @@ -1,12 +1,18 @@ import type { Document } from '../bson'; import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses'; +import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import { MongoInvalidArgumentError } from '../error'; -import { type ExplainOptions } from '../explain'; +import { + decorateWithExplain, + type ExplainOptions, + validateExplainTimeoutOptions +} from '../explain'; import { ReadConcern } from '../read_concern'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort } from '../sort'; -import { decorateWithExplain, type MongoDBNamespace, normalizeHintField } from '../utils'; +import { type TimeoutContext } from '../timeout'; +import { type MongoDBNamespace, 
normalizeHintField } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -16,7 +22,8 @@ import { Aspect, defineAspects, type Hint } from './operation'; */ // eslint-disable-next-line @typescript-eslint/no-unused-vars export interface FindOptions - extends Omit { + extends Omit, + AbstractCursorOptions { /** Sets the limit of documents returned in the query. */ limit?: number; /** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */ @@ -70,6 +77,8 @@ export interface FindOptions * @deprecated This API is deprecated in favor of `collection.find().explain()`. */ explain?: ExplainOptions['explain']; + /** @internal*/ + timeoutMode?: CursorTimeoutMode; } /** @internal */ @@ -105,7 +114,8 @@ export class FindOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { this.server = server; @@ -113,6 +123,7 @@ export class FindOperation extends CommandOperation { let findCommand = makeFindCommand(this.ns, this.filter, options); if (this.explain) { + validateExplainTimeoutOptions(this.options, this.explain); findCommand = decorateWithExplain(findCommand, this.explain); } @@ -123,7 +134,8 @@ export class FindOperation extends CommandOperation { ...this.options, ...this.bsonOptions, documentsReturnedIn: 'firstBatch', - session + session, + timeoutContext }, this.explain ? 
ExplainedCursorResponse : CursorResponse ); diff --git a/src/operations/find_and_modify.ts b/src/operations/find_and_modify.ts index 92b17e93b3b..651bcccb626 100644 --- a/src/operations/find_and_modify.ts +++ b/src/operations/find_and_modify.ts @@ -5,6 +5,7 @@ import { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { formatSort, type Sort, type SortForCmd } from '../sort'; +import { type TimeoutContext } from '../timeout'; import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils'; import { type WriteConcern, type WriteConcernSettings } from '../write_concern'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -180,7 +181,11 @@ export class FindAndModifyOperation extends CommandOperation { return 'findAndModify' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const coll = this.collection; const query = this.query; const options = { ...this.options, ...this.bsonOptions }; @@ -208,7 +213,7 @@ export class FindAndModifyOperation extends CommandOperation { } // Execute the command - const result = await super.executeCommand(server, session, cmd); + const result = await super.executeCommand(server, session, cmd, timeoutContext); return options.includeResultMetadata ? result : (result.value ?? 
null); } } diff --git a/src/operations/get_more.ts b/src/operations/get_more.ts index aa550721b6f..34317d533b5 100644 --- a/src/operations/get_more.ts +++ b/src/operations/get_more.ts @@ -3,6 +3,7 @@ import { CursorResponse } from '../cmap/wire_protocol/responses'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, type MongoDBNamespace } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -58,7 +59,8 @@ export class GetMoreOperation extends AbstractOperation { */ override async execute( server: Server, - _session: ClientSession | undefined + _session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Getmore must run on the same server operation began on'); @@ -97,6 +99,7 @@ export class GetMoreOperation extends AbstractOperation { const commandOptions = { returnFieldSelector: null, documentsReturnedIn: 'nextBatch', + timeoutContext, ...this.options }; diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index fda3fa80dd6..afd05f5be36 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -6,6 +6,7 @@ import { MongoCompatibilityError } from '../error'; import { type OneOrMore } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils'; import { type CollationOptions, @@ -296,7 +297,11 @@ export class CreateIndexesOperation extends CommandOperation { return 'createIndexes'; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, 
+ timeoutContext: TimeoutContext + ): Promise { const options = this.options; const indexes = this.indexes; @@ -316,7 +321,7 @@ export class CreateIndexesOperation extends CommandOperation { // collation is set on each index, it should not be defined at the root this.options.collation = undefined; - await super.executeCommand(server, session, cmd); + await super.executeCommand(server, session, cmd, timeoutContext); const indexNames = indexes.map(index => index.name || ''); return indexNames; @@ -344,14 +349,21 @@ export class DropIndexOperation extends CommandOperation { return 'dropIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName }; - return await super.executeCommand(server, session, cmd); + return await super.executeCommand(server, session, cmd, timeoutContext); } } /** @public */ -export type ListIndexesOptions = AbstractCursorOptions; +export type ListIndexesOptions = AbstractCursorOptions & { + /** @internal */ + omitMaxTimeMS?: boolean; +}; /** @internal */ export class ListIndexesOperation extends CommandOperation { @@ -379,7 +391,8 @@ export class ListIndexesOperation extends CommandOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const serverWireVersion = maxWireVersion(server); @@ -393,7 +406,7 @@ export class ListIndexesOperation extends CommandOperation { command.comment = this.options.comment; } - return await super.executeCommand(server, session, command, CursorResponse); + return await super.executeCommand(server, session, command, timeoutContext, CursorResponse); } } diff --git a/src/operations/insert.ts b/src/operations/insert.ts index 35a050ed1ca..1a40763e313 
100644 --- a/src/operations/insert.ts +++ b/src/operations/insert.ts @@ -5,6 +5,7 @@ import { MongoInvalidArgumentError, MongoServerError } from '../error'; import type { InferIdType } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils'; import { WriteConcern } from '../write_concern'; import { BulkWriteOperation } from './bulk_write'; @@ -27,7 +28,11 @@ export class InsertOperation extends CommandOperation { return 'insert' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? options.ordered : true; const command: Document = { @@ -46,7 +51,7 @@ export class InsertOperation extends CommandOperation { command.comment = options.comment; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } @@ -73,9 +78,10 @@ export class InsertOneOperation extends InsertOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res = await super.execute(server, session); + const res = await super.execute(server, session, timeoutContext); if (res.code) throw new MongoServerError(res); if (res.writeErrors) { // This should be a WriteError but we can't change it now because of error hierarchy @@ -123,7 +129,8 @@ export class InsertManyOperation extends AbstractOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): 
Promise { const coll = this.collection; const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference }; @@ -137,7 +144,7 @@ export class InsertManyOperation extends AbstractOperation { ); try { - const res = await bulkWriteOperation.execute(server, session); + const res = await bulkWriteOperation.execute(server, session, timeoutContext); return { acknowledged: writeConcern?.w !== 0, insertedCount: res.insertedCount, diff --git a/src/operations/kill_cursors.ts b/src/operations/kill_cursors.ts index 356230e9c7a..72c6a04b276 100644 --- a/src/operations/kill_cursors.ts +++ b/src/operations/kill_cursors.ts @@ -2,6 +2,7 @@ import type { Long } from '../bson'; import { MongoRuntimeError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { type MongoDBNamespace, squashError } from '../utils'; import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation'; @@ -29,7 +30,11 @@ export class KillCursorsOperation extends AbstractOperation { return 'killCursors' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { if (server !== this.server) { throw new MongoRuntimeError('Killcursor must run on the same server operation began on'); } @@ -46,7 +51,10 @@ export class KillCursorsOperation extends AbstractOperation { cursors: [this.cursorId] }; try { - await server.command(this.ns, killCursorsCommand, { session }); + await server.command(this.ns, killCursorsCommand, { + session, + timeoutContext + }); } catch (error) { // The driver should never emit errors from killCursors, this is spec-ed behavior squashError(error); diff --git a/src/operations/list_collections.ts b/src/operations/list_collections.ts index 
e94300f1205..6b3296fcf00 100644 --- a/src/operations/list_collections.ts +++ b/src/operations/list_collections.ts @@ -1,8 +1,10 @@ import type { Binary, Document } from '../bson'; import { CursorResponse } from '../cmap/wire_protocol/responses'; +import { type CursorTimeoutContext, type CursorTimeoutMode } from '../cursor/abstract_cursor'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -15,6 +17,11 @@ export interface ListCollectionsOptions extends Omit { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { return await super.executeCommand( server, session, this.generateCommand(maxWireVersion(server)), + timeoutContext, CursorResponse ); } diff --git a/src/operations/list_databases.ts b/src/operations/list_databases.ts index 5ad9142a1a7..bd740d50c68 100644 --- a/src/operations/list_databases.ts +++ b/src/operations/list_databases.ts @@ -3,6 +3,7 @@ import type { Db } from '../db'; import { type TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { maxWireVersion, MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -41,7 +42,8 @@ export class ListDatabasesOperation extends CommandOperation { const cmd: Document = { listDatabases: 1 }; @@ -63,7 +65,12 @@ export class ListDatabasesOperation extends CommandOperation); + return await (super.executeCommand( + server, + session, + cmd, + timeoutContext + ) as 
Promise); } } diff --git a/src/operations/operation.ts b/src/operations/operation.ts index 12f168b76e3..1c5be203516 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -2,6 +2,7 @@ import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '.. import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import type { MongoDBNamespace } from '../utils'; export const Aspect = { @@ -31,7 +32,13 @@ export interface OperationOptions extends BSONSerializeOptions { bypassPinningCheck?: boolean; omitReadPreference?: boolean; - /** @internal TODO(NODE-5688): make this public */ + /** @internal Hint to `executeOperation` to omit maxTimeMS */ + omitMaxTimeMS?: boolean; + + /** + * @experimental + * Specifies the time an operation will run until it throws a timeout error + */ timeoutMS?: number; } @@ -57,6 +64,9 @@ export abstract class AbstractOperation { options: OperationOptions; + /** Specifies the time an operation will run until it throws a timeout error. 
*/ + timeoutMS?: number; + [kSession]: ClientSession | undefined; static aspects?: Set; @@ -80,7 +90,11 @@ export abstract class AbstractOperation { Command name should be stateless (should not use 'this' keyword) */ abstract get commandName(): string; - abstract execute(server: Server, session: ClientSession | undefined): Promise; + abstract execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise; hasAspect(aspect: symbol): boolean { const ctor = this.constructor as { aspects?: Set }; diff --git a/src/operations/profiling_level.ts b/src/operations/profiling_level.ts index 383062c2a40..7c860a244b7 100644 --- a/src/operations/profiling_level.ts +++ b/src/operations/profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -20,8 +21,12 @@ export class ProfilingLevelOperation extends CommandOperation { return 'profile' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - const doc = await super.executeCommand(server, session, { profile: -1 }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + const doc = await super.executeCommand(server, session, { profile: -1 }, timeoutContext); if (doc.ok === 1) { const was = doc.was; if (was === 0) return 'off'; diff --git a/src/operations/remove_user.ts b/src/operations/remove_user.ts index ced8e4e1cab..7f484ba89a3 100644 --- a/src/operations/remove_user.ts +++ b/src/operations/remove_user.ts @@ -1,6 +1,7 @@ import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; 
+import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -22,8 +23,12 @@ export class RemoveUserOperation extends CommandOperation { return 'dropUser' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { - await super.executeCommand(server, session, { dropUser: this.username }); + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { + await super.executeCommand(server, session, { dropUser: this.username }, timeoutContext); return true; } } diff --git a/src/operations/rename.ts b/src/operations/rename.ts index a27d4afe45a..883be282b64 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -29,7 +30,11 @@ export class RenameOperation extends CommandOperation { return 'renameCollection' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { // Build the command const renameCollection = this.collection.namespace; const toCollection = this.collection.s.namespace.withCollection(this.newName).toString(); @@ -42,7 +47,7 @@ export class RenameOperation extends CommandOperation { dropTarget: dropTarget }; - await super.executeCommand(server, session, command); + await super.executeCommand(server, session, command, 
timeoutContext); return new Collection(this.collection.s.db, this.newName, this.collection.s.options); } } diff --git a/src/operations/run_command.ts b/src/operations/run_command.ts index ad7d02c044f..db5c5a7c169 100644 --- a/src/operations/run_command.ts +++ b/src/operations/run_command.ts @@ -5,6 +5,7 @@ import { type TODO_NODE_3286 } from '../mongo_types'; import type { ReadPreferenceLike } from '../read_preference'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { MongoDBNamespace } from '../utils'; import { AbstractOperation } from './operation'; @@ -14,6 +15,13 @@ export type RunCommandOptions = { session?: ClientSession; /** The read preference */ readPreference?: ReadPreferenceLike; + /** + * @experimental + * Specifies the time an operation will run until it throws a timeout error + */ + timeoutMS?: number; + /** @internal */ + omitMaxTimeMS?: boolean; } & BSONSerializeOptions; /** @internal */ @@ -31,7 +39,11 @@ export class RunCommandOperation extends AbstractOperation { return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { this.server = server; const res: TODO_NODE_3286 = await server.command( this.ns, @@ -39,10 +51,12 @@ export class RunCommandOperation extends AbstractOperation { { ...this.options, readPreference: this.readPreference, - session + session, + timeoutContext }, this.options.responseType ); + return res; } } @@ -63,12 +77,17 @@ export class RunAdminCommandOperation extends AbstractOperation return 'runCommand' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { 
this.server = server; const res: TODO_NODE_3286 = await server.command(this.ns, this.command, { ...this.options, readPreference: this.readPreference, - session + session, + timeoutContext }); return res; } diff --git a/src/operations/search_indexes/create.ts b/src/operations/search_indexes/create.ts index 2ce66f4707e..2870868bc91 100644 --- a/src/operations/search_indexes/create.ts +++ b/src/operations/search_indexes/create.ts @@ -2,6 +2,7 @@ import type { Document } from '../../bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @@ -31,14 +32,21 @@ export class CreateSearchIndexesOperation extends AbstractOperation { return 'createSearchIndexes' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { createSearchIndexes: namespace.collection, indexes: this.descriptions }; - const res = await server.command(namespace, command, { session }); + const res = await server.command(namespace, command, { + session, + timeoutContext + }); const indexesCreated: Array<{ name: string }> = res?.indexesCreated ?? 
[]; return indexesCreated.map(({ name }) => name); diff --git a/src/operations/search_indexes/drop.ts b/src/operations/search_indexes/drop.ts index ee9acdf850e..28870d3220e 100644 --- a/src/operations/search_indexes/drop.ts +++ b/src/operations/search_indexes/drop.ts @@ -3,6 +3,7 @@ import type { Collection } from '../../collection'; import { MONGODB_ERROR_CODES, MongoServerError } from '../../error'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -18,7 +19,11 @@ export class DropSearchIndexOperation extends AbstractOperation { return 'dropSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command: Document = { @@ -30,7 +35,7 @@ export class DropSearchIndexOperation extends AbstractOperation { } try { - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); } catch (error) { const isNamespaceNotFoundError = error instanceof MongoServerError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound; diff --git a/src/operations/search_indexes/update.ts b/src/operations/search_indexes/update.ts index b6986da9410..e8701d2802e 100644 --- a/src/operations/search_indexes/update.ts +++ b/src/operations/search_indexes/update.ts @@ -2,6 +2,7 @@ import type { Document } from '../../bson'; import type { Collection } from '../../collection'; import type { Server } from '../../sdam/server'; import type { ClientSession } from '../../sessions'; +import { type TimeoutContext } from '../../timeout'; import { AbstractOperation } from '../operation'; /** @internal */ @@ -18,7 +19,11 @@ export 
class UpdateSearchIndexOperation extends AbstractOperation { return 'updateSearchIndex' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const namespace = this.collection.fullNamespace; const command = { updateSearchIndex: namespace.collection, @@ -26,7 +31,7 @@ export class UpdateSearchIndexOperation extends AbstractOperation { definition: this.definition }; - await server.command(namespace, command, { session }); + await server.command(namespace, command, { session, timeoutContext }); return; } } diff --git a/src/operations/set_profiling_level.ts b/src/operations/set_profiling_level.ts index 9969b2ea3c2..d76473f2632 100644 --- a/src/operations/set_profiling_level.ts +++ b/src/operations/set_profiling_level.ts @@ -2,6 +2,7 @@ import type { Db } from '../db'; import { MongoInvalidArgumentError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { enumToString } from '../utils'; import { CommandOperation, type CommandOperationOptions } from './command'; @@ -53,7 +54,8 @@ export class SetProfilingLevelOperation extends CommandOperation override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { const level = this.level; @@ -64,7 +66,7 @@ export class SetProfilingLevelOperation extends CommandOperation } // TODO(NODE-3483): Determine error to put here - await super.executeCommand(server, session, { profile: this.profile }); + await super.executeCommand(server, session, { profile: this.profile }, timeoutContext); return level; } } diff --git a/src/operations/stats.ts b/src/operations/stats.ts index 41c9faf6e24..aafd3bf1bac 100644 --- a/src/operations/stats.ts +++ 
b/src/operations/stats.ts @@ -2,6 +2,7 @@ import type { Document } from '../bson'; import type { Db } from '../db'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects } from './operation'; @@ -24,13 +25,17 @@ export class DbStatsOperation extends CommandOperation { return 'dbStats' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const command: Document = { dbStats: true }; if (this.options.scale != null) { command.scale = this.options.scale; } - return await super.executeCommand(server, session, command); + return await super.executeCommand(server, session, command, timeoutContext); } } diff --git a/src/operations/update.ts b/src/operations/update.ts index ba0ad6d95ff..5b6f396afec 100644 --- a/src/operations/update.ts +++ b/src/operations/update.ts @@ -4,6 +4,7 @@ import { MongoCompatibilityError, MongoInvalidArgumentError, MongoServerError } import type { InferIdType, TODO_NODE_3286 } from '../mongo_types'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { hasAtomicOperators, type MongoDBNamespace } from '../utils'; import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command'; import { Aspect, defineAspects, type Hint } from './operation'; @@ -91,7 +92,11 @@ export class UpdateOperation extends CommandOperation { return this.statements.every(op => op.multi == null || op.multi === false); } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | 
undefined, + timeoutContext: TimeoutContext + ): Promise { const options = this.options ?? {}; const ordered = typeof options.ordered === 'boolean' ? options.ordered : true; const command: Document = { @@ -122,7 +127,7 @@ export class UpdateOperation extends CommandOperation { } } - const res = await super.executeCommand(server, session, command); + const res = await super.executeCommand(server, session, command, timeoutContext); return res; } } @@ -143,9 +148,10 @@ export class UpdateOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -177,9 +183,10 @@ export class UpdateManyOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); @@ -230,9 +237,10 @@ export class ReplaceOneOperation extends UpdateOperation { override async execute( server: Server, - session: ClientSession | undefined + session: ClientSession | undefined, + timeoutContext: TimeoutContext ): Promise { - const res: TODO_NODE_3286 = await super.execute(server, session); + const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext); if (this.explain != null) return res; if (res.code) throw new 
MongoServerError(res); if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]); diff --git a/src/operations/validate_collection.ts b/src/operations/validate_collection.ts index 4880a703a7a..16ae4cad9e0 100644 --- a/src/operations/validate_collection.ts +++ b/src/operations/validate_collection.ts @@ -3,6 +3,7 @@ import type { Document } from '../bson'; import { MongoUnexpectedServerResponseError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { CommandOperation, type CommandOperationOptions } from './command'; /** @public */ @@ -37,10 +38,14 @@ export class ValidateCollectionOperation extends CommandOperation { return 'validate' as const; } - override async execute(server: Server, session: ClientSession | undefined): Promise { + override async execute( + server: Server, + session: ClientSession | undefined, + timeoutContext: TimeoutContext + ): Promise { const collectionName = this.collectionName; - const doc = await super.executeCommand(server, session, this.command); + const doc = await super.executeCommand(server, session, this.command, timeoutContext); if (doc.result != null && typeof doc.result !== 'string') throw new MongoUnexpectedServerResponseError('Error with validation data'); if (doc.result != null && doc.result.match(/exception|corrupt/) != null) diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 4c1d37519ad..47a390277d6 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -40,6 +40,7 @@ import type { ServerApi } from '../mongo_client'; import { TypedEventEmitter } from '../mongo_types'; import type { GetMoreOptions } from '../operations/get_more'; import type { ClientSession } from '../sessions'; +import { type TimeoutContext } from '../timeout'; import { isTransactionCommand } from '../transactions'; import { type EventEmitterWithState, @@ -104,6 +105,11 @@ export type ServerEvents = { } & 
ConnectionPoolEvents & EventEmitterWithState; +/** @internal */ +export type ServerCommandOptions = Omit & { + timeoutContext: TimeoutContext; +}; + /** @internal */ export class Server extends TypedEventEmitter { /** @internal */ @@ -267,20 +273,20 @@ export class Server extends TypedEventEmitter { public async command( ns: MongoDBNamespace, command: Document, - options: CommandOptions | undefined, + options: ServerCommandOptions, responseType: T | undefined ): Promise>; public async command( ns: MongoDBNamespace, command: Document, - options?: CommandOptions + options: ServerCommandOptions ): Promise; public async command( ns: MongoDBNamespace, cmd: Document, - options: CommandOptions, + options: ServerCommandOptions, responseType?: MongoDBResponseConstructor ): Promise { if (ns.db == null || typeof ns === 'string') { @@ -305,13 +311,17 @@ export class Server extends TypedEventEmitter { delete finalOptions.readPreference; } + if (this.description.iscryptd) { + finalOptions.omitMaxTimeMS = true; + } + const session = finalOptions.session; let conn = session?.pinnedConnection; this.incrementOperationCount(); if (conn == null) { try { - conn = await this.pool.checkOut(); + conn = await this.pool.checkOut(options); if (this.loadBalanced && isPinnableCommand(cmd, session)) { session?.pin(conn); } diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index 73f4d6354ad..a3b7c506ef6 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -75,6 +75,8 @@ export class ServerDescription { maxWriteBatchSize: number | null; /** The max bson object size. */ maxBsonObjectSize: number | null; + /** Indicates server is a mongocryptd instance. */ + iscryptd: boolean; // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level $clusterTime?: ClusterTime; @@ -123,6 +125,7 @@ export class ServerDescription { this.primary = hello?.primary ?? null; this.me = hello?.me?.toLowerCase() ?? 
null; this.$clusterTime = hello?.$clusterTime ?? null; + this.iscryptd = Boolean(hello?.iscryptd); } get hostAddress(): HostAddress { @@ -176,6 +179,7 @@ export class ServerDescription { return ( other != null && + other.iscryptd === this.iscryptd && errorStrictEqual(this.error, other.error) && this.type === other.type && this.minWireVersion === other.minWireVersion && diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 73b0e92a09a..2ea7d3244f1 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -24,6 +24,7 @@ import { type MongoDriverError, MongoError, MongoErrorLabel, + MongoOperationTimeoutError, MongoRuntimeError, MongoServerSelectionError, MongoTopologyClosedError @@ -33,7 +34,7 @@ import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mong import { TypedEventEmitter } from '../mongo_types'; import { ReadPreference, type ReadPreferenceLike } from '../read_preference'; import type { ClientSession } from '../sessions'; -import { Timeout, TimeoutError } from '../timeout'; +import { Timeout, TimeoutContext, TimeoutError } from '../timeout'; import type { Transaction } from '../transactions'; import { type Callback, @@ -107,7 +108,6 @@ export interface ServerSelectionRequest { resolve: (server: Server) => void; reject: (error: MongoError) => void; [kCancelled]?: boolean; - timeout: Timeout; operationName: string; waitingLogged: boolean; previousServer?: ServerDescription; @@ -178,8 +178,11 @@ export interface SelectServerOptions { session?: ClientSession; operationName: string; previousServer?: ServerDescription; - /** @internal*/ - timeout?: Timeout; + /** + * @internal + * TODO(NODE-6496): Make this required by making ChangeStream use LegacyTimeoutContext + * */ + timeoutContext?: TimeoutContext; } /** @public */ @@ -457,17 +460,28 @@ export class Topology extends TypedEventEmitter { } } + const serverSelectionTimeoutMS = this.client.s.options.serverSelectionTimeoutMS; const readPreference = options.readPreference ?? 
ReadPreference.primary; - const selectServerOptions = { operationName: 'ping', ...options }; + const timeoutContext = TimeoutContext.create({ + // TODO(NODE-6448): auto-connect ignores timeoutMS; potential future feature + timeoutMS: undefined, + serverSelectionTimeoutMS, + waitQueueTimeoutMS: this.client.s.options.waitQueueTimeoutMS + }); + const selectServerOptions = { + operationName: 'ping', + ...options, + timeoutContext + }; + try { const server = await this.selectServer( readPreferenceServerSelector(readPreference), selectServerOptions ); - const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true; - if (!skipPingOnConnect && server && this.s.credentials) { - await server.command(ns('admin.$cmd'), { ping: 1 }, {}); + if (!skipPingOnConnect && this.s.credentials) { + await server.command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }); stateTransition(this, STATE_CONNECTED); this.emit(Topology.OPEN, this); this.emit(Topology.CONNECT, this); @@ -556,6 +570,11 @@ export class Topology extends TypedEventEmitter { new ServerSelectionStartedEvent(selector, this.description, options.operationName) ); } + let timeout; + if (options.timeoutContext) timeout = options.timeoutContext.serverSelectionTimeout; + else { + timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 0); + } const isSharded = this.description.type === TopologyType.Sharded; const session = options.session; @@ -578,11 +597,12 @@ export class Topology extends TypedEventEmitter { ) ); } + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); return transaction.server; } const { promise: serverPromise, resolve, reject } = promiseWithResolvers(); - const timeout = Timeout.expires(options.serverSelectionTimeoutMS ?? 
0); + const waitQueueMember: ServerSelectionRequest = { serverSelector, topologyDescription: this.description, @@ -590,7 +610,6 @@ export class Topology extends TypedEventEmitter { transaction, resolve, reject, - timeout, startTime: now(), operationName: options.operationName, waitingLogged: false, @@ -601,14 +620,18 @@ export class Topology extends TypedEventEmitter { processWaitQueue(this); try { - return await Promise.race([serverPromise, waitQueueMember.timeout]); + timeout?.throwIfExpired(); + const server = await (timeout ? Promise.race([serverPromise, timeout]) : serverPromise); + if (options.timeoutContext?.csotEnabled() && server.description.minRoundTripTime !== 0) { + options.timeoutContext.minRoundTripTime = server.description.minRoundTripTime; + } + return server; } catch (error) { if (TimeoutError.is(error)) { // Timeout waitQueueMember[kCancelled] = true; - timeout.clear(); const timeoutError = new MongoServerSelectionError( - `Server selection timed out after ${options.serverSelectionTimeoutMS} ms`, + `Server selection timed out after ${timeout?.duration} ms`, this.description ); if ( @@ -628,10 +651,17 @@ export class Topology extends TypedEventEmitter { ); } + if (options.timeoutContext?.csotEnabled()) { + throw new MongoOperationTimeoutError('Timed out during server selection', { + cause: timeoutError + }); + } throw timeoutError; } // Other server selection error throw error; + } finally { + if (options.timeoutContext?.clearServerSelectionTimeout) timeout?.clear(); } } /** @@ -889,8 +919,6 @@ function drainWaitQueue(queue: List, drainError: MongoDr continue; } - waitQueueMember.timeout.clear(); - if (!waitQueueMember[kCancelled]) { if ( waitQueueMember.mongoLogger?.willLog( @@ -944,7 +972,6 @@ function processWaitQueue(topology: Topology) { ) : serverDescriptions; } catch (selectorError) { - waitQueueMember.timeout.clear(); if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, @@ -1032,8 +1059,6 @@ function 
processWaitQueue(topology: Topology) { transaction.pinServer(selectedServer); } - waitQueueMember.timeout.clear(); - if ( topology.client.mongoLogger?.willLog( MongoLoggableComponent.SERVER_SELECTION, diff --git a/src/sessions.ts b/src/sessions.ts index 4029744dcac..c53bc2e485d 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -29,6 +29,7 @@ import { ReadConcernLevel } from './read_concern'; import { ReadPreference } from './read_preference'; import { type AsyncDisposable, configureResourceManagement } from './resource_management'; import { _advanceClusterTime, type ClusterTime, TopologyType } from './sdam/common'; +import { TimeoutContext } from './timeout'; import { isTransactionCommand, Transaction, @@ -58,8 +59,12 @@ export interface ClientSessionOptions { snapshot?: boolean; /** The default TransactionOptions to use for transactions started on this session. */ defaultTransactionOptions?: TransactionOptions; - /** @internal - * The value of timeoutMS used for CSOT. Used to override client timeoutMS */ + /** + * @public + * @experimental + * An overriding timeoutMS value to use for a client-side timeout. + * If not provided the session uses the timeoutMS specified on the MongoClient. 
+ */ defaultTimeoutMS?: number; /** @internal */ @@ -98,6 +103,9 @@ export interface EndSessionOptions { error?: AnyError; force?: boolean; forceClear?: boolean; + + /** Specifies the time an operation will run until it throws a timeout error */ + timeoutMS?: number; } /** @@ -115,7 +123,7 @@ export class ClientSession /** @internal */ sessionPool: ServerSessionPool; hasEnded: boolean; - clientOptions?: MongoOptions; + clientOptions: MongoOptions; supports: { causalConsistency: boolean }; clusterTime?: ClusterTime; operationTime?: Timestamp; @@ -138,9 +146,15 @@ export class ClientSession [kPinnedConnection]?: Connection; /** @internal */ [kTxnNumberIncrement]: number; - /** @internal */ + /** + * @experimental + * Specifies the time an operation in a given `ClientSession` will run until it throws a timeout error + */ timeoutMS?: number; + /** @internal */ + public timeoutContext: TimeoutContext | null = null; + /** * Create a client session. * @internal @@ -153,7 +167,7 @@ export class ClientSession client: MongoClient, sessionPool: ServerSessionPool, options: ClientSessionOptions, - clientOptions?: MongoOptions + clientOptions: MongoOptions ) { super(); @@ -273,8 +287,13 @@ export class ClientSession async endSession(options?: EndSessionOptions): Promise { try { if (this.inTransaction()) { - await this.abortTransaction(); + await this.abortTransaction({ ...options, throwTimeout: true }); } + } catch (error) { + // spec indicates that we should ignore all errors for `endSessions` + if (error.name === 'MongoOperationTimeoutError') throw error; + squashError(error); + } finally { if (!this.hasEnded) { const serverSession = this[kServerSession]; if (serverSession != null) { @@ -290,10 +309,6 @@ export class ClientSession this.hasEnded = true; this.emit('ended', this); } - } catch (error) { - // spec indicates that we should ignore all errors for `endSessions` - squashError(error); - } finally { maybeClearPinnedConnection(this, { force: true, ...options }); } } @@ 
-446,8 +461,10 @@ export class ClientSession /** * Commits the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. */ - async commitTransaction(): Promise { + async commitTransaction(options?: { timeoutMS?: number }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -474,13 +491,31 @@ export class ClientSession maxTimeMS?: number; } = { commitTransaction: 1 }; + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + const wc = this.transaction.options.writeConcern ?? this.clientOptions?.writeConcern; if (wc != null) { - WriteConcern.apply(command, { wtimeoutMS: 10000, w: 'majority', ...wc }); + if (timeoutMS == null && this.timeoutContext == null) { + WriteConcern.apply(command, { wtimeoutMS: 10000, w: 'majority', ...wc }); + } else { + const wcKeys = Object.keys(wc); + if (wcKeys.length > 2 || (!wcKeys.includes('wtimeoutMS') && !wcKeys.includes('wTimeoutMS'))) + // if the write concern was specified with wTimeoutMS, then we set both wtimeoutMS and wTimeoutMS, guaranteeing at least two keys, so if we have more than two keys, then we can automatically assume that we should add the write concern to the command. 
If it has 2 or fewer keys, we need to check that those keys aren't the wtimeoutMS or wTimeoutMS options before we add the write concern to the command + WriteConcern.apply(command, { ...wc, wtimeoutMS: undefined }); + } } if (this.transaction.state === TxnState.TRANSACTION_COMMITTED || this.commitAttempted) { - WriteConcern.apply(command, { wtimeoutMS: 10000, ...wc, w: 'majority' }); + if (timeoutMS == null && this.timeoutContext == null) { + WriteConcern.apply(command, { wtimeoutMS: 10000, ...wc, w: 'majority' }); + } else { + WriteConcern.apply(command, { w: 'majority', ...wc, wtimeoutMS: undefined }); + } } if (typeof this.transaction.options.maxTimeMS === 'number') { @@ -497,8 +532,18 @@ export class ClientSession bypassPinningCheck: true }); + const timeoutContext = + this.timeoutContext ?? + (typeof timeoutMS === 'number' + ? TimeoutContext.create({ + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS, + timeoutMS + }) + : null); + try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); this.commitAttempted = undefined; return; } catch (firstCommitError) { @@ -516,7 +561,8 @@ export class ClientSession session: this, readPreference: ReadPreference.primary, bypassPinningCheck: true - }) + }), + timeoutContext ); return; } catch (retryCommitError) { @@ -549,8 +595,13 @@ export class ClientSession /** * Aborts the currently active transaction in this session. + * + * @param options - Optional options, can be used to override `defaultTimeoutMS`. 
*/ - async abortTransaction(): Promise { + async abortTransaction(options?: { timeoutMS?: number }): Promise; + /** @internal */ + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise; + async abortTransaction(options?: { timeoutMS?: number; throwTimeout?: true }): Promise { if (this.transaction.state === TxnState.NO_TRANSACTION) { throw new MongoTransactionError('No transaction started'); } @@ -580,8 +631,26 @@ export class ClientSession recoveryToken?: Document; } = { abortTransaction: 1 }; + const timeoutMS = + typeof options?.timeoutMS === 'number' + ? options.timeoutMS + : this.timeoutContext?.csotEnabled() + ? this.timeoutContext.timeoutMS // refresh timeoutMS for abort operation + : typeof this.timeoutMS === 'number' + ? this.timeoutMS + : null; + + const timeoutContext = + timeoutMS != null + ? TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; + const wc = this.transaction.options.writeConcern ?? 
this.clientOptions?.writeConcern; - if (wc != null) { + if (wc != null && timeoutMS == null) { WriteConcern.apply(command, { wtimeoutMS: 10000, w: 'majority', ...wc }); } @@ -596,17 +665,26 @@ export class ClientSession }); try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); this.unpin(); return; } catch (firstAbortError) { this.unpin(); + if (firstAbortError.name === 'MongoRuntimeError') throw firstAbortError; + if (options?.throwTimeout && firstAbortError.name === 'MongoOperationTimeoutError') { + throw firstAbortError; + } + if (firstAbortError instanceof MongoError && isRetryableWriteError(firstAbortError)) { try { - await executeOperation(this.client, operation); + await executeOperation(this.client, operation, timeoutContext); return; - } catch { + } catch (secondAbortError) { + if (secondAbortError.name === 'MongoRuntimeError') throw secondAbortError; + if (options?.throwTimeout && secondAbortError.name === 'MongoOperationTimeoutError') { + throw secondAbortError; + } // we do not retry the retry } } @@ -636,6 +714,9 @@ export class ClientSession * `Promise.allSettled`, `Promise.race`, etc to parallelize operations inside a transaction is * undefined behaviour. * + * **IMPORTANT:** When running an operation inside a `withTransaction` callback, if it is not + * provided the explicit session in its options, it will not be part of the transaction and it will not respect timeoutMS. + * * * @remarks * - If all operations successfully complete and the `commitTransaction` operation is successful, then the provided function will return the result of the provided function. @@ -661,96 +742,119 @@ export class ClientSession */ async withTransaction( fn: WithTransactionCallback, - options?: TransactionOptions + options?: TransactionOptions & { + /** + * Configures a timeoutMS expiry for the entire withTransactionCallback. 
+ * + * @remarks + * - The remaining timeout will not be applied to callback operations that do not use the ClientSession. + * - Overriding timeoutMS for operations executed using the explicit session inside the provided callback will result in a client-side error. + */ + timeoutMS?: number; + } ): Promise { const MAX_TIMEOUT = 120000; - const startTime = now(); - let committed = false; - let result: any; + const timeoutMS = options?.timeoutMS ?? this.timeoutMS ?? null; + this.timeoutContext = + timeoutMS != null + ? TimeoutContext.create({ + timeoutMS, + serverSelectionTimeoutMS: this.clientOptions.serverSelectionTimeoutMS, + socketTimeoutMS: this.clientOptions.socketTimeoutMS + }) + : null; - while (!committed) { - this.startTransaction(options); // may throw on error + const startTime = this.timeoutContext?.csotEnabled() ? this.timeoutContext.start : now(); - try { - const promise = fn(this); - if (!isPromiseLike(promise)) { - throw new MongoInvalidArgumentError( - 'Function provided to `withTransaction` must return a Promise' - ); - } + let committed = false; + let result: any; - result = await promise; + try { + while (!committed) { + this.startTransaction(options); // may throw on error - if ( - this.transaction.state === TxnState.NO_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_COMMITTED || - this.transaction.state === TxnState.TRANSACTION_ABORTED - ) { - // Assume callback intentionally ended the transaction - return result; - } - } catch (fnError) { - if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { - await this.abortTransaction(); - throw fnError; - } + try { + const promise = fn(this); + if (!isPromiseLike(promise)) { + throw new MongoInvalidArgumentError( + 'Function provided to `withTransaction` must return a Promise' + ); + } - if ( - this.transaction.state === TxnState.STARTING_TRANSACTION || - this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS - ) { - await this.abortTransaction(); - 
} + result = await promise; - if ( - fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT - ) { - continue; - } - - throw fnError; - } + if ( + this.transaction.state === TxnState.NO_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_COMMITTED || + this.transaction.state === TxnState.TRANSACTION_ABORTED + ) { + // Assume callback intentionally ended the transaction + return result; + } + } catch (fnError) { + if (!(fnError instanceof MongoError) || fnError instanceof MongoInvalidArgumentError) { + await this.abortTransaction(); + throw fnError; + } - while (!committed) { - try { - /* - * We will rely on ClientSession.commitTransaction() to - * apply a majority write concern if commitTransaction is - * being retried (see: DRIVERS-601) - */ - await this.commitTransaction(); - committed = true; - } catch (commitError) { - /* - * Note: a maxTimeMS error will have the MaxTimeMSExpired - * code (50) and can be reported as a top-level error or - * inside writeConcernError, ex. 
- * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } - * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } - */ if ( - !isMaxTimeMSExpiredError(commitError) && - commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && - now() - startTime < MAX_TIMEOUT + this.transaction.state === TxnState.STARTING_TRANSACTION || + this.transaction.state === TxnState.TRANSACTION_IN_PROGRESS ) { - continue; + await this.abortTransaction(); } if ( - commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && - now() - startTime < MAX_TIMEOUT + fnError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) ) { - break; + continue; } - throw commitError; + throw fnError; + } + + while (!committed) { + try { + /* + * We will rely on ClientSession.commitTransaction() to + * apply a majority write concern if commitTransaction is + * being retried (see: DRIVERS-601) + */ + await this.commitTransaction(); + committed = true; + } catch (commitError) { + /* + * Note: a maxTimeMS error will have the MaxTimeMSExpired + * code (50) and can be reported as a top-level error or + * inside writeConcernError, ex. 
+ * { ok:0, code: 50, codeName: 'MaxTimeMSExpired' } + * { ok:1, writeConcernError: { code: 50, codeName: 'MaxTimeMSExpired' } } + */ + if ( + !isMaxTimeMSExpiredError(commitError) && + commitError.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + continue; + } + + if ( + commitError.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && + (this.timeoutContext != null || now() - startTime < MAX_TIMEOUT) + ) { + break; + } + + throw commitError; + } } } + return result; + } finally { + this.timeoutContext = null; } - - return result; } } diff --git a/src/timeout.ts b/src/timeout.ts index cd48ec385da..47c27c7b90e 100644 --- a/src/timeout.ts +++ b/src/timeout.ts @@ -1,16 +1,19 @@ import { clearTimeout, setTimeout } from 'timers'; -import { MongoInvalidArgumentError } from './error'; -import { noop } from './utils'; +import { MongoInvalidArgumentError, MongoOperationTimeoutError, MongoRuntimeError } from './error'; +import { type ClientSession } from './sessions'; +import { csotMin, noop } from './utils'; /** @internal */ export class TimeoutError extends Error { + duration: number; override get name(): 'TimeoutError' { return 'TimeoutError'; } - constructor(message: string, options?: { cause?: Error }) { + constructor(message: string, options: { cause?: Error; duration: number }) { super(message, options); + this.duration = options.duration; } static is(error: unknown): error is TimeoutError { @@ -29,25 +32,38 @@ type Reject = Parameters>[0]>[1]; * if interacted with exclusively through its public API * */ export class Timeout extends Promise { - get [Symbol.toStringTag](): 'MongoDBTimeout' { - return 'MongoDBTimeout'; - } - private id?: NodeJS.Timeout; public readonly start: number; public ended: number | null = null; public duration: number; - public timedOut = false; + private timedOut = false; + public cleared = false; + + get remainingTime(): number { + if 
(this.timedOut) return 0; + if (this.duration === 0) return Infinity; + return this.start + this.duration - Math.trunc(performance.now()); + } + + get timeElapsed(): number { + return Math.trunc(performance.now()) - this.start; + } /** Create a new timeout that expires in `duration` ms */ - private constructor(executor: Executor = () => null, duration: number, unref = false) { - let reject!: Reject; + private constructor( + executor: Executor = () => null, + options?: { duration: number; unref?: true; rejection?: Error } + ) { + const duration = options?.duration ?? 0; + const unref = !!options?.unref; + const rejection = options?.rejection; if (duration < 0) { throw new MongoInvalidArgumentError('Cannot create a Timeout with a negative duration'); } + let reject!: Reject; super((_, promiseReject) => { reject = promiseReject; @@ -57,16 +73,20 @@ export class Timeout extends Promise { this.duration = duration; this.start = Math.trunc(performance.now()); - if (this.duration > 0) { + if (rejection == null && this.duration > 0) { this.id = setTimeout(() => { this.ended = Math.trunc(performance.now()); this.timedOut = true; - reject(new TimeoutError(`Expired after ${duration}ms`)); + reject(new TimeoutError(`Expired after ${duration}ms`, { duration })); }, this.duration); if (typeof this.id.unref === 'function' && unref) { // Ensure we do not keep the Node.js event loop running this.id.unref(); } + } else if (rejection != null) { + this.ended = Math.trunc(performance.now()); + this.timedOut = true; + reject(rejection); } } @@ -76,20 +96,287 @@ export class Timeout extends Promise { clear(): void { clearTimeout(this.id); this.id = undefined; + this.timedOut = false; + this.cleared = true; } - public static expires(durationMS: number, unref?: boolean): Timeout { - return new Timeout(undefined, durationMS, unref); + throwIfExpired(): void { + if (this.timedOut) throw new TimeoutError('Timed out', { duration: this.duration }); } - static is(timeout: unknown): timeout is 
Timeout { - return ( - typeof timeout === 'object' && - timeout != null && - Symbol.toStringTag in timeout && - timeout[Symbol.toStringTag] === 'MongoDBTimeout' && - 'then' in timeout && - typeof timeout.then === 'function' - ); + public static expires(duration: number, unref?: true): Timeout { + return new Timeout(undefined, { duration, unref }); + } + + static override reject(rejection?: Error): Timeout { + return new Timeout(undefined, { duration: 0, unref: true, rejection }); + } +} + +/** @internal */ +export type TimeoutContextOptions = (LegacyTimeoutContextOptions | CSOTTimeoutContextOptions) & { + session?: ClientSession; +}; + +/** @internal */ +export type LegacyTimeoutContextOptions = { + serverSelectionTimeoutMS: number; + waitQueueTimeoutMS: number; + socketTimeoutMS?: number; +}; + +/** @internal */ +export type CSOTTimeoutContextOptions = { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; +}; + +function isLegacyTimeoutContextOptions(v: unknown): v is LegacyTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'waitQueueTimeoutMS' in v && + typeof v.waitQueueTimeoutMS === 'number' + ); +} + +function isCSOTTimeoutContextOptions(v: unknown): v is CSOTTimeoutContextOptions { + return ( + v != null && + typeof v === 'object' && + 'serverSelectionTimeoutMS' in v && + typeof v.serverSelectionTimeoutMS === 'number' && + 'timeoutMS' in v && + typeof v.timeoutMS === 'number' + ); +} + +/** @internal */ +export abstract class TimeoutContext { + static create(options: TimeoutContextOptions): TimeoutContext { + if (options.session?.timeoutContext != null) return options.session?.timeoutContext; + if (isCSOTTimeoutContextOptions(options)) return new CSOTTimeoutContext(options); + else if (isLegacyTimeoutContextOptions(options)) return new LegacyTimeoutContext(options); + else throw new 
MongoRuntimeError('Unrecognized options'); + } + + abstract get maxTimeMS(): number | null; + + abstract get serverSelectionTimeout(): Timeout | null; + + abstract get connectionCheckoutTimeout(): Timeout | null; + + abstract get clearServerSelectionTimeout(): boolean; + + abstract get clearConnectionCheckoutTimeout(): boolean; + + abstract get timeoutForSocketWrite(): Timeout | null; + + abstract get timeoutForSocketRead(): Timeout | null; + + abstract csotEnabled(): this is CSOTTimeoutContext; + + abstract refresh(): void; + + abstract clear(): void; + + /** Returns a new instance of the TimeoutContext, with all timeouts refreshed and restarted. */ + abstract refreshed(): TimeoutContext; +} + +/** @internal */ +export class CSOTTimeoutContext extends TimeoutContext { + timeoutMS: number; + serverSelectionTimeoutMS: number; + socketTimeoutMS?: number; + + clearConnectionCheckoutTimeout: boolean; + clearServerSelectionTimeout: boolean; + + private _serverSelectionTimeout?: Timeout | null; + private _connectionCheckoutTimeout?: Timeout | null; + public minRoundTripTime = 0; + public start: number; + + constructor(options: CSOTTimeoutContextOptions) { + super(); + this.start = Math.trunc(performance.now()); + + this.timeoutMS = options.timeoutMS; + + this.serverSelectionTimeoutMS = options.serverSelectionTimeoutMS; + + this.socketTimeoutMS = options.socketTimeoutMS; + + this.clearServerSelectionTimeout = false; + this.clearConnectionCheckoutTimeout = true; + } + + get maxTimeMS(): number { + return this.remainingTimeMS - this.minRoundTripTime; + } + + get remainingTimeMS() { + const timePassed = Math.trunc(performance.now()) - this.start; + return this.timeoutMS <= 0 ? 
Infinity : this.timeoutMS - timePassed; + } + + csotEnabled(): this is CSOTTimeoutContext { + return true; + } + + get serverSelectionTimeout(): Timeout | null { + // check for undefined + if (typeof this._serverSelectionTimeout !== 'object' || this._serverSelectionTimeout?.cleared) { + const { remainingTimeMS, serverSelectionTimeoutMS } = this; + if (remainingTimeMS <= 0) + return Timeout.reject( + new MongoOperationTimeoutError(`Timed out in server selection after ${this.timeoutMS}ms`) + ); + const usingServerSelectionTimeoutMS = + serverSelectionTimeoutMS !== 0 && + csotMin(remainingTimeMS, serverSelectionTimeoutMS) === serverSelectionTimeoutMS; + if (usingServerSelectionTimeoutMS) { + this._serverSelectionTimeout = Timeout.expires(serverSelectionTimeoutMS); + } else { + if (remainingTimeMS > 0 && Number.isFinite(remainingTimeMS)) { + this._serverSelectionTimeout = Timeout.expires(remainingTimeMS); + } else { + this._serverSelectionTimeout = null; + } + } + } + + return this._serverSelectionTimeout; + } + + get connectionCheckoutTimeout(): Timeout | null { + if ( + typeof this._connectionCheckoutTimeout !== 'object' || + this._connectionCheckoutTimeout?.cleared + ) { + if (typeof this._serverSelectionTimeout === 'object') { + // null or Timeout + this._connectionCheckoutTimeout = this._serverSelectionTimeout; + } else { + throw new MongoRuntimeError( + 'Unreachable. 
If you are seeing this error, please file a ticket on the NODE driver project on Jira' + ); + } + } + return this._connectionCheckoutTimeout; + } + + get timeoutForSocketWrite(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket write')); + } + + get timeoutForSocketRead(): Timeout | null { + const { remainingTimeMS } = this; + if (!Number.isFinite(remainingTimeMS)) return null; + if (remainingTimeMS > 0) return Timeout.expires(remainingTimeMS); + return Timeout.reject(new MongoOperationTimeoutError('Timed out before socket read')); + } + + refresh(): void { + this.start = Math.trunc(performance.now()); + this.minRoundTripTime = 0; + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } + + clear(): void { + this._serverSelectionTimeout?.clear(); + this._connectionCheckoutTimeout?.clear(); + } + + /** + * @internal + * Throws a MongoOperationTimeoutError if the context has expired. + * If the context has not expired, returns the `remainingTimeMS` + **/ + getRemainingTimeMSOrThrow(message?: string): number { + const { remainingTimeMS } = this; + if (remainingTimeMS <= 0) + throw new MongoOperationTimeoutError(message ?? `Expired after ${this.timeoutMS}ms`); + return remainingTimeMS; + } + + /** + * @internal + * This method is intended to be used in situations where concurrent operations are on the same deadline, but cannot share a single `TimeoutContext` instance. + * Returns a new instance of `CSOTTimeoutContext` constructed with identical options, but setting the `start` property to `this.start`. 
+ */ + clone(): CSOTTimeoutContext { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: this.timeoutMS, + serverSelectionTimeoutMS: this.serverSelectionTimeoutMS + }); + timeoutContext.start = this.start; + return timeoutContext; + } + + override refreshed(): CSOTTimeoutContext { + return new CSOTTimeoutContext(this); + } +} + +/** @internal */ +export class LegacyTimeoutContext extends TimeoutContext { + options: LegacyTimeoutContextOptions; + clearServerSelectionTimeout: boolean; + clearConnectionCheckoutTimeout: boolean; + + constructor(options: LegacyTimeoutContextOptions) { + super(); + this.options = options; + this.clearServerSelectionTimeout = true; + this.clearConnectionCheckoutTimeout = true; + } + + csotEnabled(): this is CSOTTimeoutContext { + return false; + } + + get serverSelectionTimeout(): Timeout | null { + if (this.options.serverSelectionTimeoutMS != null && this.options.serverSelectionTimeoutMS > 0) + return Timeout.expires(this.options.serverSelectionTimeoutMS); + return null; + } + + get connectionCheckoutTimeout(): Timeout | null { + if (this.options.waitQueueTimeoutMS != null && this.options.waitQueueTimeoutMS > 0) + return Timeout.expires(this.options.waitQueueTimeoutMS); + return null; + } + + get timeoutForSocketWrite(): Timeout | null { + return null; + } + + get timeoutForSocketRead(): Timeout | null { + return null; + } + + refresh(): void { + return; + } + + clear(): void { + return; + } + + get maxTimeMS() { + return null; + } + + override refreshed(): LegacyTimeoutContext { + return new LegacyTimeoutContext(this.options); } } diff --git a/src/transactions.ts b/src/transactions.ts index 53dcb842084..308d0ad90f6 100644 --- a/src/transactions.ts +++ b/src/transactions.ts @@ -60,7 +60,7 @@ const COMMITTED_STATES: Set = new Set([ * Configuration options for a transaction. 
* @public */ -export interface TransactionOptions extends CommandOperationOptions { +export interface TransactionOptions extends Omit { // TODO(NODE-3344): These options use the proper class forms of these settings, it should accept the basic enum values too /** A default read concern for commands in this transaction */ readConcern?: ReadConcernLike; diff --git a/src/utils.ts b/src/utils.ts index 6bc1b1d3008..c23161612a8 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -26,7 +26,6 @@ import { MongoParseError, MongoRuntimeError } from './error'; -import type { Explain, ExplainVerbosity } from './explain'; import type { MongoClient } from './mongo_client'; import type { CommandOperationOptions, OperationParent } from './operations/command'; import type { Hint, OperationOptions } from './operations/operation'; @@ -36,6 +35,7 @@ import { ServerType } from './sdam/common'; import type { Server } from './sdam/server'; import type { Topology } from './sdam/topology'; import type { ClientSession } from './sessions'; +import { type TimeoutContextOptions } from './timeout'; import { WriteConcern } from './write_concern'; /** @@ -245,32 +245,6 @@ export function decorateWithReadConcern( } } -/** - * Applies an explain to a given command. 
- * @internal - * - * @param command - the command on which to apply the explain - * @param options - the options containing the explain verbosity - */ -export function decorateWithExplain( - command: Document, - explain: Explain -): { - explain: Document; - verbosity: ExplainVerbosity; - maxTimeMS?: number; -} { - type ExplainCommand = ReturnType; - const { verbosity, maxTimeMS } = explain; - const baseCommand: ExplainCommand = { explain: command, verbosity }; - - if (typeof maxTimeMS === 'number') { - baseCommand.maxTimeMS = maxTimeMS; - } - - return baseCommand; -} - /** * @internal */ @@ -515,9 +489,25 @@ export function hasAtomicOperators(doc: Document | Document[]): boolean { return keys.length > 0 && keys[0][0] === '$'; } +export function resolveTimeoutOptions>( + client: MongoClient, + options: T +): T & + Pick< + MongoClient['s']['options'], + 'timeoutMS' | 'serverSelectionTimeoutMS' | 'waitQueueTimeoutMS' | 'socketTimeoutMS' + > { + const { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS } = + client.s.options; + return { socketTimeoutMS, serverSelectionTimeoutMS, waitQueueTimeoutMS, timeoutMS, ...options }; +} /** * Merge inherited properties from parent into options, prioritizing values from options, * then values from parent. + * + * @param parent - An optional owning class of the operation being run. ex. Db/Collection/MongoClient. + * @param options - The options passed to the operation method. + * * @internal */ export function resolveOptions( @@ -526,25 +516,43 @@ export function resolveOptions( ): T { const result: T = Object.assign({}, options, resolveBSONOptions(options, parent)); + const timeoutMS = options?.timeoutMS ?? parent?.timeoutMS; // Users cannot pass a readConcern/writeConcern to operations in a transaction const session = options?.session; + if (!session?.inTransaction()) { const readConcern = ReadConcern.fromOptions(options) ?? 
parent?.readConcern; if (readConcern) { result.readConcern = readConcern; } - const writeConcern = WriteConcern.fromOptions(options) ?? parent?.writeConcern; + let writeConcern = WriteConcern.fromOptions(options) ?? parent?.writeConcern; if (writeConcern) { + if (timeoutMS != null) { + writeConcern = WriteConcern.fromOptions({ + ...writeConcern, + wtimeout: undefined, + wtimeoutMS: undefined + }); + } result.writeConcern = writeConcern; } } + result.timeoutMS = timeoutMS; + const readPreference = ReadPreference.fromOptions(options) ?? parent?.readPreference; if (readPreference) { result.readPreference = readPreference; } + const isConvenientTransaction = session?.explicit && session?.timeoutContext != null; + if (isConvenientTransaction && options?.timeoutMS != null) { + throw new MongoInvalidArgumentError( + 'An operation cannot be given a timeoutMS setting when inside a withTransaction call that has a timeoutMS setting' + ); + } + return result; } @@ -1401,6 +1409,12 @@ export async function fileIsAccessible(fileName: string, mode?: number) { } } +export function csotMin(duration1: number, duration2: number): number { + if (duration1 === 0) return duration2; + if (duration2 === 0) return duration1; + return Math.min(duration1, duration2); +} + export function noop() { return; } diff --git a/src/write_concern.ts b/src/write_concern.ts index 390646a3be0..6326f15d588 100644 --- a/src/write_concern.ts +++ b/src/write_concern.ts @@ -15,7 +15,9 @@ export interface WriteConcernOptions { export interface WriteConcernSettings { /** The write concern */ w?: W; - /** The write concern timeout */ + /** + * The write concern timeout. + */ wtimeoutMS?: number; /** The journal write concern */ journal?: boolean; @@ -28,7 +30,6 @@ export interface WriteConcernSettings { j?: boolean; /** * The write concern timeout. - * @deprecated Will be removed in the next major version. Please use the wtimeoutMS option. 
*/ wtimeout?: number; /** @@ -65,7 +66,9 @@ export class WriteConcern { readonly w?: W; /** Request acknowledgment that the write operation has been written to the on-disk journal */ readonly journal?: boolean; - /** Specify a time limit to prevent write operations from blocking indefinitely */ + /** + * Specify a time limit to prevent write operations from blocking indefinitely. + */ readonly wtimeoutMS?: number; /** * Specify a time limit to prevent write operations from blocking indefinitely. diff --git a/test/integration/client-side-encryption/driver.test.ts b/test/integration/client-side-encryption/driver.test.ts index 71c3cbd858d..720d67c4565 100644 --- a/test/integration/client-side-encryption/driver.test.ts +++ b/test/integration/client-side-encryption/driver.test.ts @@ -1,20 +1,51 @@ -import { EJSON, UUID } from 'bson'; +import { type Binary, EJSON, UUID } from 'bson'; import { expect } from 'chai'; import * as crypto from 'crypto'; +import * as sinon from 'sinon'; +import { setTimeout } from 'timers/promises'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { ClientEncryption } from '../../../src/client-side-encryption/client_encryption'; -import { type Collection, type CommandStartedEvent, type MongoClient } from '../../mongodb'; -import * as BSON from '../../mongodb'; -import { getEncryptExtraOptions } from '../../tools/utils'; +import { + BSON, + type Collection, + type CommandStartedEvent, + Connection, + CSOTTimeoutContext, + type MongoClient, + MongoCryptCreateDataKeyError, + MongoCryptCreateEncryptedCollectionError, + MongoOperationTimeoutError, + resolveTimeoutOptions, + StateMachine, + TimeoutContext +} from '../../mongodb'; +import { + clearFailPoint, + configureFailPoint, + type FailPoint, + getEncryptExtraOptions, + measureDuration, + sleep +} from '../../tools/utils'; +import { filterForCommands } from '../shared'; -const metadata = { +const metadata: MongoDBMetadataUI = { requires: { mongodb: '>=4.2.0', 
clientSideEncryption: true } }; +const getLocalKmsProvider = (): { local: { key: Binary } } => { + const { local } = EJSON.parse(process.env.CSFLE_KMS_PROVIDERS || '{}') as { + local: { key: Binary }; + [key: string]: unknown; + }; + + return { local }; +}; + describe('Client Side Encryption Functional', function () { const dataDbName = 'db'; const dataCollName = 'coll'; @@ -130,10 +161,8 @@ describe('Client Side Encryption Functional', function () { await client.connect(); const encryption = new ClientEncryption(client, { - bson: BSON, keyVaultNamespace, - kmsProviders, - extraOptions: getEncryptExtraOptions() + kmsProviders }); const dataDb = client.db(dataDbName); @@ -319,8 +348,7 @@ describe('Client Side Encryption Functional', function () { const encryption = new ClientEncryption(client, { keyVaultNamespace, - kmsProviders, - extraOptions: getEncryptExtraOptions() + kmsProviders }); const dataDb = client.db(dataDbName); @@ -401,6 +429,201 @@ describe('Client Side Encryption Functional', function () { }); } ); + + describe('CSOT on ClientEncryption', { requires: { clientSideEncryption: true } }, function () { + const metadata: MongoDBMetadataUI = { + requires: { clientSideEncryption: true, mongodb: '>=4.4' } + }; + + function makeBlockingFailFor(command: string | string[], blockTimeMS: number) { + beforeEach(async function () { + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: Array.isArray(command) ? 
command : [command], + blockConnection: true, + blockTimeMS, + appName: 'clientEncryption' + } + }); + }); + + afterEach(async function () { + sinon.restore(); + await clearFailPoint(this.configuration); + }); + } + + function runAndCheckForCSOTTimeout(fn: () => Promise) { + return async () => { + const start = performance.now(); + const error = await fn().then( + () => 'API did not reject', + error => error + ); + const end = performance.now(); + if (error?.name === 'MongoBulkWriteError') { + expect(error) + .to.have.property('errorResponse') + .that.is.instanceOf(MongoOperationTimeoutError); + } else { + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + } + expect(end - start).to.be.within(498, 1000); + }; + } + + let key1Id; + let keyVaultClient: MongoClient; + let clientEncryption: ClientEncryption; + let commandsStarted: CommandStartedEvent[]; + + beforeEach(async function () { + const internalClient = this.configuration.newClient(); + await internalClient + .db('keyvault') + .dropCollection('datakeys', { writeConcern: { w: 'majority' } }) + .catch(() => null); + await internalClient.db('keyvault').createCollection('datakeys'); + await internalClient.close(); + + keyVaultClient = this.configuration.newClient(undefined, { + timeoutMS: 500, + monitorCommands: true, + minPoolSize: 1, + appName: 'clientEncryption' + }); + await keyVaultClient.connect(); + + clientEncryption = new ClientEncryption(keyVaultClient, { + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: getLocalKmsProvider(), + timeoutMS: 500 + }); + + key1Id = await clientEncryption.createDataKey('local'); + while ((await clientEncryption.getKey(key1Id)) == null); + + commandsStarted = []; + keyVaultClient.on('commandStarted', ev => commandsStarted.push(ev)); + }); + + afterEach(async function () { + await keyVaultClient?.close(); + }); + + describe('rewrapManyDataKey', function () { + describe('when the bulk operation takes too long', function () { + makeBlockingFailFor('update', 
2000); + + it( + 'throws a timeout error', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.rewrapManyDataKey({ _id: key1Id }, { provider: 'local' }); + }) + ); + }); + + describe('when the find operation for fetchKeys takes too long', function () { + makeBlockingFailFor('find', 2000); + + it( + 'throws a timeout error', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.rewrapManyDataKey({ _id: key1Id }, { provider: 'local' }); + }) + ); + }); + + describe('when the find and bulk operation takes too long', function () { + // together they add up to 800, exceeding the timeout of 500 + makeBlockingFailFor(['update', 'find'], 400); + + it( + 'throws a timeout error', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.rewrapManyDataKey({ _id: key1Id }, { provider: 'local' }); + }) + ); + }); + }); + + describe('deleteKey', function () { + makeBlockingFailFor('delete', 2000); + + it( + 'throws a timeout error if the delete operation takes too long', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.deleteKey(new UUID()); + }) + ); + }); + + describe('getKey', function () { + makeBlockingFailFor('find', 2000); + + it( + 'throws a timeout error if the find takes too long', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.getKey(new UUID()); + }) + ); + }); + + describe('getKeys', function () { + makeBlockingFailFor('find', 2000); + + it( + 'throws a timeout error if the find operation takes too long', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.getKeys().toArray(); + }) + ); + }); + + describe('removeKeyAltName', function () { + makeBlockingFailFor('findAndModify', 2000); + + it( + 'throws a timeout error if the findAndModify operation takes too long', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.removeKeyAltName(new UUID(), 'blah'); + }) + ); + }); + + 
describe('addKeyAltName', function () { + makeBlockingFailFor('findAndModify', 2000); + + it( + 'throws a timeout error if the findAndModify operation takes too long', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.addKeyAltName(new UUID(), 'blah'); + }) + ); + }); + + describe('getKeyByAltName', function () { + makeBlockingFailFor('find', 2000); + + it( + 'throws a timeout error if the find operation takes too long', + metadata, + runAndCheckForCSOTTimeout(async () => { + await clientEncryption.getKeyByAltName('blah'); + }) + ); + }); + }); }); describe('Range Explicit Encryption with JS native types', function () { @@ -471,3 +694,589 @@ describe('Range Explicit Encryption with JS native types', function () { }); }); }); + +describe('CSOT', function () { + describe('Auto encryption', function () { + let setupClient; + let keyVaultClient: MongoClient; + let dataKey; + + beforeEach(async function () { + keyVaultClient = this.configuration.newClient(); + await keyVaultClient.connect(); + await keyVaultClient.db('keyvault').collection('datakeys'); + const clientEncryption = new ClientEncryption(keyVaultClient, { + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: getLocalKmsProvider() + }); + dataKey = await clientEncryption.createDataKey('local'); + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 2000 + } + } as FailPoint); + }); + + afterEach(async function () { + await keyVaultClient.close(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await setupClient.close(); + }); + + const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=4.2.0', + clientSideEncryption: true + } + }; + + context( + 'when an auto encrypted client is configured with timeoutMS and auto 
encryption takes longer than timeoutMS', + function () { + let encryptedClient: MongoClient; + const timeoutMS = 1000; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultClient, + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: getLocalKmsProvider(), + schemaMap: { + 'test.test': { + bsonType: 'object', + encryptMetadata: { + keyId: [new UUID(dataKey)] + }, + properties: { + a: { + encrypt: { + bsonType: 'int', + algorithm: 'AEAD_AES_256_CBC_HMAC_SHA_512-Random', + keyId: [new UUID(dataKey)] + } + } + } + } + } + }, + timeoutMS + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient.close(); + }); + + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + encryptedClient + .db('test') + .collection('test') + .insertOne({ a: 1 }) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); + } + ); + + context( + 'when an auto encrypted client is not configured with timeoutMS and auto encryption is delayed', + function () { + let encryptedClient: MongoClient; + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + keyVaultClient, + keyVaultNamespace: 'admin.datakeys', + kmsProviders: getLocalKmsProvider() + } + } + ); + }); + + afterEach(async function () { + await encryptedClient.close(); + }); + + it('the command succeeds', metadata, async function () { + await encryptedClient.db('test').collection('test').aggregate([]).toArray(); + }); + } + ); + }); + + describe('State machine', function () { + const stateMachine = new StateMachine({} as any); + + const timeoutContext = () => { + return new CSOTTimeoutContext({ + timeoutMS: 1000, + serverSelectionTimeoutMS: 30000 + }); + }; + + const 
timeoutMS = 1000; + + const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=4.2.0' + } + }; + + describe('#markCommand', function () { + context( + 'when csot is enabled and markCommand() takes longer than the remaining timeoutMS', + function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS + } + ); + await encryptedClient.connect(); + + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(1000); + yield* stub.wrappedMethod.call(this, ...args); + }); + }); + + afterEach(async function () { + await encryptedClient?.close(); + sinon.restore(); + }); + + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .markCommand( + encryptedClient, + 'test.test', + BSON.serialize({ ping: 1 }), + timeoutContext() + ) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); + } + ); + }); + + describe('#fetchKeys', function () { + let setupClient; + + beforeEach(async function () { + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 2000 + } + } as FailPoint); + }); + + afterEach(async function () { + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await setupClient.close(); + }); + + context( + 'when csot is enabled and fetchKeys() takes longer than the remaining timeoutMS', + function () { + let encryptedClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + 
{}, + { + timeoutMS + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 }), timeoutContext()) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); + } + ); + + context('when the cursor times out and a killCursors is executed', function () { + let client: MongoClient; + let commands: (CommandStartedEvent & { command: { maxTimeMS?: number } })[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + commands = []; + client.on('commandStarted', filterForCommands('killCursors', commands)); + + await client.connect(); + const docs = Array.from({ length: 1200 }, (_, i) => ({ i })); + + await client.db('test').collection('test').insertMany(docs); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 2000 + } + }); + }); + + afterEach(async function () { + await clearFailPoint(this.configuration); + await client.close(); + }); + + it( + 'refreshes timeoutMS to the full timeout', + { + requires: { + ...metadata.requires, + topology: '!load-balanced' + } + }, + async function () { + const timeoutContext = TimeoutContext.create( + resolveTimeoutOptions(client, { timeoutMS: 1900 }) + ); + + await setTimeout(1500); + + const { result: error } = await measureDuration(() => + stateMachine + .fetchKeys(client, 'test.test', BSON.serialize({}), timeoutContext) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = 
commands; + expect(maxTimeMS).to.be.greaterThan(1800); + } + ); + }); + + context('when csot is not enabled and fetchKeys() is delayed', function () { + let encryptedClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient(); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it('the command succeeds', metadata, async function () { + await stateMachine.fetchKeys(encryptedClient, 'test.test', BSON.serialize({ a: 1 })); + }); + }); + }); + + describe('#fetchCollectionInfo', function () { + let setupClient; + + beforeEach(async function () { + setupClient = this.configuration.newClient(); + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['listCollections'], + blockConnection: true, + blockTimeMS: 2000 + } + } as FailPoint); + }); + + afterEach(async function () { + await setupClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await setupClient.close(); + }); + + context( + 'when csot is enabled and fetchCollectionInfo() takes longer than the remaining timeoutMS', + metadata, + function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + timeoutMS + } + ); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it('the command should fail due to a timeout error', metadata, async function () { + const { duration, result: error } = await measureDuration(() => + stateMachine + .fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }, timeoutContext()) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + }); + } + ); + + context( + 'when csot is not enabled and 
fetchCollectionInfo() is delayed', + metadata, + function () { + let encryptedClient: MongoClient; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient(); + await encryptedClient.connect(); + }); + + afterEach(async function () { + await encryptedClient?.close(); + }); + + it('the command succeeds', metadata, async function () { + await stateMachine.fetchCollectionInfo(encryptedClient, 'test.test', { a: 1 }); + }); + } + ); + }); + }); + + describe('Explicit Encryption', function () { + describe('#createEncryptedCollection', function () { + let client: MongoClient; + let clientEncryption: ClientEncryption; + let local_key; + const timeoutMS = 1000; + + const encryptedCollectionMetadata: MongoDBMetadataUI = { + requires: { + clientSideEncryption: true, + mongodb: '>=7.0.0', + topology: '!single' + } + }; + + beforeEach(async function () { + local_key = { local: EJSON.parse(process.env.CSFLE_KMS_PROVIDERS).local }; + client = this.configuration.newClient({ timeoutMS }); + await client.connect(); + await client.db('keyvault').createCollection('datakeys'); + clientEncryption = new ClientEncryption(client, { + keyVaultNamespace: 'keyvault.datakeys', + keyVaultClient: client, + kmsProviders: local_key + }); + }); + + afterEach(async function () { + await client + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await client + .db('db') + .collection('newnew') + .drop() + .catch(() => null); + await client + .db('keyvault') + .collection('datakeys') + .drop() + .catch(() => null); + await client.close(); + }); + + async function runCreateEncryptedCollection() { + const createCollectionOptions = { + encryptedFields: { fields: [{ path: 'ssn', bsonType: 'string', keyId: null }] } + }; + + const db = client.db('db'); + + return await measureDuration(() => + clientEncryption + .createEncryptedCollection(db, 'newnew', { + provider: 'local', + createCollectionOptions, + masterKey: null + }) + 
.catch(err => err) + ); + } + + context( + 'when `createDataKey` hangs longer than timeoutMS and `createCollection` does not hang', + () => { + it( + '`createEncryptedCollection throws `MongoCryptCreateDataKeyError` due to a timeout error', + encryptedCollectionMetadata, + async function () { + await client + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: timeoutMS * 1.2 + } + } as FailPoint); + + const { duration, result: err } = await runCreateEncryptedCollection(); + expect(err).to.be.instanceOf(MongoCryptCreateDataKeyError); + expect(err.cause).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + } + ); + } + ); + + context( + 'when `createDataKey` does not hang and `createCollection` hangs longer than timeoutMS', + () => { + it( + '`createEncryptedCollection throws `MongoCryptCreateEncryptedCollectionError` due to a timeout error', + encryptedCollectionMetadata, + async function () { + await client + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['create'], + blockConnection: true, + blockTimeMS: timeoutMS * 1.2 + } + } as FailPoint); + + const { duration, result: err } = await runCreateEncryptedCollection(); + expect(err).to.be.instanceOf(MongoCryptCreateEncryptedCollectionError); + expect(err.cause).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + } + ); + } + ); + + context( + 'when `createDataKey` and `createCollection` cumulatively hang longer than timeoutMS', + () => { + it( + '`createEncryptedCollection throws `MongoCryptCreateEncryptedCollectionError` due to a timeout error', + encryptedCollectionMetadata, + async function () { + await client + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { + times: 2 + 
}, + data: { + failCommands: ['insert', 'create'], + blockConnection: true, + blockTimeMS: timeoutMS * 0.6 + } + } as FailPoint); + + const { duration, result: err } = await runCreateEncryptedCollection(); + expect(err).to.be.instanceOf(MongoCryptCreateEncryptedCollectionError); + expect(err.cause).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + } + ); + } + ); + }); + }); +}); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts index 1ed88f34d86..51bd834a209 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.prose.test.ts @@ -1,8 +1,47 @@ /* Specification prose tests */ -// TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec prose tests', () => { - context('1. 
Multi-batch writes', () => { +import { type ChildProcess, spawn } from 'node:child_process'; + +import { expect } from 'chai'; +import * as os from 'os'; +import * as path from 'path'; +import * as semver from 'semver'; +import * as sinon from 'sinon'; +import { Readable } from 'stream'; +import { pipeline } from 'stream/promises'; + +import { type CommandStartedEvent } from '../../../mongodb'; +import { + Binary, + ClientEncryption, + type CommandSucceededEvent, + GridFSBucket, + MongoBulkWriteError, + MongoClient, + MongoOperationTimeoutError, + MongoServerSelectionError, + now, + ObjectId, + promiseWithResolvers, + squashError +} from '../../mongodb'; +import { type FailPoint, makeMultiBatchWrite, measureDuration } from '../../tools/utils'; +import { filterForCommands } from '../shared'; + +describe('CSOT spec prose tests', function () { + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + }); + + afterEach(async function () { + await internalClient?.close(); + await client?.close(); + }); + + describe('1. Multi-batch writes', { requires: { topology: 'single', mongodb: '>=4.4' } }, () => { /** * This test MUST only run against standalones on server versions 4.4 and higher. * The `insertMany` call takes an exceedingly long time on replicasets and sharded @@ -29,19 +68,102 @@ describe.skip('CSOT spec prose tests', () => { * - Expect this to fail with a timeout error. * 1. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. */ - }); - context('2. maxTimeMS is not set for commands sent to mongocryptd', () => { - /** - * This test MUST only be run against enterprise server versions 4.2 and higher. - * - * 1. Launch a mongocryptd process on 23000. - * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. - * 1. 
Using `client`, execute the `{ ping: 1 }` command against the `admin` database. - * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. - */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('bulkWriteTest') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it('performs two inserts which fail to complete before 2000 ms', async () => { + const inserts = []; + client.on('commandStarted', ev => inserts.push(ev)); + + const a = new Uint8Array(1000000 - 22); + const oneMBDocs = Array.from({ length: 50 }, (_, _id) => ({ _id, a })); + const error = await client + .db('db') + .collection<{ _id: number; a: Uint8Array }>('bulkWriteTest') + .insertMany(oneMBDocs) + .catch(error => error); + + expect(error).to.be.instanceOf(MongoBulkWriteError); + expect(error.errorResponse).to.be.instanceOf(MongoOperationTimeoutError); + expect(inserts.map(ev => ev.commandName)).to.deep.equal(['insert', 'insert']); + }); }); + context( + '2. maxTimeMS is not set for commands sent to mongocryptd', + { requires: { mongodb: '>=4.2' } }, + () => { + /** + * This test MUST only be run against enterprise server versions 4.2 and higher. + * + * 1. Launch a mongocryptd process on 23000. + * 1. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. + * 1. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. + * 1. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. 
+ */ + + let client: MongoClient; + const mongocryptdTestPort = '23000'; + let childProcess: ChildProcess; + + beforeEach(async function () { + const pidFile = path.join(os.tmpdir(), new ObjectId().toHexString()); + childProcess = spawn( + 'mongocryptd', + ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', pidFile], + { + stdio: 'ignore', + detached: true + } + ); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}/?timeoutMS=1000`, { + monitorCommands: true + }); + }); + + afterEach(async function () { + await client.close(); + childProcess.kill('SIGKILL'); + sinon.restore(); + }); + + it('maxTimeMS is not set', async function () { + const commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await client.connect(); + await client + .db('admin') + .command({ ping: 1 }) + .catch(e => squashError(e)); + expect(commandStarted).to.have.lengthOf(1); + expect(commandStarted[0].command).to.not.have.property('maxTimeMS'); + }); + } + ); + context('3. ClientEncryption', () => { /** * Each test under this category MUST only be run against server versions 4.4 and higher. 
In these tests, @@ -58,6 +180,49 @@ describe.skip('CSOT spec prose tests', () => { * { local: { key: } } * ``` */ + let keyVaultClient: MongoClient; + let clientEncryption: ClientEncryption; + const LOCAL_MASTERKEY = Buffer.from( + 'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk', + 'base64' + ); + + const clientEncryptionMetadata: MongoDBMetadataUI = { + requires: { + clientSideEncryption: true, + mongodb: '>=7.0.0', + topology: '!single' + } + } as const; + + const timeoutMS = 100; + + beforeEach(async function () { + await internalClient + .db('keyvault') + .collection('datakeys') + .drop() + .catch(() => null); + await internalClient.db('keyvault').collection('datakeys'); + keyVaultClient = this.configuration.newClient({}, { timeoutMS, monitorCommands: true }); + clientEncryption = new ClientEncryption(keyVaultClient, { + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: { local: { key: LOCAL_MASTERKEY } } + }); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off' + } as FailPoint); + await keyVaultClient.close(); + await internalClient.close(); + }); + context('createDataKey', () => { /** * 1. Using `internalClient`, set the following fail point: @@ -78,6 +243,34 @@ describe.skip('CSOT spec prose tests', () => { * - Expect this to fail with a timeout error. * 1. Verify that an `insert` command was executed against to `keyvault.datakeys` as part of the `createDataKey` call. 
*/ + + it('times out due to timeoutMS', clientEncryptionMetadata, async function () { + await internalClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 150 + } + } as FailPoint); + const commandStarted: CommandStartedEvent[] = []; + keyVaultClient.on('commandStarted', ev => commandStarted.push(ev)); + + const { duration, result: err } = await measureDuration(() => + clientEncryption.createDataKey('local').catch(e => e) + ); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + const command = commandStarted[0].command; + expect(command).to.have.property('insert', 'datakeys'); + expect(command).to.have.property('$db', 'keyvault'); + }); }); context('encrypt', () => { @@ -102,6 +295,43 @@ describe.skip('CSOT spec prose tests', () => { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. 
*/ + it('times out due to timeoutMS', clientEncryptionMetadata, async function () { + const datakeyId = await clientEncryption.createDataKey('local'); + expect(datakeyId).to.be.instanceOf(Binary); + expect(datakeyId.sub_type).to.equal(Binary.SUBTYPE_UUID); + + await internalClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 150 + } + } as FailPoint); + + const commandStarted: CommandStartedEvent[] = []; + keyVaultClient.on('commandStarted', ev => commandStarted.push(ev)); + + const { duration, result: err } = await measureDuration(() => + clientEncryption + .encrypt('hello', { + algorithm: `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, + keyId: datakeyId + }) + .catch(e => e) + ); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + const command = commandStarted[0].command; + expect(command).to.have.property('find', 'datakeys'); + expect(command).to.have.property('$db', 'keyvault'); + }); }); context('decrypt', () => { @@ -129,10 +359,51 @@ describe.skip('CSOT spec prose tests', () => { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. 
*/ + it('times out due to timeoutMS', clientEncryptionMetadata, async function () { + const datakeyId = await clientEncryption.createDataKey('local'); + expect(datakeyId).to.be.instanceOf(Binary); + expect(datakeyId.sub_type).to.equal(Binary.SUBTYPE_UUID); + + // pre-compute 'hello' encryption, otherwise the data key is cached sometimes and find in stateMachine.execute never runs + const encrypted = Binary.createFromBase64( + 'Af6ie/LRP0uoisAZthHPUs0CKzTBFIkJr8kxmOk1pV1C/6K54otT8QvNJgNTNG2CNpThhfdXaObuOMMReNlTgwapqPYCb/HJRQ1Nfma6uA3cTg==', + 6 + ); + expect(encrypted).to.be.instanceOf(Binary); + expect(encrypted.sub_type).to.equal(Binary.SUBTYPE_ENCRYPTED); + + await internalClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 150 + } + } as FailPoint); + + const commandStarted: CommandStartedEvent[] = []; + keyVaultClient.on('commandStarted', ev => commandStarted.push(ev)); + + const { duration, result: err } = await measureDuration(() => + clientEncryption.decrypt(encrypted).catch(e => e) + ); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + const command = commandStarted[0].command; + expect(command).to.have.property('find', 'datakeys'); + expect(command).to.have.property('$db', 'keyvault'); + }); }); }); - context('4. Background Connection Pooling', () => { + /** TODO(DRIVERS-2884): Drivers should not interrupt creating connections with a client-side timeout */ + context.skip('4. Background Connection Pooling', () => { /** * The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication * fields (i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait @@ -193,11 +464,62 @@ describe.skip('CSOT spec prose tests', () => { }); context('5. 
Blocking Iteration Methods', () => { + const metadata = { requires: { mongodb: '>=4.4' } }; /** * Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a * blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an * error occurs. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 90 + } + }; + let internalClient: MongoClient; + let client: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + // Creating capped collection to be able to create tailable find cursor + const coll = await internalClient + .db('db') + .createCollection('coll', { capped: true, size: 1_000_000 }); + await coll.insertOne({ x: 1 }); + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { + monitorCommands: true, + timeoutMS: 150, + minPoolSize: 20 + }); + await client.connect(); + + commandStarted = []; + commandSucceeded = []; + + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); context('Tailable cursors', () => { /** @@ -224,6 +546,29 @@ describe.skip('CSOT spec prose tests', () => { * - Expect this to fail with a timeout error. * 1. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the test. 
*/ + + it('send correct number of finds and getMores', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { tailable: true }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + // Check that there are no getMores sent + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(0); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + // Expect 1 find + expect(commandStarted.filter(e => e.command.find != null)).to.have.lengthOf(1); + // Expect 2 getMore + expect(commandStarted.filter(e => e.command.getMore != null)).to.have.lengthOf(2); + }); }); context('Change Streams', () => { @@ -248,13 +593,77 @@ describe.skip('CSOT spec prose tests', () => { * - Expect this to fail with a timeout error. * 1. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during the test. */ + it( + 'sends correct number of aggregate and getMores', + { requires: { mongodb: '>=4.4', topology: '!single' } }, + async function () { + // NOTE: we don't check for a non-zero ID since we lazily send the initial aggregate to the + // server. 
See ChangeStreamCursor._initialize + const changeStream = client + .db('db') + .collection('coll') + .watch([], { timeoutMS: 120, maxAwaitTimeMS: 10 }); + + // @ts-expect-error private method + await changeStream.cursor.cursorInit(); + + const maybeError = await changeStream.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + const aggregates = commandStarted + .filter(e => e.command.aggregate != null) + .map(e => e.command); + const getMores = commandStarted + .filter(e => e.command.getMore != null) + .map(e => e.command); + // Expect 1 aggregate + expect(aggregates).to.have.lengthOf(1); + // Expect 2 getMores + expect(getMores).to.have.lengthOf(2); + } + ); }); }); context('6. GridFS - Upload', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + let internalClient: MongoClient; + let client: MongoClient; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + if (client) { + await client.close(); + } + }); /** Tests in this section MUST only be run against server versions 4.4 and higher. */ - context('uploads via openUploadStream can be timed out', () => { + it('uploads via openUploadStream can be timed out', metadata, async function () { /** * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. * 1. Using `internalClient`, set the following fail point: @@ -277,9 +686,30 @@ describe.skip('CSOT spec prose tests', () => { * 1. 
Call `uploadStream.close()` to flush the stream and insert chunks. * - Expect this to fail with a timeout error. */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const bucket = new GridFSBucket(client.db('db')); + const stream = bucket.openUploadStream('filename'); + const data = Buffer.from('13', 'hex'); + + const fileStream = Readable.from(data); + const maybeError = await pipeline(fileStream, stream).then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); }); - context('Aborting an upload stream can be timed out', () => { + it('Aborting an upload stream can be timed out', metadata, async function () { /** * This test only applies to drivers that provide an API to abort a GridFS upload stream. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -303,10 +733,92 @@ describe.skip('CSOT spec prose tests', () => { * 1. Call `uploadStream.abort()`. * - Expect this to fail with a timeout error. 
*/ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['delete'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + await internalClient.db().admin().command(failpoint); + const bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 300 }); + + const data = Buffer.from('01020304', 'hex'); + + const { promise: writePromise, resolve, reject } = promiseWithResolvers(); + uploadStream.on('error', error => uploadStream.destroy(error)); + uploadStream.write(data, error => { + if (error) reject(error); + else resolve(); + }); + let maybeError = await writePromise.then( + () => null, + e => e + ); + expect(maybeError).to.be.null; + + maybeError = await uploadStream.abort().then( + () => null, + error => error + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + uploadStream.destroy(); }); }); context('7. GridFS - Download', () => { + let internalClient: MongoClient; + let client: MongoClient; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('files') + .catch(() => null); + await internalClient + .db('db') + .dropCollection('chunks') + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + + await files.insertOne({ + _id: new ObjectId('000000000000000000000005'), + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + }); + + afterEach(async function () { + if (internalClient) { + await internalClient + .db() + .admin() + .command({ 
configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + + if (client) { + await client.close(); + } + }); + /** * This test MUST only be run against server versions 4.4 and higher. * 1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. @@ -348,99 +860,280 @@ describe.skip('CSOT spec prose tests', () => { * - Expect this to fail with a timeout error. * 1. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against `db.fs.chunks`. */ + it('download streams can be timed out', metadata, async function () { + const bucket = new GridFSBucket(client.db('db')); + const downloadStream = bucket.openDownloadStream(new ObjectId('000000000000000000000005')); + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS: 150 + } + }; + await internalClient.db().admin().command(failpoint); + + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); }); context('8. Server Selection', () => { - context('serverSelectionTimeoutMS honored if timeoutMS is not set', () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. - * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. 
- */ - }); + context('using sinon timer', function () { + let clock: sinon.SinonFakeTimers; + + beforeEach(function () { + clock = sinon.useFakeTimers(); + }); + + afterEach(function () { + clock.restore(); + }); - context( - "timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", - () => { + it.skip('serverSelectionTimeoutMS honored if timeoutMS is not set', async function () { /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. + * 1. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ - } - ); - context( - "serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", - () => { - /** - * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. - * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. - * - Expect this to fail with a server selection timeout error after no more than 15ms. 
+ /** NOTE: This is the original implementation of this test, but it was flaky, so was + * replaced by the current implementation using sinon fake timers + * ```ts + * client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + * const admin = client.db('test').admin(); + * const start = performance.now(); + * const maybeError = await admin.ping().then( + * () => null, + * e => e + * ); + * const end = performance.now(); + * + * expect(maybeError).to.be.instanceof(MongoServerSelectionError); + * expect(end - start).to.be.lte(15) + * ``` */ - } - ); + client = new MongoClient('mongodb://invalid/?serverSelectionTimeoutMS=10'); + const admin = client.db('test').admin(); + const maybeError = admin.ping().then( + () => null, + e => e + ); + + await clock.tickAsync(11); + expect(await maybeError).to.be.instanceof(MongoServerSelectionError); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; + }); - context('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', () => { + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. 
Explicit connect throws on invalid host name'; + + it.skip("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20'); + const start = now(); + + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; + + it.skip("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS", async function () { + /** + * 1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. + * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + * - Expect this to fail with a server selection timeout error after no more than 15ms. + */ + client = new MongoClient('mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); + + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; + + it.skip('serverSelectionTimeoutMS honored for server selection if timeoutMS=0', async function () { /** * 1. 
Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. * 1. Using `client`, run the command `{ ping: 1 }` against the `admin` database. * - Expect this to fail with a server selection timeout error after no more than 15ms. */ - }); + client = new MongoClient('mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10'); + const start = now(); + const maybeError = await client + .db('test') + .admin() + .ping() + .then( + () => null, + e => e + ); + const end = now(); - context( - "timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(NODE-6223): Auto connect performs extra server selection. Explicit connect throws on invalid host name'; - context( - "serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", - () => { - /** - * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a - * username and password). - * 1. 
Using `internalClient`, set the following fail point: - * ```js - * { - * configureFailPoint: failCommand, - * mode: { times: 1 }, - * data: { - * failCommands: ["saslContinue"], - * blockConnection: true, - * blockTimeMS: 15 - * } - * } - * ``` - * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. - * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. - * - Expect this to fail with a timeout error after no more than 15ms. - */ - } - ); + it.skip("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + * - Expect this to fail with a timeout error after no more than 15ms. 
+ */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); + + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 20, + timeoutMS: 10 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + it.skip("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", async function () { + /** + * This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a + * username and password). + * 1. Using `internalClient`, set the following fail point: + * ```js + * { + * configureFailPoint: failCommand, + * mode: { times: 1 }, + * data: { + * failCommands: ["saslContinue"], + * blockConnection: true, + * blockTimeMS: 15 + * } + * } + * ``` + * 1. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + * 1. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + * - Expect this to fail with a timeout error after no more than 15ms. 
+ */ + await internalClient + .db('db') + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['saslContinue'], + blockConnection: true, + blockTimeMS: 15 + } + }); + + client = this.configuration.newClient({ + serverSelectionTimeoutMS: 10, + timeoutMS: 20 + }); + const start = now(); + const maybeError = await client + .db('db') + .collection('coll') + .insertOne({ x: 1 }) + .then( + () => null, + e => e + ); + const end = now(); + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + expect(end - start).to.be.lte(15); + }).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; }); - context('9. endSession', () => { + describe('9. endSession', () => { + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4', topology: ['replicaset', 'sharded'] } + }; /** * This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be * run three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout @@ -470,12 +1163,92 @@ describe.skip('CSOT spec prose tests', () => { * 1. Using `session`, execute `session.end_session` * - Expect this to fail with a timeout error after no more than 15ms. 
*/ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + const internalClient = this.configuration.newClient(); + // End in-progress transactions otherwise "drop" will hang + await internalClient.db('admin').command({ killAllSessions: [] }); + await internalClient + .db('endSession_db') + .collection('endSession_coll') + .drop() + .catch(() => null); + await internalClient.db('endSession_db').createCollection('endSession_coll'); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + const internalClient = this.configuration.newClient(); + await internalClient.db('admin').command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client?.close(); + }); + + describe('when timeoutMS is provided to the client', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient({ timeoutMS: 150, monitorCommands: true }); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when defaultTimeoutMS is provided to startSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession({ defaultTimeoutMS: 150 }); + session.startTransaction(); + 
await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession().catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('when timeoutMS is provided to endSession', () => { + it('throws a timeout error from endSession', metadata, async function () { + client = this.configuration.newClient(); + const coll = client.db('endSession_db').collection('endSession_coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session }); + const start = performance.now(); + const error = await session.endSession({ timeoutMS: 150 }).catch(error => error); + const end = performance.now(); + expect(end - start).to.be.within(100, 170); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); }); - context('10. Convenient Transactions', () => { + describe('10. Convenient Transactions', () => { /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. */ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=4.4' } + }; - context('timeoutMS is refreshed for abortTransaction if the callback fails', () => { + describe('when an operation fails inside withTransaction callback', () => { /** * 1. Using `internalClient`, drop the `db.coll` collection. * 1. Using `internalClient`, set the following fail point: @@ -486,7 +1259,7 @@ describe.skip('CSOT spec prose tests', () => { * data: { * failCommands: ["insert", "abortTransaction"], * blockConnection: true, - * blockTimeMS: 15 + * blockTimeMS: 200 * } * } * ``` @@ -503,6 +1276,167 @@ describe.skip('CSOT spec prose tests', () => { * 1. `command_started` and `command_failed` events for an `insert` command. * 1. 
`command_started` and `command_failed` events for an `abortTransaction` command. */ + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 200 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it('timeoutMS is refreshed for abortTransaction', metadata, async function () { + if ( + this.configuration.topologyType === 'ReplicaSetWithPrimary' && + semver.satisfies(this.configuration.version, '<=4.4') + ) { + this.skipReason = '4.4 replicaset fail point does not blockConnection for requested time'; + this.skip(); + } + + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 150, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + const withTransactionError = await session + .withTransaction(async session => { + await coll.insertOne({ x: 1 }, { session }); + }) + .catch(error => error); + + try { + 
expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal(['insert', 'abortTransaction']); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + }); }); }); + + describe( + '11. Multi-batch bulkWrites', + { requires: { mongodb: '>=8.0', serverless: 'forbid', topology: 'single' } }, + function () { + /** + * ### 11. Multi-batch bulkWrites + * + * This test MUST only run against server versions 8.0+. This test must be skipped on Atlas Serverless. + * + * 1. Using `internalClient`, drop the `db.coll` collection. + * + * 2. Using `internalClient`, set the following fail point: + * + * @example + * ```javascript + * { + * configureFailPoint: "failCommand", + * mode: { + * times: 2 + * }, + * data: { + * failCommands: ["bulkWrite"], + * blockConnection: true, + * blockTimeMS: 1010 + * } + * } + * ``` + * + * 3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + * in the response. + * + * 4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + * + * 5. Create a list of write models (referred to as `models`) with the following write model repeated + * (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + * + * @example + * ```json + * InsertOne { + * "namespace": "db.coll", + * "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + * } + * ``` + * + * 6. Call `bulkWrite` on `client` with `models`. + * + * - Expect this to fail with a timeout error. + * + * 7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. 
+ */ + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 2 + }, + data: { + failCommands: ['bulkWrite'], + blockConnection: true, + blockTimeMS: 1010 + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + + client = this.configuration.newClient({ timeoutMS: 2000, monitorCommands: true }); + }); + + it('performs two bulkWrites which fail to complete before 2000 ms', async function () { + const writes = []; + client.on('commandStarted', filterForCommands('bulkWrite', writes)); + + const models = await makeMultiBatchWrite(this.configuration); + + const error = await client.bulkWrite(models).catch(error => error); + + expect(error, error.stack).to.be.instanceOf(MongoOperationTimeoutError); + expect(writes).to.have.lengthOf(2); + }); + } + ); }); diff --git a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts index 2e2cd0fa8e5..a1b0791026d 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.spec.test.ts @@ -1,9 +1,61 @@ import { join } from 'path'; +import * as semver from 'semver'; import { loadSpecTests } from '../../spec'; import { runUnifiedSuite } from '../../tools/unified-spec-runner/runner'; -// TODO(NODE-5823): Implement unified runner operations and options support for CSOT -describe.skip('CSOT spec tests', function () { - runUnifiedSuite(loadSpecTests(join('client-side-operations-timeout'))); +const skippedSpecs = {}; + +const skippedTests = { + 'Tailable cursor iteration timeoutMS is refreshed for getMore - failure': 'TODO(DRIVERS-2965)', + 'Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure': 
+ 'TODO(DRIVERS-2965)', + 'command is not sent if RTT is greater than timeoutMS': 'TODO(DRIVERS-2965)', + 'Non-tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure': + 'TODO(DRIVERS-2965)', + 'maxTimeMS value in the command is less than timeoutMS': + 'TODO(DRIVERS-2970): see modified test in unified-csot-node-specs', + 'timeoutMS is refreshed for getMore - failure': + 'TODO(DRIVERS-2965): see modified test in unified-csot-node-specs', + 'timeoutMS applies to full resume attempt in a next call': 'TODO(DRIVERS-3006)', + 'timeoutMS is refreshed for getMore if maxAwaitTimeMS is set': 'TODO(DRIVERS-3018)' +}; + +describe('CSOT spec tests', function () { + const specs = loadSpecTests('client-side-operations-timeout'); + for (const spec of specs) { + for (const test of spec.tests) { + if (skippedSpecs[spec.name] != null) { + test.skipReason = skippedSpecs[spec.name]; + } + if (skippedTests[test.description] != null) { + test.skipReason = skippedTests[test.description]; + } + } + } + + runUnifiedSuite(specs, (test, configuration) => { + const sessionCSOTTests = ['timeoutMS applied to withTransaction']; + if ( + configuration.topologyType === 'LoadBalanced' && + test.description === 'timeoutMS is refreshed for close' + ) { + return 'LoadBalanced cannot refresh timeoutMS and run expected killCursors because pinned connection has been closed by the timeout'; + } + if ( + sessionCSOTTests.includes(test.description) && + configuration.topologyType === 'ReplicaSetWithPrimary' && + semver.satisfies(configuration.version, '<=4.4') + ) { + return '4.4 replicaset fail point does not blockConnection for requested time'; + } + return false; + }); +}); + +describe('CSOT modified spec tests', function () { + const specs = loadSpecTests( + join('..', 'integration', 'client-side-operations-timeout', 'unified-csot-node-specs') + ); + runUnifiedSuite(specs); }); diff --git 
a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts index cf9c5f736ff..58bfb79de23 100644 --- a/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts +++ b/test/integration/client-side-operations-timeout/client_side_operations_timeout.unit.test.ts @@ -1,51 +1,256 @@ -/* eslint-disable @typescript-eslint/no-empty-function */ /** * The following tests are described in CSOTs spec prose tests as "unit" tests * The tests enumerated in this section could not be expressed in either spec or prose format. * Drivers SHOULD implement these if it is possible to do so using the driver's existing test infrastructure. */ -// TODO(NODE-5824): Implement CSOT prose tests -describe.skip('CSOT spec unit tests', () => { - context('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', () => {}); - - context( - 'If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', - () => {} - ); - - context( - 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', - () => {} - ); - - context( - 'For drivers that have control over OCSP behavior, min(remaining computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', - () => {} - ); - - context( - 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', - () => {} - ); - - context( - 'The remaining timeoutMS value should apply to HTTP requests against KMS servers for CSFLE.', - () => {} - ); - - context( - 'The remaining timeoutMS value should 
apply to commands sent to mongocryptd as part of automatic encryption.', - () => {} - ); - - context( - 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', - () => {} - ); +import { expect } from 'chai'; +import * as sinon from 'sinon'; +import { setTimeout } from 'timers'; +import { TLSSocket } from 'tls'; +import { promisify } from 'util'; + +// eslint-disable-next-line @typescript-eslint/no-restricted-imports +import { StateMachine } from '../../../src/client-side-encryption/state_machine'; +import { + Connection, + ConnectionPool, + CSOTTimeoutContext, + type MongoClient, + MongoOperationTimeoutError, + Timeout, + TimeoutContext, + Topology +} from '../../mongodb'; +import { measureDuration, sleep } from '../../tools/utils'; +import { createTimerSandbox } from '../../unit/timer_sandbox'; + +describe('CSOT spec unit tests', function () { + let client: MongoClient; + + afterEach(async function () { + sinon.restore(); + await client?.close(); + }); + + context('Server Selection and Connection Checkout', function () { + it('Operations should ignore waitQueueTimeoutMS if timeoutMS is also set.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 999999, timeoutMS: 10000 }); + sinon.spy(Timeout, 'expires'); + const timeoutContextSpy = sinon.spy(TimeoutContext, 'create'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + const createCalls = timeoutContextSpy.getCalls().filter( + // @ts-expect-error accessing concrete field + call => call.args[0].timeoutMS === 10000 + ); + + expect(createCalls).to.have.length.greaterThanOrEqual(1); + expect(Timeout.expires).to.not.have.been.calledWith(999999); + }); + + it('If timeoutMS is set for an operation, the remaining timeoutMS value should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ timeoutMS: 1000 }); + // Spy on connection checkout and 
pull options argument + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + + expect(checkoutSpy).to.have.been.calledOnce; + const timeoutContext = checkoutSpy.lastCall.args[0].timeoutContext; + expect(timeoutContext).to.exist; + // Check that we passed through the timeout + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.be.instanceOf(Timeout); + // @ts-expect-error accessing private properties + expect(timeoutContext._serverSelectionTimeout).to.equal( + // @ts-expect-error accessing private properties + timeoutContext._connectionCheckoutTimeout + ); + + // Check that no more Timeouts are constructed after we enter checkout + expect(!expiresSpy.calledAfter(checkoutSpy)); + }); + + it('If timeoutMS is not set for an operation, waitQueueTimeoutMS should apply to connection checkout after a server has been selected.', async function () { + client = this.configuration.newClient({ waitQueueTimeoutMS: 123456 }); + + const checkoutSpy = sinon.spy(ConnectionPool.prototype, 'checkOut'); + const selectServerSpy = sinon.spy(Topology.prototype, 'selectServer'); + const expiresSpy = sinon.spy(Timeout, 'expires'); + + await client.db('db').collection('collection').insertOne({ x: 1 }); + expect(checkoutSpy).to.have.been.calledAfter(selectServerSpy); + + expect(expiresSpy).to.have.been.calledWith(123456); + }); + + /* eslint-disable @typescript-eslint/no-empty-function */ + context.skip( + 'If a new connection is required to execute an operation, min(remaining computedServerSelectionTimeout, connectTimeoutMS) should apply to socket establishment.', + () => {} + ).skipReason = + 'TODO(DRIVERS-2347): Requires this ticket to be implemented before we can assert on connection CSOT behaviour'; + + context( + 'For drivers that have control over OCSP behavior, min(remaining 
computedServerSelectionTimeout, 5 seconds) should apply to HTTP requests against OCSP responders.', + () => {} + ); + }); + + context.skip('Socket timeouts', function () { + context( + 'If timeoutMS is unset, operations fail after two non-consecutive socket timeouts.', + () => {} + ); + }).skipReason = + 'TODO(NODE-6518): Add CSOT support for socket read/write at the connection layer for CRUD APIs'; + + describe('Client side encryption', function () { + describe('KMS requests', function () { + const stateMachine = new StateMachine({} as any); + const request = { + addResponse: _response => {}, + status: { + type: 1, + code: 1, + message: 'notARealStatus' + }, + bytesNeeded: 500, + kmsProvider: 'notRealAgain', + endpoint: 'fake', + message: Buffer.from('foobar') + }; + + context('when StateMachine.kmsRequest() is passed a `CSOTimeoutContext`', function () { + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) {}); + }); + + afterEach(async function () { + sinon.restore(); + }); + + it('the kms request times out through remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + const err = await stateMachine.kmsRequest(request, timeoutContext).catch(e => e); + expect(err).to.be.instanceOf(MongoOperationTimeoutError); + expect(err.errmsg).to.equal('KMS request timed out'); + }); + }); + + context('when StateMachine.kmsRequest() is not passed a `CSOTimeoutContext`', function () { + let clock: sinon.SinonFakeTimers; + let timerSandbox: sinon.SinonSandbox; + + let sleep; + + beforeEach(async function () { + sinon.stub(TLSSocket.prototype, 'connect').callsFake(function (..._args) { + clock.tick(30000); + }); + timerSandbox = createTimerSandbox(); + clock = sinon.useFakeTimers(); + sleep = promisify(setTimeout); + }); + + afterEach(async function () { + if (clock) { + timerSandbox.restore(); + clock.restore(); + clock = undefined; 
+ } + sinon.restore(); + }); + + it('the kms request does not timeout within 30 seconds', async function () { + const sleepingFn = async () => { + await sleep(30000); + throw Error('Slept for 30s'); + }; + + const err$ = Promise.all([stateMachine.kmsRequest(request), sleepingFn()]).catch(e => e); + clock.tick(30000); + const err = await err$; + expect(err.message).to.equal('Slept for 30s'); + }); + }); + }); + + describe('Auto Encryption', function () { + context( + 'when an auto encrypted client is configured with timeoutMS and the command takes longer than timeoutMS', + function () { + let encryptedClient; + const timeoutMS = 500; + + beforeEach(async function () { + encryptedClient = this.configuration.newClient( + {}, + { + autoEncryption: { + extraOptions: { + mongocryptdBypassSpawn: true, + mongocryptdURI: 'mongodb://localhost:27017/db?serverSelectionTimeoutMS=1000', + mongocryptdSpawnArgs: [ + '--pidfilepath=bypass-spawning-mongocryptd.pid', + '--port=27017' + ] + }, + keyVaultNamespace: 'admin.datakeys', + kmsProviders: { + aws: { accessKeyId: 'example', secretAccessKey: 'example' }, + local: { key: Buffer.alloc(96) } + } + }, + timeoutMS + } + ); + await encryptedClient.connect(); + + const stub = sinon + // @ts-expect-error accessing private method + .stub(Connection.prototype, 'sendCommand') + .callsFake(async function* (...args) { + await sleep(timeoutMS + 50); + yield* stub.wrappedMethod.call(this, ...args); + }); + }); + + afterEach(async function () { + await encryptedClient?.close(); + sinon.restore(); + }); + + it( + 'the command should fail due to a timeout error', + { requires: { mongodb: '>=4.2' } }, + async function () { + const { duration, result: error } = await measureDuration(() => + encryptedClient + .db() + .command({ ping: 1 }) + .catch(e => e) + ); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(timeoutMS - 100, timeoutMS + 100); + } + ); + } + ); + }); + }); + + context.skip('Background 
Connection Pooling', function () { + context( + 'When doing minPoolSize maintenance, connectTimeoutMS is used as the timeout for socket establishment.', + () => {} + ); + }).skipReason = 'TODO(NODE-6091): Implement CSOT logic for Background Connection Pooling'; + /* eslint-enable @typescript-eslint/no-empty-function */ }); diff --git a/test/integration/client-side-operations-timeout/node_csot.test.ts b/test/integration/client-side-operations-timeout/node_csot.test.ts index b6a936afbb9..ec69dcc1b7b 100644 --- a/test/integration/client-side-operations-timeout/node_csot.test.ts +++ b/test/integration/client-side-operations-timeout/node_csot.test.ts @@ -1,19 +1,45 @@ /* Anything javascript specific relating to timeouts */ +import { on, once } from 'node:events'; +import { Readable } from 'node:stream'; +import { pipeline } from 'node:stream/promises'; +import { setTimeout } from 'node:timers/promises'; + import { expect } from 'chai'; +import * as semver from 'semver'; import * as sinon from 'sinon'; import { + BSON, + type ChangeStream, + type ChangeStreamDocument, type ClientSession, type Collection, + type CommandFailedEvent, + type CommandStartedEvent, + type CommandSucceededEvent, + Connection, + CursorTimeoutMode, type Db, type FindCursor, - type MongoClient + GridFSBucket, + LEGACY_HELLO_COMMAND, + type MongoClient, + MongoInvalidArgumentError, + MongoOperationTimeoutError, + MongoServerError, + ObjectId, + promiseWithResolvers, + TopologyType } from '../../mongodb'; +import { type FailPoint, waitUntilPoolsFilled } from '../../tools/utils'; -describe('CSOT driver tests', () => { - afterEach(() => { - sinon.restore(); - }); +const metadata = { requires: { mongodb: '>=4.4' } }; + +describe('CSOT driver tests', metadata, () => { + // NOTE: minPoolSize here is set to ensure that connections are available when testing timeout + // behaviour. 
This reduces flakiness in our tests since operations will not spend time + // establishing connections, more closely mirroring long-running application behaviour + const minPoolSize = 20; describe('timeoutMS inheritance', () => { let client: MongoClient; @@ -21,7 +47,7 @@ describe('CSOT driver tests', () => { let coll: Collection; beforeEach(async function () { - client = this.configuration.newClient(undefined, { timeoutMS: 100 }); + client = this.configuration.newClient(undefined, { timeoutMS: 100, minPoolSize }); db = client.db('test', { timeoutMS: 200 }); }); @@ -46,7 +72,6 @@ describe('CSOT driver tests', () => { afterEach(async () => { await cursor?.close(); await session?.endSession(); - await session.endSession(); }); it('throws an error', async () => { @@ -94,4 +119,1288 @@ describe('CSOT driver tests', () => { }); }); }); + + describe('autoconnect', () => { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + client = undefined; + }); + + describe('when failing autoconnect with timeoutMS defined', () => { + let configClient: MongoClient; + + beforeEach(async function () { + configClient = this.configuration.newClient(); + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + }); + + afterEach(async function () { + const result = await configClient + .db() + .admin() + .command({ + configureFailPoint: 'failCommand', + mode: 'off', + data: { + failCommands: ['ping', 'hello', LEGACY_HELLO_COMMAND], + blockConnection: true, + blockTimeMS: 10 + } + }); + expect(result).to.have.property('ok', 1); + await configClient.close(); + }); + + it('throws a MongoOperationTimeoutError', { + metadata: { requires: { mongodb: '>=4.4', topology: '!load-balanced' } }, + test: async function () { + const 
commandsStarted = []; + client = this.configuration.newClient(undefined, { + timeoutMS: 1, + monitorCommands: true + }); + + client.on('commandStarted', ev => commandsStarted.push(ev)); + + const maybeError = await client + .db('test') + .collection('test') + .insertOne({ a: 19 }) + .then( + () => null, + e => e + ); + + expect(maybeError).to.exist; + expect(maybeError).to.be.instanceof(MongoOperationTimeoutError); + + expect(commandsStarted).to.have.length(0); // Ensure that we fail before we start the insertOne + } + }); + }); + }); + + describe('server-side maxTimeMS errors are transformed', () => { + let client: MongoClient; + let commandsSucceeded: CommandSucceededEvent[]; + let commandsFailed: CommandFailedEvent[]; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500_000, monitorCommands: true }); + commandsSucceeded = []; + commandsFailed = []; + client.on('commandSucceeded', event => { + if (event.commandName === 'configureFailPoint') return; + commandsSucceeded.push(event); + }); + client.on('commandFailed', event => commandsFailed.push(event)); + }); + + afterEach(async function () { + await client + .db() + .collection('a') + .drop() + .catch(() => null); + await client.close(); + commandsSucceeded = undefined; + commandsFailed = undefined; + }); + + describe('when a maxTimeExpired error is returned at the top-level', () => { + // {ok: 0, code: 50, codeName: "MaxTimeMSExpired", errmsg: "operation time limit exceeded"} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['ping'], + errorCode: 50 + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await 
client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it( + 'throws a MongoOperationTimeoutError error and emits command failed', + metadata, + async () => { + const error = await client + .db() + .command({ ping: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.property('code', 50); + + expect(commandsFailed).to.have.lengthOf(1); + expect(commandsFailed).to.have.nested.property('[0].failure.cause.code', 50); + } + ); + }); + + describe('when a maxTimeExpired error is returned inside a writeErrors array', () => { + // The server should always return one maxTimeExpiredError at the front of the writeErrors array + // But for the sake of defensive programming we will find any maxTime error in the array. + + beforeEach(async () => { + const writeErrorsReply = BSON.serialize({ + ok: 1, + writeErrors: [ + { code: 2, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 3, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 4, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' }, + { code: 50, codeName: 'MaxTimeMSExpired', errmsg: 'operation time limit exceeded' } + ] + }); + const commandSpy = sinon.spy(Connection.prototype, 'command'); + const readManyStub = sinon + // @ts-expect-error: readMany is private + .stub(Connection.prototype, 'readMany') + .callsFake(async function* (...args) { + const realIterator = readManyStub.wrappedMethod.call(this, ...args); + try { + const cmd = commandSpy.lastCall.args.at(1); + if ('giveMeWriteErrors' in cmd) { + await realIterator.next().catch(() => null); // dismiss response + yield { parse: () => writeErrorsReply }; + } else { + yield (await realIterator.next()).value; + } + } finally { + realIterator.return(); + } + }); + }); + + afterEach(() => sinon.restore()); + + it( + 'throws a MongoOperationTimeoutError 
error and emits command succeeded', + metadata, + async () => { + const error = await client + .db('admin') + .command({ giveMeWriteErrors: 1 }) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeErrors[3].code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeErrors[3].code', 50); + } + ); + }); + + describe('when a maxTimeExpired error is returned inside a writeConcernError embedded document', () => { + // {ok: 1, writeConcernError: {code: 50, codeName: "MaxTimeMSExpired"}} + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + writeConcernError: { code: 50, errmsg: 'times up buster', errorLabels: [] } + } + }; + + beforeEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command(failpoint); + else { + this.skipReason = 'Requires server version later than 4.4'; + this.skip(); + } + }); + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) + await client.db('admin').command({ ...failpoint, mode: 'off' }); + }); + + it( + 'throws a MongoOperationTimeoutError error and emits command succeeded', + metadata, + async () => { + const error = await client + .db() + .collection('a') + .insertOne({}) + .catch(error => error); + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(error.cause).to.be.instanceOf(MongoServerError); + expect(error.cause).to.have.nested.property('writeConcernError.code', 50); + + expect(commandsSucceeded).to.have.lengthOf(1); + expect(commandsSucceeded).to.have.nested.property('[0].reply.writeConcernError.code', 50); + } + ); + }); + }); + + describe('Non-Tailable cursors', () => { + let client: MongoClient; + let internalClient: 
MongoClient; + let commandStarted: (CommandStartedEvent & { command: { maxTimeMS?: number } })[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 150 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient({}); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize: 10 }); + + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + + context('ITERATION mode', () => { + context('when executing an operation', () => { + it( + 'must apply the configured timeoutMS to the initial operation execution', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 3, timeoutMode: CursorTimeoutMode.ITERATION, timeoutMS: 10 }) + .limit(3); + + const maybeError = await cursor.next().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + } + ); + + it('refreshes the timeout for any getMores', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 
'iteration', timeoutMS: 200 }) + .project({ _id: 0 }); + + // Iterating over 3 documents in the collection, each artificially taking ~50 ms due to failpoint. If timeoutMS is not refreshed, then we'd expect to error + for await (const doc of cursor) { + expect(doc).to.deep.equal({ x: 1 }); + } + + const finds = commandSucceeded.filter(ev => ev.commandName === 'find'); + const getMores = commandSucceeded.filter(ev => ev.commandName === 'getMore'); + + expect(finds).to.have.length(1); // Expecting 1 find + expect(getMores).to.have.length(3); // Expecting 3 getMores (including final empty getMore) + }); + + it( + 'does not append a maxTimeMS to the original command or getMores', + metadata, + async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'iteration', timeoutMS: 200 }) + .project({ _id: 0 }); + await cursor.toArray(); + + const commands = commandStarted.filter(c => + ['find', 'getMore'].includes(c.commandName) + ); + expect(commands).to.have.lengthOf(4); // Find and 2 getMores + + const [ + { command: aggregate }, + { command: getMore1 }, + { command: getMore2 }, + { command: getMore3 } + ] = commands; + expect(aggregate).not.to.have.property('maxTimeMS'); + expect(getMore1).not.to.have.property('maxTimeMS'); + expect(getMore2).not.to.have.property('maxTimeMS'); + expect(getMore3).not.to.have.property('maxTimeMS'); + } + ); + }); + }); + + context('LIFETIME mode', () => { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + let commandSucceeded: CommandSucceededEvent[]; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['find', 'getMore'], + blockConnection: true, + blockTimeMS: 50 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + await internalClient 
+ .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 3 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { + monitorCommands: true, + minPoolSize: 10 + }); + // wait for a handful of connections to have been established + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), 5); + + commandStarted = []; + commandSucceeded = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + client.on('commandSucceeded', ev => commandSucceeded.push(ev)); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await client.close(); + }); + context('when executing a next call', () => { + context( + 'when there are documents available from previously retrieved batch and timeout has expired', + () => { + it('returns documents without error', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + .project({ _id: 0 }); + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.be.gt(0); + + await setTimeout(100); + + const docOrErr = await cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.not.be.instanceOf(MongoOperationTimeoutError); + expect(docOrErr).to.be.deep.equal({ x: 1 }); + }); + } + ); + context('when a getMore is required and the timeout has expired', () => { + it('throws a MongoOperationTimeoutError', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 100 }) + + .project({ _id: 0 }); + + const doc = await cursor.next(); + expect(doc).to.deep.equal({ x: 1 }); + expect(cursor.documents.length).to.equal(0); + + await setTimeout(100); + + const docOrErr = await 
cursor.next().then( + d => d, + e => e + ); + + expect(docOrErr).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + it('does not apply maxTimeMS to a getMore', metadata, async function () { + const cursor = client + .db('db') + .collection('coll') + .find({}, { batchSize: 1, timeoutMode: 'cursorLifetime', timeoutMS: 1000 }) + .project({ _id: 0 }); + + for await (const _doc of cursor) { + // Ignore _doc + } + + const getMores = commandStarted + .filter(ev => ev.command.getMore != null) + .map(ev => ev.command); + expect(getMores.length).to.be.gt(0); + + for (const getMore of getMores) { + expect(getMore.maxTimeMS).to.not.exist; + } + }); + }); + }); + }); + + describe('Tailable cursors', function () { + let client: MongoClient; + let internalClient: MongoClient; + let commandStarted: CommandStartedEvent[]; + const metadata: MongoDBMetadataUI = { + requires: { mongodb: '>=4.4' } + }; + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['aggregate', 'find', 'getMore'], + blockConnection: true, + blockTimeMS: 100 + } + }; + + beforeEach(async function () { + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + + await internalClient.db('db').createCollection('coll', { capped: true, size: 1_000_000 }); + + await internalClient + .db('db') + .collection('coll') + .insertMany( + Array.from({ length: 100 }, () => { + return { x: 1 }; + }) + ); + + await internalClient.db().admin().command(failpoint); + + client = this.configuration.newClient(undefined, { monitorCommands: true, minPoolSize }); + commandStarted = []; + client.on('commandStarted', ev => commandStarted.push(ev)); + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), minPoolSize); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + .command({ ...failpoint, mode: 'off' }); + await internalClient.close(); + await 
client.close(); + }); + + context('when in ITERATION mode', function () { + context('awaitData cursors', function () { + let cursor: FindCursor; + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, awaitData: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, tailable: true, awaitData: true, batchSize: 1 }); + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + await cursor.next(); + await cursor.next(); + await cursor.next(); + await cursor.next(); + }); + + it('does not use timeoutMS to compute maxTimeMS for getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 10_000, tailable: true, awaitData: true, batchSize: 1 }); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.not.haveOwnProperty('maxTimeMS'); + }); + + context('when maxAwaitTimeMS is specified', function () { + it( + 'sets maxTimeMS to the configured maxAwaitTimeMS value on getMores', + metadata, + async function () { + cursor = client.db('db').collection('coll').find( + {}, + { + 
timeoutMS: 10_000, + tailable: true, + awaitData: true, + batchSize: 1, + maxAwaitTimeMS: 100 + } + ); + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted + .filter(x => x.command.getMore != null) + .map(x => x.command); + expect(getMores).to.have.lengthOf(1); + + const [getMore] = getMores; + expect(getMore).to.haveOwnProperty('maxTimeMS'); + expect(getMore.maxTimeMS).to.equal(100); + } + ); + }); + }); + + context('non-awaitData cursors', function () { + let cursor: FindCursor; + + afterEach(async function () { + if (cursor) await cursor.close(); + }); + + it('applies timeoutMS to initial command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 50, tailable: true, batchSize: 1 }); + const maybeError = await cursor.next().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + + const finds = commandStarted.filter(x => x.commandName === 'find'); + const getMores = commandStarted.filter(x => x.commandName === 'getMore'); + expect(finds).to.have.lengthOf(1); + expect(getMores).to.have.lengthOf(0); + }); + + it('refreshes the timeout for subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 150, tailable: true, batchSize: 1 }); + for (let i = 0; i < 5; i++) { + // Iterate cursor 5 times (server would have blocked for 500ms overall, but client + // should not throw + await cursor.next(); + } + }); + + it('does not append a maxTimeMS field to original command', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + + const finds = commandStarted.filter(x => x.command.find != null); + expect(finds).to.have.lengthOf(1); + expect(finds[0].command.find).to.exist; + expect(finds[0].command.maxTimeMS).to.not.exist; + }); + it('does not append a 
maxTimeMS field to subsequent getMores', metadata, async function () { + cursor = client + .db('db') + .collection('coll') + .find({}, { timeoutMS: 2000, tailable: true, batchSize: 1 }); + + await cursor.next(); + await cursor.next(); + + const getMores = commandStarted.filter(x => x.command.getMore != null); + + expect(getMores).to.have.lengthOf(1); + expect(getMores[0].command.getMore).to.exist; + expect(getMores[0].command.getMore.maxTimeMS).to.not.exist; + }); + }); + }); + }); + + describe('Change Streams', function () { + const metadata: MongoDBMetadataUI = { requires: { mongodb: '>=4.4', topology: '!single' } }; + let internalClient: MongoClient; + let client: MongoClient; + let commandsStarted: CommandStartedEvent[]; + + beforeEach(async function () { + this.configuration.url({ useMultipleMongoses: false }); + internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .dropCollection('coll') + .catch(() => null); + commandsStarted = []; + + client = await this.configuration.newClient(undefined, { monitorCommands: true }).connect(); + client.on('commandStarted', ev => { + commandsStarted.push(ev); + }); + }); + + afterEach(async function () { + await internalClient + .db() + .admin() + ?.command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient?.close(); + await client?.close(); + }); + + context('when in stream mode', function () { + let data: any[]; + let cs: ChangeStream; + let errorIter: AsyncIterableIterator; + + afterEach(async function () { + await cs?.close(); + }); + + context('when the initial aggregate times out', function () { + beforeEach(async function () { + data = []; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, // fail twice to account for executeOperation's retry attempt + data: { + failCommands: ['aggregate'], + blockConnection: true, + blockTimeMS: 130 + } + }; + + await internalClient.db().admin().command(failpoint); + cs = 
client.db('db').collection('coll').watch([], { timeoutMS: 120 }); + errorIter = on(cs, 'error'); + cs.on('change', () => { + // Add empty listener just to get the change stream running + }); + }); + + it('emits an error event', metadata, async function () { + const err = (await errorIter.next()).value[0]; + + expect(data).to.have.lengthOf(0); + expect(err).to.be.instanceof(MongoOperationTimeoutError); + }); + + it('closes the change stream', metadata, async function () { + const err = (await errorIter.next()).value[0]; + expect(err).to.be.instanceof(MongoOperationTimeoutError); + expect(cs.closed).to.be.true; + }); + }); + + context('when the getMore times out', function () { + let onSharded: boolean; + beforeEach(async function () { + onSharded = + this.configuration.topologyType === TopologyType.LoadBalanced || + this.configuration.topologyType === TopologyType.Sharded; + data = []; + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: onSharded ? 5100 : 120 + } + }; + + await internalClient.db().admin().command(failpoint); + cs = client + .db('db') + .collection('coll') + .watch([], { timeoutMS: onSharded ? 5000 : 100 }); + errorIter = on(cs, 'error'); + cs.on('change', () => { + // Add empty listener just to get the change stream running + }); + }); + + it('emits an error event', metadata, async function () { + const [err] = (await errorIter.next()).value; + expect(data).to.have.lengthOf(0); + expect(err).to.be.instanceof(MongoOperationTimeoutError); + }); + + it( + 'continues emitting change events', + { + requires: { + mongodb: '>=8.0', // NOTE: we are only testing on >= 8.0 because this version has increased performance and this test is sensitive to server performance. 
This feature should continue to work on server versions down to 4.4, but would require a larger value of timeoutMS which would either significantly slow down our CI testing or make the test flaky + topology: '!single', + os: 'linux' + } + }, + async function () { + // NOTE: duplicating setup code here so its particular configuration requirements don't + // affect other tests. + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: onSharded ? 5100 : 520 + } + }; + + await internalClient.db().admin().command(failpoint); + const cs = client + .db('db') + .collection('coll') + .watch([], { timeoutMS: onSharded ? 5000 : 500 }); + const errorIter = on(cs, 'error'); + cs.on('change', () => { + // Add empty listener just to get the change stream running + }); + + const err = (await errorIter.next()).value[0]; + expect(err).to.be.instanceof(MongoOperationTimeoutError); + + await once(cs.cursor, 'resumeTokenChanged'); + + const { + promise: changePromise, + resolve, + reject + } = promiseWithResolvers>(); + + cs.once('change', resolve); + + cs.once('error', reject); + + await internalClient.db('db').collection('coll').insertOne({ x: 1 }); + const change = await changePromise; + expect(change).to.have.ownProperty('operationType', 'insert'); + } + ); + + it('does not close the change stream', metadata, async function () { + const [err] = (await errorIter.next()).value; + expect(err).to.be.instanceof(MongoOperationTimeoutError); + + expect(cs.closed).to.be.false; + }); + + it('attempts to create a new change stream cursor', metadata, async function () { + await errorIter.next(); + let aggregates = commandsStarted + .filter(x => x.commandName === 'aggregate') + .map(x => x.command); + expect(aggregates).to.have.lengthOf(1); + + await once(cs, 'resumeTokenChanged'); + + aggregates = commandsStarted + .filter(x => x.commandName === 'aggregate') + .map(x => 
x.command); + + expect(aggregates).to.have.lengthOf(2); + + expect(aggregates[0].pipeline).to.deep.equal([{ $changeStream: {} }]); + expect(aggregates[1].pipeline).to.deep.equal([ + { $changeStream: { resumeAfter: cs.resumeToken } } + ]); + }); + }); + + context('when the resume attempt times out', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, // timeout the getMore, and the aggregate + data: { + failCommands: ['getMore', 'aggregate'], + blockConnection: true, + blockTimeMS: 130 + } + }; + + beforeEach(async function () { + cs = client.db('db').collection('coll').watch([], { timeoutMS: 120 }); + const _changePromise = once(cs, 'change'); + await once(cs.cursor, 'init'); + + await internalClient.db().admin().command(failpoint); + }); + + it('emits an error event', metadata, async function () { + let [err] = await once(cs, 'error'); // getMore failure + expect(err).to.be.instanceof(MongoOperationTimeoutError); + [err] = await once(cs, 'error'); // aggregate failure + expect(err).to.be.instanceof(MongoOperationTimeoutError); + }); + + it('closes the change stream', metadata, async function () { + await once(cs, 'error'); // await the getMore Failure + await once(cs, 'error'); // await the aggregate failure + expect(cs.closed).to.be.true; + }); + }); + }); + }); + + describe('GridFSBucket', () => { + const blockTimeMS = 200; + let internalClient: MongoClient; + let client: MongoClient; + let bucket: GridFSBucket; + + beforeEach(async function () { + client = this.configuration.newClient(undefined, { timeoutMS: 1000 }); + internalClient = this.configuration.newClient(undefined); + }); + + afterEach(async function () { + await client.close(); + await internalClient.db().admin().command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + }); + + context('upload', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + 
failCommands: ['insert'], + blockConnection: true, + blockTimeMS + } + }; + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(client.db('db'), { chunkSizeBytes: 2 }); + }); + + describe('openUploadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStream('filename', { timeoutMS: 175 }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + + it('only emits index event once per bucket', metadata, async function () { + let numEventsSeen = 0; + bucket.on('index', () => numEventsSeen++); + + const uploadStream0 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream0.destroy(error)); + const uploadStream1 = bucket + .openUploadStream('filename') + .on('error', error => uploadStream1.destroy(error)); + + const data = Buffer.from('test', 'utf-8'); + await pipeline(Readable.from(data), uploadStream0); + await pipeline(Readable.from(data), uploadStream1); + + expect(numEventsSeen).to.equal(1); + }); + }); + + describe('openUploadStreamWithId', function () { + it('can override db timeoutMS settings', metadata, async function () { + const data = Buffer.from('01020304', 'hex'); + const uploadStream = bucket.openUploadStreamWithId(new ObjectId(), 'filename', { + timeoutMS: 175 + }); + uploadStream.on('error', error => { + uploadStream.destroy(error); + }); + + uploadStream.write(data, error => { + uploadStream.destroy(error); + }); + + const maybeError = await once(uploadStream, 
'error'); + expect(maybeError[0]).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + + context('download', function () { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['find'], + blockConnection: true, + blockTimeMS + } + }; + const _id = new ObjectId('000000000000000000000005'); + + beforeEach(async function () { + await internalClient + .db('db') + .dropDatabase() + .catch(() => null); + + const files = await internalClient.db('db').createCollection('files'); + await files.insertOne({ + _id, + length: 10, + chunkSize: 4, + uploadDate: new Date('1970-01-01T00:00:00.000Z'), + md5: '57d83cd477bfb1ccd975ab33d827a92b', + filename: 'length-10', + contentType: 'application/octet-stream', + aliases: [], + metadata: {} + }); + + await internalClient.db().admin().command(failpoint); + + const db = client.db('db'); + expect(db.timeoutMS).to.equal(1000); + + bucket = new GridFSBucket(db); + }); + + describe('openDownloadStream', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStream(_id, { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + + describe('openDownloadStreamByName', function () { + it('can override db timeoutMS settings', metadata, async function () { + const downloadStream = bucket.openDownloadStreamByName('length-10', { timeoutMS: 80 }); + const maybeError = await downloadStream.toArray().then( + () => null, + e => e + ); + expect(maybeError).to.be.instanceOf(MongoOperationTimeoutError); + }); + }); + }); + }); + + describe('when using an explicit session', () => { + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset'], mongodb: '>=4.4' } + }; + + describe('created for a withTransaction callback', () => { + describe('passing a timeoutMS 
and a session with a timeoutContext', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + }); + + afterEach(async function () { + await client.close(); + }); + + it('throws a validation error from the operation', metadata, async () => { + // Drivers MUST raise a validation error if an explicit session with a timeout is used and + // the timeoutMS option is set at the operation level for operations executed as part of a withTransaction callback. + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll + .insertOne({ x: 1 }, { session, timeoutMS: 1234 }) + .catch(error => error); + throw insertError; + }) + .catch(error => error); + + expect(insertError).to.be.instanceOf(MongoInvalidArgumentError); + expect(withTransactionError).to.be.instanceOf(MongoInvalidArgumentError); + }); + }); + }); + + describe('created manually', () => { + describe('passing a timeoutMS and a session with an inherited timeoutMS', () => { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 123 }); + await client + .db('db') + .dropCollection('coll') + .catch(() => null); + }); + + afterEach(async function () { + await client.close(); + }); + + it('does not throw a validation error', metadata, async () => { + const coll = client.db('db').collection('coll'); + const session = client.startSession(); + session.startTransaction(); + await coll.insertOne({ x: 1 }, { session, timeoutMS: 1234 }); + await session.abortTransaction(); // this uses the inherited timeoutMS, not the insert + }); + }); + }); + }); + + describe('Convenient Transactions', () => { + /** Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. 
*/ + const metadata: MongoDBMetadataUI = { + requires: { topology: ['replicaset', 'sharded'], mongodb: '>=5.0' } + }; + + describe('when an operation fails inside withTransaction callback', () => { + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + failCommands: ['insert', 'abortTransaction'], + blockConnection: true, + blockTimeMS: 600 + } + }; + + beforeEach(async function () { + if (!semver.satisfies(this.configuration.version, '>=4.4')) { + this.skipReason = 'Requires server version 4.4+'; + this.skip(); + } + const internalClient = this.configuration.newClient(); + await internalClient + .db('db') + .collection('coll') + .drop() + .catch(() => null); + await internalClient.db('admin').command(failpoint); + await internalClient.close(); + }); + + let client: MongoClient; + + afterEach(async function () { + if (semver.satisfies(this.configuration.version, '>=4.4')) { + const internalClient = this.configuration.newClient(); + await internalClient + .db('admin') + .command({ configureFailPoint: 'failCommand', mode: 'off' }); + await internalClient.close(); + } + await client?.close(); + }); + + it( + 'timeoutMS is refreshed for abortTransaction and the timeout error is thrown from the operation', + metadata, + async function () { + const commandsFailed = []; + const commandsStarted = []; + + client = this.configuration + .newClient({ timeoutMS: 500, monitorCommands: true }) + .on('commandStarted', e => commandsStarted.push(e.commandName)) + .on('commandFailed', e => commandsFailed.push(e.commandName)); + + const coll = client.db('db').collection('coll'); + + const session = client.startSession(); + + let insertError: Error | null = null; + const withTransactionError = await session + .withTransaction(async session => { + insertError = await coll.insertOne({ x: 1 }, { session }).catch(error => error); + throw insertError; + }) + .catch(error => error); + + try { + 
expect(insertError).to.be.instanceOf(MongoOperationTimeoutError); + expect(withTransactionError).to.be.instanceOf(MongoOperationTimeoutError); + expect(commandsStarted, 'commands started').to.deep.equal([ + 'insert', + 'abortTransaction' + ]); + expect(commandsFailed, 'commands failed').to.deep.equal(['insert', 'abortTransaction']); + } finally { + await session.endSession(); + } + } + ); + }); + }); + + describe('Connection after timeout', { requires: { mongodb: '>=4.4' } }, function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500 }); + + const failpoint: FailPoint = { + configureFailPoint: 'failCommand', + mode: { + times: 1 + }, + data: { + failCommands: ['insert'], + blockConnection: true, + blockTimeMS: 700 + } + }; + + await client.db('admin').command(failpoint); + }); + + afterEach(async function () { + await client.close(); + }); + + it('closes so pending messages are not read by another operation', async function () { + const cmap = []; + client.on('connectionCheckedOut', ev => cmap.push(ev)); + client.on('connectionClosed', ev => cmap.push(ev)); + + const error = await client + .db('socket') + .collection('closes') + .insertOne({}) + .catch(error => error); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(cmap).to.have.lengthOf(2); + + const [checkedOut, closed] = cmap; + expect(checkedOut).to.have.property('name', 'connectionCheckedOut'); + expect(closed).to.have.property('name', 'connectionClosed'); + expect(checkedOut).to.have.property('connectionId', closed.connectionId); + }); + }); }); diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/change-streams.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/change-streams.json new file mode 100644 index 00000000000..4708939d009 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/change-streams.json @@ -0,0 
+1,135 @@ +{ + "description": "timeoutMS behaves correctly for change streams", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 200, + "batchSize": 2, + "maxAwaitTimeMS": 10 + }, + "saveResultAsEntity": "changeStream" + }, + { + "name": "iterateOnce", + "object": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 10 + } + } + } + ] + } 
+ ] + } + ] +} diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json new file mode 100644 index 00000000000..dd6fcb2cf84 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/command-execution.json @@ -0,0 +1,153 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": 
"insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1500 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 500 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json new file mode 100644 index 00000000000..aabc39abb37 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-awaitData.json @@ -0,0 +1,229 @@ +{ + "description": "timeoutMS behaves correctly for tailable awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + 
"description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll" + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS is refreshed for getMore if maxAwaitTimeMS is set", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "timeoutMS": 250, + "batchSize": 1, + "maxAwaitTimeMS": 10 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + 
"object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": 10 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json new file mode 100644 index 00000000000..80cf74a1116 --- /dev/null +++ b/test/integration/client-side-operations-timeout/unified-csot-node-specs/tailable-non-awaitData.json @@ -0,0 +1,151 @@ +{ + "description": "timeoutMS behaves correctly for tailable non-awaitData cursors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 200 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "createOptions": { + "capped": true, + "size": 500 + }, + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS is refreshed for getMore - failure", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 250 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailable", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/integration/collection-management/collection_db_management.test.ts b/test/integration/collection-management/collection_db_management.test.ts index f5c4c55cf05..0cb90b3b592 100644 --- a/test/integration/collection-management/collection_db_management.test.ts +++ b/test/integration/collection-management/collection_db_management.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { Collection, type Db, type MongoClient } from '../../mongodb'; +import { Collection, type Db, type MongoClient, ObjectId } from '../../mongodb'; describe('Collection Management and Db Management', function () { let client: MongoClient; @@ -16,7 +16,7 @@ describe('Collection Management and Db Management', function () { }); 
it('returns a collection object after calling createCollection', async function () { - const collection = await db.createCollection('collection'); + const collection = await db.createCollection(new ObjectId().toHexString()); expect(collection).to.be.instanceOf(Collection); }); diff --git a/test/integration/crud/client_bulk_write.test.ts b/test/integration/crud/client_bulk_write.test.ts new file mode 100644 index 00000000000..fa20d8ed29a --- /dev/null +++ b/test/integration/crud/client_bulk_write.test.ts @@ -0,0 +1,398 @@ +import { expect } from 'chai'; +import { setTimeout } from 'timers/promises'; + +import { + type CommandStartedEvent, + type Connection, + type ConnectionPool, + type MongoClient, + MongoOperationTimeoutError, + now, + TimeoutContext +} from '../../mongodb'; +import { + clearFailPoint, + configureFailPoint, + makeMultiBatchWrite, + makeMultiResponseBatchModelArray, + mergeTestMetadata +} from '../../tools/utils'; +import { filterForCommands } from '../shared'; + +const metadata: MongoDBMetadataUI = { + requires: { + mongodb: '>=8.0', + serverless: 'forbid' + } +}; + +describe('Client Bulk Write', function () { + let client: MongoClient; + + afterEach(async function () { + await client?.close(); + await clearFailPoint(this.configuration); + }); + + describe('CSOT enabled', function () { + describe('when timeoutMS is set on the client', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 300 }); + await client.connect(); + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite([ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ]) + .catch(e => e); + const end = 
now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on the bulkWrite operation', function () { + beforeEach(async function () { + client = this.configuration.newClient({}); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('timeoutMS is used as the timeout for the bulk write', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when timeoutMS is set on both the client and operation options', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('bulk write options take precedence over the client options', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe( + 'unacknowledged writes', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + function () { 
+ let connection: Connection; + let pool: ConnectionPool; + + beforeEach(async function () { + client = this.configuration.newClient({}, { maxPoolSize: 1, waitQueueTimeoutMS: 2000 }); + + await client.connect(); + + pool = Array.from(client.topology.s.servers.values())[0].pool; + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + }); + + afterEach(async function () { + pool = Array.from(client.topology.s.servers.values())[0].pool; + pool.checkIn(connection); + await client.close(); + }); + + it('a single batch bulk write does not take longer than timeoutMS', async function () { + const start = now(); + let end; + const timeoutError = client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 200, writeConcern: { w: 0 }, ordered: false } + ) + .catch(e => e) + .then(e => { + end = now(); + return e; + }); + + await setTimeout(250); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(200 - 100, 200 + 100); + }); + + it( + 'timeoutMS applies to all batches', + { + requires: { + mongodb: '>=8.0', + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + let end; + const timeoutError = client + .bulkWrite(models, { + timeoutMS: 400, + writeConcern: { w: 0 }, + ordered: false + }) + .catch(e => e) + .then(r => { + end = now(); + return r; + }); + + await setTimeout(210); + + pool.checkIn(connection); + connection = await pool.checkOut({ + timeoutContext: TimeoutContext.create({ + serverSelectionTimeoutMS: 30000, + waitQueueTimeoutMS: 1000 + }) + }); + + await setTimeout(210); + + expect(await timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(400 - 100, 400 + 100); + } + ); + } + ); + + describe('acknowledged writes', 
metadata, function () { + describe('when a bulk write command times out', function () { + beforeEach(async function () { + client = this.configuration.newClient({}, { timeoutMS: 1500 }); + + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1000, failCommands: ['bulkWrite'] } + }); + }); + + it('the operation times out', metadata, async function () { + const start = now(); + const timeoutError = await client + .bulkWrite( + [ + { + name: 'insertOne', + namespace: 'foo.bar', + document: { age: 10 } + } + ], + { timeoutMS: 300 } + ) + .catch(e => e); + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(300 - 100, 300 + 100); + }); + }); + + describe('when the timeout is reached while iterating the result cursor', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true, minPoolSize: 5 }); + client.on('commandStarted', filterForCommands(['getMore', 'killCursors'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { blockConnection: true, blockTimeMS: 1400, failCommands: ['getMore'] } + }); + }); + + it( + 'the bulk write operation times out', + mergeTestMetadata(metadata, { + requires: { + // this test has timing logic that depends on killCursors being executed, which does + // not happen in load balanced mode + topology: '!load-balanced' + } + }), + async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + verboseResults: true, + timeoutMS: 1500 + }) + .catch(e => e); + + const end = now(); + 
expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + // DRIVERS-3005 - killCursors causes cursor cleanup to extend past timeoutMS. + // The amount of time killCursors takes is wildly variable and can take up to almost + // 600-700ms sometimes. + expect(end - start).to.be.within(1500, 1500 + 800); + expect(commands.map(({ commandName }) => commandName)).to.have.lengthOf(2); + } + ); + }); + + describe('if the cursor encounters an error and a killCursors is sent', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands(['killCursors'], commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 'failCommand', + mode: { times: 2 }, + data: { + blockConnection: true, + blockTimeMS: 3000, + failCommands: ['getMore', 'killCursors'] + } + }); + }); + + it( + 'timeoutMS is refreshed to the timeoutMS passed to the bulk write for the killCursors command', + { + requires: { ...metadata.requires, topology: '!load-balanced' } + }, + async function () { + const models = await makeMultiResponseBatchModelArray(this.configuration); + const timeoutError = await client + .bulkWrite(models, { ordered: true, timeoutMS: 2800, verboseResults: true }) + .catch(e => e); + + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + expect(maxTimeMS).to.be.greaterThan(1000); + } + ); + }); + + describe('when the bulk write is executed in multiple batches', function () { + const commands: CommandStartedEvent[] = []; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + + client.on('commandStarted', filterForCommands('bulkWrite', commands)); + await client.connect(); + + await configureFailPoint(this.configuration, { + configureFailPoint: 
'failCommand', + mode: { times: 2 }, + data: { blockConnection: true, blockTimeMS: 1010, failCommands: ['bulkWrite'] } + }); + }); + + it( + 'timeoutMS applies to the duration of all batches', + { + requires: { + ...metadata.requires, + topology: 'single' + } + }, + async function () { + const models = await makeMultiBatchWrite(this.configuration); + const start = now(); + const timeoutError = await client + .bulkWrite(models, { + timeoutMS: 2000 + }) + .catch(e => e); + + const end = now(); + expect(timeoutError).to.be.instanceOf(MongoOperationTimeoutError); + expect(end - start).to.be.within(2000 - 100, 2000 + 100); + expect(commands.length, 'Test must execute two batches.').to.equal(2); + } + ); + }); + }); + }); +}); diff --git a/test/integration/crud/explain.test.ts b/test/integration/crud/explain.test.ts index 44fe381303a..c7a9a3025f9 100644 --- a/test/integration/crud/explain.test.ts +++ b/test/integration/crud/explain.test.ts @@ -5,9 +5,12 @@ import { type Collection, type CommandStartedEvent, type Db, + type Document, type MongoClient, + MongoOperationTimeoutError, MongoServerError } from '../../mongodb'; +import { clearFailPoint, configureFailPoint, measureDuration } from '../../tools/utils'; import { filterForCommands } from '../shared'; const explain = [true, false, 'queryPlanner', 'allPlansExecution', 'executionStats', 'invalid']; @@ -296,6 +299,372 @@ describe('CRUD API explain option', function () { }; } }); + + describe('explain with timeoutMS', function () { + let client: MongoClient; + type ExplainStartedEvent = CommandStartedEvent & { + command: { explain: Document & { maxTimeMS?: number }; maxTimeMS?: number }; + }; + const commands: ExplainStartedEvent[] = []; + + afterEach(async function () { + await clearFailPoint( + this.configuration, + this.configuration.url({ useMultipleMongoses: false }) + ); + }); + + beforeEach(async function () { + const uri = this.configuration.url({ useMultipleMongoses: false }); + await configureFailPoint( + 
this.configuration, + { + configureFailPoint: 'failCommand', + mode: 'alwaysOn', + data: { + failCommands: ['explain'], + blockConnection: true, + blockTimeMS: 2000 + } + }, + this.configuration.url({ useMultipleMongoses: false }) + ); + + client = this.configuration.newClient(uri, { monitorCommands: true }); + client.on('commandStarted', filterForCommands('explain', commands)); + await client.connect(); + }); + + afterEach(async function () { + await client?.close(); + commands.length = 0; + }); + + describe('Explain helpers respect timeoutMS', function () { + describe('when a cursor api is being explained', function () { + describe('when timeoutMS is provided', function () { + it( + 'the explain command times out after timeoutMS', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const { duration, result } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e) + ); + + expect(result).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(1000 - 100, 1000 + 100); + } + ); + + it( + 'the explain command has the calculated maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const timeout = await cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e); + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + + expect(maxTimeMS).to.be.a('number'); + } + ); + + it( + 'the explained command does not have a maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const timeout = await cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e); + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + 
const [ + { + command: { + explain: { maxTimeMS } + } + } + ] = commands; + + expect(maxTimeMS).not.to.exist; + } + ); + }); + + describe('when timeoutMS and maxTimeMS are both provided', function () { + it( + 'an error is thrown indicating incompatibility of those options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 1000 }); + const error = await cursor + .explain({ verbosity: 'queryPlanner', maxTimeMS: 1000 }) + .catch(e => e); + expect(error).to.match(/Cannot use maxTimeMS with timeoutMS for explain commands/); + } + ); + }); + }); + + describe('when a non-cursor api is being explained', function () { + describe('when timeoutMS is provided', function () { + it( + 'the explain command times out after timeoutMS', + { requires: { mongodb: '>=4.4' } }, + async function () { + const { duration, result } = await measureDuration(() => + client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner' } + } + ) + .catch(e => e) + ); + + expect(result).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(1000 - 100, 1000 + 100); + } + ); + + it( + 'the explain command has the calculated maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const timeout = await client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner' } + } + ) + .catch(e => e); + + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { maxTimeMS } + } + ] = commands; + + expect(maxTimeMS).to.be.a('number'); + } + ); + + it( + 'the explained command does not have a maxTimeMS value attached', + { requires: { mongodb: '>=4.4' } }, + async function () { + const timeout = await client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner' } 
+ } + ) + .catch(e => e); + + expect(timeout).to.be.instanceOf(MongoOperationTimeoutError); + + const [ + { + command: { + explain: { maxTimeMS } + } + } + ] = commands; + + expect(maxTimeMS).not.to.exist; + } + ); + }); + + describe('when timeoutMS and maxTimeMS are both provided', function () { + it( + 'an error is thrown indicating incompatibility of those options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const error = await client + .db('foo') + .collection('bar') + .deleteMany( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner', maxTimeMS: 1000 } + } + ) + .catch(e => e); + + expect(error).to.match(/Cannot use maxTimeMS with timeoutMS for explain commands/); + } + ); + }); + }); + + describe('when find({}, { explain: ...}) is used with timeoutMS', function () { + it( + 'an error is thrown indicating that explain is not supported with timeoutMS for this API', + { requires: { mongodb: '>=4.4' } }, + async function () { + const error = await client + .db('foo') + .collection('bar') + .find( + {}, + { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner', maxTimeMS: 1000 } + } + ) + .toArray() + .catch(e => e); + + expect(error).to.match( + /timeoutMS cannot be used with explain when explain is specified in findOptions/ + ); + } + ); + }); + + describe('when aggregate({}, { explain: ...}) is used with timeoutMS', function () { + it( + 'an error is thrown indicating that explain is not supported with timeoutMS for this API', + { requires: { mongodb: '>=4.4' } }, + async function () { + const error = await client + .db('foo') + .collection('bar') + .aggregate([], { + timeoutMS: 1000, + explain: { verbosity: 'queryPlanner', maxTimeMS: 1000 } + }) + .toArray() + .catch(e => e); + + expect(error).to.match( + /timeoutMS cannot be used with explain when explain is specified in aggregateOptions/ + ); + } + ); + }); + }); + + describe('fluent api timeoutMS precedence and inheritance', function () { + describe('find({}, { timeoutMS 
}).explain()', function () { + it( + 'respects the timeoutMS from the find options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 800 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('find().explain({}, { timeoutMS })', function () { + it( + 'respects the timeoutMS from the explain helper', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find(); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('find({}, { timeoutMS} ).explain({}, { timeoutMS })', function () { + it( + 'the timeoutMS from the explain helper has precedence', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').find({}, { timeoutMS: 100 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('aggregate([], { timeoutMS }).explain()', function () { + it( + 'respects the timeoutMS from the find options', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').aggregate([], { timeoutMS: 800 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }).catch(e => e) + 
); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('aggregate([], { timeoutMS })', function () { + it( + 'respects the timeoutMS from the explain helper', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').aggregate(); + + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + + describe('aggregate([], { timeoutMS} ).explain({}, { timeoutMS })', function () { + it( + 'the timeoutMS from the explain helper has precedence', + { requires: { mongodb: '>=4.4' } }, + async function () { + const cursor = client.db('foo').collection('bar').aggregate([], { timeoutMS: 100 }); + const { duration, result: error } = await measureDuration(() => + cursor.explain({ verbosity: 'queryPlanner' }, { timeoutMS: 800 }).catch(e => e) + ); + + expect(error).to.be.instanceOf(MongoOperationTimeoutError); + expect(duration).to.be.within(800 - 100, 800 + 100); + } + ); + }); + }); + }); }); function explainValueToExpectation(explainValue: boolean | string) { diff --git a/test/integration/crud/find_cursor_methods.test.js b/test/integration/crud/find_cursor_methods.test.js index 42eeda3e816..21a6649bf0b 100644 --- a/test/integration/crud/find_cursor_methods.test.js +++ b/test/integration/crud/find_cursor_methods.test.js @@ -1,7 +1,13 @@ 'use strict'; const { expect } = require('chai'); const { filterForCommands } = require('../shared'); -const { promiseWithResolvers, MongoCursorExhaustedError } = require('../../mongodb'); +const { + promiseWithResolvers, + MongoCursorExhaustedError, + CursorTimeoutContext, + TimeoutContext, + MongoAPIError +} = require('../../mongodb'); describe('Find Cursor', function () { 
let client; @@ -246,23 +252,45 @@ describe('Find Cursor', function () { }); context('#rewind', function () { - it('should rewind a cursor', function (done) { + it('should rewind a cursor', async function () { const coll = client.db().collection('abstract_cursor'); const cursor = coll.find({}); - this.defer(() => cursor.close()); - cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + try { + let docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); cursor.rewind(); - cursor.toArray((err, docs) => { - expect(err).to.not.exist; - expect(docs).to.have.length(6); + docs = await cursor.toArray(); + expect(docs).to.have.lengthOf(6); + } finally { + await cursor.close(); + } + }); - done(); - }); - }); + it('throws if the cursor does not own its timeoutContext', async function () { + const coll = client.db().collection('abstract_cursor'); + const cursor = coll.find( + {}, + { + timeoutContext: new CursorTimeoutContext( + TimeoutContext.create({ + timeoutMS: 1000, + serverSelectionTimeoutMS: 1000 + }), + Symbol() + ) + } + ); + + try { + cursor.rewind(); + expect.fail(`rewind should have thrown.`); + } catch (error) { + expect(error).to.be.instanceOf(MongoAPIError); + } finally { + await cursor.close(); + } }); it('should end an implicit session on rewind', { diff --git a/test/integration/node-specific/abstract_cursor.test.ts b/test/integration/node-specific/abstract_cursor.test.ts index a5e7fba13dd..ac060c9d459 100644 --- a/test/integration/node-specific/abstract_cursor.test.ts +++ b/test/integration/node-specific/abstract_cursor.test.ts @@ -7,12 +7,20 @@ import { inspect } from 'util'; import { AbstractCursor, type Collection, + type CommandStartedEvent, + CSOTTimeoutContext, + CursorTimeoutContext, + CursorTimeoutMode, type FindCursor, MongoAPIError, type MongoClient, MongoCursorExhaustedError, - MongoServerError + MongoOperationTimeoutError, + MongoServerError, + TimeoutContext } from '../../mongodb'; +import { 
clearFailPoint, configureFailPoint } from '../../tools/utils'; +import { filterForCommands } from '../shared'; describe('class AbstractCursor', function () { describe('regression tests NODE-5372', function () { @@ -395,4 +403,157 @@ describe('class AbstractCursor', function () { expect(nextSpy.callCount).to.be.lessThan(numDocuments); }); }); + + describe('externally provided timeout contexts', function () { + let client: MongoClient; + let collection: Collection; + let context: CursorTimeoutContext; + const commands: CommandStartedEvent[] = []; + let internalContext: TimeoutContext; + + beforeEach(async function () { + client = this.configuration.newClient({}, { monitorCommands: true }); + client.on('commandStarted', filterForCommands('killCursors', commands)); + + collection = client.db('abstract_cursor_integration').collection('test'); + internalContext = TimeoutContext.create({ timeoutMS: 1000, serverSelectionTimeoutMS: 2000 }); + + context = new CursorTimeoutContext(internalContext, Symbol()); + + await collection.insertMany([{ a: 1 }, { b: 2 }, { c: 3 }]); + }); + + afterEach(async function () { + sinon.restore(); + await collection.deleteMany({}); + await client.close(); + }); + + it('CursorTimeoutMode.refresh is a no-op', async function () { + const cursorTimeoutRefreshSpy = sinon.spy(CursorTimeoutContext.prototype, 'refresh'); + const csotTimeoutContextRefreshSpy = sinon.spy(CSOTTimeoutContext.prototype, 'refresh'); + const abstractCursorGetMoreSpy = sinon.spy(AbstractCursor.prototype, 'getMore'); + + const cursor = collection.find( + {}, + { timeoutMode: CursorTimeoutMode.ITERATION, timeoutContext: context, batchSize: 1 } + ); + await cursor.toArray(); + + expect(abstractCursorGetMoreSpy).to.have.been.calledThrice; + + expect(cursorTimeoutRefreshSpy.getCalls()).to.have.length(3); + expect(csotTimeoutContextRefreshSpy).to.not.have.been.called; + }); + + it('CursorTimeoutMode.clear is a no-op', async function () { + const cursorTimeoutClearSpy = 
sinon.spy(CursorTimeoutContext.prototype, 'clear'); + const csotTimeoutContextRefreshSpy = sinon.spy(CSOTTimeoutContext.prototype, 'clear'); + const abstractCursorGetMoreSpy = sinon.spy(AbstractCursor.prototype, 'getMore'); + + const cursor = collection.find( + {}, + { timeoutMode: CursorTimeoutMode.ITERATION, timeoutContext: context, batchSize: 1 } + ); + await cursor.toArray(); + + expect(abstractCursorGetMoreSpy).to.have.been.calledThrice; + + expect(cursorTimeoutClearSpy.getCalls()).to.have.length(4); + expect(csotTimeoutContextRefreshSpy).to.not.have.been.called; + }); + + describe('when timeoutMode is omitted', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find({}, { timeoutContext: context, timeoutMS: 1000 }); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when timeoutMode is LIFETIME', function () { + it('stores timeoutContext as the timeoutContext on the cursor', function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + // @ts-expect-error Private access. + expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor is initialized', function () { + it('the provided timeoutContext is not overwritten', async function () { + const cursor = collection.find( + {}, + { timeoutContext: context, timeoutMS: 1000, timeoutMode: CursorTimeoutMode.LIFETIME } + ); + + await cursor.toArray(); + + // @ts-expect-error Private access. 
+ expect(cursor.timeoutContext).to.equal(context); + }); + }); + + describe('when the cursor refreshes the timeout for killCursors', function () { + let uri: string; + + before(function () { + uri = this.configuration.url({ useMultipleMongoses: false }); + }); + + beforeEach(async function () { + commands.length = 0; + await configureFailPoint( + this.configuration, + { + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['getMore'], + blockConnection: true, + blockTimeMS: 5000 + } + }, + uri + ); + }); + + afterEach(async function () { + await clearFailPoint(this.configuration, uri); + }); + + it( + 'the provided timeoutContext is not modified', + { + requires: { + mongodb: '>=4.4', + topology: '!load-balanced' + } + }, + async function () { + const cursor = collection.find( + {}, + { + timeoutContext: context, + timeoutMS: 150, + timeoutMode: CursorTimeoutMode.LIFETIME, + batchSize: 1 + } + ); + + const refresh = sinon.spy(context, 'refresh'); + const refreshed = sinon.spy(context, 'refreshed'); + const error = await cursor.toArray().catch(e => e); + + expect(error).to.be.instanceof(MongoOperationTimeoutError); + expect(refresh.called).to.be.false; + expect(refreshed.called).to.be.true; + } + ); + }); + }); }); diff --git a/test/integration/node-specific/auto_connect.test.ts b/test/integration/node-specific/auto_connect.test.ts index 7f8dbd1fe3b..3e56b69fbef 100644 --- a/test/integration/node-specific/auto_connect.test.ts +++ b/test/integration/node-specific/auto_connect.test.ts @@ -1,17 +1,19 @@ import { expect } from 'chai'; import { once } from 'events'; +import * as sinon from 'sinon'; import { BSONType, type ChangeStream, ClientSession, type Collection, - type MongoClient, + MongoClient, MongoNotConnectedError, ProfilingLevel, Topology, TopologyType } from '../../mongodb'; +import { type FailPoint, sleep } from '../../tools/utils'; describe('When executing an operation for the first time', () => { let client: MongoClient; @@ 
-821,4 +823,104 @@ describe('When executing an operation for the first time', () => { }); }); }); + + describe('when CSOT is enabled', function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient({ timeoutMS: 500 }); + }); + + afterEach(async function () { + await client.close(); + }); + + describe('when nothing is wrong', function () { + it('connects the client', async function () { + await client.connect(); + expect(client).to.have.property('topology').that.is.instanceOf(Topology); + }); + }); + + describe( + 'when the server requires auth and ping is delayed', + { requires: { auth: 'enabled', mongodb: '>=4.4' } }, + function () { + beforeEach(async function () { + // set failpoint to delay ping + // create new util client to avoid affecting the test client + const utilClient = this.configuration.newClient(); + await utilClient.db('admin').command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { failCommands: ['ping'], blockConnection: true, blockTimeMS: 1000 } + } as FailPoint); + await utilClient.close(); + }); + + it('timeoutMS from the client is not used for the internal `ping`', async function () { + const start = performance.now(); + const returnedClient = await client.connect(); + const end = performance.now(); + expect(returnedClient).to.equal(client); + expect(end - start).to.be.within(1000, 1500); // timeoutMS is 1000, did not apply. 
+ }); + } + ); + + describe( + 'when server selection takes longer than the timeout', + { requires: { auth: 'enabled', mongodb: '>=4.4' } }, + function () { + beforeEach(async function () { + const selectServerStub = sinon + .stub(Topology.prototype, 'selectServer') + .callsFake(async function (selector, options) { + await sleep(1000); + const result = selectServerStub.wrappedMethod.call(this, selector, options); + sinon.restore(); // restore after connect selection + return result; + }); + }); + + // restore sinon stub after test + afterEach(() => { + sinon.restore(); + }); + + it('client.connect() takes as long as selectServer is delayed for and does not throw a timeout error', async function () { + const start = performance.now(); + expect(client.topology).to.not.exist; // make sure not connected. + const res = await client.db().collection('test').insertOne({ a: 1 }, { timeoutMS: 500 }); // auto-connect + const end = performance.now(); + expect(res).to.have.property('acknowledged', true); + expect(end - start).to.be.within(1000, 1500); // timeoutMS is 1000, did not apply. + }); + } + ); + + describe('when auto connect is used and connect() takes longer than timeoutMS', function () { + // This test stubs the connect method to check that connect() does not get timed out + // vs. the test above makes sure that the `ping` does not inherit the client's timeoutMS setting + beforeEach(async function () { + const connectStub = sinon + .stub(MongoClient.prototype, 'connect') + .callsFake(async function () { + await sleep(1000); + const result = connectStub.wrappedMethod.call(this); + sinon.restore(); // restore after connect selection + return result; + }); + }); + + it('the operation succeeds', async function () { + const start = performance.now(); + expect(client.topology).to.not.exist; // make sure not connected. 
+ const res = await client.db().collection('test').insertOne({ a: 1 }); // auto-connect + const end = performance.now(); + expect(res).to.have.property('acknowledged', true); + expect(end - start).to.be.within(1000, 1500); // timeoutMS is 1000, did not apply. + }); + }); + }); }); diff --git a/test/integration/node-specific/db.test.js b/test/integration/node-specific/db.test.js index 338e136c12c..a092a8d888b 100644 --- a/test/integration/node-specific/db.test.js +++ b/test/integration/node-specific/db.test.js @@ -45,22 +45,12 @@ describe('Db', function () { }); }); - it('shouldCorrectlyHandleFailedConnection', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded'] } - }, - - test: function (done) { - var configuration = this.configuration; - var fs_client = configuration.newClient('mongodb://127.0.0.1:25117/test', { - serverSelectionTimeoutMS: 10 - }); - - fs_client.connect(function (err) { - test.ok(err != null); - done(); - }); - } + it('should correctly handle failed connection', async function () { + const client = this.configuration.newClient('mongodb://iLoveJS', { + serverSelectionTimeoutMS: 10 + }); + const error = await client.connect().catch(error => error); + expect(error).to.be.instanceOf(Error); }); it('shouldCorrectlyGetErrorDroppingNonExistingDb', { diff --git a/test/integration/server-discovery-and-monitoring/server_description.test.ts b/test/integration/server-discovery-and-monitoring/server_description.test.ts new file mode 100644 index 00000000000..60aa4614055 --- /dev/null +++ b/test/integration/server-discovery-and-monitoring/server_description.test.ts @@ -0,0 +1,63 @@ +import { type ChildProcess, spawn } from 'node:child_process'; + +import { expect } from 'chai'; +import * as os from 'os'; +import * as path from 'path'; + +import { MongoClient, ObjectId } from '../../mongodb'; + +describe('class ServerDescription', function () { + describe('when connecting to mongocryptd', { requires: { mongodb: '>=4.4' } }, function () { 
+ let client: MongoClient; + const mongocryptdTestPort = '27022'; + let childProcess: ChildProcess; + + beforeEach(async function () { + const pidFile = path.join(os.tmpdir(), new ObjectId().toHexString()); + childProcess = spawn( + 'mongocryptd', + ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', pidFile], + { + stdio: 'ignore', + detached: true + } + ); + + childProcess.on('error', error => console.warn(this.currentTest?.fullTitle(), error)); + client = new MongoClient(`mongodb://localhost:${mongocryptdTestPort}`); + }); + + afterEach(async function () { + await client?.close(); + childProcess.kill('SIGKILL'); + }); + + it('iscryptd is set to true ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.have.property('iscryptd', true); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', true); + }); + }); + + describe('when connecting to anything other than mongocryptd', function () { + let client: MongoClient; + + beforeEach(async function () { + client = this.configuration.newClient(); + }); + + afterEach(async function () { + await client?.close(); + }); + + it('iscryptd is set to false ', async function () { + const descriptions = []; + client.on('serverDescriptionChanged', description => descriptions.push(description)); + const hello = await client.db().command({ hello: true }); + expect(hello).to.not.have.property('iscryptd'); + expect(descriptions.at(-1)).to.have.nested.property('newDescription.iscryptd', false); + }); + }); +}); diff --git a/test/integration/server-selection/server_selection.prose.operation_count.test.ts b/test/integration/server-selection/server_selection.prose.operation_count.test.ts index fec6d24e61c..b4a7d9bf47b 100644 --- a/test/integration/server-selection/server_selection.prose.operation_count.test.ts +++ 
b/test/integration/server-selection/server_selection.prose.operation_count.test.ts @@ -1,5 +1,4 @@ import { expect } from 'chai'; -import { on } from 'events'; import { type Collection, @@ -7,7 +6,7 @@ import { HostAddress, type MongoClient } from '../../mongodb'; -import { sleep } from '../../tools/utils'; +import { waitUntilPoolsFilled } from '../../tools/utils'; const failPoint = { configureFailPoint: 'failCommand', @@ -28,17 +27,6 @@ async function runTaskGroup(collection: Collection, count: 10 | 100 | 1000) { } } -async function ensurePoolIsFull(client: MongoClient): Promise { - let connectionCount = 0; - - for await (const _event of on(client, 'connectionCreated')) { - connectionCount++; - if (connectionCount === POOL_SIZE * 2) { - break; - } - } -} - // Step 1: Configure a sharded cluster with two mongoses. Use a 4.2.9 or newer server version. const TEST_METADATA: MongoDBMetadataUI = { requires: { mongodb: '>=4.2.9', topology: 'sharded' } }; @@ -75,15 +63,8 @@ describe('operationCount-based Selection Within Latency Window - Prose Test', fu client.on('commandStarted', updateCount); - const poolIsFullPromise = ensurePoolIsFull(client); - - await client.connect(); - // Step 4: Using CMAP events, ensure the client's connection pools for both mongoses have been saturated - const poolIsFull = Promise.race([poolIsFullPromise, sleep(30 * 1000)]); - if (!poolIsFull) { - throw new Error('Timed out waiting for connection pool to fill to minPoolSize'); - } + await waitUntilPoolsFilled(client, AbortSignal.timeout(30_000), POOL_SIZE * 2); seeds = client.topology.s.seedlist.map(address => address.toString()); diff --git a/test/integration/sessions/sessions.prose.test.ts b/test/integration/sessions/sessions.prose.test.ts index 8f157c4fa75..82464ffbbdc 100644 --- a/test/integration/sessions/sessions.prose.test.ts +++ b/test/integration/sessions/sessions.prose.test.ts @@ -1,13 +1,16 @@ import { expect } from 'chai'; import { type ChildProcess, spawn } from 'child_process'; 
import { once } from 'events'; +import * as os from 'os'; +import * as path from 'path'; import { type Collection, type CommandStartedEvent, MongoClient, MongoDriverError, - MongoInvalidArgumentError + MongoInvalidArgumentError, + ObjectId } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -131,10 +134,15 @@ describe('Sessions Prose Tests', () => { let childProcess: ChildProcess; before(() => { - childProcess = spawn('mongocryptd', ['--port', mongocryptdTestPort, '--ipv6'], { - stdio: 'ignore', - detached: true - }); + const pidFile = path.join(os.tmpdir(), new ObjectId().toHexString()); + childProcess = spawn( + 'mongocryptd', + ['--port', mongocryptdTestPort, '--ipv6', '--pidfilepath', pidFile], + { + stdio: 'ignore', + detached: true + } + ); childProcess.on('error', err => { console.warn('Sessions prose mongocryptd error:', err); diff --git a/test/mongodb.ts b/test/mongodb.ts index 35034123048..f94a511929c 100644 --- a/test/mongodb.ts +++ b/test/mongodb.ts @@ -103,6 +103,16 @@ export * from '../src/bulk/common'; export * from '../src/bulk/ordered'; export * from '../src/bulk/unordered'; export * from '../src/change_stream'; +export * from '../src/client-side-encryption/auto_encrypter'; +export * from '../src/client-side-encryption/client_encryption'; +export * from '../src/client-side-encryption/crypto_callbacks'; +export * from '../src/client-side-encryption/errors'; +export * from '../src/client-side-encryption/mongocryptd_manager'; +export * from '../src/client-side-encryption/providers/aws'; +export * from '../src/client-side-encryption/providers/azure'; +export * from '../src/client-side-encryption/providers/gcp'; +export * from '../src/client-side-encryption/providers/index'; +export * from '../src/client-side-encryption/state_machine'; export * from '../src/cmap/auth/auth_provider'; export * from '../src/cmap/auth/aws_temporary_credentials'; export * from '../src/cmap/auth/gssapi'; diff --git 
a/test/spec/client-side-operations-timeout/README.md b/test/spec/client-side-operations-timeout/README.md new file mode 100644 index 00000000000..a960c2de219 --- /dev/null +++ b/test/spec/client-side-operations-timeout/README.md @@ -0,0 +1,661 @@ +# Client Side Operations Timeouts Tests + +______________________________________________________________________ + +## Introduction + +This document describes the tests that drivers MUST run to validate the behavior of the timeoutMS option. These tests +are broken up into automated YAML/JSON tests and additional prose tests. + +## Spec Tests + +This directory contains a set of YAML and JSON spec tests. Drivers MUST run these as described in the "Unified Test +Runner" specification. Because the tests introduced in this specification are timing-based, there is a risk that some of +them may intermittently fail without any bugs being present in the driver. As a mitigation, drivers MAY execute these +tests in two new Evergreen tasks that use single-node replica sets: one with only authentication enabled and another +with both authentication and TLS enabled. Drivers that choose to do so SHOULD use the `single-node-auth.json` and +`single-node-auth-ssl.json` files in the `drivers-evergreen-tools` repository to create these clusters. + +## Prose Tests + +There are some tests that cannot be expressed in the unified YAML/JSON format. For each of these tests, drivers MUST +create a MongoClient without the `timeoutMS` option set (referred to as `internalClient`). Any fail points set during a +test MUST be unset using `internalClient` after the test has been executed. All MongoClient instances created for tests +MUST be configured with read/write concern `majority`, read preference `primary`, and command monitoring enabled to +listen for `command_started` events. + +### 1. Multi-batch inserts + +This test MUST only run against standalones on server versions 4.4 and higher. 
The `insertMany` call takes an +exceedingly long time on replicasets and sharded clusters. Drivers MAY adjust the timeouts used in this test to allow +for differing bulk encoding performance. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +4. Using `client`, insert 50 1-megabyte documents in a single `insertMany` call. + + - Expect this to fail with a timeout error. + +5. Verify that two `insert` commands were executed against `db.coll` as part of the `insertMany` call. + +### 2. maxTimeMS is not set for commands sent to mongocryptd + +This test MUST only be run against enterprise server versions 4.2 and higher. + +1. Launch a mongocryptd process on 23000. +2. Create a MongoClient (referred to as `client`) using the URI `mongodb://localhost:23000/?timeoutMS=1000`. +3. Using `client`, execute the `{ ping: 1 }` command against the `admin` database. +4. Verify via command monitoring that the `ping` command sent did not contain a `maxTimeMS` field. + +### 3. ClientEncryption + +Each test under this category MUST only be run against server versions 4.4 and higher. In these tests, `LOCAL_MASTERKEY` +refers to the following base64: + +```javascript +Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk +``` + +For each test, perform the following setup: + +1. Using `internalClient`, drop and create the `keyvault.datakeys` collection. + +2. Create a MongoClient (referred to as `keyVaultClient`) with `timeoutMS=10`. + +3. Create a `ClientEncryption` object that wraps `keyVaultClient` (referred to as `clientEncryption`). 
Configure this + object with `keyVaultNamespace` set to `keyvault.datakeys` and the following KMS providers map: + + ```javascript + { + "local": { "key": <LOCAL_MASTERKEY> } + } + ``` + +#### createDataKey + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to fail with a timeout error. + +3. Verify that an `insert` command was executed against `keyvault.datakeys` as part of the `createDataKey` call. + +#### encrypt + +1. Call `client_encryption.createDataKey()` with the `local` KMS provider. + + - Expect a BSON binary with subtype 4 to be returned, referred to as `datakeyId`. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `datakeyId`. + + - Expect this to fail with a timeout error. + +4. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `encrypt` call. + +#### decrypt + +1. Call `clientEncryption.createDataKey()` with the `local` KMS provider. + + - Expect this to return a BSON binary with subtype 4, referred to as `dataKeyId`. + +2. Call `clientEncryption.encrypt()` with the value `hello`, the algorithm + `AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic`, and the keyId `dataKeyId`. + + - Expect this to return a BSON binary with subtype 6, referred to as `encrypted`. + +3. Close and re-create the `keyVaultClient` and `clientEncryption` objects. + +4. 
Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +5. Call `clientEncryption.decrypt()` with the value `encrypted`. + + - Expect this to fail with a timeout error. + +6. Verify that a `find` command was executed against the `keyvault.datakeys` collection as part of the `decrypt` call. + +### 4. Background Connection Pooling + +The tests in this section MUST only be run if the server version is 4.4 or higher and the URI has authentication fields +(i.e. a username and password). Each test in this section requires drivers to create a MongoClient and then wait for +some CMAP events to be published. Drivers MUST wait for up to 10 seconds and fail the test if the specified events are +not published within that time. + +#### timeoutMS used for handshake commands + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 1 + }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "timeoutBackgroundPoolTest" + } + } + ``` + +2. Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 10 + - `appName` of `timeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionClosedEvent` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionClosedEvent` to be published. + +#### timeoutMS is refreshed for each handshake command + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["hello", "isMaster", "saslContinue"], + blockConnection: true, + blockTimeMS: 15, + appName: "refreshTimeoutBackgroundPoolTest" + } + } + ``` + +2. 
Create a MongoClient (referred to as `client`) configured with the following: + + - `minPoolSize` of 1 + - `timeoutMS` of 20 + - `appName` of `refreshTimeoutBackgroundPoolTest` + - CMAP monitor configured to listen for `ConnectionCreatedEvent` and `ConnectionReady` events. + +3. Wait for a `ConnectionCreatedEvent` and a `ConnectionReady` to be published. + +### 5. Blocking Iteration Methods + +Tests in this section MUST only be run against server versions 4.4 and higher and only apply to drivers that have a +blocking method for cursor iteration that executes `getMore` commands in a loop until a document is available or an +error occurs. + +#### Tailable cursors + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, insert the document `{ x: 1 }` into `db.coll`. + +3. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +5. Using `client`, create a tailable cursor on `db.coll` with `cursorType=tailable`. + + - Expect this to succeed and return a cursor with a non-zero ID. + +6. Call either a blocking or non-blocking iteration method on the cursor. + + - Expect this to succeed and return the document `{ x: 1 }` without sending a `getMore` command. + +7. Call the blocking iteration method on the resulting cursor. + + - Expect this to fail with a timeout error. + +8. Verify that a `find` command and two `getMore` commands were executed against the `db.coll` collection during the + test. + +#### Change Streams + +1. Using `internalClient`, drop the `db.coll` collection. + +2. 
Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: "alwaysOn", + data: { + failCommands: ["getMore"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=20`. + +4. Using `client`, use the `watch` helper to create a change stream against `db.coll`. + + - Expect this to succeed and return a change stream with a non-zero ID. + +5. Call the blocking iteration method on the resulting change stream. + + - Expect this to fail with a timeout error. + +6. Verify that an `aggregate` command and two `getMore` commands were executed against the `db.coll` collection during + the test. + +### 6. GridFS - Upload + +Tests in this section MUST only be run against server versions 4.4 and higher. + +#### uploads via openUploadStream can be timed out + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["insert"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload a single `0x12` byte. + +7. Call `uploadStream.close()` to flush the stream and insert chunks. + + - Expect this to fail with a timeout error. + +#### Aborting an upload stream can be timed out + +This test only applies to drivers that provide an API to abort a GridFS upload stream. + +1. 
Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["delete"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database with + `chunkSizeBytes=2`. + +5. Call `bucket.open_upload_stream()` with the filename `filename` to create an upload stream (referred to as + `uploadStream`). + + - Expect this to succeed and return a non-null stream. + +6. Using `uploadStream`, upload the bytes `[0x01, 0x02, 0x03, 0x04]`. + +7. Call `uploadStream.abort()`. + + - Expect this to fail with a timeout error. + +### 7. GridFS - Download + +This test MUST only be run against server versions 4.4 and higher. + +1. Using `internalClient`, drop and re-create the `db.fs.files` and `db.fs.chunks` collections. + +2. Using `internalClient`, insert the following document into the `db.fs.files` collection: + + ```javascript + { + "_id": { + "$oid": "000000000000000000000005" + }, + "length": 10, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "md5": "57d83cd477bfb1ccd975ab33d827a92b", + "filename": "length-10", + "contentType": "application/octet-stream", + "aliases": [], + "metadata": {} + } + ``` + +3. Create a new MongoClient (referred to as `client`) with `timeoutMS=10`. + +4. Using `client`, create a GridFS bucket (referred to as `bucket`) that wraps the `db` database. + +5. Call `bucket.open_download_stream` with the id `{ "$oid": "000000000000000000000005" }` to create a download stream + (referred to as `downloadStream`). + + - Expect this to succeed and return a non-null stream. + +6. 
Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { times: 1 }, + data: { + failCommands: ["find"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +7. Read from the `downloadStream`. + + - Expect this to fail with a timeout error. + +8. Verify that two `find` commands were executed during the read: one against `db.fs.files` and another against + `db.fs.chunks`. + +### 8. Server Selection + +#### serverSelectionTimeoutMS honored if timeoutMS is not set + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?serverSelectionTimeoutMS=10`. +2. Using `client`, execute the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=20`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS + +1. Create a MongoClient (referred to as `client`) with URI + `mongodb://invalid/?timeoutMS=20&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for server selection if timeoutMS=0 + +1. Create a MongoClient (referred to as `client`) with URI `mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10`. +2. Using `client`, run the command `{ ping: 1 }` against the `admin` database. + - Expect this to fail with a server selection timeout error after no more than 15ms. 
+ +#### timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=10` and `serverSelectionTimeoutMS=20`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +#### serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS + +This test MUST only be run if the server version is 4.4 or higher and the URI has authentication fields (i.e. a username +and password). + +1. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["saslContinue"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +2. Create a new MongoClient (referred to as `client`) with `timeoutMS=20` and `serverSelectionTimeoutMS=10`. + +3. Using `client`, insert the document `{ x: 1 }` into collection `db.coll`. + + - Expect this to fail with a timeout error after no more than 15ms. + +### 9. endSession + +This test MUST only be run against replica sets and sharded clusters with server version 4.4 or higher. It MUST be run +three times: once with the timeout specified via the MongoClient `timeoutMS` option, once with the timeout specified via +the ClientSession `defaultTimeoutMS` option, and once more with the timeout specified via the `timeoutMS` option for the +`endSession` operation. In all cases, the timeout MUST be set to 10 milliseconds. + +1. 
Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 1 }, + data: { + failCommands: ["abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) and an explicit ClientSession derived from that MongoClient + (referred to as `session`). + +4. Execute the following code: + + ```typescript + coll = client.database("db").collection("coll") + session.start_transaction() + coll.insert_one({x: 1}, session=session) + ``` + +5. Using `session`, execute `session.end_session` + + - Expect this to fail with a timeout error after no more than 15ms. + +### 10. Convenient Transactions + +Tests in this section MUST only run against replica sets and sharded clusters with server versions 4.4 or higher. + +#### timeoutMS is refreshed for abortTransaction if the callback fails + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: failCommand, + mode: { times: 2 }, + data: { + failCommands: ["insert", "abortTransaction"], + blockConnection: true, + blockTimeMS: 15 + } + } + ``` + +3. Create a new MongoClient (referred to as `client`) configured with `timeoutMS=10` and an explicit ClientSession + derived from that MongoClient (referred to as `session`). + +4. Using `session`, execute a `withTransaction` operation with the following callback: + + ```typescript + def callback() { + coll = client.database("db").collection("coll") + coll.insert_one({ _id: 1 }, session=session) + } + ``` + +5. Expect the previous `withTransaction` call to fail with a timeout error. + +6. Verify that the following events were published during the `withTransaction` call: + + 1. `command_started` and `command_failed` events for an `insert` command. + 2. 
`command_started` and `command_failed` events for an `abortTransaction` command. + +### 11. Multi-batch bulkWrites + +This test MUST only run against server versions 8.0+. + +1. Using `internalClient`, drop the `db.coll` collection. + +2. Using `internalClient`, set the following fail point: + + ```javascript + { + configureFailPoint: "failCommand", + mode: { + times: 2 + }, + data: { + failCommands: ["bulkWrite"], + blockConnection: true, + blockTimeMS: 1010 + } + } + ``` + +3. Using `internalClient`, perform a `hello` command and record the `maxBsonObjectSize` and `maxMessageSizeBytes` values + in the response. + +4. Create a new MongoClient (referred to as `client`) with `timeoutMS=2000`. + +5. Create a list of write models (referred to as `models`) with the following write model repeated + (`maxMessageSizeBytes / maxBsonObjectSize + 1`) times: + + ```json + InsertOne { + "namespace": "db.coll", + "document": { "a": "b".repeat(maxBsonObjectSize - 500) } + } + ``` + +6. Call `bulkWrite` on `client` with `models`. + + - Expect this to fail with a timeout error. + +7. Verify that two `bulkWrite` commands were executed as part of the `MongoClient.bulkWrite` call. + +## Unit Tests + +The tests enumerated in this section could not be expressed in either spec or prose format. Drivers SHOULD implement +these if it is possible to do so using the driver's existing test infrastructure. + +- Operations should ignore `waitQueueTimeoutMS` if `timeoutMS` is also set. +- If `timeoutMS` is set for an operation, the remaining `timeoutMS` value should apply to connection checkout after a + server has been selected. +- If `timeoutMS` is not set for an operation, `waitQueueTimeoutMS` should apply to connection checkout after a server + has been selected. +- If a new connection is required to execute an operation, + `min(remaining computedServerSelectionTimeout, connectTimeoutMS)` should apply to socket establishment. 
+- For drivers that have control over OCSP behavior, `min(remaining computedServerSelectionTimeout, 5 seconds)` should + apply to HTTP requests against OCSP responders. +- If `timeoutMS` is unset, operations fail after two non-consecutive socket timeouts. +- The remaining `timeoutMS` value should apply to HTTP requests against KMS servers for CSFLE. +- The remaining `timeoutMS` value should apply to commands sent to mongocryptd as part of automatic encryption. +- When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment. diff --git a/test/spec/client-side-operations-timeout/change-streams.json b/test/spec/client-side-operations-timeout/change-streams.json index aef77bb452d..8cffb08e267 100644 --- a/test/spec/client-side-operations-timeout/change-streams.json +++ b/test/spec/client-side-operations-timeout/change-streams.json @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 
@@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/spec/client-side-operations-timeout/change-streams.yml b/test/spec/client-side-operations-timeout/change-streams.yml index b2a052d01b2..c813be035ac 100644 --- a/test/spec/client-side-operations-timeout/change-streams.yml +++ b/test/spec/client-side-operations-timeout/change-streams.yml @@ -67,12 +67,12 @@ tests: data: failCommands: ["aggregate"] blockConnection: true - blockTimeMS: 55 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 50 + timeoutMS: 200 expectError: isTimeoutError: true expectEvents: @@ -142,12 +142,12 @@ tests: data: failCommands: ["aggregate", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 maxAwaitTimeMS: 1 saveResultAsEntity: &changeStream changeStream @@ -171,16 +171,16 @@ tests: maxTimeMS: 1 # The timeout should be applied to the entire resume attempt, not individually to each command. The test creates a - # change stream with timeoutMS=20 which returns an empty initial batch and then sets a fail point to block both - # getMore and aggregate for 12ms each and fail with a resumable error. When the resume attempt happens, the getMore - # and aggregate block for longer than 20ms total, so it times out. + # change stream with timeoutMS=200 which returns an empty initial batch and then sets a fail point to block both + # getMore and aggregate for 120ms each and fail with a resumable error. When the resume attempt happens, the getMore + # and aggregate block for longer than 200ms total, so it times out. 
- description: "timeoutMS applies to full resume attempt in a next call" operations: - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - name: failPoint object: testRunner @@ -192,7 +192,7 @@ tests: data: failCommands: ["getMore", "aggregate"] blockConnection: true - blockTimeMS: 12 + blockTimeMS: 120 errorCode: 7 # HostNotFound - resumable but does not require an SDAM state change. # failCommand doesn't correctly add the ResumableChangeStreamError by default. It needs to be specified # manually here so the error is considered resumable. The failGetMoreAfterCursorCheckout fail point @@ -234,9 +234,9 @@ tests: # Specify a short maxAwaitTimeMS because otherwise the getMore on the new cursor will wait for 1000ms and # time out. maxAwaitTimeMS: 1 - timeoutMS: 100 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream - # Block getMore for 150ms to force the next() call to time out. + # Block getMore for 250ms to force the next() call to time out. - name: failPoint object: testRunner arguments: @@ -247,7 +247,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 150 + blockTimeMS: 250 # The original aggregate didn't return any events so this should do a getMore and return a timeout error. - name: iterateUntilDocumentOrError object: *changeStream @@ -290,7 +290,7 @@ tests: collection: *collectionName # The timeoutMS value should be refreshed for getMore's. This is a failure test. The createChangeStream operation - # sets timeoutMS=10 and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # sets timeoutMS=200 and the getMore blocks for 250ms, causing iteration to fail with a timeout error. 
- description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -303,12 +303,12 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createChangeStream object: *collection arguments: pipeline: [] - timeoutMS: 10 + timeoutMS: 200 saveResultAsEntity: &changeStream changeStream # The first iteration should do a getMore - name: iterateUntilDocumentOrError diff --git a/test/spec/client-side-operations-timeout/close-cursors.json b/test/spec/client-side-operations-timeout/close-cursors.json index 1361971c4ce..79b0de7b6aa 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.json +++ b/test/spec/client-side-operations-timeout/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/spec/client-side-operations-timeout/close-cursors.yml b/test/spec/client-side-operations-timeout/close-cursors.yml index db26e79ca31..c4c4ea0acda 100644 --- a/test/spec/client-side-operations-timeout/close-cursors.yml +++ b/test/spec/client-side-operations-timeout/close-cursors.yml @@ -46,13 +46,13 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 50 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 
- timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor # Iterate the cursor three times. The third should do a getMore, which should fail with a timeout error. - name: iterateUntilDocumentOrError @@ -99,18 +99,18 @@ tests: data: failCommands: ["killCursors"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: filter: {} batchSize: 2 - timeoutMS: 20 + timeoutMS: 200 saveResultAsEntity: &cursor cursor - name: close object: *cursor arguments: - timeoutMS: 40 + timeoutMS: 400 expectEvents: - client: *client events: @@ -120,7 +120,7 @@ tests: commandName: find - commandStartedEvent: command: - killCursors: *collection + killCursors: *collectionName maxTimeMS: { $$type: ["int", "long"] } commandName: killCursors - commandSucceededEvent: diff --git a/test/spec/client-side-operations-timeout/command-execution.json b/test/spec/client-side-operations-timeout/command-execution.json index b9b306c7fb6..aa9c3eb23f3 100644 --- a/test/spec/client-side-operations-timeout/command-execution.json +++ b/test/spec/client-side-operations-timeout/command-execution.json @@ -3,7 +3,7 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9", + "minServerVersion": "4.4.7", "topologies": [ "single", "replicaset", diff --git a/test/spec/client-side-operations-timeout/command-execution.yml b/test/spec/client-side-operations-timeout/command-execution.yml index 400a90867a3..6ba0585b3ca 100644 --- a/test/spec/client-side-operations-timeout/command-execution.yml +++ b/test/spec/client-side-operations-timeout/command-execution.yml @@ -3,9 +3,8 @@ description: "timeoutMS behaves correctly during command execution" schemaVersion: "1.9" runOnRequirements: - # The appName filter cannot be used to set a fail point on connection handshakes until server version 4.9 due to - # SERVER-49220/SERVER-49336. - - minServerVersion: "4.9" + # Require SERVER-49336 for failCommand + appName on the initial handshake. 
+ - minServerVersion: "4.4.7" # Skip load-balanced and serverless which do not support RTT measurements. topologies: [ single, replicaset, sharded ] serverless: forbid diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.json b/test/spec/client-side-operations-timeout/convenient-transactions.json index 07e676d5f51..3868b3026c2 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.json +++ b/test/spec/client-side-operations-timeout/convenient-transactions.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/spec/client-side-operations-timeout/convenient-transactions.yml b/test/spec/client-side-operations-timeout/convenient-transactions.yml index d79aa4bd058..02d48b83242 100644 --- a/test/spec/client-side-operations-timeout/convenient-transactions.yml +++ b/test/spec/client-side-operations-timeout/convenient-transactions.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -49,6 +49,8 @@ tests: timeoutMS: 100 expectError: isClientError: true + expectError: + isClientError: true expectEvents: # The only operation run fails with a client-side error, so there should be no events for the client. 
- client: *client @@ -66,7 +68,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 30 + blockTimeMS: 300 - name: withTransaction object: *session arguments: @@ -88,9 +90,6 @@ tests: expectEvents: - client: *client events: - # Because the second insert expects an error and gets an error, it technically succeeds, so withTransaction - # will try to run commitTransaction. This will fail client-side, though, because the timeout has already - # expired, so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -103,3 +102,9 @@ tests: command: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } diff --git a/test/spec/client-side-operations-timeout/deprecated-options.json b/test/spec/client-side-operations-timeout/deprecated-options.json index 322e9449101..d3e4631ff43 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.json +++ b/test/spec/client-side-operations-timeout/deprecated-options.json @@ -1,5 +1,5 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { diff --git a/test/spec/client-side-operations-timeout/deprecated-options.yml b/test/spec/client-side-operations-timeout/deprecated-options.yml index 461ba6ab139..582a8983ae2 100644 --- a/test/spec/client-side-operations-timeout/deprecated-options.yml +++ b/test/spec/client-side-operations-timeout/deprecated-options.yml @@ -1,4 +1,4 @@ -description: "operations ignore deprected timeout options if timeoutMS is set" +description: "operations ignore deprecated timeout options if timeoutMS is set" schemaVersion: "1.9" diff --git a/test/spec/client-side-operations-timeout/gridfs-advanced.yml 
b/test/spec/client-side-operations-timeout/gridfs-advanced.yml index bc788bacc35..f6c37e165b2 100644 --- a/test/spec/client-side-operations-timeout/gridfs-advanced.yml +++ b/test/spec/client-side-operations-timeout/gridfs-advanced.yml @@ -119,7 +119,7 @@ tests: update: *filesCollectionName maxTimeMS: { $$type: ["int", "long"] } - # Tests for the "drop" opration. Any tests that might result in multiple commands being sent do not have expectEvents + # Tests for the "drop" operation. Any tests that might result in multiple commands being sent do not have expectEvents # assertions as these assertions reduce test robustness and can cause flaky failures. - description: "timeoutMS can be overridden for drop" diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.json b/test/spec/client-side-operations-timeout/non-tailable-cursors.json index 0a5448a6bb2..291c6e72aa1 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.json +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + 
"blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml index 8cd953dec45..29037b4c0a3 100644 --- a/test/spec/client-side-operations-timeout/non-tailable-cursors.yml +++ b/test/spec/client-side-operations-timeout/non-tailable-cursors.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -53,7 +53,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -86,14 +86,14 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 - # Run a find with timeoutMS=20 and batchSize=1 to force two batches, which will cause a find and a getMore to be - # sent. Both will block for 15ms so together they will go over the timeout. + blockTimeMS: 125 + # Run a find with timeoutMS=200 and batchSize=1 to force two batches, which will cause a find and a getMore to be + # sent. Both will block for 125ms, so together they will go over the timeout. 
- name: find object: *collection arguments: filter: {} - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -127,13 +127,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: find object: *collection arguments: filter: {} timeoutMode: cursorLifetime - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectError: isTimeoutError: true @@ -168,7 +168,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -187,8 +187,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither - # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=20 and both - # "find" and "getMore" commands are blocked for 15ms each. Neither exceeds the timeout, so iteration succeeds. + # command should have a maxTimeMS field. This is a success test. The "find" is executed with timeoutMS=200 and both + # "find" and "getMore" commands are blocked for 125ms each. Neither exceeds the timeout, so iteration succeeds. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - success" operations: - name: failPoint @@ -201,13 +201,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 125 - name: find object: *collection arguments: filter: {} timeoutMode: iteration - timeoutMS: 20 + timeoutMS: 200 batchSize: 2 expectEvents: - client: *client @@ -227,8 +227,8 @@ tests: maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither - # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=10 and "getMore" - # commands are blocked for 15ms, causing iteration to fail with a timeout error. 
+ # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=200 and "getMore" + # commands are blocked for 250ms, causing iteration to fail with a timeout error. - description: "timeoutMS is refreshed for getMore if timeoutMode is iteration - failure" operations: - name: failPoint @@ -241,7 +241,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json index a28dbd26854..9daad260ef3 100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.json @@ -108,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -198,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -327,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -419,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -546,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", 
@@ -634,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -760,6 +790,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -851,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -982,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1075,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1203,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1291,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1417,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1508,6 +1573,11 @@ }, { "description": "operation is retried multiple times if 
timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1639,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1732,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1868,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1964,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2095,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2183,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2303,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2390,6 +2495,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + 
{ + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2512,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2600,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2730,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2825,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2955,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3043,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3166,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3254,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": 
"failPoint", @@ -3377,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3465,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3588,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3676,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3799,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3887,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4010,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4098,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4218,6 +4408,11 @@ }, { "description": "operation is retried multiple 
times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4305,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4428,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4517,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4641,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4729,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4852,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4940,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5060,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + 
"minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5147,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5269,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5357,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml index 039f7ca42ef..6f47d6c2e42 100644 --- a/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/retryability-timeoutMS.yml @@ -84,6 +84,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -125,6 +127,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -191,6 +195,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand 
errorLabels option operations: - name: failPoint object: testRunner @@ -233,6 +239,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -299,6 +307,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -340,6 +350,8 @@ tests: delete: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -406,6 +418,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -448,6 +462,8 @@ tests: update: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -515,6 +531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -557,6 +575,8 @@ tests: update: *collectionName maxTimeMS: { $$type: 
["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -623,6 +643,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -664,6 +686,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -730,6 +754,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -772,6 +798,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -839,6 +867,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -881,6 +911,8 @@ tests: findAndModify: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS 
is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -949,6 +981,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -992,6 +1026,8 @@ tests: insert: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1059,6 +1095,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1100,6 +1138,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1163,6 +1203,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1203,6 +1245,8 @@ tests: listDatabases: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - 
name: failPoint object: testRunner @@ -1267,6 +1311,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1308,6 +1354,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1373,6 +1421,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1414,6 +1464,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1479,6 +1531,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1520,6 +1574,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1585,6 +1641,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple 
times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1626,6 +1684,8 @@ tests: listCollections: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1691,6 +1751,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1732,6 +1794,8 @@ tests: aggregate: 1 maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1797,6 +1861,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1838,6 +1904,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1903,6 +1971,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # 
failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -1944,6 +2014,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2009,6 +2081,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2050,6 +2124,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2113,6 +2189,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2153,6 +2231,8 @@ tests: count: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2218,6 +2298,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2260,6 +2342,8 
@@ tests: distinct: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2326,6 +2410,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2367,6 +2453,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2432,6 +2520,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2473,6 +2563,8 @@ tests: find: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2536,6 +2628,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2576,6 +2670,8 @@ tests: listIndexes: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is 
zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2640,6 +2736,8 @@ tests: expectError: isTimeoutError: true - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner @@ -2681,6 +2779,8 @@ tests: aggregate: *collectionName maxTimeMS: { $$type: ["int", "long"] } - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option operations: - name: failPoint object: testRunner diff --git a/test/spec/client-side-operations-timeout/runCursorCommand.json b/test/spec/client-side-operations-timeout/runCursorCommand.json index 5fc0be33997..36f774fb5af 100644 --- a/test/spec/client-side-operations-timeout/runCursorCommand.json +++ b/test/spec/client-side-operations-timeout/runCursorCommand.json @@ -200,7 +200,7 @@ }, "collection": "collection", "maxTimeMS": { - "$$exists": true + "$$exists": false } } } @@ -210,7 +210,7 @@ ] }, { - "description": "Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "description": "Non-tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", "runOnRequirements": [ { "serverless": "forbid" diff --git a/test/spec/client-side-operations-timeout/runCursorCommand.yml b/test/spec/client-side-operations-timeout/runCursorCommand.yml index 16a648e0280..91a18d6dd81 100644 --- a/test/spec/client-side-operations-timeout/runCursorCommand.yml +++ b/test/spec/client-side-operations-timeout/runCursorCommand.yml @@ -70,7 +70,7 @@ tests: runOnRequirements: - serverless: forbid operations: - # Block find/getMore for 15ms. 
+ # Block find/getMore for 60ms. - name: failPoint object: testRunner arguments: @@ -83,8 +83,9 @@ tests: blockConnection: true blockTimeMS: 60 # Run a find with timeoutMS less than double our failPoint blockTimeMS and - # batchSize less than the total document count will cause a find and a getMore to be sent. - # Both will block for 60ms so together they will go over the timeout. + # batchSize less than the total document count will cause a find and a + # getMore to be sent. Both will block for 60ms so together they will go + # over the timeout. - name: runCursorCommand object: *db arguments: @@ -106,12 +107,12 @@ tests: command: getMore: { $$type: [int, long] } collection: *collection - maxTimeMS: { $$exists: true } + maxTimeMS: { $$exists: false } # If timeoutMode=ITERATION, timeoutMS applies separately to the initial find and the getMore on the cursor. Neither # command should have a maxTimeMS field. This is a failure test. The "find" inherits timeoutMS=100 and "getMore" # commands are blocked for 60ms, causing iteration to fail with a timeout error. 
- - description: Non=tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure + - description: Non-tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure runOnRequirements: - serverless: forbid operations: diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json index abbc3217327..13ea91c7948 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.json @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml index 184ef7eb9e7..c79384e5f0b 100644 --- a/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-inherit-timeoutMS.yml @@ -13,7 +13,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 50 + timeoutMS: 500 useMultipleMongoses: false observeEvents: - 
commandStartedEvent @@ -52,7 +52,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json index 0254b184a14..441c698328c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.json @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], 
"blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml index 8a80a65720a..bee91dc4cb8 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-operation-timeoutMS.yml @@ -50,7 +50,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -61,7 +61,7 @@ tests: - name: commitTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectError: isTimeoutError: true expectEvents: @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -106,7 +106,7 @@ tests: - name: abortTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 expectEvents: - client: *client events: @@ -138,11 +138,11 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 
60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: - timeoutMS: 50 + timeoutMS: 500 callback: - name: insertOne object: *collection @@ -156,9 +156,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. - commandStartedEvent: commandName: insert databaseName: *databaseName @@ -169,3 +166,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json index c46ae4dd506..d90152e909c 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.json @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + 
"commandName": "abortTransaction" + } } ] } diff --git a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml index 61aaab4d97e..73aaf9ff2a7 100644 --- a/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml +++ b/test/spec/client-side-operations-timeout/sessions-override-timeoutMS.yml @@ -29,7 +29,7 @@ createEntities: id: &session session client: *client sessionOptions: - defaultTimeoutMS: 50 + defaultTimeoutMS: 500 initialData: - collectionName: *collectionName @@ -52,7 +52,7 @@ tests: data: failCommands: ["commitTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -95,7 +95,7 @@ tests: data: failCommands: ["abortTransaction"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: startTransaction object: *session - name: insertOne @@ -136,7 +136,7 @@ tests: data: failCommands: ["insert"] blockConnection: true - blockTimeMS: 60 + blockTimeMS: 600 - name: withTransaction object: *session arguments: @@ -153,9 +153,6 @@ tests: expectEvents: - client: *client events: - # Because the insert expects an error and gets an error, it technically succeeds, so withTransaction will - # try to run commitTransaction. This will fail client-side, though, because the timeout has already expired, - # so no command is sent. 
- commandStartedEvent: commandName: insert databaseName: *databaseName @@ -166,3 +163,11 @@ tests: maxTimeMS: { $$type: ["int", "long"] } - commandFailedEvent: commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.json b/test/spec/client-side-operations-timeout/tailable-awaitData.json index 6da85c77835..535fb692434 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -130,7 +130,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +188,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +199,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +272,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +283,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +354,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-awaitData.yml index 422c6fb5370..52b9b3b456c 100644 --- a/test/spec/client-side-operations-timeout/tailable-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client 
client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -83,7 +83,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 300 - name: find object: *collection arguments: @@ -117,13 +117,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate twice to force a getMore. The first iteration will return the document from the first batch and the @@ -165,13 +165,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailableAwait - timeoutMS: 20 + timeoutMS: 250 batchSize: 1 maxAwaitTimeMS: 1 saveResultAsEntity: &tailableCursor tailableCursor @@ -199,8 +199,8 @@ tests: collection: *collectionName maxTimeMS: 1 - # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=10 from - # the collection and the getMore blocks for 15ms, causing iteration to fail with a timeout error. + # The timeoutMS value should be refreshed for getMore's. This is a failure test. The find inherits timeoutMS=200 from + # the collection and the getMore blocks for 250ms, causing iteration to fail with a timeout error. 
- description: "timeoutMS is refreshed for getMore - failure" operations: - name: failPoint @@ -213,7 +213,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json index 34ee6609636..e88230e4f7a 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.json +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml index 766b46e658b..eb75deaa65c 100644 --- a/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml +++ b/test/spec/client-side-operations-timeout/tailable-non-awaitData.yml @@ -12,7 +12,7 @@ createEntities: - client: id: &client client uriOptions: - timeoutMS: 10 + timeoutMS: 200 useMultipleMongoses: false observeEvents: - commandStartedEvent @@ -59,7 +59,7 @@ tests: data: failCommands: ["find"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: find object: *collection arguments: @@ -96,13 +96,13 @@ tests: data: failCommands: ["find", "getMore"] blockConnection: true - 
blockTimeMS: 15 + blockTimeMS: 150 - name: createFindCursor object: *collection arguments: filter: {} cursorType: tailable - timeoutMS: 20 + timeoutMS: 200 batchSize: 1 saveResultAsEntity: &tailableCursor tailableCursor # Iterate the cursor twice: the first iteration will return the document from the batch in the find and the @@ -131,7 +131,7 @@ tests: maxTimeMS: { $$exists: false } # The timeoutMS option should apply separately to the initial "find" and each getMore. This is a failure test. The - # find inherits timeoutMS=10 from the collection and the getMore command blocks for 15ms, causing iteration to fail + # find inherits timeoutMS=200 from the collection and the getMore command blocks for 250ms, causing iteration to fail # with a timeout error. - description: "timeoutMS is refreshed for getMore - failure" operations: @@ -145,7 +145,7 @@ tests: data: failCommands: ["getMore"] blockConnection: true - blockTimeMS: 15 + blockTimeMS: 250 - name: createFindCursor object: *collection arguments: diff --git a/test/spec/index.js b/test/spec/index.ts similarity index 67% rename from test/spec/index.js rename to test/spec/index.ts index f9e6dccf02f..221d6671893 100644 --- a/test/spec/index.js +++ b/test/spec/index.ts @@ -1,7 +1,7 @@ -'use strict'; -const path = require('path'); -const fs = require('fs'); -const { EJSON } = require('bson'); +import * as fs from 'fs'; +import * as path from 'path'; + +import { EJSON } from '../mongodb'; function hasDuplicates(testArray) { const testNames = testArray.map(test => test.description); @@ -12,17 +12,16 @@ function hasDuplicates(testArray) { /** * Given spec test folder names, loads the corresponding JSON * - * @param {...string} args - the spec test name to load - * @returns {any[]} + * @param args - the spec test name to load */ -function loadSpecTests(...args) { +export function loadSpecTests(...args: string[]): any[] { const specPath = path.resolve(...[__dirname].concat(args)); const suites = fs .readdirSync(specPath) 
.filter(x => x.includes('.json')) .map(x => ({ - ...EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), + ...EJSON.parse(fs.readFileSync(path.join(specPath, x), 'utf8'), { relaxed: true }), name: path.basename(x, '.json') })); @@ -36,7 +35,3 @@ function loadSpecTests(...args) { return suites; } - -module.exports = { - loadSpecTests -}; diff --git a/test/spec/unified-test-format/Makefile b/test/spec/unified-test-format/Makefile index 9711d9eee0e..a2b79e3f70b 100644 --- a/test/spec/unified-test-format/Makefile +++ b/test/spec/unified-test-format/Makefile @@ -1,8 +1,8 @@ -SCHEMA=../schema-1.5.json +SCHEMA=../schema-1.21.json -.PHONY: all invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud collection-management sessions command-monitoring HAS_AJV +.PHONY: all invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout HAS_AJV -all: invalid valid-fail valid-pass versioned-api load-balancers gridfs transactions crud collection-management sessions command-monitoring +all: invalid valid-fail valid-pass atlas-data-lake versioned-api load-balancers gridfs transactions transactions-convenient-api change-streams crud collection-management read-write-concern retryable-reads retryable-writes sessions command-logging-and-monitoring client-side-operations-timeout client-side-encryption invalid: HAS_AJV @# Redirect stdout to hide expected validation errors @@ -14,6 +14,9 @@ valid-fail: HAS_AJV valid-pass: HAS_AJV @ajv test -s $(SCHEMA) -d "valid-pass/*.yml" --valid +atlas-data-lake: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../atlas-data-lake-testing/tests/unified/*.yml" --valid + versioned-api: HAS_AJV @ajv test -s $(SCHEMA) -d "../../versioned-api/tests/*.yml" --valid @@ -26,17 +29,39 @@ gridfs: HAS_AJV transactions: 
HAS_AJV @ajv test -s $(SCHEMA) -d "../../transactions/tests/unified/*.yml" --valid +transactions-convenient-api: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../transactions-convenient-api/tests/unified/*.yml" --valid + +change-streams: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../change-streams/tests/unified/*.yml" --valid + +client-side-operations-timeout: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-operations-timeout/tests/*.yml" --valid + crud: HAS_AJV @ajv test -s $(SCHEMA) -d "../../crud/tests/unified/*.yml" --valid collection-management: HAS_AJV @ajv test -s $(SCHEMA) -d "../../collection-management/tests/*.yml" --valid +read-write-concern: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../read-write-concern/tests/operation/*.yml" --valid + +retryable-reads: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-reads/tests/unified/*.yml" --valid + +retryable-writes: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../retryable-writes/tests/unified/*.yml" --valid + sessions: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../sessions/tests/unified/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../sessions/tests/*.yml" --valid + +command-logging-and-monitoring: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/logging/*.yml" --valid + @ajv test -s $(SCHEMA) -d "../../command-logging-and-monitoring/tests/monitoring/*.yml" --valid -command-monitoring: HAS_AJV - @ajv test -s $(SCHEMA) -d "../../command-monitoring/tests/unified/*.yml" --valid +client-side-encryption: HAS_AJV + @ajv test -s $(SCHEMA) -d "../../client-side-encryption/tests/unified/*.yml" --valid HAS_AJV: @if ! 
command -v ajv > /dev/null; then \ diff --git a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml index 3b1c0c3a412..c6afedcfa96 100644 --- a/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml +++ b/test/spec/unified-test-format/valid-pass/collectionData-createOptions.yml @@ -1,12 +1,9 @@ description: collectionData-createOptions - schemaVersion: "1.9" - runOnRequirements: - minServerVersion: "3.6" # Capped collections cannot be created on serverless instances. serverless: forbid - createEntities: - client: id: &client0 client0 @@ -18,7 +15,6 @@ createEntities: id: &collection0 collection0 database: *database0 collectionName: &collection0Name coll0 - initialData: - collectionName: *collection0Name databaseName: *database0Name @@ -28,7 +24,6 @@ initialData: size: &cappedSize 4096 documents: - { _id: 1, x: 11 } - tests: - description: collection is created with the correct options operations: @@ -39,4 +34,4 @@ tests: - $collStats: { storageStats: {} } - $project: { capped: '$storageStats.capped', maxSize: '$storageStats.maxSize'} expectResult: - - { capped: true, maxSize: *cappedSize } + - { capped: true, maxSize: *cappedSize } \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.json b/test/spec/unified-test-format/valid-pass/createEntities-operation.json new file mode 100644 index 00000000000..3fde42919d7 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.json @@ -0,0 +1,74 @@ +{ + "description": "createEntities-operation", + "schemaVersion": "1.9", + "tests": [ + { + "description": "createEntities operation", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + 
"client": "client1", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll1" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection1", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll1", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ] + }, + "commandName": "delete", + "databaseName": "database1" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/createEntities-operation.yml b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml new file mode 100644 index 00000000000..ee8acd73687 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/createEntities-operation.yml @@ -0,0 +1,38 @@ +description: createEntities-operation + +# Note: createEntities is not technically in the 1.9 schema but was introduced at the same time. 
+schemaVersion: "1.9" + +tests: + - description: createEntities operation + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client1 client1 + observeEvents: [ commandStartedEvent ] + - database: + id: &database1 database1 + client: *client1 + databaseName: &database1Name database1 + - collection: + id: &collection1 collection1 + database: *database1 + collectionName: &collection1Name coll1 + - name: deleteOne + object: *collection1 + arguments: + filter: { _id : 1 } + expectEvents: + - client: *client1 + events: + - commandStartedEvent: + command: + delete: *collection1Name + deletes: + - q: { _id: 1 } + limit: 1 + commandName: delete + databaseName: *database1Name diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json new file mode 100644 index 00000000000..b17ae78b942 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -0,0 +1,111 @@ +{ + "description": "entity-cursor-iterateOnce", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "databaseName": "database0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "iterateOnce", + "operations": [ + { + "name": "createFindCursor", + "object": "collection0", + "arguments": { + "filter": {}, + "batchSize": 2 + }, + "saveResultAsEntity": "cursor0" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 1 + } + }, + { + "name": 
"iterateUntilDocumentOrError", + "object": "cursor0", + "expectResult": { + "_id": 2 + } + }, + { + "name": "iterateOnce", + "object": "cursor0" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": {}, + "batchSize": 2 + }, + "commandName": "find", + "databaseName": "database0" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "coll0" + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml new file mode 100644 index 00000000000..508e594a538 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/entity-cursor-iterateOnce.yml @@ -0,0 +1,59 @@ +description: entity-cursor-iterateOnce + +# Note: iterateOnce is not technically in the 1.9 schema but was introduced at the same time. +schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - _id: 1 + - _id: 2 + - _id: 3 + +tests: + - description: iterateOnce + operations: + - name: createFindCursor + object: *collection0 + arguments: + filter: {} + batchSize: 2 + saveResultAsEntity: &cursor0 cursor0 + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 1 } + - name: iterateUntilDocumentOrError + object: *cursor0 + expectResult: { _id: 2 } + # This operation could be iterateUntilDocumentOrError, but we use iterateOnce to ensure that drivers support it.
+ - name: iterateOnce + object: *cursor0 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: {} + batchSize: 2 + commandName: find + databaseName: *database0Name + - commandStartedEvent: + command: + getMore: { $$type: [ int, long ] } + collection: *collection0Name + commandName: getMore diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f3..6f955d81f4a 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.json @@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml index 61c9f8835ac..3ecdf6da1df 100644 --- a/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml +++ b/test/spec/unified-test-format/valid-pass/entity-find-cursor.yml @@ -61,19 +61,19 @@ tests: - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } firstBatch: { $$type: array } commandName: find - commandStartedEvent: command: - getMore: { $$type: long } + getMore: { $$type: [ int, long ] } collection: *collection0Name commandName: getMore - commandSucceededEvent: reply: cursor: - id: { $$type: long } + id: { $$type: [ int, long ] } ns: { $$type: string } nextBatch: { $$type: array } commandName: getMore diff --git 
a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml index 162d0e3c046..d6d87094f64 100644 --- a/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml +++ b/test/spec/unified-test-format/valid-pass/expectedEventsForClient-ignoreExtraEvents.yml @@ -75,4 +75,4 @@ tests: insert: *collection0Name documents: - *insertDocument4 - commandName: insert + commandName: insert \ No newline at end of file diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.json b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json new file mode 100644 index 00000000000..4de65c58387 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.json @@ -0,0 +1,78 @@ +{ + "description": "matches-lte-operator", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "y": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 1 + }, + "y": { + "$$lte": 2 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml 
b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml new file mode 100644 index 00000000000..4bec571f029 --- /dev/null +++ b/test/spec/unified-test-format/valid-pass/matches-lte-operator.yml @@ -0,0 +1,41 @@ +description: matches-lte-operator + +# Note: $$lte is not technically in the 1.8 schema but was introduced at the same time. +schemaVersion: "1.9" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name database0Name + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +tests: + - description: special lte matching operator + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id : 1, y: 1 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection0Name + documents: + # We can make exact assertions here but we use the $$lte operator to ensure drivers support it. 
+ - { _id: { $$lte: 1 }, y: { $$lte: 2 } } + commandName: insert + databaseName: *database0Name diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.json b/test/spec/unified-test-format/valid-pass/poc-change-streams.json index 4194005eb41..50f0d06f08d 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.json +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.json @@ -94,6 +94,42 @@ } ], "tests": [ + { + "description": "saveResultAsEntity is optional for createChangeStream", + "runOnRequirements": [ + { + "minServerVersion": "3.8.0", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "createChangeStream", + "object": "client0", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1 + }, + "commandName": "aggregate", + "databaseName": "admin" + } + } + ] + } + ] + }, { "description": "Executing a watch helper on a MongoClient results in notifications for changes to all collections in all databases in the cluster.", "runOnRequirements": [ diff --git a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml index b066cf0b89a..a7daafceb77 100644 --- a/test/spec/unified-test-format/valid-pass/poc-change-streams.yml +++ b/test/spec/unified-test-format/valid-pass/poc-change-streams.yml @@ -59,6 +59,24 @@ initialData: documents: [] tests: + - description: "saveResultAsEntity is optional for createChangeStream" + runOnRequirements: + - minServerVersion: "3.8.0" + topologies: [ replicaset ] + operations: + - name: createChangeStream + object: *client0 + arguments: + pipeline: [] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + aggregate: 1 + commandName: aggregate + databaseName: admin + - description: "Executing a watch helper on a MongoClient results in notifications for 
changes to all collections in all databases in the cluster." runOnRequirements: - minServerVersion: "3.8.0" diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.json b/test/spec/unified-test-format/valid-pass/poc-crud.json index 0790d9b789f..94e4ec56829 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.json +++ b/test/spec/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/spec/unified-test-format/valid-pass/poc-crud.yml b/test/spec/unified-test-format/valid-pass/poc-crud.yml index b7d05d75afb..5748c0779f8 100644 --- a/test/spec/unified-test-format/valid-pass/poc-crud.yml +++ b/test/spec/unified-test-format/valid-pass/poc-crud.yml @@ -143,7 +143,7 @@ tests: - description: "readConcern majority with out stage" runOnRequirements: - minServerVersion: "4.1.0" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] serverless: "forbid" operations: - name: aggregate diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.json b/test/spec/unified-test-format/valid-pass/poc-sessions.json index 75f34894286..117c9e7d009 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.json +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-sessions.yml b/test/spec/unified-test-format/valid-pass/poc-sessions.yml index cb16657da3f..20902583286 100644 --- a/test/spec/unified-test-format/valid-pass/poc-sessions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-sessions.yml @@ -124,12 +124,11 @@ tests: - description: "Dirty explicit session is discarded" # Original test specified retryWrites=true, but that is now the default. 
- # Retryable writes will require a sharded-replicaset, though. runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] operations: - name: failPoint object: testRunner diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed659276..9ab44a9c548 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml index 4f981d15dd4..94fadda0aa5 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-convenient-api.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a5..de08edec442 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml 
index 47db7c3188a..33cd2a25214 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.yml @@ -4,7 +4,7 @@ schemaVersion: "1.0" runOnRequirements: - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.json b/test/spec/unified-test-format/valid-pass/poc-transactions.json index 0355ca20605..2055a3b7057 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.json +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/spec/unified-test-format/valid-pass/poc-transactions.yml b/test/spec/unified-test-format/valid-pass/poc-transactions.yml index 0a66b9bd7f6..8a12c8b39ac 100644 --- a/test/spec/unified-test-format/valid-pass/poc-transactions.yml +++ b/test/spec/unified-test-format/valid-pass/poc-transactions.yml @@ -6,7 +6,7 @@ runOnRequirements: - minServerVersion: "4.0" topologies: [ replicaset ] - minServerVersion: "4.1.8" - topologies: [ sharded-replicaset ] + topologies: [ sharded ] createEntities: - client: @@ -51,7 +51,7 @@ tests: - description: "explicitly create collection using create command" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 @@ -109,7 +109,7 @@ tests: - description: "create index on a non-existing collection" runOnRequirements: - minServerVersion: "4.3.4" - topologies: [ 
replicaset, sharded-replicaset ] + topologies: [ replicaset, sharded ] operations: - name: dropCollection object: *database0 diff --git a/test/tools/cmap_spec_runner.ts b/test/tools/cmap_spec_runner.ts index 56fd5ba92c6..a5350e176e0 100644 --- a/test/tools/cmap_spec_runner.ts +++ b/test/tools/cmap_spec_runner.ts @@ -1,6 +1,7 @@ import { expect } from 'chai'; import { EventEmitter } from 'events'; import { clearTimeout, setTimeout } from 'timers'; +import { inspect } from 'util'; import { addContainerMetadata, @@ -12,7 +13,8 @@ import { makeClientMetadata, type MongoClient, type Server, - shuffle + shuffle, + TimeoutContext } from '../mongodb'; import { isAnyRequirementSatisfied } from './unified-spec-runner/unified-utils'; import { type FailPoint, sleep } from './utils'; @@ -191,7 +193,14 @@ const compareInputToSpec = (input, expected, message) => { const getTestOpDefinitions = (threadContext: ThreadContext) => ({ checkOut: async function (op) { - const connection: Connection = await ConnectionPool.prototype.checkOut.call(threadContext.pool); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: threadContext.pool.options.waitQueueTimeoutMS + }); + const connection: Connection = await ConnectionPool.prototype.checkOut.call( + threadContext.pool, + { timeoutContext } + ); if (op.label != null) { threadContext.connections.set(op.label, connection); } else { @@ -425,7 +434,7 @@ async function runCmapTest(test: CmapTest, threadContext: ThreadContext) { } compareInputToSpec(actualError, errorPropsToCheck, `failed while checking ${errorType}`); } else { - expect(actualError).to.not.exist; + expect(actualError, inspect(actualError)).to.not.exist; } const actualEvents = threadContext.poolEvents.filter( diff --git a/test/tools/runner/config.ts b/test/tools/runner/config.ts index 1d637486226..af596980c3f 100644 --- a/test/tools/runner/config.ts +++ b/test/tools/runner/config.ts @@ -7,6 +7,7 @@ import { type AuthMechanism, 
HostAddress, MongoClient, + type MongoClientOptions, type ServerApi, TopologyType, type WriteConcernSettings @@ -82,7 +83,7 @@ export class TestConfiguration { auth?: { username: string; password: string; authSource?: string }; proxyURIParams?: ProxyParams; }; - serverApi: ServerApi; + serverApi?: ServerApi; activeResources: number; isSrv: boolean; serverlessCredentials: { username: string | undefined; password: string | undefined }; @@ -171,12 +172,33 @@ export class TestConfiguration { return this.options.replicaSet; } + /** + * Returns a `hello`, executed against `uri`. + */ + async hello(uri = this.uri) { + const client = this.newClient(uri); + try { + await client.connect(); + const { maxBsonObjectSize, maxMessageSizeBytes, maxWriteBatchSize, ...rest } = await client + .db('admin') + .command({ hello: 1 }); + return { + maxBsonObjectSize, + maxMessageSizeBytes, + maxWriteBatchSize, + ...rest + }; + } finally { + await client.close(); + } + } + isOIDC(uri: string, env: string): boolean { if (!uri) return false; return uri.indexOf('MONGODB-OIDC') > -1 && uri.indexOf(`ENVIRONMENT:${env}`) > -1; } - newClient(urlOrQueryOptions?: string | Record, serverOptions?: Record) { + newClient(urlOrQueryOptions?: string | Record, serverOptions?: MongoClientOptions) { serverOptions = Object.assign({}, getEnvironmentalOptions(), serverOptions); // Support MongoClient constructor form (url, options) for `newClient`. 
@@ -272,7 +294,23 @@ export class TestConfiguration { * * @param options - overrides and settings for URI generation */ - url(options?: UrlOptions) { + url( + options?: UrlOptions & { + useMultipleMongoses?: boolean; + db?: string; + replicaSet?: string; + proxyURIParams?: ProxyParams; + username?: string; + password?: string; + auth?: { + username?: string; + password?: string; + }; + authSource?: string; + authMechanism?: string; + authMechanismProperties?: Record; + } + ) { options = { db: this.options.db, replicaSet: this.options.replicaSet, diff --git a/test/tools/unified-spec-runner/entities.ts b/test/tools/unified-spec-runner/entities.ts index 65b5242cf06..7f90e275dc8 100644 --- a/test/tools/unified-spec-runner/entities.ts +++ b/test/tools/unified-spec-runner/entities.ts @@ -44,7 +44,7 @@ import { type TopologyOpeningEvent, WriteConcern } from '../../mongodb'; -import { ejson, getEnvironmentalOptions } from '../../tools/utils'; +import { getEnvironmentalOptions } from '../../tools/utils'; import type { TestConfiguration } from '../runner/config'; import { EntityEventRegistry } from './entity_event_registry'; import { trace } from './runner'; @@ -590,7 +590,7 @@ export class EntitiesMap extends Map { new EntityEventRegistry(client, entity.client, map).register(); await client.connect(); } catch (error) { - console.error(ejson`failed to connect entity ${entity}`); + console.error('failed to connect entity', entity); // In the case where multiple clients are defined in the test and any one of them failed // to connect, but others did succeed, we need to ensure all open clients are closed. 
const clients = map.mapOf('client'); @@ -619,6 +619,10 @@ export class EntitiesMap extends Map { const options = Object.create(null); + if (entity.session.sessionOptions?.defaultTimeoutMS != null) { + options.defaultTimeoutMS = entity.session.sessionOptions?.defaultTimeoutMS; + } + if (entity.session.sessionOptions?.causalConsistency) { options.causalConsistency = entity.session.sessionOptions?.causalConsistency; } diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index bb4ba99a449..931ba1c9ecc 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -25,6 +25,7 @@ import { MongoBulkWriteError, MongoClientBulkWriteError, MongoError, + MongoOperationTimeoutError, MongoServerError, ObjectId, type OneOrMore, @@ -98,6 +99,19 @@ export function isMatchAsRootOperator(value: unknown): value is MatchAsRootOpera return typeof value === 'object' && value != null && '$$matchAsRoot' in value; } +export interface LteOperator { + $$lte: number; +} + +export function isLteOperator(value: unknown): value is LteOperator { + return ( + typeof value === 'object' && + value != null && + '$$lte' in value && + typeof value['$$lte'] === 'number' + ); +} + export const SpecialOperatorKeys = [ '$$exists', '$$type', @@ -106,7 +120,8 @@ export const SpecialOperatorKeys = [ '$$matchAsRoot', '$$matchAsDocument', '$$unsetOrMatches', - '$$sessionLsid' + '$$sessionLsid', + '$$lte' ]; export type SpecialOperator = @@ -117,7 +132,8 @@ export type SpecialOperator = | UnsetOrMatchesOperator | SessionLsidOperator | MatchAsDocumentOperator - | MatchAsRootOperator; + | MatchAsRootOperator + | LteOperator; type KeysOfUnion = T extends object ? 
keyof T : never; export type SpecialOperatorKey = KeysOfUnion; @@ -130,7 +146,8 @@ export function isSpecialOperator(value: unknown): value is SpecialOperator { isUnsetOrMatchesOperator(value) || isSessionLsidOperator(value) || isMatchAsRootOperator(value) || - isMatchAsDocumentOperator(value) + isMatchAsDocumentOperator(value) || + isLteOperator(value) ); } @@ -157,7 +174,8 @@ TYPE_MAP.set('minKey', actual => actual._bsontype === 'MinKey'); TYPE_MAP.set('maxKey', actual => actual._bsontype === 'MaxKey'); TYPE_MAP.set( 'int', - actual => (typeof actual === 'number' && Number.isInteger(actual)) || actual._bsontype === 'Int32' + actual => + (typeof actual === 'number' && Number.isInteger(actual)) || actual?._bsontype === 'Int32' ); TYPE_MAP.set( 'long', @@ -202,6 +220,10 @@ export function resultCheck( resultCheck(objFromActual, value, entities, path, checkExtraKeys); } else if (key === 'createIndexes') { for (const [i, userIndex] of actual.indexes.entries()) { + if (expected?.indexes?.[i]?.key == null) { + // The expectation does not include an assertion for the index key + continue; + } expect(expected).to.have.nested.property(`.indexes[${i}].key`).to.be.a('object'); // @ts-expect-error: Not worth narrowing to a document expect(Object.keys(expected.indexes[i].key)).to.have.lengthOf(1); @@ -355,7 +377,7 @@ export function specialCheck( for (const type of types) { ok ||= TYPE_MAP.get(type)(actual); } - expect(ok, `Expected [${actual}] to be one of [${types}]`).to.be.true; + expect(ok, `Expected ${path.join('.')} [${actual}] to be one of [${types}]`).to.be.true; } else if (isExistsOperator(expected)) { // $$exists const actualExists = actual !== undefined && actual !== null; @@ -390,6 +412,9 @@ export function specialCheck( ); resultCheck(actual, expected.$$matchAsRoot as any, entities, path, false); + } else if (isLteOperator(expected)) { + expect(typeof actual).to.equal('number'); + expect(actual).to.be.lte(expected.$$lte); } else { expect.fail(`Unknown special 
operator: ${JSON.stringify(expected)}`); } @@ -488,6 +513,13 @@ function compareCommandFailedEvents( } } +function expectInstanceOf any>( + instance: any, + ctor: T +): asserts instance is InstanceType { + expect(instance).to.be.instanceOf(ctor); +} + function compareEvents( actual: CommandEvent[] | CmapEvent[] | SdamEvent[], expected: (ExpectedCommandEvent & ExpectedCmapEvent & ExpectedSdamEvent)[], @@ -502,9 +534,7 @@ function compareEvents( if (expectedEvent.commandStartedEvent) { const path = `${rootPrefix}.commandStartedEvent`; - if (!(actualEvent instanceof CommandStartedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandStartedEvent`); - } + expectInstanceOf(actualEvent, CommandStartedEvent); compareCommandStartedEvents(actualEvent, expectedEvent.commandStartedEvent, entities, path); if (expectedEvent.commandStartedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); @@ -513,9 +543,7 @@ function compareEvents( } } else if (expectedEvent.commandSucceededEvent) { const path = `${rootPrefix}.commandSucceededEvent`; - if (!(actualEvent instanceof CommandSucceededEvent)) { - expect.fail(`expected ${path} to be instanceof CommandSucceededEvent`); - } + expectInstanceOf(actualEvent, CommandSucceededEvent); compareCommandSucceededEvents( actualEvent, expectedEvent.commandSucceededEvent, @@ -529,9 +557,7 @@ function compareEvents( } } else if (expectedEvent.commandFailedEvent) { const path = `${rootPrefix}.commandFailedEvent`; - if (!(actualEvent instanceof CommandFailedEvent)) { - expect.fail(`expected ${path} to be instanceof CommandFailedEvent`); - } + expectInstanceOf(actualEvent, CommandFailedEvent); compareCommandFailedEvents(actualEvent, expectedEvent.commandFailedEvent, entities, path); if (expectedEvent.commandFailedEvent.hasServerConnectionId) { expect(actualEvent).property('serverConnectionId').to.be.a('bigint'); @@ -759,6 +785,16 @@ export function expectErrorCheck( } } + if 
(expected.isTimeoutError === false) { + expect(error).to.not.be.instanceof(MongoOperationTimeoutError); + } else if (expected.isTimeoutError === true) { + if ('errorResponse' in error) { + expect(error.errorResponse).to.be.instanceof(MongoOperationTimeoutError); + } else { + expect(error).to.be.instanceof(MongoOperationTimeoutError); + } + } + if (expected.errorContains != null) { expect(error.message.toLowerCase(), expectMessage.toLowerCase()).to.include( expected.errorContains.toLowerCase() diff --git a/test/tools/unified-spec-runner/operations.ts b/test/tools/unified-spec-runner/operations.ts index 9cc67174f3c..f7c34a70239 100644 --- a/test/tools/unified-spec-runner/operations.ts +++ b/test/tools/unified-spec-runner/operations.ts @@ -11,6 +11,7 @@ import { CommandStartedEvent, Db, type Document, + GridFSBucket, type MongoClient, MongoError, ReadConcern, @@ -19,6 +20,7 @@ import { ServerType, type TopologyDescription, type TopologyType, + type TransactionOptions, WriteConcern } from '../../mongodb'; import { sleep } from '../../tools/utils'; @@ -49,11 +51,6 @@ operations.set('createEntities', async ({ entities, operation, testConfig }) => await EntitiesMap.createEntities(testConfig, null, operation.arguments.entities!, entities); }); -operations.set('abortTransaction', async ({ entities, operation }) => { - const session = entities.getEntity('session', operation.object); - return session.abortTransaction(); -}); - operations.set('aggregate', async ({ entities, operation }) => { const dbOrCollection = entities.get(operation.object) as Db | Collection; if (!(dbOrCollection instanceof Db || dbOrCollection instanceof Collection)) { @@ -219,7 +216,8 @@ operations.set('close', async ({ entities, operation }) => { /* eslint-disable no-empty */ try { const cursor = entities.getEntity('cursor', operation.object); - await cursor.close(); + const timeoutMS = operation.arguments?.timeoutMS; + await cursor.close({ timeoutMS }); return; } catch {} @@ -241,7 +239,12 @@ 
operations.set('close', async ({ entities, operation }) => { operations.set('commitTransaction', async ({ entities, operation }) => { const session = entities.getEntity('session', operation.object); - return session.commitTransaction(); + return await session.commitTransaction({ timeoutMS: operation.arguments?.timeoutMS }); +}); + +operations.set('abortTransaction', async ({ entities, operation }) => { + const session = entities.getEntity('session', operation.object); + return await session.abortTransaction({ timeoutMS: operation.arguments?.timeoutMS }); }); operations.set('createChangeStream', async ({ entities, operation }) => { @@ -265,7 +268,18 @@ operations.set('createCollection', async ({ entities, operation }) => { operations.set('createFindCursor', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.find(filter, opts); // The spec dictates that we create the cursor and force the find command // to execute, but don't move the cursor forward. 
hasNext() accomplishes @@ -303,17 +317,18 @@ operations.set('dropCollection', async ({ entities, operation }) => { if (!/ns not found/.test(err.message)) { throw err; } + return false; } }); operations.set('drop', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.drop(); + return bucket.drop(operation.arguments); }); operations.set('dropIndexes', async ({ entities, operation }) => { const collection = entities.getEntity('collection', operation.object); - return collection.dropIndexes(); + return collection.dropIndexes(operation.arguments); }); operations.set('endSession', async ({ entities, operation }) => { @@ -328,7 +343,18 @@ operations.set('find', async ({ entities, operation }) => { } else { queryable = entities.getEntity('collection', operation.object); } - const { filter, ...opts } = operation.arguments!; + const { filter, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } return queryable.find(filter, opts).toArray(); }); @@ -370,7 +396,7 @@ operations.set('insertOne', async ({ entities, operation }) => { // Looping exposes the fact that we can generate _ids for inserted // documents and we don't want the original operation to get modified // and use the same _id for each insert. 
- return collection.insertOne({ ...document }, opts); + return await collection.insertOne({ ...document }, opts); }); operations.set('insertMany', async ({ entities, operation }) => { @@ -526,7 +552,8 @@ operations.set('targetedFailPoint', async ({ entities, operation }) => { operations.set('delete', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); - return bucket.delete(operation.arguments!.id); + const { id, ...opts } = operation.arguments; + return bucket.delete(id, opts); }); operations.set('download', async ({ entities, operation }) => { @@ -534,7 +561,8 @@ operations.set('download', async ({ entities, operation }) => { const { id, ...options } = operation.arguments ?? {}; const stream = bucket.openDownloadStream(id, options); - return Buffer.concat(await stream.toArray()); + const data = Buffer.concat(await stream.toArray()); + return data; }); operations.set('downloadByName', async ({ entities, operation }) => { @@ -549,7 +577,6 @@ operations.set('downloadByName', async ({ entities, operation }) => { operations.set('upload', async ({ entities, operation }) => { const bucket = entities.getEntity('bucket', operation.object); const { filename, source, ...options } = operation.arguments ?? 
{}; - const stream = bucket.openUploadStream(filename, options); const fileStream = Readable.from(Buffer.from(source.$$hexBytes, 'hex')); @@ -717,13 +744,17 @@ operations.set('waitForThread', async ({ entities, operation }) => { operations.set('withTransaction', async ({ entities, operation, client, testConfig }) => { const session = entities.getEntity('session', operation.object); - const options = { + const options: TransactionOptions = { readConcern: ReadConcern.fromOptions(operation.arguments), writeConcern: WriteConcern.fromOptions(operation.arguments), readPreference: ReadPreference.fromOptions(operation.arguments), - maxCommitTimeMS: operation.arguments!.maxCommitTimeMS + maxCommitTimeMS: operation.arguments?.maxCommitTimeMS }; + if (typeof operation.arguments?.timeoutMS === 'number') { + options.timeoutMS = operation.arguments.timeoutMS; + } + await session.withTransaction(async () => { for (const callbackOperation of operation.arguments!.callback) { await executeOperationAndCheck(callbackOperation, entities, client, testConfig, true); @@ -767,11 +798,10 @@ operations.set('runCommand', async ({ entities, operation }: OperationFunctionPa throw new AssertionError('runCommand requires a command'); const { command } = operation.arguments; - if (operation.arguments.timeoutMS != null) throw new AssertionError('timeoutMS not supported'); - const options = { readPreference: operation.arguments.readPreference, - session: operation.arguments.session + session: operation.arguments.session, + timeoutMS: operation.arguments.timeoutMS }; return db.command(command, options); @@ -782,7 +812,9 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc const { command, ...opts } = operation.arguments!; const cursor = db.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if 
(!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); @@ -794,10 +826,25 @@ operations.set('runCursorCommand', async ({ entities, operation }: OperationFunc operations.set('createCommandCursor', async ({ entities, operation }: OperationFunctionParams) => { const collection = entities.getEntity('db', operation.object); - const { command, ...opts } = operation.arguments!; + const { command, cursorType, ...opts } = operation.arguments!; + switch (cursorType) { + case 'tailableAwait': + opts.tailable = true; + opts.awaitData = true; + break; + case 'tailable': + opts.tailable = true; + break; + default: + break; + } const cursor = collection.runCursorCommand(command, { readPreference: ReadPreference.fromOptions({ readPreference: opts.readPreference }), - session: opts.session + session: opts.session, + tailable: opts.tailable, + awaitData: opts.awaitData, + timeoutMode: opts.timeoutMode, + timeoutMS: opts.timeoutMS }); if (!Number.isNaN(+opts.batchSize)) cursor.setBatchSize(+opts.batchSize); @@ -824,9 +871,30 @@ operations.set('updateOne', async ({ entities, operation }) => { }); operations.set('rename', async ({ entities, operation }) => { - const collection = entities.getEntity('collection', operation.object); - const { to, ...options } = operation.arguments!; - return collection.rename(to, options); + let entity: GridFSBucket | Collection | undefined; + try { + entity = entities.getEntity('collection', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof Collection) { + const { to, ...options } = operation.arguments!; + return entity.rename(to, options); + } + + try { + entity = entities.getEntity('bucket', operation.object, false); + } catch { + // Ignore wrong type error + } + + if (entity instanceof GridFSBucket) { + const { id, newFilename, ...opts } = operation.arguments!; + return entity.rename(id, newFilename, opts as any); + } + + expect.fail(`No collection or bucket with name '${operation.object}' 
found`); }); operations.set('createDataKey', async ({ entities, operation }) => { @@ -945,7 +1013,7 @@ export async function executeOperationAndCheck( rethrow = false ): Promise { const opFunc = operations.get(operation.name); - expect(opFunc, `Unknown operation: ${operation.name}`).to.exist; + if (opFunc == null) expect.fail(`Unknown operation: ${operation.name}`); if (operation.arguments && operation.arguments.session) { // The session could need to be either pulled from the entity map or in the case where @@ -959,7 +1027,7 @@ export async function executeOperationAndCheck( let result; try { - result = await opFunc!({ entities, operation, client, testConfig }); + result = await opFunc({ entities, operation, client, testConfig }); } catch (error) { if (operation.expectError) { expectErrorCheck(error, operation.expectError, entities); diff --git a/test/tools/unified-spec-runner/schema.ts b/test/tools/unified-spec-runner/schema.ts index 81b81724632..ce722b2e706 100644 --- a/test/tools/unified-spec-runner/schema.ts +++ b/test/tools/unified-spec-runner/schema.ts @@ -386,6 +386,7 @@ export interface StoreEventsAsEntity { } export interface ExpectedError { isError?: true; + isTimeoutError?: boolean; isClientError?: boolean; errorContains?: string; errorCode?: number; diff --git a/test/tools/utils.ts b/test/tools/utils.ts index 3cb50d2cd51..6ddf48d8b01 100644 --- a/test/tools/utils.ts +++ b/test/tools/utils.ts @@ -1,5 +1,5 @@ import * as child_process from 'node:child_process'; -import { once } from 'node:events'; +import { on, once } from 'node:events'; import * as fs from 'node:fs/promises'; import * as path from 'node:path'; @@ -11,13 +11,16 @@ import { setTimeout } from 'timers'; import { inspect, promisify } from 'util'; import { + type AnyClientBulkWriteModel, type Document, type HostAddress, MongoClient, + now, OP_MSG, Topology, type TopologyOptions } from '../mongodb'; +import { type TestConfiguration } from './runner/config'; import { runUnifiedSuite } from 
'./unified-spec-runner/runner'; import { type CollectionData, @@ -266,6 +269,7 @@ export interface FailPoint { failInternalCommands?: boolean; errorLabels?: string[]; appName?: string; + namespace?: string; }; } @@ -568,3 +572,136 @@ export async function itInNodeProcess( } }); } + +/** + * Connects the client and waits until `client` has emitted `count` connectionCreated events. + * + * **This will hang if the client does not have a maxPoolSizeSet!** + * + * This is useful when you want to ensure that the client has pools that are full of connections. + * + * This does not guarantee that all pools that the client has are completely full unless + * count = number of servers to which the client is connected * maxPoolSize. But it can + * serve as a way to ensure that some connections have been established and are in the pools. + */ +export async function waitUntilPoolsFilled( + client: MongoClient, + signal: AbortSignal, + count: number = client.s.options.maxPoolSize +): Promise { + let connectionCount = 0; + + async function wait$() { + for await (const _event of on(client, 'connectionCreated', { signal })) { + connectionCount++; + if (connectionCount >= count) { + break; + } + } + } + + await Promise.all([wait$(), client.connect()]); +} + +export async function configureFailPoint( + configuration: TestConfiguration, + failPoint: FailPoint, + uri = configuration.url() +) { + const utilClient = configuration.newClient(uri); + await utilClient.connect(); + + try { + await utilClient.db('admin').command(failPoint); + } finally { + await utilClient.close(); + } +} + +export async function clearFailPoint(configuration: TestConfiguration, url = configuration.url()) { + const utilClient = configuration.newClient(url); + await utilClient.connect(); + + try { + await utilClient.db('admin').command({ + configureFailPoint: 'failCommand', + mode: 'off' + }); + } finally { + await utilClient.close(); + } +} + +export async function makeMultiBatchWrite( + configuration: 
TestConfiguration +): Promise { + const { maxBsonObjectSize, maxMessageSizeBytes } = await configuration.hello(); + + const length = maxMessageSizeBytes / maxBsonObjectSize + 1; + const models = Array.from({ length }, () => ({ + namespace: 'db.coll', + name: 'insertOne' as const, + document: { a: 'b'.repeat(maxBsonObjectSize - 500) } + })); + + return models; +} + +export async function makeMultiResponseBatchModelArray( + configuration: TestConfiguration +): Promise { + const { maxBsonObjectSize } = await configuration.hello(); + const namespace = `foo.${new BSON.ObjectId().toHexString()}`; + const models: AnyClientBulkWriteModel[] = [ + { + name: 'updateOne', + namespace, + update: { $set: { age: 1 } }, + upsert: true, + filter: { _id: 'a'.repeat(maxBsonObjectSize / 2) } + }, + { + name: 'updateOne', + namespace, + update: { $set: { age: 1 } }, + upsert: true, + filter: { _id: 'b'.repeat(maxBsonObjectSize / 2) } + } + ]; + + return models; +} + +/** + * A utility to measure the duration of an async function. This is intended to be used for CSOT + * testing, where we expect to timeout within a certain threshold and want to measure the duration + * of that operation. 
+ */ +export async function measureDuration(f: () => Promise): Promise<{ + duration: number; + result: T | Error; +}> { + const start = now(); + const result = await f().catch(e => e); + const end = now(); + return { + duration: end - start, + result + }; +} + +export function mergeTestMetadata( + metadata: MongoDBMetadataUI, + newMetadata: MongoDBMetadataUI +): MongoDBMetadataUI { + return { + requires: { + ...metadata.requires, + ...newMetadata.requires + }, + sessions: { + ...metadata.sessions, + ...newMetadata.sessions + } + }; +} diff --git a/test/types/mongodb.test-d.ts b/test/types/mongodb.test-d.ts index 892235f4747..4037a18159d 100644 --- a/test/types/mongodb.test-d.ts +++ b/test/types/mongodb.test-d.ts @@ -20,9 +20,39 @@ declare const options: MongoDBDriver.MongoClientOptions; expectDeprecated(options.w); expectDeprecated(options.journal); expectDeprecated(options.wtimeoutMS); +// TODO(NODE-6491): expectDeprecated(options.socketTimeoutMS); +// TODO(NODE-6491): expectDeprecated(options.waitQueueTimeoutMS); expectNotDeprecated(options.writeConcern); +expectNotDeprecated(options.serverSelectionTimeoutMS); +expectNotDeprecated(options.connectTimeoutMS); + expectType(options.writeConcern); +declare const estimatedDocumentCountOptions: MongoDBDriver.EstimatedDocumentCountOptions; +// TODO(NODE-6491): expectDeprecated(estimatedDocumentCountOptions.maxTimeMS); + +declare const countOptions: MongoDBDriver.CountOptions; +// TODO(NODE-6491): expectDeprecated(countOptions.maxTimeMS); + +declare const commandOptions: MongoDBDriver.CommandOperationOptions; +// TODO(NODE-6491): expectDeprecated(commandOptions.maxTimeMS); + +declare const aggregateOptions: MongoDBDriver.AggregateOptions; +// TODO(NODE-6491): expectDeprecated(aggregateOptions.maxTimeMS); + +declare const runCommandCursor: MongoDBDriver.RunCommandCursor; +// TODO(NODE-6491): expectDeprecated(runCommandCursor.setMaxTimeMS); +// TODO(NODE-6491): expectDeprecated(runCommandCursor.maxTimeMS); + +declare const 
cursorOptions: MongoDBDriver.AbstractCursorOptions; +// TODO(NODE-6491): expectDeprecated(cursorOptions.maxTimeMS); + +declare const abstractCursor: MongoDBDriver.AbstractCursor; +// TODO(NODE-6491): expectDeprecated(abstractCursor.maxTimeMS); + +declare const txnOptions: MongoDBDriver.TransactionOptions; +// TODO(NODE-6491): expectDeprecated(txnOptions.maxCommitTimeMS); + interface TSchema extends Document { name: string; } diff --git a/test/types/write_concern.test-d.ts b/test/types/write_concern.test-d.ts index b4249de86c8..2b10824a1c6 100644 --- a/test/types/write_concern.test-d.ts +++ b/test/types/write_concern.test-d.ts @@ -1,13 +1,18 @@ -import { expectNotAssignable } from 'tsd'; +import { expectDeprecated, expectNotAssignable } from 'tsd'; import type { ChangeStreamOptions, FindOptions, ListCollectionsOptions, - ListIndexesOptions + ListIndexesOptions, + WriteConcern } from '../mongodb'; expectNotAssignable({ writeConcern: { w: 0 } }); expectNotAssignable({ writeConcern: { w: 0 } }); expectNotAssignable({ writeConcern: { w: 0 } }); expectNotAssignable({ writeConcern: { w: 0 } }); + +declare const wc: WriteConcern; +// TODO(NODE-6491): expectDeprecated(wc.wtimeoutMS); +expectDeprecated(wc.wtimeout); diff --git a/test/unit/client-side-encryption/auto_encrypter.test.ts b/test/unit/client-side-encryption/auto_encrypter.test.ts index 1e13c0b07c5..79bc321b802 100644 --- a/test/unit/client-side-encryption/auto_encrypter.test.ts +++ b/test/unit/client-side-encryption/auto_encrypter.test.ts @@ -40,9 +40,11 @@ const MOCK_KMS_DECRYPT_REPLY = readHttpResponse(`${__dirname}/data/kms-decrypt-r class MockClient { options: any; + s: { options: any }; constructor(options?: any) { this.options = { options: options || {} }; + this.s = { options: this.options }; } } diff --git a/test/unit/client-side-encryption/client_encryption.test.ts b/test/unit/client-side-encryption/client_encryption.test.ts index 2ecf634771f..aeb1ac9beef 100644 --- 
a/test/unit/client-side-encryption/client_encryption.test.ts +++ b/test/unit/client-side-encryption/client_encryption.test.ts @@ -14,15 +14,17 @@ import { } from '../../../src/client-side-encryption/errors'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { StateMachine } from '../../../src/client-side-encryption/state_machine'; -import { Binary, BSON, deserialize } from '../../mongodb'; +import { Binary, BSON, deserialize, MongoClient } from '../../mongodb'; const { EJSON } = BSON; class MockClient { options: any; + s: { options: any }; constructor(options?: any) { this.options = { options: options || {} }; + this.s = { options: this.options }; } db(dbName) { return { @@ -100,6 +102,49 @@ describe('ClientEncryption', function () { expect(ClientEncryption.libmongocryptVersion).to.be.a('string'); }); + describe('constructor', () => { + describe('_timeoutMS', () => { + const LOCAL_MASTERKEY = Buffer.from( + 'Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk', + 'base64' + ); + context('when timeoutMS is provided in ClientEncryptionOptions and client', function () { + it('sets clientEncryption._timeoutMS to ClientEncryptionOptions.timeoutMS value', function () { + const client = new MongoClient('mongodb://a/', { timeoutMS: 100 }); + const clientEncryption = new ClientEncryption(client, { + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: { local: { key: LOCAL_MASTERKEY } }, + timeoutMS: 500 + }); + expect(clientEncryption._timeoutMS).to.equal(500); + }); + }); + + context('when timeoutMS is only provided in ClientEncryptionOptions', function () { + it('sets clientEncryption._timeoutMS to ClientEncryptionOptions.timeoutMS value', function () { + const client = new MongoClient('mongodb://a/'); + const clientEncryption = new ClientEncryption(client, { + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: { local: { key: LOCAL_MASTERKEY } }, + timeoutMS: 
500 + }); + expect(clientEncryption._timeoutMS).to.equal(500); + }); + }); + + context('when timeoutMS is only provided in client', function () { + it('sets clientEncryption._timeoutMS to client.timeoutMS value', function () { + const client = new MongoClient('mongodb://a/', { timeoutMS: 100 }); + const clientEncryption = new ClientEncryption(client, { + keyVaultNamespace: 'keyvault.datakeys', + kmsProviders: { local: { key: LOCAL_MASTERKEY } } + }); + expect(clientEncryption._timeoutMS).to.equal(100); + }); + }); + }); + }); + describe('createEncryptedCollection()', () => { let clientEncryption; const client = new MockClient(); @@ -158,7 +203,10 @@ describe('ClientEncryption', function () { expect(createDataKeySpy.callCount).to.equal(0); const options = createCollectionSpy.getCall(0).args[1]; - expect(options).to.deep.equal({ encryptedFields: { fields: 'not an array' } }); + expect(options).to.deep.equal({ + encryptedFields: { fields: 'not an array' }, + timeoutMS: undefined + }); }); }); @@ -176,7 +224,8 @@ describe('ClientEncryption', function () { expect(createDataKeyStub.callCount).to.equal(1); const options = createCollectionSpy.getCall(0).args[1]; expect(options).to.deep.equal({ - encryptedFields: { fields: ['not an array', { keyId: keyId }, { keyId: {} }] } + encryptedFields: { fields: ['not an array', { keyId: keyId }, { keyId: {} }] }, + timeoutMS: undefined }); }); }); @@ -192,7 +241,10 @@ describe('ClientEncryption', function () { masterKey }); expect(result).to.have.property('collection'); - expect(createDataKey).to.have.been.calledOnceWithExactly('aws', { masterKey }); + expect(createDataKey).to.have.been.calledOnceWithExactly('aws', { + masterKey, + timeoutContext: undefined + }); }); context('when createDataKey rejects', () => { diff --git a/test/unit/client-side-encryption/state_machine.test.ts b/test/unit/client-side-encryption/state_machine.test.ts index 77f3cf3a824..ad319c44ade 100644 --- a/test/unit/client-side-encryption/state_machine.test.ts 
+++ b/test/unit/client-side-encryption/state_machine.test.ts @@ -12,9 +12,19 @@ import * as tls from 'tls'; import { StateMachine } from '../../../src/client-side-encryption/state_machine'; // eslint-disable-next-line @typescript-eslint/no-restricted-imports import { Db } from '../../../src/db'; -// eslint-disable-next-line @typescript-eslint/no-restricted-imports -import { MongoClient } from '../../../src/mongo_client'; -import { Int32, Long, serialize } from '../../mongodb'; +import { + BSON, + Collection, + CSOTTimeoutContext, + CursorTimeoutContext, + type FindOptions, + Int32, + Long, + MongoClient, + serialize, + squashError +} from '../../mongodb'; +import { sleep } from '../../tools/utils'; describe('StateMachine', function () { class MockRequest implements MongoCryptKMSRequest { @@ -74,12 +84,10 @@ describe('StateMachine', function () { const options = { promoteLongs: false, promoteValues: false }; const serializedCommand = serialize(command); const stateMachine = new StateMachine({} as any); - // eslint-disable-next-line @typescript-eslint/no-empty-function - const callback = () => {}; context('when executing the command', function () { it('does not promote values', function () { - stateMachine.markCommand(clientStub, 'test.coll', serializedCommand, callback); + stateMachine.markCommand(clientStub, 'test.coll', serializedCommand); expect(runCommandStub.calledWith(command, options)).to.be.true; }); }); @@ -461,4 +469,134 @@ describe('StateMachine', function () { expect.fail('missed exception'); }); }); + + describe('CSOT', function () { + describe('#fetchKeys', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let findSpy; + + beforeEach(async function () { + findSpy = sinon.spy(Collection.prototype, 'find'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.fetchKeys() is passed a `CSOTimeoutContext`', 
function () { + it('collection.find uses the provided timeout context', async function () { + const context = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 }), context) + .catch(e => squashError(e)); + + const { timeoutContext } = findSpy.getCalls()[0].args[1] as FindOptions; + expect(timeoutContext).to.be.instanceOf(CursorTimeoutContext); + expect(timeoutContext.timeoutContext).to.equal(context); + }); + }); + + context('when StateMachine.fetchKeys() is not passed a `CSOTimeoutContext`', function () { + it('a timeoutContext is not provided to the find cursor', async function () { + await stateMachine + .fetchKeys(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + const { timeoutContext } = findSpy.getCalls()[0].args[1] as FindOptions; + expect(timeoutContext).to.be.undefined; + }); + }); + }); + + describe('#markCommand', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let dbCommandSpy; + + beforeEach(async function () { + dbCommandSpy = sinon.spy(Db.prototype, 'command'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context('when StateMachine.markCommand() is passed a `CSOTimeoutContext`', function () { + it('db.command runs with its timeoutMS property set to remainingTimeMS', async function () { + const timeoutContext = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 }), timeoutContext) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.not.be.undefined; + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.lessThanOrEqual(205); + }); + }); + + context('when StateMachine.markCommand() is not passed a 
`CSOTimeoutContext`', function () { + it('db.command runs with an undefined timeoutMS property', async function () { + await stateMachine + .markCommand(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + expect(dbCommandSpy.getCalls()[0].args[1].timeoutMS).to.be.undefined; + }); + }); + }); + + describe('#fetchCollectionInfo', function () { + const stateMachine = new StateMachine({} as any); + const client = new MongoClient('mongodb://localhost:27017'); + let listCollectionsSpy; + + beforeEach(async function () { + listCollectionsSpy = sinon.spy(Db.prototype, 'listCollections'); + }); + + afterEach(async function () { + sinon.restore(); + await client.close(); + }); + + context( + 'when StateMachine.fetchCollectionInfo() is passed a `CSOTimeoutContext`', + function () { + it('listCollections uses the provided timeoutContext', async function () { + const context = new CSOTTimeoutContext({ + timeoutMS: 500, + serverSelectionTimeoutMS: 30000 + }); + await sleep(300); + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 }), context) + .catch(e => squashError(e)); + const [_filter, { timeoutContext }] = listCollectionsSpy.getCalls()[0].args; + expect(timeoutContext).to.exist; + expect(timeoutContext.timeoutContext).to.equal(context); + }); + } + ); + + context( + 'when StateMachine.fetchCollectionInfo() is not passed a `CSOTimeoutContext`', + function () { + it('no timeoutContext is provided to listCollections', async function () { + await stateMachine + .fetchCollectionInfo(client, 'keyVault', BSON.serialize({ a: 1 })) + .catch(e => squashError(e)); + const [_filter, { timeoutContext }] = listCollectionsSpy.getCalls()[0].args; + expect(timeoutContext).not.to.exist; + }); + } + ); + }); + }); }); diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 69102e1f150..1604cd82d86 100644 --- a/test/unit/cmap/connection_pool.test.js +++ 
b/test/unit/cmap/connection_pool.test.js @@ -5,13 +5,15 @@ const { WaitQueueTimeoutError } = require('../../mongodb'); const mock = require('../../tools/mongodb-mock/index'); const sinon = require('sinon'); const { expect } = require('chai'); -const { setImmediate } = require('timers'); +const { setImmediate } = require('timers/promises'); const { ns, isHello } = require('../../mongodb'); const { createTimerSandbox } = require('../timer_sandbox'); const { topologyWithPlaceholderClient } = require('../../tools/utils'); const { MongoClientAuthProviders } = require('../../mongodb'); +const { TimeoutContext } = require('../../mongodb'); describe('Connection Pool', function () { + let timeoutContext; let mockMongod; const stubServer = { topology: { @@ -26,6 +28,9 @@ describe('Connection Pool', function () { options: { extendedMetadata: {} } + }, + s: { + serverSelectionTimeoutMS: 0 } } }; @@ -41,6 +46,10 @@ describe('Connection Pool', function () { }) ); + beforeEach(() => { + timeoutContext = TimeoutContext.create({ waitQueueTimeoutMS: 0, serverSelectionTimeoutMS: 0 }); + }); + it('should destroy connections which have been closed', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; @@ -61,8 +70,10 @@ describe('Connection Pool', function () { const events = []; pool.on('connectionClosed', event => events.push(event)); - const conn = await pool.checkOut(); - const error = await conn.command(ns('admin.$cmd'), { ping: 1 }, {}).catch(error => error); + const conn = await pool.checkOut({ timeoutContext }); + const error = await conn + .command(ns('admin.$cmd'), { ping: 1 }, { timeoutContext }) + .catch(error => error); expect(error).to.be.instanceOf(Error); pool.checkIn(conn); @@ -90,7 +101,7 @@ describe('Connection Pool', function () { pool.ready(); - const conn = await pool.checkOut(); + const conn = await pool.checkOut({ timeoutContext }); const maybeError = await conn.command(ns('admin.$cmd'), { ping: 1 }, undefined).catch(e => 
e); expect(maybeError).to.be.instanceOf(MongoError); expect(maybeError).to.match(/timed out/); @@ -98,7 +109,7 @@ describe('Connection Pool', function () { pool.checkIn(conn); }); - it('should clear timed out wait queue members if no connections are available', function (done) { + it('should clear timed out wait queue members if no connections are available', async function () { mockMongod.setMessageHandler(request => { const doc = request.document; if (isHello(doc)) { @@ -111,26 +122,22 @@ describe('Connection Pool', function () { waitQueueTimeoutMS: 200, hostAddress: mockMongod.hostAddress() }); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 200, + serverSelectionTimeoutMS: 0 + }); pool.ready(); - pool.checkOut().then(conn => { - expect(conn).to.exist; - pool.checkOut().then(expect.fail, err => { - expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); - - // We can only process the wait queue with `checkIn` and `checkOut`, so we - // force the pool here to think there are no available connections, even though - // we are checking the connection back in. 
This simulates a slow leak where - // incoming requests outpace the ability of the queue to fully process cancelled - // wait queue members - sinon.stub(pool, 'availableConnectionCount').get(() => 0); - pool.checkIn(conn); - - setImmediate(() => expect(pool).property('waitQueueSize').to.equal(0)); - done(); - }); - }, expect.fail); + const conn = await pool.checkOut({ timeoutContext }); + const err = await pool.checkOut({ timeoutContext }).catch(e => e); + expect(err).to.exist.and.be.instanceOf(WaitQueueTimeoutError); + sinon.stub(pool, 'availableConnectionCount').get(() => 0); + pool.checkIn(conn); + + await setImmediate(); + + expect(pool).property('waitQueueSize').to.equal(0); }); describe('minPoolSize population', function () { diff --git a/test/unit/cursor/aggregation_cursor.test.ts b/test/unit/cursor/aggregation_cursor.test.ts index 32ca4125ff4..82ae18745b0 100644 --- a/test/unit/cursor/aggregation_cursor.test.ts +++ b/test/unit/cursor/aggregation_cursor.test.ts @@ -1,6 +1,12 @@ import { expect } from 'chai'; -import { type AggregationCursor, MongoClient } from '../../mongodb'; +import { + AggregationCursor, + CursorTimeoutMode, + MongoAPIError, + MongoClient, + ns +} from '../../mongodb'; describe('class AggregationCursor', () => { let client: MongoClient; @@ -126,6 +132,38 @@ describe('class AggregationCursor', () => { }); context('when addStage, bespoke stage methods, or array is used to construct pipeline', () => { + context('when CSOT is enabled', () => { + let aggregationCursor: AggregationCursor; + before(function () { + aggregationCursor = client + .db('test') + .collection('test') + .aggregate([], { timeoutMS: 100, timeoutMode: CursorTimeoutMode.ITERATION }); + }); + + context('when a $out stage is added with .addStage()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $out: 'test' }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $merge stage is added with .addStage()', () => { + 
it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.addStage({ $merge: {} }); + }).to.throw(MongoAPIError); + }); + }); + context('when a $out stage is added with .out()', () => { + it('throws a MongoAPIError', function () { + expect(() => { + aggregationCursor.out('test'); + }).to.throw(MongoAPIError); + }); + }); + }); + it('sets deeply identical aggregations pipelines', () => { const collection = client.db().collection('test'); @@ -157,4 +195,31 @@ describe('class AggregationCursor', () => { expect(builderGenericStageCursor.pipeline).to.deep.equal(expectedPipeline); }); }); + + describe('constructor()', () => { + context('when CSOT is enabled', () => { + context('when timeoutMode=ITERATION and a $out stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $out: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + context('when timeoutMode=ITERATION and a $merge stage is provided', function () { + it('throws a MongoAPIError', function () { + expect( + () => + new AggregationCursor(client, ns('db.coll'), [{ $merge: 'test' }], { + timeoutMS: 100, + timeoutMode: 'iteration' + }) + ).to.throw(MongoAPIError); + }); + }); + }); + }); }); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 6bab40d0318..dca792bd382 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -14,12 +14,15 @@ import { LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE, LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE, MONGODB_ERROR_CODES, + MongoDriverError, MongoError, MongoErrorLabel, MongoMissingDependencyError, MongoNetworkError, MongoNetworkTimeoutError, + MongoOperationTimeoutError, MongoParseError, + MongoRuntimeError, MongoServerError, MongoSystemError, MongoWriteConcernError, @@ -28,6 +31,7 @@ import { ns, PoolClosedError as MongoPoolClosedError, setDifference, + TimeoutContext, type 
TopologyDescription, type TopologyOptions, WaitQueueTimeoutError as MongoWaitQueueTimeoutError @@ -172,6 +176,23 @@ describe('MongoErrors', () => { }); }); + describe('class MongoOperationTimeoutError', () => { + it('has a name property equal to MongoOperationTimeoutError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.have.property('name', 'MongoOperationTimeoutError'); + }); + + it('is instanceof MongoDriverError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.be.instanceOf(MongoDriverError); + }); + + it('is not instanceof MongoRuntimeError', () => { + const error = new MongoOperationTimeoutError('time out!'); + expect(error).to.not.be.instanceOf(MongoRuntimeError); + }); + }); + describe('MongoMissingDependencyError#constructor', () => { context('when options.cause is set', () => { it('attaches the cause property to the instance', () => { @@ -376,11 +397,17 @@ describe('MongoErrors', () => { { replicaSet: 'rs' } as TopologyOptions ); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); return replSet .connect() - .then(topology => topology.selectServer('primary', {})) + .then(topology => topology.selectServer('primary', { timeoutContext })) .then(server => - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), { + timeoutContext + }) ) .then( () => expect.fail('expected command to fail'), @@ -419,10 +446,14 @@ describe('MongoErrors', () => { if (err) { return cleanup(err); } + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - topology.selectServer('primary', {}).then(server => { + topology.selectServer('primary', { timeoutContext }).then(server => { server - .command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}) + .command(ns('db1'), Object.assign({}, 
RAW_USER_WRITE_CONCERN_CMD), { timeoutContext }) .then(expect.fail, err => { let _err; try { diff --git a/test/unit/explain.test.ts b/test/unit/explain.test.ts index 8d71197a81a..adfcfd866c6 100644 --- a/test/unit/explain.test.ts +++ b/test/unit/explain.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai'; import { it } from 'mocha'; -import { Explain, ExplainVerbosity } from '../mongodb'; +import { Explain, ExplainVerbosity, FindCursor, MongoClient, MongoDBNamespace } from '../mongodb'; describe('class Explain {}', function () { describe('static .fromOptions()', function () { @@ -50,4 +50,42 @@ describe('class Explain {}', function () { }); }); }); + + describe('resolveExplainTimeoutOptions()', function () { + const cursor = new FindCursor( + new MongoClient('mongodb://localhost:27027'), + MongoDBNamespace.fromString('foo.bar'), + {}, + {} + ); + + it('when called with no arguments returns neither timeout nor explain', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions(); + expect(timeout).to.be.undefined; + expect(explain).to.be.undefined; + }); + + it('when called with a timeoutMS option returns only timeout options', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions({ timeoutMS: 1_000 }); + expect(timeout).to.deep.equal({ timeoutMS: 1_000 }); + expect(explain).to.be.undefined; + }); + + it('when called with explain settings returns only explain options', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions({ + verbosity: 'queryPlanner' + }); + expect(timeout).to.be.undefined; + expect(explain).to.deep.equal({ verbosity: 'queryPlanner' }); + }); + + it('when called with explain settings and timeout options returns both explain and timeout options', function () { + const { timeout, explain } = cursor.resolveExplainTimeoutOptions( + { verbosity: 'queryPlanner' }, + { timeoutMS: 1_000 } + ); + expect(timeout).to.deep.equal({ timeoutMS: 1_000 }); + 
expect(explain).to.deep.equal({ verbosity: 'queryPlanner' }); + }); + }); }); diff --git a/test/unit/index.test.ts b/test/unit/index.test.ts index 595f372c43d..7b064b1078d 100644 --- a/test/unit/index.test.ts +++ b/test/unit/index.test.ts @@ -5,14 +5,7 @@ import { expect } from 'chai'; import * as mongodb from '../../src/index'; import { setDifference } from '../mongodb'; -/** - * TS-NODE Adds these keys but they are undefined, they are not present when you import from lib - * We did not think this strangeness was worth investigating so we just make sure they remain set to undefined - */ -const TS_NODE_EXPORTS = ['AnyBulkWriteOperation', 'BulkWriteOptions']; - const EXPECTED_EXPORTS = [ - ...TS_NODE_EXPORTS, 'AbstractCursor', 'Admin', 'AggregationCursor', @@ -31,11 +24,11 @@ const EXPECTED_EXPORTS = [ 'ClientSession', 'Code', 'Collection', - 'configureExplicitResourceManagement', 'CommandFailedEvent', 'CommandStartedEvent', 'CommandSucceededEvent', 'Compressor', + 'configureExplicitResourceManagement', 'ConnectionCheckedInEvent', 'ConnectionCheckedOutEvent', 'ConnectionCheckOutFailedEvent', @@ -49,10 +42,12 @@ const EXPECTED_EXPORTS = [ 'ConnectionPoolReadyEvent', 'ConnectionReadyEvent', 'CURSOR_FLAGS', + 'CursorTimeoutMode', 'Db', 'DBRef', 'Decimal128', 'Double', + 'ExplainableCursor', 'ExplainVerbosity', 'FindCursor', 'GridFSBucket', @@ -101,6 +96,7 @@ const EXPECTED_EXPORTS = [ 'MongoNetworkTimeoutError', 'MongoNotConnectedError', 'MongoOIDCError', + 'MongoOperationTimeoutError', 'MongoParseError', 'MongoRuntimeError', 'MongoServerClosedError', @@ -112,7 +108,6 @@ const EXPECTED_EXPORTS = [ 'MongoTransactionError', 'MongoUnexpectedServerResponseError', 'MongoWriteConcernError', - 'WriteConcernErrorResult', 'ObjectId', 'OrderedBulkOperation', 'ProfilingLevel', @@ -128,6 +123,10 @@ const EXPECTED_EXPORTS = [ 'ServerHeartbeatStartedEvent', 'ServerHeartbeatSucceededEvent', 'ServerOpeningEvent', + 'ServerSelectionEvent', + 'ServerSelectionFailedEvent', + 
'ServerSelectionStartedEvent', + 'ServerSelectionSucceededEvent', 'ServerType', 'SrvPollingEvent', 'Timestamp', @@ -137,12 +136,9 @@ const EXPECTED_EXPORTS = [ 'TopologyType', 'UnorderedBulkOperation', 'UUID', + 'WaitingForSuitableServerEvent', 'WriteConcern', - 'ServerSelectionEvent', - 'ServerSelectionFailedEvent', - 'ServerSelectionStartedEvent', - 'ServerSelectionSucceededEvent', - 'WaitingForSuitableServerEvent' + 'WriteConcernErrorResult' ]; describe('mongodb entrypoint', () => { @@ -153,12 +149,4 @@ describe('mongodb entrypoint', () => { it('exports only the expected keys', () => { expect(setDifference(Object.keys(mongodb), EXPECTED_EXPORTS)).to.be.empty; }); - - it('should export keys added by ts-node as undefined', () => { - // If the array is empty, this test would be a no-op so we should remove it - expect(TS_NODE_EXPORTS).to.have.length.greaterThan(0); - for (const tsNodeExportKey of TS_NODE_EXPORTS) { - expect(mongodb).to.have.property(tsNodeExportKey, undefined); - } - }); }); diff --git a/test/unit/operations/get_more.test.ts b/test/unit/operations/get_more.test.ts index f79da44e22f..76ebf16555d 100644 --- a/test/unit/operations/get_more.test.ts +++ b/test/unit/operations/get_more.test.ts @@ -53,7 +53,12 @@ describe('GetMoreOperation', function () { new ServerDescription('a:1'), {} as any ); - const opts = { ...options, documentsReturnedIn: 'nextBatch', returnFieldSelector: null }; + const opts = { + ...options, + documentsReturnedIn: 'nextBatch', + returnFieldSelector: null, + timeoutContext: undefined + }; const operation = new GetMoreOperation(namespace, cursorId, server, opts); const stub = sinon.stub(server, 'command').resolves({}); diff --git a/test/unit/sdam/topology.test.ts b/test/unit/sdam/topology.test.ts index e4a34417d50..5264b5d9c45 100644 --- a/test/unit/sdam/topology.test.ts +++ b/test/unit/sdam/topology.test.ts @@ -17,6 +17,7 @@ import { Server, SrvPoller, SrvPollingEvent, + TimeoutContext, Topology, TopologyDescription, 
TopologyDescriptionChangedEvent, @@ -108,17 +109,28 @@ describe('Topology (unit)', function () { const topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); topology.connect().then(() => { - topology.selectServer('primary', {}).then(server => { - server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }).then( - () => expect.fail('expected command to fail'), - err => { - expect(err).to.exist; - expect(err).to.match(/timed out/); - topology.close(); - done(); - } - ); - }, expect.fail); + const ctx = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0, + socketTimeoutMS: 250 + }); + topology + .selectServer('primary', { + timeoutContext: ctx + }) + .then(server => { + server + .command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250, timeoutContext: ctx }) + .then( + () => expect.fail('expected command to fail'), + err => { + expect(err).to.exist; + expect(err).to.match(/timed out/); + topology.close(); + done(); + } + ); + }, expect.fail); }, expect.fail); }); }); @@ -217,10 +229,16 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.true; }); @@ -245,11 +263,17 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); + const timeoutContext = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => 
null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(poolCleared).to.be.false; topology.close(); @@ -269,14 +293,20 @@ describe('Topology (unit)', function () { topology = topologyWithPlaceholderClient(mockServer.hostAddress(), {}); await topology.connect(); + const timeoutContext = TimeoutContext.create({ + waitQueueTimeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); const server = await topology.selectServer('primary', {}); let serverDescription; server.on('descriptionReceived', sd => (serverDescription = sd)); - const err = await server.command(ns('test.test'), { insert: { a: 42 } }, {}).then( - () => null, - e => e - ); + const err = await server + .command(ns('test.test'), { insert: { a: 42 } }, { timeoutContext }) + .then( + () => null, + e => e + ); expect(err).to.eql(serverDescription.error); expect(server.description.type).to.equal('Unknown'); }); diff --git a/test/unit/timeout.test.ts b/test/unit/timeout.test.ts index 3fafc21b35f..1dd7e83feb5 100644 --- a/test/unit/timeout.test.ts +++ b/test/unit/timeout.test.ts @@ -1,14 +1,25 @@ import { expect } from 'chai'; -import { MongoInvalidArgumentError, Timeout, TimeoutError } from '../mongodb'; +import { + CSOTTimeoutContext, + LegacyTimeoutContext, + MongoInvalidArgumentError, + MongoRuntimeError, + Timeout, + TimeoutContext, + TimeoutError +} from '../mongodb'; describe('Timeout', function () { let timeout: Timeout; beforeEach(() => { - if (Timeout.is(timeout)) { - timeout.clear(); - } + timeout = null; + }); + + beforeEach(() => { + timeout?.clear(); + timeout = null; }); describe('expires()', function () { @@ -63,54 +74,197 @@ describe('Timeout', function () { }); }); }); +}); + +describe('TimeoutContext', function () { + describe('TimeoutContext.create', function () { + context('when timeoutMS is a number', function () { + it('returns a CSOTTimeoutContext 
instance', function () { + const ctx = TimeoutContext.create({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); - describe('is()', function () { - context('when called on a Timeout instance', function () { - it('returns true', function () { - expect(Timeout.is(Timeout.expires(0))).to.be.true; + expect(ctx).to.be.instanceOf(CSOTTimeoutContext); }); }); - context('when called on a nullish object ', function () { - it('returns false', function () { - expect(Timeout.is(undefined)).to.be.false; - expect(Timeout.is(null)).to.be.false; + context('when timeoutMS is undefined', function () { + it('returns a LegacyTimeoutContext instance', function () { + const ctx = TimeoutContext.create({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 0 + }); + + expect(ctx).to.be.instanceOf(LegacyTimeoutContext); }); }); + }); + + describe('CSOTTimeoutContext', function () { + let ctx: CSOTTimeoutContext; + + describe('get serverSelectionTimeout()', function () { + let timeout: Timeout | null; - context('when called on a primitive type', function () { - it('returns false', function () { - expect(Timeout.is(1)).to.be.false; - expect(Timeout.is('hello')).to.be.false; - expect(Timeout.is(true)).to.be.false; - expect(Timeout.is(1n)).to.be.false; - expect(Timeout.is(Symbol.for('test'))).to.be.false; + afterEach(() => { + timeout?.clear(); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is 0', function () { + it('returns null', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 0 + }); + + expect(ctx.serverSelectionTimeout).to.be.null; + }); + }); + + context('when timeoutMS is 0 and serverSelectionTimeoutMS is >0', function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 0, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + + 
expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + }); + + context( + 'when timeoutMS is >0 serverSelectionTimeoutMS is >0 and timeoutMS > serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 15, + serverSelectionTimeoutMS: 10 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.serverSelectionTimeoutMS); + }); + } + ); + + context( + 'when timeoutMS is >0, serverSelectionTimeoutMS is >0 and timeoutMS < serverSelectionTimeoutMS', + function () { + it('returns a Timeout instance with duration set to timeoutMS', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 10, + serverSelectionTimeoutMS: 15 + }); + + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.exist; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.timeoutMS); + }); + } + ); + }); + + describe('get connectionCheckoutTimeout()', function () { + context('when called before get serverSelectionTimeout()', function () { + it('throws a MongoRuntimeError', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 15 + }); + + expect(() => ctx.connectionCheckoutTimeout).to.throw(MongoRuntimeError); + }); + }); + + context('when called after get serverSelectionTimeout()', function () { + let serverSelectionTimeout: Timeout; + let connectionCheckoutTimeout: Timeout; + + afterEach(() => { + serverSelectionTimeout.clear(); + connectionCheckoutTimeout.clear(); + }); + + it('returns same timeout as serverSelectionTimeout', function () { + ctx = new CSOTTimeoutContext({ + timeoutMS: 100, + serverSelectionTimeoutMS: 86 + }); + serverSelectionTimeout = ctx.serverSelectionTimeout; + connectionCheckoutTimeout = ctx.connectionCheckoutTimeout; + + 
expect(connectionCheckoutTimeout).to.exist; + expect(connectionCheckoutTimeout).to.equal(serverSelectionTimeout); + }); }); }); + }); + + describe('LegacyTimeoutContext', function () { + let timeout: Timeout | null; + + afterEach(() => { + timeout?.clear(); + }); + + describe('get serverSelectionTimeout()', function () { + context('when serverSelectionTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to serverSelectionTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 100, + waitQueueTimeoutMS: 10 + }); - context('when called on a Promise-like object with a matching toStringTag', function () { - it('returns true', function () { - const timeoutLike = { - [Symbol.toStringTag]: 'MongoDBTimeout', - then() { - return 0; - } - }; + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.serverSelectionTimeoutMS); + }); + }); + + context('when serverSelectionTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 0, + waitQueueTimeoutMS: 10 + }); - expect(Timeout.is(timeoutLike)).to.be.true; + timeout = ctx.serverSelectionTimeout; + expect(timeout).to.be.null; + }); }); }); - context('when called on a Promise-like object without a matching toStringTag', function () { - it('returns false', function () { - const timeoutLike = { - [Symbol.toStringTag]: 'lol', - then() { - return 0; - } - }; + describe('get connectionCheckoutTimeout()', function () { + context('when waitQueueTimeoutMS > 0', function () { + it('returns a Timeout instance with duration set to waitQueueTimeoutMS', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + waitQueueTimeoutMS: 20 + }); + timeout = ctx.connectionCheckoutTimeout; + + expect(timeout).to.be.instanceOf(Timeout); + expect(timeout.duration).to.equal(ctx.options.waitQueueTimeoutMS); + 
}); + }); + + context('when waitQueueTimeoutMS = 0', function () { + it('returns null', function () { + const ctx = new LegacyTimeoutContext({ + serverSelectionTimeoutMS: 10, + waitQueueTimeoutMS: 0 + }); - expect(Timeout.is(timeoutLike)).to.be.false; + expect(ctx.connectionCheckoutTimeout).to.be.null; + }); }); }); }); diff --git a/test/unit/tools/unified_spec_runner.test.ts b/test/unit/tools/unified_spec_runner.test.ts index a0887be9593..7ebee168590 100644 --- a/test/unit/tools/unified_spec_runner.test.ts +++ b/test/unit/tools/unified_spec_runner.test.ts @@ -100,7 +100,7 @@ describe('Unified Spec Runner', function () { expect(() => resultCheckSpy(actual, expected, entitiesMap, [])).to.throw( AssertionError, - /Expected \[string\] to be one of \[int\]/ + /\[string\] to be one of \[int\]/ ); }); });