First commit
node_modules/mongodb/src/sdam/common.ts (normal file, 79 lines, generated, vendored)
@@ -0,0 +1,79 @@
import { clearTimeout } from 'timers';

import type { Binary, Long, Timestamp } from '../bson';
import type { ClientSession } from '../sessions';
import type { Topology } from './topology';

// shared state names
export const STATE_CLOSING = 'closing';
export const STATE_CLOSED = 'closed';
export const STATE_CONNECTING = 'connecting';
export const STATE_CONNECTED = 'connected';

/**
 * An enumeration of topology types we know about
 * @public
 */
export const TopologyType = Object.freeze({
  Single: 'Single',
  ReplicaSetNoPrimary: 'ReplicaSetNoPrimary',
  ReplicaSetWithPrimary: 'ReplicaSetWithPrimary',
  Sharded: 'Sharded',
  Unknown: 'Unknown',
  LoadBalanced: 'LoadBalanced'
} as const);

/** @public */
export type TopologyType = (typeof TopologyType)[keyof typeof TopologyType];

/**
 * An enumeration of server types we know about
 * @public
 */
export const ServerType = Object.freeze({
  Standalone: 'Standalone',
  Mongos: 'Mongos',
  PossiblePrimary: 'PossiblePrimary',
  RSPrimary: 'RSPrimary',
  RSSecondary: 'RSSecondary',
  RSArbiter: 'RSArbiter',
  RSOther: 'RSOther',
  RSGhost: 'RSGhost',
  Unknown: 'Unknown',
  LoadBalancer: 'LoadBalancer'
} as const);

/** @public */
export type ServerType = (typeof ServerType)[keyof typeof ServerType];

/** @internal */
export type TimerQueue = Set<NodeJS.Timeout>;

/** @internal */
export function drainTimerQueue(queue: TimerQueue): void {
  queue.forEach(clearTimeout);
  queue.clear();
}

/** @public */
export interface ClusterTime {
  clusterTime: Timestamp;
  signature: {
    hash: Binary;
    keyId: Long;
  };
}

/** Shared function to determine clusterTime for a given topology or session */
export function _advanceClusterTime(
  entity: Topology | ClientSession,
  $clusterTime: ClusterTime
): void {
  if (entity.clusterTime == null) {
    entity.clusterTime = $clusterTime;
  } else {
    if ($clusterTime.clusterTime.greaterThan(entity.clusterTime.clusterTime)) {
      entity.clusterTime = $clusterTime;
    }
  }
}
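The TopologyType and ServerType declarations above use a pattern worth noting: a frozen object supplies the runtime values, and a type with the same name is derived from it, yielding a string-literal union without a TypeScript enum. A minimal standalone sketch of that pattern, using a hypothetical Color name that does not appear in the driver:

const Color = Object.freeze({
  Red: 'Red',
  Blue: 'Blue'
} as const);
// Value and type may share a name; the type is 'Red' | 'Blue'
type Color = (typeof Color)[keyof typeof Color];

function describe(c: Color): string {
  // Only the frozen object's string literals are assignable to Color
  return c === Color.Red ? 'warm' : 'cool';
}

console.log(describe(Color.Red)); // 'warm'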
node_modules/mongodb/src/sdam/events.ts (normal file, 182 lines, generated, vendored)
@@ -0,0 +1,182 @@
import type { Document } from '../bson';
import type { ServerDescription } from './server_description';
import type { TopologyDescription } from './topology_description';

/**
 * Emitted when server description changes, but does NOT include changes to the RTT.
 * @public
 * @category Event
 */
export class ServerDescriptionChangedEvent {
  /** A unique identifier for the topology */
  topologyId: number;
  /** The address (host/port pair) of the server */
  address: string;
  /** The previous server description */
  previousDescription: ServerDescription;
  /** The new server description */
  newDescription: ServerDescription;

  /** @internal */
  constructor(
    topologyId: number,
    address: string,
    previousDescription: ServerDescription,
    newDescription: ServerDescription
  ) {
    this.topologyId = topologyId;
    this.address = address;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}

/**
 * Emitted when server is initialized.
 * @public
 * @category Event
 */
export class ServerOpeningEvent {
  /** A unique identifier for the topology */
  topologyId: number;
  /** The address (host/port pair) of the server */
  address: string;

  /** @internal */
  constructor(topologyId: number, address: string) {
    this.topologyId = topologyId;
    this.address = address;
  }
}

/**
 * Emitted when server is closed.
 * @public
 * @category Event
 */
export class ServerClosedEvent {
  /** A unique identifier for the topology */
  topologyId: number;
  /** The address (host/port pair) of the server */
  address: string;

  /** @internal */
  constructor(topologyId: number, address: string) {
    this.topologyId = topologyId;
    this.address = address;
  }
}

/**
 * Emitted when topology description changes.
 * @public
 * @category Event
 */
export class TopologyDescriptionChangedEvent {
  /** A unique identifier for the topology */
  topologyId: number;
  /** The old topology description */
  previousDescription: TopologyDescription;
  /** The new topology description */
  newDescription: TopologyDescription;

  /** @internal */
  constructor(
    topologyId: number,
    previousDescription: TopologyDescription,
    newDescription: TopologyDescription
  ) {
    this.topologyId = topologyId;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}

/**
 * Emitted when topology is initialized.
 * @public
 * @category Event
 */
export class TopologyOpeningEvent {
  /** A unique identifier for the topology */
  topologyId: number;

  /** @internal */
  constructor(topologyId: number) {
    this.topologyId = topologyId;
  }
}

/**
 * Emitted when topology is closed.
 * @public
 * @category Event
 */
export class TopologyClosedEvent {
  /** A unique identifier for the topology */
  topologyId: number;

  /** @internal */
  constructor(topologyId: number) {
    this.topologyId = topologyId;
  }
}

/**
 * Emitted when the server monitor's hello command is started - immediately before
 * the hello command is serialized into raw BSON and written to the socket.
 *
 * @public
 * @category Event
 */
export class ServerHeartbeatStartedEvent {
  /** The connection id for the command */
  connectionId: string;

  /** @internal */
  constructor(connectionId: string) {
    this.connectionId = connectionId;
  }
}

/**
 * Emitted when the server monitor's hello succeeds.
 * @public
 * @category Event
 */
export class ServerHeartbeatSucceededEvent {
  /** The connection id for the command */
  connectionId: string;
  /** The execution time of the event in ms */
  duration: number;
  /** The command reply */
  reply: Document;

  /** @internal */
  constructor(connectionId: string, duration: number, reply: Document | null) {
    this.connectionId = connectionId;
    this.duration = duration;
    this.reply = reply ?? {};
  }
}

/**
 * Emitted when the server monitor's hello fails, either with an "ok: 0" or a socket exception.
 * @public
 * @category Event
 */
export class ServerHeartbeatFailedEvent {
  /** The connection id for the command */
  connectionId: string;
  /** The execution time of the event in ms */
  duration: number;
  /** The command failure */
  failure: Error;

  /** @internal */
  constructor(connectionId: string, duration: number, failure: Error) {
    this.connectionId = connectionId;
    this.duration = duration;
    this.failure = failure;
  }
}
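These heartbeat events are re-emitted by the public MongoClient, so application code can observe the monitor's activity without touching internals. A short sketch of listening for them, assuming the published mongodb package and a placeholder connection string:

import { MongoClient } from 'mongodb';

async function main() {
  // Placeholder URI; any reachable deployment works
  const client = new MongoClient('mongodb://localhost:27017');

  client.on('serverHeartbeatSucceeded', ev => {
    // connectionId is the monitored server's address; duration is the heartbeat RTT in ms
    console.log(`heartbeat ok: ${ev.connectionId} (${ev.duration}ms)`);
  });
  client.on('serverHeartbeatFailed', ev => {
    console.log(`heartbeat failed: ${ev.connectionId}: ${ev.failure.message}`);
  });

  await client.connect();
}

main().catch(console.error);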
node_modules/mongodb/src/sdam/monitor.ts (normal file, 595 lines, generated, vendored)
@@ -0,0 +1,595 @@
import { clearTimeout, setTimeout } from 'timers';

import { Document, Long } from '../bson';
import { connect } from '../cmap/connect';
import { Connection, ConnectionOptions } from '../cmap/connection';
import { LEGACY_HELLO_COMMAND } from '../constants';
import { MongoError, MongoErrorLabel, MongoNetworkTimeoutError } from '../error';
import { CancellationToken, TypedEventEmitter } from '../mongo_types';
import type { Callback } from '../utils';
import { calculateDurationInMs, EventEmitterWithState, makeStateMachine, now, ns } from '../utils';
import { ServerType, STATE_CLOSED, STATE_CLOSING } from './common';
import {
  ServerHeartbeatFailedEvent,
  ServerHeartbeatStartedEvent,
  ServerHeartbeatSucceededEvent
} from './events';
import { Server } from './server';
import type { TopologyVersion } from './server_description';

/** @internal */
const kServer = Symbol('server');
/** @internal */
const kMonitorId = Symbol('monitorId');
/** @internal */
const kConnection = Symbol('connection');
/** @internal */
const kCancellationToken = Symbol('cancellationToken');
/** @internal */
const kRTTPinger = Symbol('rttPinger');
/** @internal */
const kRoundTripTime = Symbol('roundTripTime');

const STATE_IDLE = 'idle';
const STATE_MONITORING = 'monitoring';
const stateTransition = makeStateMachine({
  [STATE_CLOSING]: [STATE_CLOSING, STATE_IDLE, STATE_CLOSED],
  [STATE_CLOSED]: [STATE_CLOSED, STATE_MONITORING],
  [STATE_IDLE]: [STATE_IDLE, STATE_MONITORING, STATE_CLOSING],
  [STATE_MONITORING]: [STATE_MONITORING, STATE_IDLE, STATE_CLOSING]
});

const INVALID_REQUEST_CHECK_STATES = new Set([STATE_CLOSING, STATE_CLOSED, STATE_MONITORING]);
function isInCloseState(monitor: Monitor) {
  return monitor.s.state === STATE_CLOSED || monitor.s.state === STATE_CLOSING;
}

/** @internal */
export interface MonitorPrivate {
  state: string;
}

/** @public */
export interface MonitorOptions
  extends Omit<ConnectionOptions, 'id' | 'generation' | 'hostAddress'> {
  connectTimeoutMS: number;
  heartbeatFrequencyMS: number;
  minHeartbeatFrequencyMS: number;
}

/** @public */
export type MonitorEvents = {
  serverHeartbeatStarted(event: ServerHeartbeatStartedEvent): void;
  serverHeartbeatSucceeded(event: ServerHeartbeatSucceededEvent): void;
  serverHeartbeatFailed(event: ServerHeartbeatFailedEvent): void;
  resetServer(error?: MongoError): void;
  resetConnectionPool(): void;
  close(): void;
} & EventEmitterWithState;

/** @internal */
export class Monitor extends TypedEventEmitter<MonitorEvents> {
  /** @internal */
  s: MonitorPrivate;
  address: string;
  options: Readonly<
    Pick<MonitorOptions, 'connectTimeoutMS' | 'heartbeatFrequencyMS' | 'minHeartbeatFrequencyMS'>
  >;
  connectOptions: ConnectionOptions;
  [kServer]: Server;
  [kConnection]?: Connection;
  [kCancellationToken]: CancellationToken;
  /** @internal */
  [kMonitorId]?: MonitorInterval;
  [kRTTPinger]?: RTTPinger;

  get connection(): Connection | undefined {
    return this[kConnection];
  }

  constructor(server: Server, options: MonitorOptions) {
    super();

    this[kServer] = server;
    this[kConnection] = undefined;
    this[kCancellationToken] = new CancellationToken();
    this[kCancellationToken].setMaxListeners(Infinity);
    this[kMonitorId] = undefined;
    this.s = {
      state: STATE_CLOSED
    };

    this.address = server.description.address;
    this.options = Object.freeze({
      connectTimeoutMS: options.connectTimeoutMS ?? 10000,
      heartbeatFrequencyMS: options.heartbeatFrequencyMS ?? 10000,
      minHeartbeatFrequencyMS: options.minHeartbeatFrequencyMS ?? 500
    });

    const cancellationToken = this[kCancellationToken];
    // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration
    const connectOptions = Object.assign(
      {
        id: '<monitor>' as const,
        generation: server.pool.generation,
        connectionType: Connection,
        cancellationToken,
        hostAddress: server.description.hostAddress
      },
      options,
      // force BSON serialization options
      {
        raw: false,
        useBigInt64: false,
        promoteLongs: true,
        promoteValues: true,
        promoteBuffers: true
      }
    );

    // ensure no authentication is used for monitoring
    delete connectOptions.credentials;
    if (connectOptions.autoEncrypter) {
      delete connectOptions.autoEncrypter;
    }

    this.connectOptions = Object.freeze(connectOptions);
  }

  connect(): void {
    if (this.s.state !== STATE_CLOSED) {
      return;
    }

    // start
    const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
    const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
    this[kMonitorId] = new MonitorInterval(monitorServer(this), {
      heartbeatFrequencyMS: heartbeatFrequencyMS,
      minHeartbeatFrequencyMS: minHeartbeatFrequencyMS,
      immediate: true
    });
  }

  requestCheck(): void {
    if (INVALID_REQUEST_CHECK_STATES.has(this.s.state)) {
      return;
    }

    this[kMonitorId]?.wake();
  }

  reset(): void {
    const topologyVersion = this[kServer].description.topologyVersion;
    if (isInCloseState(this) || topologyVersion == null) {
      return;
    }

    stateTransition(this, STATE_CLOSING);
    resetMonitorState(this);

    // restart monitor
    stateTransition(this, STATE_IDLE);

    // restart monitoring
    const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
    const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
    this[kMonitorId] = new MonitorInterval(monitorServer(this), {
      heartbeatFrequencyMS: heartbeatFrequencyMS,
      minHeartbeatFrequencyMS: minHeartbeatFrequencyMS
    });
  }

  close(): void {
    if (isInCloseState(this)) {
      return;
    }

    stateTransition(this, STATE_CLOSING);
    resetMonitorState(this);

    // close monitor
    this.emit('close');
    stateTransition(this, STATE_CLOSED);
  }
}

function resetMonitorState(monitor: Monitor) {
  monitor[kMonitorId]?.stop();
  monitor[kMonitorId] = undefined;

  monitor[kRTTPinger]?.close();
  monitor[kRTTPinger] = undefined;

  monitor[kCancellationToken].emit('cancel');

  monitor[kConnection]?.destroy({ force: true });
  monitor[kConnection] = undefined;
}

function checkServer(monitor: Monitor, callback: Callback<Document | null>) {
  let start = now();
  monitor.emit(Server.SERVER_HEARTBEAT_STARTED, new ServerHeartbeatStartedEvent(monitor.address));

  function failureHandler(err: Error) {
    monitor[kConnection]?.destroy({ force: true });
    monitor[kConnection] = undefined;

    monitor.emit(
      Server.SERVER_HEARTBEAT_FAILED,
      new ServerHeartbeatFailedEvent(monitor.address, calculateDurationInMs(start), err)
    );

    const error = !(err instanceof MongoError) ? new MongoError(err) : err;
    error.addErrorLabel(MongoErrorLabel.ResetPool);
    if (error instanceof MongoNetworkTimeoutError) {
      error.addErrorLabel(MongoErrorLabel.InterruptInUseConnections);
    }

    monitor.emit('resetServer', error);
    callback(err);
  }

  const connection = monitor[kConnection];
  if (connection && !connection.closed) {
    const { serverApi, helloOk } = connection;
    const connectTimeoutMS = monitor.options.connectTimeoutMS;
    const maxAwaitTimeMS = monitor.options.heartbeatFrequencyMS;
    const topologyVersion = monitor[kServer].description.topologyVersion;
    const isAwaitable = topologyVersion != null;

    const cmd = {
      [serverApi?.version || helloOk ? 'hello' : LEGACY_HELLO_COMMAND]: 1,
      ...(isAwaitable && topologyVersion
        ? { maxAwaitTimeMS, topologyVersion: makeTopologyVersion(topologyVersion) }
        : {})
    };

    const options = isAwaitable
      ? {
          socketTimeoutMS: connectTimeoutMS ? connectTimeoutMS + maxAwaitTimeMS : 0,
          exhaustAllowed: true
        }
      : { socketTimeoutMS: connectTimeoutMS };

    if (isAwaitable && monitor[kRTTPinger] == null) {
      monitor[kRTTPinger] = new RTTPinger(
        monitor[kCancellationToken],
        Object.assign(
          { heartbeatFrequencyMS: monitor.options.heartbeatFrequencyMS },
          monitor.connectOptions
        )
      );
    }

    connection.command(ns('admin.$cmd'), cmd, options, (err, hello) => {
      if (err) {
        return failureHandler(err);
      }

      if (!('isWritablePrimary' in hello)) {
        // Provide hello-style response document.
        hello.isWritablePrimary = hello[LEGACY_HELLO_COMMAND];
      }

      const rttPinger = monitor[kRTTPinger];
      const duration =
        isAwaitable && rttPinger ? rttPinger.roundTripTime : calculateDurationInMs(start);

      monitor.emit(
        Server.SERVER_HEARTBEAT_SUCCEEDED,
        new ServerHeartbeatSucceededEvent(monitor.address, duration, hello)
      );

      // if we are using the streaming protocol then we immediately issue another `started`
      // event, otherwise the "check" is complete and we return to the main monitor loop
      if (isAwaitable && hello.topologyVersion) {
        monitor.emit(
          Server.SERVER_HEARTBEAT_STARTED,
          new ServerHeartbeatStartedEvent(monitor.address)
        );
        start = now();
      } else {
        monitor[kRTTPinger]?.close();
        monitor[kRTTPinger] = undefined;

        callback(undefined, hello);
      }
    });

    return;
  }

  // connecting does an implicit `hello`
  connect(monitor.connectOptions, (err, conn) => {
    if (err) {
      monitor[kConnection] = undefined;

      failureHandler(err);
      return;
    }

    if (conn) {
      // Tell the connection that we are using the streaming protocol so that the
      // connection's message stream will only read the last hello on the buffer.
      conn.isMonitoringConnection = true;

      if (isInCloseState(monitor)) {
        conn.destroy({ force: true });
        return;
      }

      monitor[kConnection] = conn;
      monitor.emit(
        Server.SERVER_HEARTBEAT_SUCCEEDED,
        new ServerHeartbeatSucceededEvent(monitor.address, calculateDurationInMs(start), conn.hello)
      );

      callback(undefined, conn.hello);
    }
  });
}

function monitorServer(monitor: Monitor) {
  return (callback: Callback) => {
    if (monitor.s.state === STATE_MONITORING) {
      process.nextTick(callback);
      return;
    }
    stateTransition(monitor, STATE_MONITORING);
    function done() {
      if (!isInCloseState(monitor)) {
        stateTransition(monitor, STATE_IDLE);
      }

      callback();
    }

    checkServer(monitor, (err, hello) => {
      if (err) {
        // an error occurred on initial discovery; bail out
        if (monitor[kServer].description.type === ServerType.Unknown) {
          return done();
        }
      }

      // if the check indicates streaming is supported, immediately reschedule monitoring
      if (hello && hello.topologyVersion) {
        setTimeout(() => {
          if (!isInCloseState(monitor)) {
            monitor[kMonitorId]?.wake();
          }
        }, 0);
      }

      done();
    });
  };
}

function makeTopologyVersion(tv: TopologyVersion) {
  return {
    processId: tv.processId,
    // tests mock counter as just number, but in a real situation counter should always be a Long
    // TODO(NODE-2674): Preserve int64 sent from MongoDB
    counter: Long.isLong(tv.counter) ? tv.counter : Long.fromNumber(tv.counter)
  };
}

/** @internal */
export interface RTTPingerOptions extends ConnectionOptions {
  heartbeatFrequencyMS: number;
}

/** @internal */
export class RTTPinger {
  /** @internal */
  [kConnection]?: Connection;
  /** @internal */
  [kCancellationToken]: CancellationToken;
  /** @internal */
  [kRoundTripTime]: number;
  /** @internal */
  [kMonitorId]: NodeJS.Timeout;
  closed: boolean;

  constructor(cancellationToken: CancellationToken, options: RTTPingerOptions) {
    this[kConnection] = undefined;
    this[kCancellationToken] = cancellationToken;
    this[kRoundTripTime] = 0;
    this.closed = false;

    const heartbeatFrequencyMS = options.heartbeatFrequencyMS;
    this[kMonitorId] = setTimeout(() => measureRoundTripTime(this, options), heartbeatFrequencyMS);
  }

  get roundTripTime(): number {
    return this[kRoundTripTime];
  }

  close(): void {
    this.closed = true;
    clearTimeout(this[kMonitorId]);

    this[kConnection]?.destroy({ force: true });
    this[kConnection] = undefined;
  }
}

function measureRoundTripTime(rttPinger: RTTPinger, options: RTTPingerOptions) {
  const start = now();
  options.cancellationToken = rttPinger[kCancellationToken];
  const heartbeatFrequencyMS = options.heartbeatFrequencyMS;

  if (rttPinger.closed) {
    return;
  }

  function measureAndReschedule(conn?: Connection) {
    if (rttPinger.closed) {
      conn?.destroy({ force: true });
      return;
    }

    if (rttPinger[kConnection] == null) {
      rttPinger[kConnection] = conn;
    }

    rttPinger[kRoundTripTime] = calculateDurationInMs(start);
    rttPinger[kMonitorId] = setTimeout(
      () => measureRoundTripTime(rttPinger, options),
      heartbeatFrequencyMS
    );
  }

  const connection = rttPinger[kConnection];
  if (connection == null) {
    connect(options, (err, conn) => {
      if (err) {
        rttPinger[kConnection] = undefined;
        rttPinger[kRoundTripTime] = 0;
        return;
      }

      measureAndReschedule(conn);
    });

    return;
  }

  connection.command(ns('admin.$cmd'), { [LEGACY_HELLO_COMMAND]: 1 }, undefined, err => {
    if (err) {
      rttPinger[kConnection] = undefined;
      rttPinger[kRoundTripTime] = 0;
      return;
    }

    measureAndReschedule();
  });
}

/**
 * @internal
 */
export interface MonitorIntervalOptions {
  /** The interval to execute a method on */
  heartbeatFrequencyMS: number;
  /** A minimum interval that must elapse before the method is called */
  minHeartbeatFrequencyMS: number;
  /** Whether the method should be called immediately when the interval is started */
  immediate: boolean;
}

/**
 * @internal
 */
export class MonitorInterval {
  fn: (callback: Callback) => void;
  timerId: NodeJS.Timeout | undefined;
  lastExecutionEnded: number;
  isExpeditedCallToFnScheduled = false;
  stopped = false;
  isExecutionInProgress = false;
  hasExecutedOnce = false;

  heartbeatFrequencyMS: number;
  minHeartbeatFrequencyMS: number;

  constructor(fn: (callback: Callback) => void, options: Partial<MonitorIntervalOptions> = {}) {
    this.fn = fn;
    this.lastExecutionEnded = -Infinity;

    this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 1000;
    this.minHeartbeatFrequencyMS = options.minHeartbeatFrequencyMS ?? 500;

    if (options.immediate) {
      this._executeAndReschedule();
    } else {
      this._reschedule(undefined);
    }
  }

  wake() {
    const currentTime = now();
    const timeSinceLastCall = currentTime - this.lastExecutionEnded;

    // TODO(NODE-4674): Add error handling and logging to the monitor
    if (timeSinceLastCall < 0) {
      return this._executeAndReschedule();
    }

    if (this.isExecutionInProgress) {
      return;
    }

    // debounce multiple calls to wake within the `minInterval`
    if (this.isExpeditedCallToFnScheduled) {
      return;
    }

    // reschedule a call as soon as possible, ensuring the call never happens
    // faster than the `minInterval`
    if (timeSinceLastCall < this.minHeartbeatFrequencyMS) {
      this.isExpeditedCallToFnScheduled = true;
      this._reschedule(this.minHeartbeatFrequencyMS - timeSinceLastCall);
      return;
    }

    this._executeAndReschedule();
  }

  stop() {
    this.stopped = true;
    if (this.timerId) {
      clearTimeout(this.timerId);
      this.timerId = undefined;
    }

    this.lastExecutionEnded = -Infinity;
    this.isExpeditedCallToFnScheduled = false;
  }

  toString() {
    return JSON.stringify(this);
  }

  toJSON() {
    const currentTime = now();
    const timeSinceLastCall = currentTime - this.lastExecutionEnded;
    return {
      timerId: this.timerId != null ? 'set' : 'cleared',
      lastCallTime: this.lastExecutionEnded,
      isExpeditedCheckScheduled: this.isExpeditedCallToFnScheduled,
      stopped: this.stopped,
      heartbeatFrequencyMS: this.heartbeatFrequencyMS,
      minHeartbeatFrequencyMS: this.minHeartbeatFrequencyMS,
      currentTime,
      timeSinceLastCall
    };
  }

  private _reschedule(ms?: number) {
    if (this.stopped) return;
    if (this.timerId) {
      clearTimeout(this.timerId);
    }

    this.timerId = setTimeout(this._executeAndReschedule, ms || this.heartbeatFrequencyMS);
  }

  private _executeAndReschedule = () => {
    if (this.stopped) return;
    if (this.timerId) {
      clearTimeout(this.timerId);
    }

    this.isExpeditedCallToFnScheduled = false;
    this.isExecutionInProgress = true;

    this.fn(() => {
      this.lastExecutionEnded = now();
      this.isExecutionInProgress = false;
      this._reschedule(this.heartbeatFrequencyMS);
    });
  };
}
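checkServer above issues either a plain hello or, once a topologyVersion is known, an "awaitable" streaming hello: the server holds its reply until the topology changes or maxAwaitTimeMS elapses, so the monitor observes changes without waiting a full heartbeat interval. A sketch of the two command shapes it builds, with hypothetical topologyVersion values and assuming the bson package:

import { Long, ObjectId } from 'bson';

// Hypothetical stand-ins for a server's reported topologyVersion
const topologyVersion = { processId: new ObjectId(), counter: Long.fromNumber(3) };
const heartbeatFrequencyMS = 10000;

// Plain check: a bare hello, bounded only by the socket timeout
const plainCmd = { hello: 1 };

// Streaming check: maxAwaitTimeMS and the last-seen topologyVersion ask the
// server to block until something changes, and exhaustAllowed is set on the socket
const awaitableCmd = { hello: 1, maxAwaitTimeMS: heartbeatFrequencyMS, topologyVersion };

console.log(plainCmd, awaitableCmd);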
node_modules/mongodb/src/sdam/server.ts (normal file, 578 lines, generated, vendored)
@@ -0,0 +1,578 @@
import type { Document } from '../bson';
import { CommandOptions, Connection, DestroyOptions } from '../cmap/connection';
import {
  ConnectionPool,
  ConnectionPoolEvents,
  ConnectionPoolOptions
} from '../cmap/connection_pool';
import { PoolClearedError } from '../cmap/errors';
import {
  APM_EVENTS,
  CLOSED,
  CMAP_EVENTS,
  CONNECT,
  DESCRIPTION_RECEIVED,
  ENDED,
  HEARTBEAT_EVENTS,
  SERVER_HEARTBEAT_FAILED,
  SERVER_HEARTBEAT_STARTED,
  SERVER_HEARTBEAT_SUCCEEDED
} from '../constants';
import type { AutoEncrypter } from '../deps';
import {
  AnyError,
  isNetworkErrorBeforeHandshake,
  isNodeShuttingDownError,
  isSDAMUnrecoverableError,
  MongoError,
  MongoErrorLabel,
  MongoInvalidArgumentError,
  MongoNetworkError,
  MongoNetworkTimeoutError,
  MongoRuntimeError,
  MongoServerClosedError,
  MongoServerError,
  MongoUnexpectedServerResponseError,
  needsRetryableWriteLabel
} from '../error';
import type { ServerApi } from '../mongo_client';
import { TypedEventEmitter } from '../mongo_types';
import type { GetMoreOptions } from '../operations/get_more';
import type { ClientSession } from '../sessions';
import { isTransactionCommand } from '../transactions';
import {
  Callback,
  EventEmitterWithState,
  makeStateMachine,
  maxWireVersion,
  MongoDBNamespace,
  supportsRetryableWrites
} from '../utils';
import {
  ClusterTime,
  STATE_CLOSED,
  STATE_CLOSING,
  STATE_CONNECTED,
  STATE_CONNECTING,
  TopologyType
} from './common';
import type {
  ServerHeartbeatFailedEvent,
  ServerHeartbeatStartedEvent,
  ServerHeartbeatSucceededEvent
} from './events';
import { Monitor, MonitorOptions } from './monitor';
import { compareTopologyVersion, ServerDescription } from './server_description';
import type { Topology } from './topology';

const stateTransition = makeStateMachine({
  [STATE_CLOSED]: [STATE_CLOSED, STATE_CONNECTING],
  [STATE_CONNECTING]: [STATE_CONNECTING, STATE_CLOSING, STATE_CONNECTED, STATE_CLOSED],
  [STATE_CONNECTED]: [STATE_CONNECTED, STATE_CLOSING, STATE_CLOSED],
  [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED]
});

/** @internal */
const kMonitor = Symbol('monitor');

/** @internal */
export type ServerOptions = Omit<ConnectionPoolOptions, 'id' | 'generation' | 'hostAddress'> &
  MonitorOptions;

/** @internal */
export interface ServerPrivate {
  /** The server description for this server */
  description: ServerDescription;
  /** A copy of the options used to construct this instance */
  options: ServerOptions;
  /** The current state of the Server */
  state: string;
  /** MongoDB server API version */
  serverApi?: ServerApi;
  /** A count of the operations currently running against the server. */
  operationCount: number;
}

/** @public */
export type ServerEvents = {
  serverHeartbeatStarted(event: ServerHeartbeatStartedEvent): void;
  serverHeartbeatSucceeded(event: ServerHeartbeatSucceededEvent): void;
  serverHeartbeatFailed(event: ServerHeartbeatFailedEvent): void;
  /** Top level MongoClient doesn't emit this so it is marked: @internal */
  connect(server: Server): void;
  descriptionReceived(description: ServerDescription): void;
  closed(): void;
  ended(): void;
} & ConnectionPoolEvents &
  EventEmitterWithState;

/** @internal */
export class Server extends TypedEventEmitter<ServerEvents> {
  /** @internal */
  s: ServerPrivate;
  /** @internal */
  topology: Topology;
  /** @internal */
  pool: ConnectionPool;
  serverApi?: ServerApi;
  hello?: Document;
  [kMonitor]: Monitor | null;

  /** @event */
  static readonly SERVER_HEARTBEAT_STARTED = SERVER_HEARTBEAT_STARTED;
  /** @event */
  static readonly SERVER_HEARTBEAT_SUCCEEDED = SERVER_HEARTBEAT_SUCCEEDED;
  /** @event */
  static readonly SERVER_HEARTBEAT_FAILED = SERVER_HEARTBEAT_FAILED;
  /** @event */
  static readonly CONNECT = CONNECT;
  /** @event */
  static readonly DESCRIPTION_RECEIVED = DESCRIPTION_RECEIVED;
  /** @event */
  static readonly CLOSED = CLOSED;
  /** @event */
  static readonly ENDED = ENDED;

  /**
   * Create a server
   */
  constructor(topology: Topology, description: ServerDescription, options: ServerOptions) {
    super();

    this.serverApi = options.serverApi;

    const poolOptions = { hostAddress: description.hostAddress, ...options };

    this.topology = topology;
    this.pool = new ConnectionPool(this, poolOptions);

    this.s = {
      description,
      options,
      state: STATE_CLOSED,
      operationCount: 0
    };

    for (const event of [...CMAP_EVENTS, ...APM_EVENTS]) {
      this.pool.on(event, (e: any) => this.emit(event, e));
    }

    this.pool.on(Connection.CLUSTER_TIME_RECEIVED, (clusterTime: ClusterTime) => {
      this.clusterTime = clusterTime;
    });

    if (this.loadBalanced) {
      this[kMonitor] = null;
      // monitoring is disabled in load balancing mode
      return;
    }

    // create the monitor
    // TODO(NODE-4144): Remove new variable for type narrowing
    const monitor = new Monitor(this, this.s.options);
    this[kMonitor] = monitor;

    for (const event of HEARTBEAT_EVENTS) {
      monitor.on(event, (e: any) => this.emit(event, e));
    }

    monitor.on('resetServer', (error: MongoError) => markServerUnknown(this, error));
    monitor.on(Server.SERVER_HEARTBEAT_SUCCEEDED, (event: ServerHeartbeatSucceededEvent) => {
      this.emit(
        Server.DESCRIPTION_RECEIVED,
        new ServerDescription(this.description.hostAddress, event.reply, {
          roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration)
        })
      );

      if (this.s.state === STATE_CONNECTING) {
        stateTransition(this, STATE_CONNECTED);
        this.emit(Server.CONNECT, this);
      }
    });
  }

  get clusterTime(): ClusterTime | undefined {
    return this.topology.clusterTime;
  }

  set clusterTime(clusterTime: ClusterTime | undefined) {
    this.topology.clusterTime = clusterTime;
  }

  get description(): ServerDescription {
    return this.s.description;
  }

  get name(): string {
    return this.s.description.address;
  }

  get autoEncrypter(): AutoEncrypter | undefined {
    if (this.s.options && this.s.options.autoEncrypter) {
      return this.s.options.autoEncrypter;
    }
    return;
  }

  get loadBalanced(): boolean {
    return this.topology.description.type === TopologyType.LoadBalanced;
  }

  /**
   * Initiate server connect
   */
  connect(): void {
    if (this.s.state !== STATE_CLOSED) {
      return;
    }

    stateTransition(this, STATE_CONNECTING);

    // If in load balancer mode we automatically set the server to
    // a load balancer. It never transitions out of this state and
    // has no monitor.
    if (!this.loadBalanced) {
      this[kMonitor]?.connect();
    } else {
      stateTransition(this, STATE_CONNECTED);
      this.emit(Server.CONNECT, this);
    }
  }

  /** Destroy the server connection */
  destroy(options?: DestroyOptions, callback?: Callback): void {
    if (typeof options === 'function') {
      callback = options;
      options = { force: false };
    }
    options = Object.assign({}, { force: false }, options);

    if (this.s.state === STATE_CLOSED) {
      if (typeof callback === 'function') {
        callback();
      }

      return;
    }

    stateTransition(this, STATE_CLOSING);

    if (!this.loadBalanced) {
      this[kMonitor]?.close();
    }

    this.pool.close(options, err => {
      stateTransition(this, STATE_CLOSED);
      this.emit('closed');
      if (typeof callback === 'function') {
        callback(err);
      }
    });
  }

  /**
   * Immediately schedule monitoring of this server. If there is already an attempt being made
   * this will be a no-op.
   */
  requestCheck(): void {
    if (!this.loadBalanced) {
      this[kMonitor]?.requestCheck();
    }
  }

  /**
   * Execute a command
   * @internal
   */
  command(
    ns: MongoDBNamespace,
    cmd: Document,
    options: CommandOptions,
    callback: Callback<Document>
  ): void {
    if (callback == null) {
      throw new MongoInvalidArgumentError('Callback must be provided');
    }

    if (ns.db == null || typeof ns === 'string') {
      throw new MongoInvalidArgumentError('Namespace must not be a string');
    }

    if (this.s.state === STATE_CLOSING || this.s.state === STATE_CLOSED) {
      callback(new MongoServerClosedError());
      return;
    }

    // Clone the options
    const finalOptions = Object.assign({}, options, { wireProtocolCommand: false });

    // There are cases where we need to flag the read preference not to get sent in
    // the command, such as pre-5.0 servers attempting to perform an aggregate write
    // with a non-primary read preference. In this case the effective read preference
    // (primary) is not the same as the provided one and must be removed completely.
    if (finalOptions.omitReadPreference) {
      delete finalOptions.readPreference;
    }

    const session = finalOptions.session;
    const conn = session?.pinnedConnection;

    // NOTE: This is a hack! We can't retrieve the connections used for executing an operation
    // (and prevent them from being checked back in) at the point of operation execution.
    // This should be considered as part of the work for NODE-2882
    // NOTE:
    // When incrementing operation count, it's important that we increment it before we
    // attempt to check out a connection from the pool. This ensures that operations that
    // are waiting for a connection are included in the operation count. Load balanced
    // mode will only ever have a single server, so the operation count doesn't matter.
    // Incrementing the operation count above the logic to handle load balanced mode would
    // require special logic to decrement it again, or would double increment (the load
    // balanced code makes a recursive call). Instead, we increment the count after this
    // check.
    if (this.loadBalanced && session && conn == null && isPinnableCommand(cmd, session)) {
      this.pool.checkOut((err, checkedOut) => {
        if (err || checkedOut == null) {
          if (callback) return callback(err);
          return;
        }

        session.pin(checkedOut);
        this.command(ns, cmd, finalOptions, callback);
      });
      return;
    }

    this.incrementOperationCount();

    this.pool.withConnection(
      conn,
      (err, conn, cb) => {
        if (err || !conn) {
          this.decrementOperationCount();
          if (!err) {
            return cb(new MongoRuntimeError('Failed to create connection without error'));
          }
          if (!(err instanceof PoolClearedError)) {
            this.handleError(err);
          }
          return cb(err);
        }

        conn.command(
          ns,
          cmd,
          finalOptions,
          makeOperationHandler(this, conn, cmd, finalOptions, (error, response) => {
            this.decrementOperationCount();
            cb(error, response);
          })
        );
      },
      callback
    );
  }

  /**
   * Handle SDAM error
   * @internal
   */
  handleError(error: AnyError, connection?: Connection) {
    if (!(error instanceof MongoError)) {
      return;
    }

    const isStaleError =
      error.connectionGeneration && error.connectionGeneration < this.pool.generation;
    if (isStaleError) {
      return;
    }

    const isNetworkNonTimeoutError =
      error instanceof MongoNetworkError && !(error instanceof MongoNetworkTimeoutError);
    const isNetworkTimeoutBeforeHandshakeError = isNetworkErrorBeforeHandshake(error);
    const isAuthHandshakeError = error.hasErrorLabel(MongoErrorLabel.HandshakeError);
    if (isNetworkNonTimeoutError || isNetworkTimeoutBeforeHandshakeError || isAuthHandshakeError) {
      // In load balanced mode we never mark the server as unknown and always
      // clear for the specific service id.
      if (!this.loadBalanced) {
        error.addErrorLabel(MongoErrorLabel.ResetPool);
        markServerUnknown(this, error);
      } else if (connection) {
        this.pool.clear({ serviceId: connection.serviceId });
      }
    } else {
      if (isSDAMUnrecoverableError(error)) {
        if (shouldHandleStateChangeError(this, error)) {
          const shouldClearPool = maxWireVersion(this) <= 7 || isNodeShuttingDownError(error);
          if (this.loadBalanced && connection && shouldClearPool) {
            this.pool.clear({ serviceId: connection.serviceId });
          }

          if (!this.loadBalanced) {
            if (shouldClearPool) {
              error.addErrorLabel(MongoErrorLabel.ResetPool);
            }
            markServerUnknown(this, error);
            process.nextTick(() => this.requestCheck());
          }
        }
      }
    }
  }

  /**
   * Decrement the operation count, returning the new count.
   */
  private decrementOperationCount(): number {
    return (this.s.operationCount -= 1);
  }

  /**
   * Increment the operation count, returning the new count.
   */
  private incrementOperationCount(): number {
    return (this.s.operationCount += 1);
  }
}

function calculateRoundTripTime(oldRtt: number, duration: number): number {
  if (oldRtt === -1) {
    return duration;
  }

  const alpha = 0.2;
  return alpha * duration + (1 - alpha) * oldRtt;
}

function markServerUnknown(server: Server, error?: MongoServerError) {
  // Load balancer servers can never be marked unknown.
  if (server.loadBalanced) {
    return;
  }

  if (error instanceof MongoNetworkError && !(error instanceof MongoNetworkTimeoutError)) {
    server[kMonitor]?.reset();
  }

  server.emit(
    Server.DESCRIPTION_RECEIVED,
    new ServerDescription(server.description.hostAddress, undefined, { error })
  );
}

function isPinnableCommand(cmd: Document, session?: ClientSession): boolean {
  if (session) {
    return (
      session.inTransaction() ||
      'aggregate' in cmd ||
      'find' in cmd ||
      'getMore' in cmd ||
      'listCollections' in cmd ||
      'listIndexes' in cmd
    );
  }

  return false;
}

function connectionIsStale(pool: ConnectionPool, connection: Connection) {
  if (connection.serviceId) {
    return (
      connection.generation !== pool.serviceGenerations.get(connection.serviceId.toHexString())
    );
  }

  return connection.generation !== pool.generation;
}

function shouldHandleStateChangeError(server: Server, err: MongoError) {
  const etv = err.topologyVersion;
  const stv = server.description.topologyVersion;
  return compareTopologyVersion(stv, etv) < 0;
}

function inActiveTransaction(session: ClientSession | undefined, cmd: Document) {
  return session && session.inTransaction() && !isTransactionCommand(cmd);
}

/** this checks the retryWrites option passed down from the client options, it
 * does not check if the server supports retryable writes */
function isRetryableWritesEnabled(topology: Topology) {
  return topology.s.options.retryWrites !== false;
}

function makeOperationHandler(
  server: Server,
  connection: Connection,
  cmd: Document,
  options: CommandOptions | GetMoreOptions | undefined,
  callback: Callback
): Callback {
  const session = options?.session;
  return function handleOperationResult(error, result) {
    // We should not swallow an error if it is present.
    if (error == null && result != null) {
      return callback(undefined, result);
    }

    if (options != null && 'noResponse' in options && options.noResponse === true) {
      return callback(undefined, null);
    }

    if (!error) {
      return callback(new MongoUnexpectedServerResponseError('Empty response with no error'));
    }

    if (!(error instanceof MongoError)) {
      // Node.js or some other error we have no special handling for
      return callback(error);
    }

    if (connectionIsStale(server.pool, connection)) {
      return callback(error);
    }

    if (error instanceof MongoNetworkError) {
      if (session && !session.hasEnded && session.serverSession) {
        session.serverSession.isDirty = true;
      }

      // inActiveTransaction check handles commit and abort.
      if (
        inActiveTransaction(session, cmd) &&
        !error.hasErrorLabel(MongoErrorLabel.TransientTransactionError)
      ) {
        error.addErrorLabel(MongoErrorLabel.TransientTransactionError);
      }

      if (
        (isRetryableWritesEnabled(server.topology) || isTransactionCommand(cmd)) &&
        supportsRetryableWrites(server) &&
        !inActiveTransaction(session, cmd)
      ) {
        error.addErrorLabel(MongoErrorLabel.RetryableWriteError);
      }
    } else {
      if (
        (isRetryableWritesEnabled(server.topology) || isTransactionCommand(cmd)) &&
        needsRetryableWriteLabel(error, maxWireVersion(server)) &&
        !inActiveTransaction(session, cmd)
      ) {
        error.addErrorLabel(MongoErrorLabel.RetryableWriteError);
      }
    }

    if (
      session &&
      session.isPinned &&
      error.hasErrorLabel(MongoErrorLabel.TransientTransactionError)
    ) {
      session.unpin({ force: true });
    }

    server.handleError(error, connection);

    return callback(error);
  };
}
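calculateRoundTripTime above smooths heartbeat durations with an exponentially weighted moving average (alpha = 0.2), seeding from the first sample; oldRtt === -1 means "no previous measurement". A standalone copy of the same formula with a worked sequence showing how a spike is damped:

function ewmaRtt(oldRtt: number, duration: number): number {
  if (oldRtt === -1) return duration; // first sample seeds the estimate
  const alpha = 0.2;
  return alpha * duration + (1 - alpha) * oldRtt;
}

let rtt = -1;
for (const sample of [10, 50, 10]) {
  rtt = ewmaRtt(rtt, sample);
}
// 10 -> 18 -> 16.4: a single 50ms spike moves the estimate by only 8ms
console.log(rtt.toFixed(1)); // "16.4"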
node_modules/mongodb/src/sdam/server_description.ts (normal file, 262 lines, generated, vendored)
@@ -0,0 +1,262 @@
import { Document, Long, ObjectId } from '../bson';
import { MongoError, MongoRuntimeError, MongoServerError } from '../error';
import { arrayStrictEqual, compareObjectId, errorStrictEqual, HostAddress, now } from '../utils';
import type { ClusterTime } from './common';
import { ServerType } from './common';

const WRITABLE_SERVER_TYPES = new Set<ServerType>([
  ServerType.RSPrimary,
  ServerType.Standalone,
  ServerType.Mongos,
  ServerType.LoadBalancer
]);

const DATA_BEARING_SERVER_TYPES = new Set<ServerType>([
  ServerType.RSPrimary,
  ServerType.RSSecondary,
  ServerType.Mongos,
  ServerType.Standalone,
  ServerType.LoadBalancer
]);

/** @public */
export interface TopologyVersion {
  processId: ObjectId;
  counter: Long;
}

/** @public */
export type TagSet = { [key: string]: string };

/** @internal */
export interface ServerDescriptionOptions {
  /** An Error used for better debugging and error reporting */
  error?: MongoServerError;

  /** The round trip time to ping this server (in ms) */
  roundTripTime?: number;

  /** If the client is in load balancing mode. */
  loadBalanced?: boolean;
}

/**
 * The client's view of a single server, based on the most recent hello outcome.
 *
 * Internal type, not meant to be directly instantiated
 * @public
 */
export class ServerDescription {
  address: string;
  type: ServerType;
  hosts: string[];
  passives: string[];
  arbiters: string[];
  tags: TagSet;
  error: MongoError | null;
  topologyVersion: TopologyVersion | null;
  minWireVersion: number;
  maxWireVersion: number;
  roundTripTime: number;
  lastUpdateTime: number;
  lastWriteDate: number;
  me: string | null;
  primary: string | null;
  setName: string | null;
  setVersion: number | null;
  electionId: ObjectId | null;
  logicalSessionTimeoutMinutes: number | null;

  // NOTE: does this belong here? It seems we should gossip the cluster time at the CMAP level
  $clusterTime?: ClusterTime;

  /**
   * Create a ServerDescription
   * @internal
   *
   * @param address - The address of the server
   * @param hello - An optional hello response for this server
   */
  constructor(
    address: HostAddress | string,
    hello?: Document,
    options: ServerDescriptionOptions = {}
  ) {
    if (address == null || address === '') {
      throw new MongoRuntimeError('ServerDescription must be provided with a non-empty address');
    }

    this.address =
      typeof address === 'string'
        ? HostAddress.fromString(address).toString() // Use HostAddress to normalize
        : address.toString();
    this.type = parseServerType(hello, options);
    this.hosts = hello?.hosts?.map((host: string) => host.toLowerCase()) ?? [];
    this.passives = hello?.passives?.map((host: string) => host.toLowerCase()) ?? [];
    this.arbiters = hello?.arbiters?.map((host: string) => host.toLowerCase()) ?? [];
    this.tags = hello?.tags ?? {};
    this.minWireVersion = hello?.minWireVersion ?? 0;
    this.maxWireVersion = hello?.maxWireVersion ?? 0;
    this.roundTripTime = options?.roundTripTime ?? -1;
    this.lastUpdateTime = now();
    this.lastWriteDate = hello?.lastWrite?.lastWriteDate ?? 0;
    this.error = options.error ?? null;
    // TODO(NODE-2674): Preserve int64 sent from MongoDB
    this.topologyVersion = this.error?.topologyVersion ?? hello?.topologyVersion ?? null;
    this.setName = hello?.setName ?? null;
    this.setVersion = hello?.setVersion ?? null;
    this.electionId = hello?.electionId ?? null;
    this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
    this.primary = hello?.primary ?? null;
    this.me = hello?.me?.toLowerCase() ?? null;
    this.$clusterTime = hello?.$clusterTime ?? null;
  }

  get hostAddress(): HostAddress {
    return HostAddress.fromString(this.address);
  }

  get allHosts(): string[] {
    return this.hosts.concat(this.arbiters).concat(this.passives);
  }

  /** Is this server available for reads */
  get isReadable(): boolean {
    return this.type === ServerType.RSSecondary || this.isWritable;
  }

  /** Is this server data bearing */
  get isDataBearing(): boolean {
    return DATA_BEARING_SERVER_TYPES.has(this.type);
  }

  /** Is this server available for writes */
  get isWritable(): boolean {
    return WRITABLE_SERVER_TYPES.has(this.type);
  }

  get host(): string {
    const chopLength = `:${this.port}`.length;
    return this.address.slice(0, -chopLength);
  }

  get port(): number {
    const port = this.address.split(':').pop();
    return port ? Number.parseInt(port, 10) : 27017;
  }

  /**
   * Determines if another `ServerDescription` is equal to this one per the rules defined
   * in the {@link https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#serverdescription|SDAM spec}
   */
  equals(other?: ServerDescription | null): boolean {
    // Despite using the comparator that would treat a nullish topologyVersion as greater
    // than, for equality we always perform a direct equality comparison
    const topologyVersionsEqual =
      this.topologyVersion === other?.topologyVersion ||
      compareTopologyVersion(this.topologyVersion, other?.topologyVersion) === 0;

    const electionIdsEqual =
      this.electionId != null && other?.electionId != null
        ? compareObjectId(this.electionId, other.electionId) === 0
        : this.electionId === other?.electionId;

    return (
      other != null &&
      errorStrictEqual(this.error, other.error) &&
      this.type === other.type &&
      this.minWireVersion === other.minWireVersion &&
      arrayStrictEqual(this.hosts, other.hosts) &&
      tagsStrictEqual(this.tags, other.tags) &&
      this.setName === other.setName &&
      this.setVersion === other.setVersion &&
      electionIdsEqual &&
      this.primary === other.primary &&
      this.logicalSessionTimeoutMinutes === other.logicalSessionTimeoutMinutes &&
      topologyVersionsEqual
    );
  }
}

// Parses a `hello` message and determines the server type
export function parseServerType(hello?: Document, options?: ServerDescriptionOptions): ServerType {
  if (options?.loadBalanced) {
    return ServerType.LoadBalancer;
  }

  if (!hello || !hello.ok) {
    return ServerType.Unknown;
  }

  if (hello.isreplicaset) {
    return ServerType.RSGhost;
  }

  if (hello.msg && hello.msg === 'isdbgrid') {
    return ServerType.Mongos;
  }

  if (hello.setName) {
    if (hello.hidden) {
      return ServerType.RSOther;
    } else if (hello.isWritablePrimary) {
      return ServerType.RSPrimary;
    } else if (hello.secondary) {
      return ServerType.RSSecondary;
    } else if (hello.arbiterOnly) {
      return ServerType.RSArbiter;
    } else {
      return ServerType.RSOther;
    }
  }

  return ServerType.Standalone;
}

function tagsStrictEqual(tags: TagSet, tags2: TagSet): boolean {
  const tagsKeys = Object.keys(tags);
  const tags2Keys = Object.keys(tags2);

  return (
    tagsKeys.length === tags2Keys.length &&
    tagsKeys.every((key: string) => tags2[key] === tags[key])
  );
}

/**
 * Compares two topology versions.
 *
 * 1. If the response topologyVersion is unset or the ServerDescription's
 *    topologyVersion is null, the client MUST assume the response is more recent.
 * 1. If the response's topologyVersion.processId is not equal to the
 *    ServerDescription's, the client MUST assume the response is more recent.
 * 1. If the response's topologyVersion.processId is equal to the
 *    ServerDescription's, the client MUST use the counter field to determine
 *    which topologyVersion is more recent.
 *
 * ```ts
 * currentTv < newTv === -1
 * currentTv === newTv === 0
 * currentTv > newTv === 1
 * ```
 */
export function compareTopologyVersion(
  currentTv?: TopologyVersion | null,
  newTv?: TopologyVersion | null
): 0 | -1 | 1 {
  if (currentTv == null || newTv == null) {
    return -1;
  }

  if (!currentTv.processId.equals(newTv.processId)) {
    return -1;
  }

  // TODO(NODE-2674): Preserve int64 sent from MongoDB
  const currentCounter = Long.isLong(currentTv.counter)
    ? currentTv.counter
    : Long.fromNumber(currentTv.counter);
  const newCounter = Long.isLong(newTv.counter) ? newTv.counter : Long.fromNumber(newTv.counter);

  return currentCounter.compare(newCounter);
}
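parseServerType above maps a hello response to a ServerType by checking load balanced mode first, then ok, isreplicaset, the isdbgrid msg, and finally the setName fields. A sketch of sample inputs and the values it returns, assuming the vendored module compiles and the relative import resolves; the hello documents are hypothetical:

import { parseServerType } from './server_description';

console.log(parseServerType({ ok: 1, msg: 'isdbgrid' }));                         // 'Mongos'
console.log(parseServerType({ ok: 1, isreplicaset: true }));                      // 'RSGhost'
console.log(parseServerType({ ok: 1, setName: 'rs0', isWritablePrimary: true })); // 'RSPrimary'
console.log(parseServerType({ ok: 1, setName: 'rs0', secondary: true }));         // 'RSSecondary'
console.log(parseServerType({ ok: 0 }));                                          // 'Unknown'
console.log(parseServerType(undefined, { loadBalanced: true }));                  // 'LoadBalancer'
console.log(parseServerType({ ok: 1 }));                                          // 'Standalone'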
node_modules/mongodb/src/sdam/server_selection.ts (normal file, 324 lines, generated, vendored)
@@ -0,0 +1,324 @@
import { MongoCompatibilityError, MongoInvalidArgumentError } from '../error';
import { ReadPreference } from '../read_preference';
import { ServerType, TopologyType } from './common';
import type { ServerDescription, TagSet } from './server_description';
import type { TopologyDescription } from './topology_description';

// max staleness constants
const IDLE_WRITE_PERIOD = 10000;
const SMALLEST_MAX_STALENESS_SECONDS = 90;

// Minimum wire version required to attempt writes on secondaries.
export const MIN_SECONDARY_WRITE_WIRE_VERSION = 13;

/** @internal */
export type ServerSelector = (
  topologyDescription: TopologyDescription,
  servers: ServerDescription[]
) => ServerDescription[];

/**
 * Returns a server selector that selects for writable servers
 */
export function writableServerSelector(): ServerSelector {
  return (
    topologyDescription: TopologyDescription,
    servers: ServerDescription[]
  ): ServerDescription[] =>
    latencyWindowReducer(
      topologyDescription,
      servers.filter((s: ServerDescription) => s.isWritable)
    );
}

/**
 * Returns a server selector that selects the same server, but only if it is
 * in a state where commands can be sent to it.
 */
export function sameServerSelector(description?: ServerDescription): ServerSelector {
  return (
    topologyDescription: TopologyDescription,
    servers: ServerDescription[]
  ): ServerDescription[] => {
    if (!description) return [];
    // Filter the servers to match the provided description only if
    // the type is not unknown.
    return servers.filter(sd => {
      return sd.address === description.address && sd.type !== ServerType.Unknown;
    });
  };
}

/**
 * Returns a server selector that uses a read preference to select a
 * server potentially for a write on a secondary.
 */
export function secondaryWritableServerSelector(
  wireVersion?: number,
  readPreference?: ReadPreference
): ServerSelector {
  // If server version < 5.0, the read preference is always primary.
  // If server version >= 5.0:
  // - If a read preference is supplied, use it.
  // - If no read preference is supplied, use primary.
  if (
    !readPreference ||
    !wireVersion ||
    (wireVersion && wireVersion < MIN_SECONDARY_WRITE_WIRE_VERSION)
  ) {
    return readPreferenceServerSelector(ReadPreference.primary);
  }
  return readPreferenceServerSelector(readPreference);
}
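
// Illustrative usage (not part of the vendored source): with a pre-5.0 wire
// version the supplied read preference is ignored in favor of the primary.
//
//   secondaryWritableServerSelector(12, ReadPreference.secondary); // selects the primary
//   secondaryWritableServerSelector(13, ReadPreference.secondary); // honors the secondary preference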

/**
 * Reduces the passed in array of servers by the rules of the "Max Staleness" specification
 * found here: https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.rst
 *
 * @param readPreference - The read preference providing max staleness guidance
 * @param topologyDescription - The topology description
 * @param servers - The list of server descriptions to be reduced
 * @returns The list of servers that satisfy the requirements of max staleness
 */
function maxStalenessReducer(
  readPreference: ReadPreference,
  topologyDescription: TopologyDescription,
  servers: ServerDescription[]
): ServerDescription[] {
  if (readPreference.maxStalenessSeconds == null || readPreference.maxStalenessSeconds < 0) {
    return servers;
  }

  const maxStaleness = readPreference.maxStalenessSeconds;
  const maxStalenessVariance =
    (topologyDescription.heartbeatFrequencyMS + IDLE_WRITE_PERIOD) / 1000;
  if (maxStaleness < maxStalenessVariance) {
    throw new MongoInvalidArgumentError(
      `Option "maxStalenessSeconds" must be at least ${maxStalenessVariance} seconds`
    );
  }

  if (maxStaleness < SMALLEST_MAX_STALENESS_SECONDS) {
    throw new MongoInvalidArgumentError(
      `Option "maxStalenessSeconds" must be at least ${SMALLEST_MAX_STALENESS_SECONDS} seconds`
    );
  }

  if (topologyDescription.type === TopologyType.ReplicaSetWithPrimary) {
    const primary: ServerDescription = Array.from(topologyDescription.servers.values()).filter(
      primaryFilter
    )[0];

    return servers.reduce((result: ServerDescription[], server: ServerDescription) => {
      const stalenessMS =
        server.lastUpdateTime -
        server.lastWriteDate -
        (primary.lastUpdateTime - primary.lastWriteDate) +
        topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      const maxStalenessSeconds = readPreference.maxStalenessSeconds ?? 0;
      if (staleness <= maxStalenessSeconds) {
        result.push(server);
      }

      return result;
    }, []);
  }

  if (topologyDescription.type === TopologyType.ReplicaSetNoPrimary) {
    if (servers.length === 0) {
      return servers;
    }

    const sMax = servers.reduce((max: ServerDescription, s: ServerDescription) =>
      s.lastWriteDate > max.lastWriteDate ? s : max
    );

    return servers.reduce((result: ServerDescription[], server: ServerDescription) => {
      const stalenessMS =
        sMax.lastWriteDate - server.lastWriteDate + topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      const maxStalenessSeconds = readPreference.maxStalenessSeconds ?? 0;
      if (staleness <= maxStalenessSeconds) {
        result.push(server);
      }

      return result;
    }, []);
  }

  return servers;
}
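
// Worked example (illustrative, not part of the vendored source): with a
// primary present, staleness is (server.lastUpdateTime - server.lastWriteDate)
// minus the primary's equivalent lag, plus heartbeatFrequencyMS, converted to
// seconds. With heartbeatFrequencyMS = 10000, a secondary whose writes lag the
// primary by 80s has a staleness of 90s and is kept only when the read
// preference sets maxStalenessSeconds >= 90.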

/**
 * Determines whether a server's tags match a given set of tags
 *
 * @param tagSet - The requested tag set to match
 * @param serverTags - The server's tags
 */
function tagSetMatch(tagSet: TagSet, serverTags: TagSet) {
  const keys = Object.keys(tagSet);
  const serverTagKeys = Object.keys(serverTags);
  for (let i = 0; i < keys.length; ++i) {
    const key = keys[i];
    if (serverTagKeys.indexOf(key) === -1 || serverTags[key] !== tagSet[key]) {
      return false;
    }
  }

  return true;
}

/**
 * Reduces a set of server descriptions based on tags requested by the read preference
 *
 * @param readPreference - The read preference providing the requested tags
 * @param servers - The list of server descriptions to reduce
 * @returns The list of servers matching the requested tags
 */
function tagSetReducer(
  readPreference: ReadPreference,
  servers: ServerDescription[]
): ServerDescription[] {
  if (
    readPreference.tags == null ||
    (Array.isArray(readPreference.tags) && readPreference.tags.length === 0)
  ) {
    return servers;
  }

  for (let i = 0; i < readPreference.tags.length; ++i) {
    const tagSet = readPreference.tags[i];
    const serversMatchingTagset = servers.reduce(
      (matched: ServerDescription[], server: ServerDescription) => {
        if (tagSetMatch(tagSet, server.tags)) matched.push(server);
        return matched;
      },
      []
    );

    if (serversMatchingTagset.length) {
      return serversMatchingTagset;
    }
  }

  return [];
}

/**
 * Reduces a list of servers to ensure they fall within an acceptable latency window. This is
 * further specified in the "Server Selection" specification, found here:
 * https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst
 *
 * @param topologyDescription - The topology description
 * @param servers - The list of servers to reduce
 * @returns The servers which fall within an acceptable latency window
 */
function latencyWindowReducer(
  topologyDescription: TopologyDescription,
  servers: ServerDescription[]
): ServerDescription[] {
  const low = servers.reduce(
    (min: number, server: ServerDescription) =>
      min === -1 ? server.roundTripTime : Math.min(server.roundTripTime, min),
    -1
  );

  const high = low + topologyDescription.localThresholdMS;
  return servers.reduce((result: ServerDescription[], server: ServerDescription) => {
    if (server.roundTripTime <= high && server.roundTripTime >= low) result.push(server);
    return result;
  }, []);
}
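
// Illustrative example (not part of the vendored source): with round trip
// times of [12, 15, 40] ms and the default localThresholdMS of 15, the window
// is [12, 27] ms, so the 40 ms server is filtered out.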

// filters
function primaryFilter(server: ServerDescription): boolean {
  return server.type === ServerType.RSPrimary;
}

function secondaryFilter(server: ServerDescription): boolean {
  return server.type === ServerType.RSSecondary;
}

function nearestFilter(server: ServerDescription): boolean {
  return server.type === ServerType.RSSecondary || server.type === ServerType.RSPrimary;
}

function knownFilter(server: ServerDescription): boolean {
  return server.type !== ServerType.Unknown;
}

function loadBalancerFilter(server: ServerDescription): boolean {
  return server.type === ServerType.LoadBalancer;
}

/**
 * Returns a function which selects servers based on a provided read preference
 *
 * @param readPreference - The read preference to select with
 */
export function readPreferenceServerSelector(readPreference: ReadPreference): ServerSelector {
  if (!readPreference.isValid()) {
    throw new MongoInvalidArgumentError('Invalid read preference specified');
  }

  return (
    topologyDescription: TopologyDescription,
    servers: ServerDescription[]
  ): ServerDescription[] => {
    const commonWireVersion = topologyDescription.commonWireVersion;
    if (
      commonWireVersion &&
      readPreference.minWireVersion &&
      readPreference.minWireVersion > commonWireVersion
    ) {
      throw new MongoCompatibilityError(
        `Minimum wire version '${readPreference.minWireVersion}' required, but found '${commonWireVersion}'`
      );
    }

    if (topologyDescription.type === TopologyType.LoadBalanced) {
      return servers.filter(loadBalancerFilter);
    }

    if (topologyDescription.type === TopologyType.Unknown) {
      return [];
    }

    if (
      topologyDescription.type === TopologyType.Single ||
      topologyDescription.type === TopologyType.Sharded
    ) {
      return latencyWindowReducer(topologyDescription, servers.filter(knownFilter));
    }

    const mode = readPreference.mode;
    if (mode === ReadPreference.PRIMARY) {
      return servers.filter(primaryFilter);
    }

    if (mode === ReadPreference.PRIMARY_PREFERRED) {
      const result = servers.filter(primaryFilter);
      if (result.length) {
        return result;
      }
    }

    const filter = mode === ReadPreference.NEAREST ? nearestFilter : secondaryFilter;
    const selectedServers = latencyWindowReducer(
      topologyDescription,
      tagSetReducer(
        readPreference,
        maxStalenessReducer(readPreference, topologyDescription, servers.filter(filter))
      )
    );

    if (mode === ReadPreference.SECONDARY_PREFERRED && selectedServers.length === 0) {
      return servers.filter(primaryFilter);
    }

    return selectedServers;
  };
}
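
// Illustrative usage (not part of the vendored source): building a selector
// and applying it to a topology description.
//
//   const selector = readPreferenceServerSelector(ReadPreference.secondaryPreferred);
//   const candidates = selector(
//     topologyDescription,
//     Array.from(topologyDescription.servers.values())
//   );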
142
node_modules/mongodb/src/sdam/srv_polling.ts
generated
vendored
Normal file
@@ -0,0 +1,142 @@
import * as dns from 'dns';
import { clearTimeout, setTimeout } from 'timers';

import { MongoRuntimeError } from '../error';
import { TypedEventEmitter } from '../mongo_types';
import { HostAddress, matchesParentDomain } from '../utils';

/**
 * @internal
 * @category Event
 */
export class SrvPollingEvent {
  srvRecords: dns.SrvRecord[];
  constructor(srvRecords: dns.SrvRecord[]) {
    this.srvRecords = srvRecords;
  }

  hostnames(): Set<string> {
    return new Set(this.srvRecords.map(r => HostAddress.fromSrvRecord(r).toString()));
  }
}

/** @internal */
export interface SrvPollerOptions {
  srvServiceName: string;
  srvMaxHosts: number;
  srvHost: string;
  heartbeatFrequencyMS: number;
}

/** @internal */
export type SrvPollerEvents = {
  srvRecordDiscovery(event: SrvPollingEvent): void;
};

/** @internal */
export class SrvPoller extends TypedEventEmitter<SrvPollerEvents> {
  srvHost: string;
  rescanSrvIntervalMS: number;
  heartbeatFrequencyMS: number;
  haMode: boolean;
  generation: number;
  srvMaxHosts: number;
  srvServiceName: string;
  _timeout?: NodeJS.Timeout;

  /** @event */
  static readonly SRV_RECORD_DISCOVERY = 'srvRecordDiscovery' as const;

  constructor(options: SrvPollerOptions) {
    super();

    if (!options || !options.srvHost) {
      throw new MongoRuntimeError('Options for SrvPoller must exist and include srvHost');
    }

    this.srvHost = options.srvHost;
    this.srvMaxHosts = options.srvMaxHosts ?? 0;
    this.srvServiceName = options.srvServiceName ?? 'mongodb';
    this.rescanSrvIntervalMS = 60000;
    this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 10000;

    this.haMode = false;
    this.generation = 0;

    this._timeout = undefined;
  }

  get srvAddress(): string {
    return `_${this.srvServiceName}._tcp.${this.srvHost}`;
  }

  get intervalMS(): number {
    return this.haMode ? this.heartbeatFrequencyMS : this.rescanSrvIntervalMS;
  }

  start(): void {
    if (!this._timeout) {
      this.schedule();
    }
  }

  stop(): void {
    if (this._timeout) {
      clearTimeout(this._timeout);
      this.generation += 1;
      this._timeout = undefined;
    }
  }

  // TODO(NODE-4994): implement new logging logic for SrvPoller failures
  schedule(): void {
    if (this._timeout) {
      clearTimeout(this._timeout);
    }

    this._timeout = setTimeout(() => {
      this._poll().catch(() => null);
    }, this.intervalMS);
  }

  success(srvRecords: dns.SrvRecord[]): void {
    this.haMode = false;
    this.schedule();
    this.emit(SrvPoller.SRV_RECORD_DISCOVERY, new SrvPollingEvent(srvRecords));
  }

  failure(): void {
    this.haMode = true;
    this.schedule();
  }

  async _poll(): Promise<void> {
    const generation = this.generation;
    let srvRecords;

    try {
      srvRecords = await dns.promises.resolveSrv(this.srvAddress);
    } catch (dnsError) {
      this.failure();
      return;
    }

    if (generation !== this.generation) {
      return;
    }

    const finalAddresses: dns.SrvRecord[] = [];
    for (const record of srvRecords) {
      if (matchesParentDomain(record.name, this.srvHost)) {
        finalAddresses.push(record);
      }
    }

    if (!finalAddresses.length) {
      this.failure();
      return;
    }

    this.success(finalAddresses);
  }
}
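
// Illustrative usage (not part of the vendored source): polling the SRV
// record for a `mongodb+srv://cluster0.example.com` style host.
//
//   const poller = new SrvPoller({
//     srvHost: 'cluster0.example.com',
//     srvMaxHosts: 0,
//     srvServiceName: 'mongodb',
//     heartbeatFrequencyMS: 10000
//   });
//   poller.on(SrvPoller.SRV_RECORD_DISCOVERY, event => console.log(event.hostnames()));
//   poller.start(); // resolves _mongodb._tcp.cluster0.example.com every rescanSrvIntervalMS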
994
node_modules/mongodb/src/sdam/topology.ts
generated
vendored
Normal file
@@ -0,0 +1,994 @@
import { clearTimeout, setTimeout } from 'timers';
import { promisify } from 'util';

import type { BSONSerializeOptions, Document } from '../bson';
import type { MongoCredentials } from '../cmap/auth/mongo_credentials';
import type { ConnectionEvents, DestroyOptions } from '../cmap/connection';
import type { CloseOptions, ConnectionPoolEvents } from '../cmap/connection_pool';
import type { ClientMetadata } from '../cmap/handshake/client_metadata';
import { DEFAULT_OPTIONS, FEATURE_FLAGS } from '../connection_string';
import {
  CLOSE,
  CONNECT,
  ERROR,
  LOCAL_SERVER_EVENTS,
  OPEN,
  SERVER_CLOSED,
  SERVER_DESCRIPTION_CHANGED,
  SERVER_OPENING,
  SERVER_RELAY_EVENTS,
  TIMEOUT,
  TOPOLOGY_CLOSED,
  TOPOLOGY_DESCRIPTION_CHANGED,
  TOPOLOGY_OPENING
} from '../constants';
import {
  MongoCompatibilityError,
  MongoDriverError,
  MongoError,
  MongoErrorLabel,
  MongoRuntimeError,
  MongoServerSelectionError,
  MongoTopologyClosedError
} from '../error';
import type { MongoClient, ServerApi } from '../mongo_client';
import { TypedEventEmitter } from '../mongo_types';
import { ReadPreference, ReadPreferenceLike } from '../read_preference';
import type { ClientSession } from '../sessions';
import type { Transaction } from '../transactions';
import {
  Callback,
  EventEmitterWithState,
  HostAddress,
  List,
  makeStateMachine,
  ns,
  shuffle
} from '../utils';
import {
  _advanceClusterTime,
  ClusterTime,
  drainTimerQueue,
  ServerType,
  STATE_CLOSED,
  STATE_CLOSING,
  STATE_CONNECTED,
  STATE_CONNECTING,
  TimerQueue,
  TopologyType
} from './common';
import {
  ServerClosedEvent,
  ServerDescriptionChangedEvent,
  ServerOpeningEvent,
  TopologyClosedEvent,
  TopologyDescriptionChangedEvent,
  TopologyOpeningEvent
} from './events';
import { Server, ServerEvents, ServerOptions } from './server';
import { compareTopologyVersion, ServerDescription } from './server_description';
import { readPreferenceServerSelector, ServerSelector } from './server_selection';
import { SrvPoller, SrvPollingEvent } from './srv_polling';
import { TopologyDescription } from './topology_description';

// Global state
let globalTopologyCounter = 0;

const stateTransition = makeStateMachine({
  [STATE_CLOSED]: [STATE_CLOSED, STATE_CONNECTING],
  [STATE_CONNECTING]: [STATE_CONNECTING, STATE_CLOSING, STATE_CONNECTED, STATE_CLOSED],
  [STATE_CONNECTED]: [STATE_CONNECTED, STATE_CLOSING, STATE_CLOSED],
  [STATE_CLOSING]: [STATE_CLOSING, STATE_CLOSED]
});

/** @internal */
const kCancelled = Symbol('cancelled');
/** @internal */
const kWaitQueue = Symbol('waitQueue');

/** @internal */
export type ServerSelectionCallback = Callback<Server>;

/** @internal */
export interface ServerSelectionRequest {
  serverSelector: ServerSelector;
  transaction?: Transaction;
  callback: ServerSelectionCallback;
  timer?: NodeJS.Timeout;
  [kCancelled]?: boolean;
}

/** @internal */
export interface TopologyPrivate {
  /** the id of this topology */
  id: number;
  /** passed in options */
  options: TopologyOptions;
  /** initial seedlist of servers to connect to */
  seedlist: HostAddress[];
  /** initial state */
  state: string;
  /** the topology description */
  description: TopologyDescription;
  serverSelectionTimeoutMS: number;
  heartbeatFrequencyMS: number;
  minHeartbeatFrequencyMS: number;
  /** A map of server instances to normalized addresses */
  servers: Map<string, Server>;
  credentials?: MongoCredentials;
  clusterTime?: ClusterTime;
  /** timers created for the initial connect to a server */
  connectionTimers: TimerQueue;

  /** related to srv polling */
  srvPoller?: SrvPoller;
  detectShardedTopology: (event: TopologyDescriptionChangedEvent) => void;
  detectSrvRecords: (event: SrvPollingEvent) => void;
}

/** @internal */
export interface TopologyOptions extends BSONSerializeOptions, ServerOptions {
  srvMaxHosts: number;
  srvServiceName: string;
  hosts: HostAddress[];
  retryWrites: boolean;
  retryReads: boolean;
  /** How long to block for server selection before throwing an error */
  serverSelectionTimeoutMS: number;
  /** The name of the replica set to connect to */
  replicaSet?: string;
  srvHost?: string;
  srvPoller?: SrvPoller;
  /** Indicates that a client should directly connect to a node without attempting to discover its topology type */
  directConnection: boolean;
  loadBalanced: boolean;
  metadata: ClientMetadata;
  /** MongoDB server API version */
  serverApi?: ServerApi;
  [featureFlag: symbol]: any;
}

/** @public */
export interface ConnectOptions {
  readPreference?: ReadPreference;
}

/** @public */
export interface SelectServerOptions {
  readPreference?: ReadPreferenceLike;
  /** How long to block for server selection before throwing an error */
  serverSelectionTimeoutMS?: number;
  session?: ClientSession;
}

/** @public */
export type TopologyEvents = {
  /** Top level MongoClient doesn't emit this so it is marked: @internal */
  connect(topology: Topology): void;
  serverOpening(event: ServerOpeningEvent): void;
  serverClosed(event: ServerClosedEvent): void;
  serverDescriptionChanged(event: ServerDescriptionChangedEvent): void;
  topologyClosed(event: TopologyClosedEvent): void;
  topologyOpening(event: TopologyOpeningEvent): void;
  topologyDescriptionChanged(event: TopologyDescriptionChangedEvent): void;
  error(error: Error): void;
  /** @internal */
  open(topology: Topology): void;
  close(): void;
  timeout(): void;
} & Omit<ServerEvents, 'connect'> &
  ConnectionPoolEvents &
  ConnectionEvents &
  EventEmitterWithState;
/**
 * A container of server instances representing a connection to a MongoDB topology.
 * @internal
 */
export class Topology extends TypedEventEmitter<TopologyEvents> {
  /** @internal */
  s: TopologyPrivate;
  /** @internal */
  [kWaitQueue]: List<ServerSelectionRequest>;
  /** @internal */
  hello?: Document;
  /** @internal */
  _type?: string;

  client!: MongoClient;

  /** @event */
  static readonly SERVER_OPENING = SERVER_OPENING;
  /** @event */
  static readonly SERVER_CLOSED = SERVER_CLOSED;
  /** @event */
  static readonly SERVER_DESCRIPTION_CHANGED = SERVER_DESCRIPTION_CHANGED;
  /** @event */
  static readonly TOPOLOGY_OPENING = TOPOLOGY_OPENING;
  /** @event */
  static readonly TOPOLOGY_CLOSED = TOPOLOGY_CLOSED;
  /** @event */
  static readonly TOPOLOGY_DESCRIPTION_CHANGED = TOPOLOGY_DESCRIPTION_CHANGED;
  /** @event */
  static readonly ERROR = ERROR;
  /** @event */
  static readonly OPEN = OPEN;
  /** @event */
  static readonly CONNECT = CONNECT;
  /** @event */
  static readonly CLOSE = CLOSE;
  /** @event */
  static readonly TIMEOUT = TIMEOUT;

  selectServerAsync: (
    selector: string | ReadPreference | ServerSelector,
    options: SelectServerOptions
  ) => Promise<Server>;

  /**
   * @param seeds - a list of HostAddress instances or strings to connect to
   */
  constructor(
    client: MongoClient,
    seeds: string | string[] | HostAddress | HostAddress[],
    options: TopologyOptions
  ) {
    super();

    this.client = client;
    this.selectServerAsync = promisify(
      (
        selector: string | ReadPreference | ServerSelector,
        options: SelectServerOptions,
        callback: (e: Error, r: Server) => void
      ) => this.selectServer(selector, options, callback as any)
    );

    // Options should only be undefined in tests, MongoClient will always have defined options
    options = options ?? {
      hosts: [HostAddress.fromString('localhost:27017')],
      ...Object.fromEntries(DEFAULT_OPTIONS.entries()),
      ...Object.fromEntries(FEATURE_FLAGS.entries())
    };

    if (typeof seeds === 'string') {
      seeds = [HostAddress.fromString(seeds)];
    } else if (!Array.isArray(seeds)) {
      seeds = [seeds];
    }

    const seedlist: HostAddress[] = [];
    for (const seed of seeds) {
      if (typeof seed === 'string') {
        seedlist.push(HostAddress.fromString(seed));
      } else if (seed instanceof HostAddress) {
        seedlist.push(seed);
      } else {
        // FIXME(NODE-3483): May need to be a MongoParseError
        throw new MongoRuntimeError(`Topology cannot be constructed from ${JSON.stringify(seed)}`);
      }
    }

    const topologyType = topologyTypeFromOptions(options);
    const topologyId = globalTopologyCounter++;

    const selectedHosts =
      options.srvMaxHosts == null ||
      options.srvMaxHosts === 0 ||
      options.srvMaxHosts >= seedlist.length
        ? seedlist
        : shuffle(seedlist, options.srvMaxHosts);

    const serverDescriptions = new Map();
    for (const hostAddress of selectedHosts) {
      serverDescriptions.set(hostAddress.toString(), new ServerDescription(hostAddress));
    }

    this[kWaitQueue] = new List();
    this.s = {
      // the id of this topology
      id: topologyId,
      // passed in options
      options,
      // initial seedlist of servers to connect to
      seedlist,
      // initial state
      state: STATE_CLOSED,
      // the topology description
      description: new TopologyDescription(
        topologyType,
        serverDescriptions,
        options.replicaSet,
        undefined,
        undefined,
        undefined,
        options
      ),
      serverSelectionTimeoutMS: options.serverSelectionTimeoutMS,
      heartbeatFrequencyMS: options.heartbeatFrequencyMS,
      minHeartbeatFrequencyMS: options.minHeartbeatFrequencyMS,
      // a map of server instances to normalized addresses
      servers: new Map(),
      credentials: options?.credentials,
      clusterTime: undefined,

      // timer management
      connectionTimers: new Set<NodeJS.Timeout>(),
      detectShardedTopology: ev => this.detectShardedTopology(ev),
      detectSrvRecords: ev => this.detectSrvRecords(ev)
    };

    if (options.srvHost && !options.loadBalanced) {
      this.s.srvPoller =
        options.srvPoller ??
        new SrvPoller({
          heartbeatFrequencyMS: this.s.heartbeatFrequencyMS,
          srvHost: options.srvHost,
          srvMaxHosts: options.srvMaxHosts,
          srvServiceName: options.srvServiceName
        });

      this.on(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);
    }
  }

  private detectShardedTopology(event: TopologyDescriptionChangedEvent) {
    const previousType = event.previousDescription.type;
    const newType = event.newDescription.type;

    const transitionToSharded =
      previousType !== TopologyType.Sharded && newType === TopologyType.Sharded;
    const srvListeners = this.s.srvPoller?.listeners(SrvPoller.SRV_RECORD_DISCOVERY);
    const listeningToSrvPolling = !!srvListeners?.includes(this.s.detectSrvRecords);

    if (transitionToSharded && !listeningToSrvPolling) {
      this.s.srvPoller?.on(SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
      this.s.srvPoller?.start();
    }
  }

  private detectSrvRecords(ev: SrvPollingEvent) {
    const previousTopologyDescription = this.s.description;
    this.s.description = this.s.description.updateFromSrvPollingEvent(
      ev,
      this.s.options.srvMaxHosts
    );
    if (this.s.description === previousTopologyDescription) {
      // Nothing changed, so return
      return;
    }

    updateServers(this);

    this.emit(
      Topology.TOPOLOGY_DESCRIPTION_CHANGED,
      new TopologyDescriptionChangedEvent(
        this.s.id,
        previousTopologyDescription,
        this.s.description
      )
    );
  }

  /**
   * @returns A `TopologyDescription` for this topology
   */
  get description(): TopologyDescription {
    return this.s.description;
  }

  get loadBalanced(): boolean {
    return this.s.options.loadBalanced;
  }

  get capabilities(): ServerCapabilities {
    return new ServerCapabilities(this.lastHello());
  }

  /** Initiate server connect */
  connect(callback: Callback): void;
  connect(options: ConnectOptions, callback: Callback): void;
  connect(options?: ConnectOptions | Callback, callback?: Callback): void {
    if (typeof options === 'function') (callback = options), (options = {});
    options = options ?? {};
    if (this.s.state === STATE_CONNECTED) {
      if (typeof callback === 'function') {
        callback();
      }

      return;
    }

    stateTransition(this, STATE_CONNECTING);

    // emit SDAM monitoring events
    this.emit(Topology.TOPOLOGY_OPENING, new TopologyOpeningEvent(this.s.id));

    // emit an event for the topology change
    this.emit(
      Topology.TOPOLOGY_DESCRIPTION_CHANGED,
      new TopologyDescriptionChangedEvent(
        this.s.id,
        new TopologyDescription(TopologyType.Unknown), // initial is always Unknown
        this.s.description
      )
    );

    // connect all known servers, then attempt server selection to connect
    const serverDescriptions = Array.from(this.s.description.servers.values());
    this.s.servers = new Map(
      serverDescriptions.map(serverDescription => [
        serverDescription.address,
        createAndConnectServer(this, serverDescription)
      ])
    );

    // In load balancer mode we need to fake a server description getting
    // emitted from the monitor, since the monitor doesn't exist.
    if (this.s.options.loadBalanced) {
      for (const description of serverDescriptions) {
        const newDescription = new ServerDescription(description.hostAddress, undefined, {
          loadBalanced: this.s.options.loadBalanced
        });
        this.serverUpdateHandler(newDescription);
      }
    }

    const exitWithError = (error: Error) =>
      callback ? callback(error) : this.emit(Topology.ERROR, error);

    const readPreference = options.readPreference ?? ReadPreference.primary;
    this.selectServer(readPreferenceServerSelector(readPreference), options, (err, server) => {
      if (err) {
        return this.close({ force: false }, () => exitWithError(err));
      }

      // TODO: NODE-2471
      const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true;
      if (!skipPingOnConnect && server && this.s.credentials) {
        server.command(ns('admin.$cmd'), { ping: 1 }, {}, err => {
          if (err) {
            return exitWithError(err);
          }

          stateTransition(this, STATE_CONNECTED);
          this.emit(Topology.OPEN, this);
          this.emit(Topology.CONNECT, this);

          callback?.(undefined, this);
        });

        return;
      }

      stateTransition(this, STATE_CONNECTED);
      this.emit(Topology.OPEN, this);
      this.emit(Topology.CONNECT, this);

      callback?.(undefined, this);
    });
  }

  /** Close this topology */
  close(options: CloseOptions): void;
  close(options: CloseOptions, callback: Callback): void;
  close(options?: CloseOptions, callback?: Callback): void {
    options = options ?? { force: false };

    if (this.s.state === STATE_CLOSED || this.s.state === STATE_CLOSING) {
      return callback?.();
    }

    const destroyedServers = Array.from(this.s.servers.values(), server => {
      return promisify(destroyServer)(server, this, { force: !!options?.force });
    });

    Promise.all(destroyedServers)
      .then(() => {
        this.s.servers.clear();

        stateTransition(this, STATE_CLOSING);

        drainWaitQueue(this[kWaitQueue], new MongoTopologyClosedError());
        drainTimerQueue(this.s.connectionTimers);

        if (this.s.srvPoller) {
          this.s.srvPoller.stop();
          this.s.srvPoller.removeListener(SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
        }

        this.removeListener(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);

        stateTransition(this, STATE_CLOSED);

        // emit an event for close
        this.emit(Topology.TOPOLOGY_CLOSED, new TopologyClosedEvent(this.s.id));
      })
      .finally(() => callback?.());
  }

  /**
   * Selects a server according to the selection predicate provided
   *
   * @param selector - An optional selector to select servers by, defaults to a random selection within a latency window
   * @param options - Optional settings related to server selection
   * @param callback - The callback used to indicate success or failure
   * @returns An instance of a `Server` meeting the criteria of the predicate provided
   */
  selectServer(
    selector: string | ReadPreference | ServerSelector,
    options: SelectServerOptions,
    callback: Callback<Server>
  ): void {
    let serverSelector;
    if (typeof selector !== 'function') {
      if (typeof selector === 'string') {
        serverSelector = readPreferenceServerSelector(ReadPreference.fromString(selector));
      } else {
        let readPreference;
        if (selector instanceof ReadPreference) {
          readPreference = selector;
        } else {
          ReadPreference.translate(options);
          readPreference = options.readPreference || ReadPreference.primary;
        }

        serverSelector = readPreferenceServerSelector(readPreference as ReadPreference);
      }
    } else {
      serverSelector = selector;
    }

    options = Object.assign(
      {},
      { serverSelectionTimeoutMS: this.s.serverSelectionTimeoutMS },
      options
    );

    const isSharded = this.description.type === TopologyType.Sharded;
    const session = options.session;
    const transaction = session && session.transaction;

    if (isSharded && transaction && transaction.server) {
      callback(undefined, transaction.server);
      return;
    }

    const waitQueueMember: ServerSelectionRequest = {
      serverSelector,
      transaction,
      callback
    };

    const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS;
    if (serverSelectionTimeoutMS) {
      waitQueueMember.timer = setTimeout(() => {
        waitQueueMember[kCancelled] = true;
        waitQueueMember.timer = undefined;
        const timeoutError = new MongoServerSelectionError(
          `Server selection timed out after ${serverSelectionTimeoutMS} ms`,
          this.description
        );

        waitQueueMember.callback(timeoutError);
      }, serverSelectionTimeoutMS);
    }

    this[kWaitQueue].push(waitQueueMember);
    processWaitQueue(this);
  }

  /**
   * Update the internal TopologyDescription with a ServerDescription
   *
   * @param serverDescription - The server to update in the internal list of server descriptions
   */
  serverUpdateHandler(serverDescription: ServerDescription): void {
    if (!this.s.description.hasServer(serverDescription.address)) {
      return;
    }

    // ignore this server update if it's from an outdated topologyVersion
    if (isStaleServerDescription(this.s.description, serverDescription)) {
      return;
    }

    // these will be used for monitoring events later
    const previousTopologyDescription = this.s.description;
    const previousServerDescription = this.s.description.servers.get(serverDescription.address);
    if (!previousServerDescription) {
      return;
    }

    // Driver Sessions Spec: "Whenever a driver receives a cluster time from
    // a server it MUST compare it to the current highest seen cluster time
    // for the deployment. If the new cluster time is higher than the
    // highest seen cluster time it MUST become the new highest seen cluster
    // time. Two cluster times are compared using only the BsonTimestamp
    // value of the clusterTime embedded field."
    const clusterTime = serverDescription.$clusterTime;
    if (clusterTime) {
      _advanceClusterTime(this, clusterTime);
    }

    // If we already know all the information contained in this updated description, then
    // we don't need to emit SDAM events, but still need to update the description, in order
    // to keep client-tracked attributes like last update time and round trip time up to date
    const equalDescriptions =
      previousServerDescription && previousServerDescription.equals(serverDescription);

    // first update the TopologyDescription
    this.s.description = this.s.description.update(serverDescription);
    if (this.s.description.compatibilityError) {
      this.emit(Topology.ERROR, new MongoCompatibilityError(this.s.description.compatibilityError));
      return;
    }

    // emit monitoring events for this change
    if (!equalDescriptions) {
      const newDescription = this.s.description.servers.get(serverDescription.address);
      if (newDescription) {
        this.emit(
          Topology.SERVER_DESCRIPTION_CHANGED,
          new ServerDescriptionChangedEvent(
            this.s.id,
            serverDescription.address,
            previousServerDescription,
            newDescription
          )
        );
      }
    }

    // update server list from updated descriptions
    updateServers(this, serverDescription);

    // attempt to resolve any outstanding server selection attempts
    if (this[kWaitQueue].length > 0) {
      processWaitQueue(this);
    }

    if (!equalDescriptions) {
      this.emit(
        Topology.TOPOLOGY_DESCRIPTION_CHANGED,
        new TopologyDescriptionChangedEvent(
          this.s.id,
          previousTopologyDescription,
          this.s.description
        )
      );
    }
  }

  auth(credentials?: MongoCredentials, callback?: Callback): void {
    if (typeof credentials === 'function') (callback = credentials), (credentials = undefined);
    if (typeof callback === 'function') callback(undefined, true);
  }

  get clientMetadata(): ClientMetadata {
    return this.s.options.metadata;
  }

  isConnected(): boolean {
    return this.s.state === STATE_CONNECTED;
  }

  isDestroyed(): boolean {
    return this.s.state === STATE_CLOSED;
  }

  // NOTE: There are many places in code where we explicitly check the last hello
  // to do feature support detection. This should be done another way, but for
  // now we will just return the first hello seen, which should suffice.
  lastHello(): Document {
    const serverDescriptions = Array.from(this.description.servers.values());
    if (serverDescriptions.length === 0) return {};
    const sd = serverDescriptions.filter(
      (sd: ServerDescription) => sd.type !== ServerType.Unknown
    )[0];

    const result = sd || { maxWireVersion: this.description.commonWireVersion };
    return result;
  }

  get commonWireVersion(): number | undefined {
    return this.description.commonWireVersion;
  }

  get logicalSessionTimeoutMinutes(): number | null {
    return this.description.logicalSessionTimeoutMinutes;
  }

  get clusterTime(): ClusterTime | undefined {
    return this.s.clusterTime;
  }

  set clusterTime(clusterTime: ClusterTime | undefined) {
    this.s.clusterTime = clusterTime;
  }
}

/** Destroys a server, and removes all event listeners from the instance */
function destroyServer(
  server: Server,
  topology: Topology,
  options?: DestroyOptions,
  callback?: Callback
) {
  options = options ?? { force: false };
  for (const event of LOCAL_SERVER_EVENTS) {
    server.removeAllListeners(event);
  }

  server.destroy(options, () => {
    topology.emit(
      Topology.SERVER_CLOSED,
      new ServerClosedEvent(topology.s.id, server.description.address)
    );

    for (const event of SERVER_RELAY_EVENTS) {
      server.removeAllListeners(event);
    }
    if (typeof callback === 'function') {
      callback();
    }
  });
}

/** Predicts the TopologyType from options */
function topologyTypeFromOptions(options?: TopologyOptions) {
  if (options?.directConnection) {
    return TopologyType.Single;
  }

  if (options?.replicaSet) {
    return TopologyType.ReplicaSetNoPrimary;
  }

  if (options?.loadBalanced) {
    return TopologyType.LoadBalanced;
  }

  return TopologyType.Unknown;
}
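
// Illustrative mapping (not part of the vendored source):
//
//   topologyTypeFromOptions({ directConnection: true, ... }) // TopologyType.Single
//   topologyTypeFromOptions({ replicaSet: 'rs0', ... })      // TopologyType.ReplicaSetNoPrimary
//   topologyTypeFromOptions({ loadBalanced: true, ... })     // TopologyType.LoadBalanced
//   topologyTypeFromOptions({ ... })                         // TopologyType.Unknown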

/**
 * Creates new server instances and attempts to connect them
 *
 * @param topology - The topology that this server belongs to
 * @param serverDescription - The description for the server to initialize and connect to
 */
function createAndConnectServer(topology: Topology, serverDescription: ServerDescription) {
  topology.emit(
    Topology.SERVER_OPENING,
    new ServerOpeningEvent(topology.s.id, serverDescription.address)
  );

  const server = new Server(topology, serverDescription, topology.s.options);
  for (const event of SERVER_RELAY_EVENTS) {
    server.on(event, (e: any) => topology.emit(event, e));
  }

  server.on(Server.DESCRIPTION_RECEIVED, description => topology.serverUpdateHandler(description));

  server.connect();
  return server;
}

/**
 * @param topology - Topology to update.
 * @param incomingServerDescription - New server description.
 */
function updateServers(topology: Topology, incomingServerDescription?: ServerDescription) {
  // update the internal server's description
  if (incomingServerDescription && topology.s.servers.has(incomingServerDescription.address)) {
    const server = topology.s.servers.get(incomingServerDescription.address);
    if (server) {
      server.s.description = incomingServerDescription;
      if (
        incomingServerDescription.error instanceof MongoError &&
        incomingServerDescription.error.hasErrorLabel(MongoErrorLabel.ResetPool)
      ) {
        const interruptInUseConnections = incomingServerDescription.error.hasErrorLabel(
          MongoErrorLabel.InterruptInUseConnections
        );

        server.pool.clear({ interruptInUseConnections });
      } else if (incomingServerDescription.error == null) {
        const newTopologyType = topology.s.description.type;
        const shouldMarkPoolReady =
          incomingServerDescription.isDataBearing ||
          (incomingServerDescription.type !== ServerType.Unknown &&
            newTopologyType === TopologyType.Single);
        if (shouldMarkPoolReady) {
          server.pool.ready();
        }
      }
    }
  }

  // add new servers for all descriptions we currently don't know about locally
  for (const serverDescription of topology.description.servers.values()) {
    if (!topology.s.servers.has(serverDescription.address)) {
      const server = createAndConnectServer(topology, serverDescription);
      topology.s.servers.set(serverDescription.address, server);
    }
  }

  // for all servers no longer known, remove their descriptions and destroy their instances
  for (const entry of topology.s.servers) {
    const serverAddress = entry[0];
    if (topology.description.hasServer(serverAddress)) {
      continue;
    }

    if (!topology.s.servers.has(serverAddress)) {
      continue;
    }

    const server = topology.s.servers.get(serverAddress);
    topology.s.servers.delete(serverAddress);

    // prepare server for garbage collection
    if (server) {
      destroyServer(server, topology);
    }
  }
}

function drainWaitQueue(queue: List<ServerSelectionRequest>, err?: MongoDriverError) {
  while (queue.length) {
    const waitQueueMember = queue.shift();
    if (!waitQueueMember) {
      continue;
    }

    if (waitQueueMember.timer) {
      clearTimeout(waitQueueMember.timer);
    }

    if (!waitQueueMember[kCancelled]) {
      waitQueueMember.callback(err);
    }
  }
}

function processWaitQueue(topology: Topology) {
  if (topology.s.state === STATE_CLOSED) {
    drainWaitQueue(topology[kWaitQueue], new MongoTopologyClosedError());
    return;
  }

  const isSharded = topology.description.type === TopologyType.Sharded;
  const serverDescriptions = Array.from(topology.description.servers.values());
  const membersToProcess = topology[kWaitQueue].length;
  for (let i = 0; i < membersToProcess; ++i) {
    const waitQueueMember = topology[kWaitQueue].shift();
    if (!waitQueueMember) {
      continue;
    }

    if (waitQueueMember[kCancelled]) {
      continue;
    }

    let selectedDescriptions;
    try {
      const serverSelector = waitQueueMember.serverSelector;
      selectedDescriptions = serverSelector
        ? serverSelector(topology.description, serverDescriptions)
        : serverDescriptions;
    } catch (e) {
      if (waitQueueMember.timer) {
        clearTimeout(waitQueueMember.timer);
      }

      waitQueueMember.callback(e);
      continue;
    }

    let selectedServer;
    if (selectedDescriptions.length === 0) {
      topology[kWaitQueue].push(waitQueueMember);
      continue;
    } else if (selectedDescriptions.length === 1) {
      selectedServer = topology.s.servers.get(selectedDescriptions[0].address);
    } else {
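      // "Power of two random choices": sample two eligible servers at random
      // and pick the one with fewer in-flight operations, spreading load
      // without needing a global view of every server's operation count.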
      const descriptions = shuffle(selectedDescriptions, 2);
      const server1 = topology.s.servers.get(descriptions[0].address);
      const server2 = topology.s.servers.get(descriptions[1].address);

      selectedServer =
        server1 && server2 && server1.s.operationCount < server2.s.operationCount
          ? server1
          : server2;
    }

    if (!selectedServer) {
      waitQueueMember.callback(
        new MongoServerSelectionError(
          'server selection returned a server description but the server was not found in the topology',
          topology.description
        )
      );
      return;
    }
    const transaction = waitQueueMember.transaction;
    if (isSharded && transaction && transaction.isActive && selectedServer) {
      transaction.pinServer(selectedServer);
    }

    if (waitQueueMember.timer) {
      clearTimeout(waitQueueMember.timer);
    }

    waitQueueMember.callback(undefined, selectedServer);
  }

  if (topology[kWaitQueue].length > 0) {
    // ensure all server monitors attempt monitoring soon
    for (const [, server] of topology.s.servers) {
      process.nextTick(function scheduleServerCheck() {
        return server.requestCheck();
      });
    }
  }
}

function isStaleServerDescription(
  topologyDescription: TopologyDescription,
  incomingServerDescription: ServerDescription
) {
  const currentServerDescription = topologyDescription.servers.get(
    incomingServerDescription.address
  );
  const currentTopologyVersion = currentServerDescription?.topologyVersion;
  return (
    compareTopologyVersion(currentTopologyVersion, incomingServerDescription.topologyVersion) > 0
  );
}
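
// Illustrative example (not part of the vendored source): if the cached
// description has topologyVersion { processId: pid, counter: 5 } and a monitor
// delivers counter 4 for the same processId, compareTopologyVersion returns 1
// and the incoming update is discarded as stale.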

/** @public */
export class ServerCapabilities {
  maxWireVersion: number;
  minWireVersion: number;

  constructor(hello: Document) {
    this.minWireVersion = hello.minWireVersion || 0;
    this.maxWireVersion = hello.maxWireVersion || 0;
  }

  get hasAggregationCursor(): boolean {
    return this.maxWireVersion >= 1;
  }

  get hasWriteCommands(): boolean {
    return this.maxWireVersion >= 2;
  }
  get hasTextSearch(): boolean {
    return this.minWireVersion >= 0;
  }

  get hasAuthCommands(): boolean {
    return this.maxWireVersion >= 1;
  }

  get hasListCollectionsCommand(): boolean {
    return this.maxWireVersion >= 3;
  }

  get hasListIndexesCommand(): boolean {
    return this.maxWireVersion >= 3;
  }

  get supportsSnapshotReads(): boolean {
    return this.maxWireVersion >= 13;
  }

  get commandsTakeWriteConcern(): boolean {
    return this.maxWireVersion >= 5;
  }

  get commandsTakeCollation(): boolean {
    return this.maxWireVersion >= 5;
  }
}
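
// Illustrative usage (not part of the vendored source): a hello response with
// maxWireVersion 13 (MongoDB 5.0) reports snapshot read support.
//
//   new ServerCapabilities({ minWireVersion: 0, maxWireVersion: 13 }).supportsSnapshotReads; // true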
511
node_modules/mongodb/src/sdam/topology_description.ts
generated
vendored
Normal file
@@ -0,0 +1,511 @@
import type { ObjectId } from '../bson';
import * as WIRE_CONSTANTS from '../cmap/wire_protocol/constants';
import { MongoRuntimeError, MongoServerError } from '../error';
import { compareObjectId, shuffle } from '../utils';
import { ServerType, TopologyType } from './common';
import { ServerDescription } from './server_description';
import type { SrvPollingEvent } from './srv_polling';

// constants related to compatibility checks
const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION;
const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION;
const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION;

const MONGOS_OR_UNKNOWN = new Set<ServerType>([ServerType.Mongos, ServerType.Unknown]);
const MONGOS_OR_STANDALONE = new Set<ServerType>([ServerType.Mongos, ServerType.Standalone]);
const NON_PRIMARY_RS_MEMBERS = new Set<ServerType>([
  ServerType.RSSecondary,
  ServerType.RSArbiter,
  ServerType.RSOther
]);

/** @public */
export interface TopologyDescriptionOptions {
  heartbeatFrequencyMS?: number;
  localThresholdMS?: number;
}

/**
 * Representation of a deployment of servers
 * @public
 */
export class TopologyDescription {
  type: TopologyType;
  setName: string | null;
  maxSetVersion: number | null;
  maxElectionId: ObjectId | null;
  servers: Map<string, ServerDescription>;
  stale: boolean;
  compatible: boolean;
  compatibilityError?: string;
  logicalSessionTimeoutMinutes: number | null;
  heartbeatFrequencyMS: number;
  localThresholdMS: number;
  commonWireVersion: number;

  /**
   * Create a TopologyDescription
   */
  constructor(
    topologyType: TopologyType,
    serverDescriptions: Map<string, ServerDescription> | null = null,
    setName: string | null = null,
    maxSetVersion: number | null = null,
    maxElectionId: ObjectId | null = null,
    commonWireVersion: number | null = null,
    options: TopologyDescriptionOptions | null = null
  ) {
    options = options ?? {};

    this.type = topologyType ?? TopologyType.Unknown;
    this.servers = serverDescriptions ?? new Map();
    this.stale = false;
    this.compatible = true;
    this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 0;
    this.localThresholdMS = options.localThresholdMS ?? 15;
    this.setName = setName ?? null;
    this.maxElectionId = maxElectionId ?? null;
    this.maxSetVersion = maxSetVersion ?? null;
    this.commonWireVersion = commonWireVersion ?? 0;

    // determine server compatibility
    for (const serverDescription of this.servers.values()) {
      // Load balancer mode is always compatible.
      if (
        serverDescription.type === ServerType.Unknown ||
        serverDescription.type === ServerType.LoadBalancer
      ) {
        continue;
      }

      if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) {
        this.compatible = false;
        this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${serverDescription.minWireVersion}, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
      }

      if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) {
        this.compatible = false;
        this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${serverDescription.maxWireVersion}, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`;
        break;
      }
    }

    // Whenever a client updates the TopologyDescription from a hello response, it MUST set
    // TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes
    // value among ServerDescriptions of all data-bearing server types. If any have a null
    // logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be
    // set to null.
    this.logicalSessionTimeoutMinutes = null;
    for (const [, server] of this.servers) {
      if (server.isReadable) {
        if (server.logicalSessionTimeoutMinutes == null) {
          // If any of the servers have a null logicalSessionsTimeout, then the whole topology does
          this.logicalSessionTimeoutMinutes = null;
          break;
        }

        if (this.logicalSessionTimeoutMinutes == null) {
          // First server with a non-null logicalSessionsTimeout
          this.logicalSessionTimeoutMinutes = server.logicalSessionTimeoutMinutes;
          continue;
        }

        // Always select the smaller of the current server's logicalSessionsTimeout
        // and the topology's logicalSessionsTimeout
        this.logicalSessionTimeoutMinutes = Math.min(
          this.logicalSessionTimeoutMinutes,
          server.logicalSessionTimeoutMinutes
        );
      }
    }
  }

  /**
   * Returns a new TopologyDescription based on the SrvPollingEvent
   * @internal
   */
  updateFromSrvPollingEvent(ev: SrvPollingEvent, srvMaxHosts = 0): TopologyDescription {
    /** The SRV addresses define the set of addresses we should be using */
    const incomingHostnames = ev.hostnames();
    const currentHostnames = new Set(this.servers.keys());

    const hostnamesToAdd = new Set<string>(incomingHostnames);
    const hostnamesToRemove = new Set<string>();
    for (const hostname of currentHostnames) {
      // filter hostnamesToAdd (made from incomingHostnames) down to what is *not* present in currentHostnames
      hostnamesToAdd.delete(hostname);
      if (!incomingHostnames.has(hostname)) {
        // If the SRV Records no longer include this hostname
        // we have to stop using it
        hostnamesToRemove.add(hostname);
      }
    }

    if (hostnamesToAdd.size === 0 && hostnamesToRemove.size === 0) {
      // No new hosts to add and none to remove
      return this;
    }

    const serverDescriptions = new Map(this.servers);
    for (const removedHost of hostnamesToRemove) {
      serverDescriptions.delete(removedHost);
    }

    if (hostnamesToAdd.size > 0) {
      if (srvMaxHosts === 0) {
        // Add all!
        for (const hostToAdd of hostnamesToAdd) {
          serverDescriptions.set(hostToAdd, new ServerDescription(hostToAdd));
        }
      } else if (serverDescriptions.size < srvMaxHosts) {
        // Add only the amount needed to get us back to srvMaxHosts
        const selectedHosts = shuffle(hostnamesToAdd, srvMaxHosts - serverDescriptions.size);
        for (const selectedHostToAdd of selectedHosts) {
          serverDescriptions.set(selectedHostToAdd, new ServerDescription(selectedHostToAdd));
        }
      }
    }

    return new TopologyDescription(
      this.type,
      serverDescriptions,
      this.setName,
      this.maxSetVersion,
      this.maxElectionId,
      this.commonWireVersion,
      { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS }
    );
  }
|
||||
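  /*
   * Illustrative sketch, not driver code: the reconciliation above is a plain set
   * difference. Given incoming SRV hostnames {a, b} and currently monitored {b, c}:
   *
   *   hostnamesToAdd    = incoming \ current = {a}
   *   hostnamesToRemove = current \ incoming = {c}
   *
   * When srvMaxHosts > 0, only enough of hostnamesToAdd (chosen at random via
   * shuffle) are admitted to bring the server count back up to srvMaxHosts.
   */
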
  /**
   * Returns a copy of this description updated with a given ServerDescription
   * @internal
   */
  update(serverDescription: ServerDescription): TopologyDescription {
    const address = serverDescription.address;

    // potentially mutated values
    let { type: topologyType, setName, maxSetVersion, maxElectionId, commonWireVersion } = this;

    const serverType = serverDescription.type;
    const serverDescriptions = new Map(this.servers);

    // update common wire version
    if (serverDescription.maxWireVersion !== 0) {
      if (commonWireVersion == null) {
        commonWireVersion = serverDescription.maxWireVersion;
      } else {
        commonWireVersion = Math.min(commonWireVersion, serverDescription.maxWireVersion);
      }
    }

    if (
      typeof serverDescription.setName === 'string' &&
      typeof setName === 'string' &&
      serverDescription.setName !== setName
    ) {
      if (topologyType === TopologyType.Single) {
        // A "Single" topology with a setName mismatch is direct connection usage: mark the server unknown, do not remove it
        serverDescription = new ServerDescription(address);
      } else {
        serverDescriptions.delete(address);
      }
    }

    // update the actual server description
    serverDescriptions.set(address, serverDescription);

    if (topologyType === TopologyType.Single) {
      // once we are defined as single, that never changes
      return new TopologyDescription(
        TopologyType.Single,
        serverDescriptions,
        setName,
        maxSetVersion,
        maxElectionId,
        commonWireVersion,
        { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS }
      );
    }

    if (topologyType === TopologyType.Unknown) {
      if (serverType === ServerType.Standalone && this.servers.size !== 1) {
        serverDescriptions.delete(address);
      } else {
        topologyType = topologyTypeForServerType(serverType);
      }
    }

    if (topologyType === TopologyType.Sharded) {
      if (!MONGOS_OR_UNKNOWN.has(serverType)) {
        serverDescriptions.delete(address);
      }
    }

    if (topologyType === TopologyType.ReplicaSetNoPrimary) {
      if (MONGOS_OR_STANDALONE.has(serverType)) {
        serverDescriptions.delete(address);
      }

      if (serverType === ServerType.RSPrimary) {
        const result = updateRsFromPrimary(
          serverDescriptions,
          serverDescription,
          setName,
          maxSetVersion,
          maxElectionId
        );

        topologyType = result[0];
        setName = result[1];
        maxSetVersion = result[2];
        maxElectionId = result[3];
      } else if (NON_PRIMARY_RS_MEMBERS.has(serverType)) {
        const result = updateRsNoPrimaryFromMember(serverDescriptions, serverDescription, setName);
        topologyType = result[0];
        setName = result[1];
      }
    }

    if (topologyType === TopologyType.ReplicaSetWithPrimary) {
      if (MONGOS_OR_STANDALONE.has(serverType)) {
        serverDescriptions.delete(address);
        topologyType = checkHasPrimary(serverDescriptions);
      } else if (serverType === ServerType.RSPrimary) {
        const result = updateRsFromPrimary(
          serverDescriptions,
          serverDescription,
          setName,
          maxSetVersion,
          maxElectionId
        );

        topologyType = result[0];
        setName = result[1];
        maxSetVersion = result[2];
        maxElectionId = result[3];
      } else if (NON_PRIMARY_RS_MEMBERS.has(serverType)) {
        topologyType = updateRsWithPrimaryFromMember(
          serverDescriptions,
          serverDescription,
          setName
        );
      } else {
        topologyType = checkHasPrimary(serverDescriptions);
      }
    }

    return new TopologyDescription(
      topologyType,
      serverDescriptions,
      setName,
      maxSetVersion,
      maxElectionId,
      commonWireVersion,
      { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS }
    );
  }

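  /*
   * Summary of the transitions applied above, as implemented (illustrative aid):
   *
   *   Single      + anything                    -> Single (never changes)
   *   Unknown     + Standalone (multiple seeds) -> server removed, stays Unknown
   *   Unknown     + any other type              -> topologyTypeForServerType(serverType)
   *   Sharded     + non-Mongos, non-Unknown     -> server removed
   *   ReplicaSet* + Mongos or Standalone        -> server removed
   *   ReplicaSet* + RSPrimary / RS members      -> delegated to the updateRs* helpers below
   */
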
  get error(): MongoServerError | null {
    const descriptionsWithError = Array.from(this.servers.values()).filter(
      (sd: ServerDescription) => sd.error
    );

    if (descriptionsWithError.length > 0) {
      return descriptionsWithError[0].error;
    }

    return null;
  }

  /**
   * Determines if the topology description has any known servers
   */
  get hasKnownServers(): boolean {
    return Array.from(this.servers.values()).some(
      (sd: ServerDescription) => sd.type !== ServerType.Unknown
    );
  }

  /**
   * Determines if this topology description has a data-bearing server available.
   */
  get hasDataBearingServers(): boolean {
    return Array.from(this.servers.values()).some((sd: ServerDescription) => sd.isDataBearing);
  }

  /**
   * Determines if the topology has a definition for the provided address
   * @internal
   */
  hasServer(address: string): boolean {
    return this.servers.has(address);
  }
}

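// Illustrative usage sketch, not part of the driver. The constructor arguments follow
// the calls above: (type, servers, setName, maxSetVersion, maxElectionId,
// commonWireVersion, { heartbeatFrequencyMS, localThresholdMS }). The address and the
// option values below are hypothetical.
function exampleSeedTopology(): TopologyDescription {
  const address = 'db0.example.com:27017';
  // A ServerDescription constructed from an address alone starts as type "Unknown"
  const servers = new Map([[address, new ServerDescription(address)]]);
  return new TopologyDescription(TopologyType.Unknown, servers, null, null, null, null, {
    heartbeatFrequencyMS: 10000,
    localThresholdMS: 15
  });
}
// Descriptions are immutable: each hello response yields a fresh description, e.g.
//   const next = exampleSeedTopology().update(someServerDescription);
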
function topologyTypeForServerType(serverType: ServerType): TopologyType {
  switch (serverType) {
    case ServerType.Standalone:
      return TopologyType.Single;
    case ServerType.Mongos:
      return TopologyType.Sharded;
    case ServerType.RSPrimary:
      return TopologyType.ReplicaSetWithPrimary;
    case ServerType.RSOther:
    case ServerType.RSSecondary:
      return TopologyType.ReplicaSetNoPrimary;
    default:
      return TopologyType.Unknown;
  }
}

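// Quick sanity sketch of the mapping above (illustrative only):
//   topologyTypeForServerType(ServerType.Standalone)  === TopologyType.Single
//   topologyTypeForServerType(ServerType.Mongos)      === TopologyType.Sharded
//   topologyTypeForServerType(ServerType.RSPrimary)   === TopologyType.ReplicaSetWithPrimary
//   topologyTypeForServerType(ServerType.RSSecondary) === TopologyType.ReplicaSetNoPrimary
//   topologyTypeForServerType(ServerType.RSGhost)     === TopologyType.Unknown (default arm)
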
function updateRsFromPrimary(
  serverDescriptions: Map<string, ServerDescription>,
  serverDescription: ServerDescription,
  setName: string | null = null,
  maxSetVersion: number | null = null,
  maxElectionId: ObjectId | null = null
): [TopologyType, string | null, number | null, ObjectId | null] {
  setName = setName || serverDescription.setName;
  if (setName !== serverDescription.setName) {
    serverDescriptions.delete(serverDescription.address);
    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
  }

  if (serverDescription.maxWireVersion >= 17) {
    const electionIdComparison = compareObjectId(maxElectionId, serverDescription.electionId);
    const maxElectionIdIsEqual = electionIdComparison === 0;
    const maxElectionIdIsLess = electionIdComparison === -1;
    const maxSetVersionIsLessOrEqual =
      (maxSetVersion ?? -1) <= (serverDescription.setVersion ?? -1);

    if (maxElectionIdIsLess || (maxElectionIdIsEqual && maxSetVersionIsLessOrEqual)) {
      // The reported electionId was greater,
      // or the electionId was equal and the reported setVersion was greater or equal.
      // Always update both values; they are a tuple
      maxElectionId = serverDescription.electionId;
      maxSetVersion = serverDescription.setVersion;
    } else {
      // Stale primary:
      // replace serverDescription with a default ServerDescription of type "Unknown"
      serverDescriptions.set(
        serverDescription.address,
        new ServerDescription(serverDescription.address)
      );

      return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
    }
  } else {
    const electionId = serverDescription.electionId ? serverDescription.electionId : null;
    if (serverDescription.setVersion && electionId) {
      if (maxSetVersion && maxElectionId) {
        if (
          maxSetVersion > serverDescription.setVersion ||
          compareObjectId(maxElectionId, electionId) > 0
        ) {
          // this primary is stale, we must remove it
          serverDescriptions.set(
            serverDescription.address,
            new ServerDescription(serverDescription.address)
          );

          return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
        }
      }

      maxElectionId = serverDescription.electionId;
    }

    if (
      serverDescription.setVersion != null &&
      (maxSetVersion == null || serverDescription.setVersion > maxSetVersion)
    ) {
      maxSetVersion = serverDescription.setVersion;
    }
  }

  // We've heard from the primary. Is it the same primary as before?
  for (const [address, server] of serverDescriptions) {
    if (server.type === ServerType.RSPrimary && server.address !== serverDescription.address) {
      // Reset the old primary's type to Unknown.
      serverDescriptions.set(address, new ServerDescription(server.address));

      // There can only be one primary
      break;
    }
  }

  // Discover new hosts from this primary's response.
  serverDescription.allHosts.forEach((address: string) => {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  });

  // Remove hosts not in the response.
  const currentAddresses = Array.from(serverDescriptions.keys());
  const responseAddresses = serverDescription.allHosts;
  currentAddresses
    .filter((addr: string) => responseAddresses.indexOf(addr) === -1)
    .forEach((address: string) => {
      serverDescriptions.delete(address);
    });

  return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
}

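// Illustrative sketch, not driver code: for modern servers (maxWireVersion >= 17) the
// staleness rule above amounts to a lexicographic comparison of the
// (electionId, setVersion) tuple, electionId first. The helper name is hypothetical;
// compareObjectId is the same utility used above.
function primaryIsStale(
  maxElectionId: ObjectId | null,
  maxSetVersion: number | null,
  electionId: ObjectId | null,
  setVersion: number | null
): boolean {
  const cmp = compareObjectId(maxElectionId, electionId);
  // stale if the known max electionId is greater, or equal with a greater known setVersion
  return cmp === 1 || (cmp === 0 && (maxSetVersion ?? -1) > (setVersion ?? -1));
}
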
function updateRsWithPrimaryFromMember(
  serverDescriptions: Map<string, ServerDescription>,
  serverDescription: ServerDescription,
  setName: string | null = null
): TopologyType {
  if (setName == null) {
    // TODO(NODE-3483): should be an appropriate runtime error
    throw new MongoRuntimeError('Argument "setName" is required if connected to a replica set');
  }

  if (
    setName !== serverDescription.setName ||
    (serverDescription.me && serverDescription.address !== serverDescription.me)
  ) {
    serverDescriptions.delete(serverDescription.address);
  }

  return checkHasPrimary(serverDescriptions);
}

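// Illustrative note: if we monitor 'db0:27017' but the member's hello response reports
// me: 'db0.internal:27017', the mismatched entry is dropped above and the topology is
// re-checked for a primary. (The addresses here are hypothetical.)
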
function updateRsNoPrimaryFromMember(
  serverDescriptions: Map<string, ServerDescription>,
  serverDescription: ServerDescription,
  setName: string | null = null
): [TopologyType, string | null] {
  const topologyType = TopologyType.ReplicaSetNoPrimary;
  setName = setName ?? serverDescription.setName;
  if (setName !== serverDescription.setName) {
    serverDescriptions.delete(serverDescription.address);
    return [topologyType, setName];
  }

  serverDescription.allHosts.forEach((address: string) => {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  });

  if (serverDescription.me && serverDescription.address !== serverDescription.me) {
    serverDescriptions.delete(serverDescription.address);
  }

  return [topologyType, setName];
}

function checkHasPrimary(serverDescriptions: Map<string, ServerDescription>): TopologyType {
  for (const serverDescription of serverDescriptions.values()) {
    if (serverDescription.type === ServerType.RSPrimary) {
      return TopologyType.ReplicaSetWithPrimary;
    }
  }

  return TopologyType.ReplicaSetNoPrimary;
}
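
// Illustrative check, not driver code: with only Unknown seeds (a bare address yields a
// ServerDescription of type "Unknown"), checkHasPrimary reports ReplicaSetNoPrimary.
// The addresses are hypothetical placeholders.
function exampleNoPrimary(): boolean {
  const servers = new Map([
    ['rs0.example.com:27017', new ServerDescription('rs0.example.com:27017')],
    ['rs1.example.com:27017', new ServerDescription('rs1.example.com:27017')]
  ]);
  return checkHasPrimary(servers) === TopologyType.ReplicaSetNoPrimary; // => true
}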