First commit
56 node_modules/mongodb/lib/sdam/common.js (generated, vendored) Normal file
@@ -0,0 +1,56 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports._advanceClusterTime = exports.drainTimerQueue = exports.ServerType = exports.TopologyType = exports.STATE_CONNECTED = exports.STATE_CONNECTING = exports.STATE_CLOSED = exports.STATE_CLOSING = void 0;
const timers_1 = require("timers");
// shared state names
exports.STATE_CLOSING = 'closing';
exports.STATE_CLOSED = 'closed';
exports.STATE_CONNECTING = 'connecting';
exports.STATE_CONNECTED = 'connected';
/**
 * An enumeration of topology types we know about
 * @public
 */
exports.TopologyType = Object.freeze({
    Single: 'Single',
    ReplicaSetNoPrimary: 'ReplicaSetNoPrimary',
    ReplicaSetWithPrimary: 'ReplicaSetWithPrimary',
    Sharded: 'Sharded',
    Unknown: 'Unknown',
    LoadBalanced: 'LoadBalanced'
});
/**
 * An enumeration of server types we know about
 * @public
 */
exports.ServerType = Object.freeze({
    Standalone: 'Standalone',
    Mongos: 'Mongos',
    PossiblePrimary: 'PossiblePrimary',
    RSPrimary: 'RSPrimary',
    RSSecondary: 'RSSecondary',
    RSArbiter: 'RSArbiter',
    RSOther: 'RSOther',
    RSGhost: 'RSGhost',
    Unknown: 'Unknown',
    LoadBalancer: 'LoadBalancer'
});
/** @internal */
function drainTimerQueue(queue) {
    queue.forEach(timers_1.clearTimeout);
    queue.clear();
}
exports.drainTimerQueue = drainTimerQueue;
/** Shared function to determine clusterTime for a given topology or session */
function _advanceClusterTime(entity, $clusterTime) {
    if (entity.clusterTime == null) {
        entity.clusterTime = $clusterTime;
    }
    else {
        if ($clusterTime.clusterTime.greaterThan(entity.clusterTime.clusterTime)) {
            entity.clusterTime = $clusterTime;
        }
    }
}
exports._advanceClusterTime = _advanceClusterTime;
//# sourceMappingURL=common.js.map
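// A minimal usage sketch of _advanceClusterTime, assuming $clusterTime
// documents carry a BSON Timestamp (Long-backed, with greaterThan()) under
// `clusterTime`; the values below are illustrative:
const { Timestamp } = require('bson');
const session = { clusterTime: undefined };
_advanceClusterTime(session, { clusterTime: new Timestamp({ t: 10, i: 1 }) });
_advanceClusterTime(session, { clusterTime: new Timestamp({ t: 5, i: 0 }) }); // older, ignored
// session.clusterTime still holds t=10: cluster time only moves forward.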
1 node_modules/mongodb/lib/sdam/common.js.map (generated, vendored) Normal file
@@ -0,0 +1 @@
{"version":3,"file":"common.js","sourceRoot":"","sources":["../../src/sdam/common.ts"],"names":[],"mappings":";;;AAAA,mCAAsC;AAMtC,qBAAqB;AACR,QAAA,aAAa,GAAG,SAAS,CAAC;AAC1B,QAAA,YAAY,GAAG,QAAQ,CAAC;AACxB,QAAA,gBAAgB,GAAG,YAAY,CAAC;AAChC,QAAA,eAAe,GAAG,WAAW,CAAC;AAE3C;;;GAGG;AACU,QAAA,YAAY,GAAG,MAAM,CAAC,MAAM,CAAC;IACxC,MAAM,EAAE,QAAQ;IAChB,mBAAmB,EAAE,qBAAqB;IAC1C,qBAAqB,EAAE,uBAAuB;IAC9C,OAAO,EAAE,SAAS;IAClB,OAAO,EAAE,SAAS;IAClB,YAAY,EAAE,cAAc;CACpB,CAAC,CAAC;AAKZ;;;GAGG;AACU,QAAA,UAAU,GAAG,MAAM,CAAC,MAAM,CAAC;IACtC,UAAU,EAAE,YAAY;IACxB,MAAM,EAAE,QAAQ;IAChB,eAAe,EAAE,iBAAiB;IAClC,SAAS,EAAE,WAAW;IACtB,WAAW,EAAE,aAAa;IAC1B,SAAS,EAAE,WAAW;IACtB,OAAO,EAAE,SAAS;IAClB,OAAO,EAAE,SAAS;IAClB,OAAO,EAAE,SAAS;IAClB,YAAY,EAAE,cAAc;CACpB,CAAC,CAAC;AAQZ,gBAAgB;AAChB,SAAgB,eAAe,CAAC,KAAiB;IAC/C,KAAK,CAAC,OAAO,CAAC,qBAAY,CAAC,CAAC;IAC5B,KAAK,CAAC,KAAK,EAAE,CAAC;AAChB,CAAC;AAHD,0CAGC;AAWD,+EAA+E;AAC/E,SAAgB,mBAAmB,CACjC,MAAgC,EAChC,YAAyB;IAEzB,IAAI,MAAM,CAAC,WAAW,IAAI,IAAI,EAAE;QAC9B,MAAM,CAAC,WAAW,GAAG,YAAY,CAAC;KACnC;SAAM;QACL,IAAI,YAAY,CAAC,WAAW,CAAC,WAAW,CAAC,MAAM,CAAC,WAAW,CAAC,WAAW,CAAC,EAAE;YACxE,MAAM,CAAC,WAAW,GAAG,YAAY,CAAC;SACnC;KACF;AACH,CAAC;AAXD,kDAWC"}
125 node_modules/mongodb/lib/sdam/events.js (generated, vendored) Normal file
@@ -0,0 +1,125 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ServerHeartbeatFailedEvent = exports.ServerHeartbeatSucceededEvent = exports.ServerHeartbeatStartedEvent = exports.TopologyClosedEvent = exports.TopologyOpeningEvent = exports.TopologyDescriptionChangedEvent = exports.ServerClosedEvent = exports.ServerOpeningEvent = exports.ServerDescriptionChangedEvent = void 0;
/**
 * Emitted when server description changes, but does NOT include changes to the RTT.
 * @public
 * @category Event
 */
class ServerDescriptionChangedEvent {
    /** @internal */
    constructor(topologyId, address, previousDescription, newDescription) {
        this.topologyId = topologyId;
        this.address = address;
        this.previousDescription = previousDescription;
        this.newDescription = newDescription;
    }
}
exports.ServerDescriptionChangedEvent = ServerDescriptionChangedEvent;
/**
 * Emitted when server is initialized.
 * @public
 * @category Event
 */
class ServerOpeningEvent {
    /** @internal */
    constructor(topologyId, address) {
        this.topologyId = topologyId;
        this.address = address;
    }
}
exports.ServerOpeningEvent = ServerOpeningEvent;
/**
 * Emitted when server is closed.
 * @public
 * @category Event
 */
class ServerClosedEvent {
    /** @internal */
    constructor(topologyId, address) {
        this.topologyId = topologyId;
        this.address = address;
    }
}
exports.ServerClosedEvent = ServerClosedEvent;
/**
 * Emitted when topology description changes.
 * @public
 * @category Event
 */
class TopologyDescriptionChangedEvent {
    /** @internal */
    constructor(topologyId, previousDescription, newDescription) {
        this.topologyId = topologyId;
        this.previousDescription = previousDescription;
        this.newDescription = newDescription;
    }
}
exports.TopologyDescriptionChangedEvent = TopologyDescriptionChangedEvent;
/**
 * Emitted when topology is initialized.
 * @public
 * @category Event
 */
class TopologyOpeningEvent {
    /** @internal */
    constructor(topologyId) {
        this.topologyId = topologyId;
    }
}
exports.TopologyOpeningEvent = TopologyOpeningEvent;
/**
 * Emitted when topology is closed.
 * @public
 * @category Event
 */
class TopologyClosedEvent {
    /** @internal */
    constructor(topologyId) {
        this.topologyId = topologyId;
    }
}
exports.TopologyClosedEvent = TopologyClosedEvent;
/**
 * Emitted when the server monitor's hello command is started - immediately before
 * the hello command is serialized into raw BSON and written to the socket.
 *
 * @public
 * @category Event
 */
class ServerHeartbeatStartedEvent {
    /** @internal */
    constructor(connectionId) {
        this.connectionId = connectionId;
    }
}
exports.ServerHeartbeatStartedEvent = ServerHeartbeatStartedEvent;
/**
 * Emitted when the server monitor's hello succeeds.
 * @public
 * @category Event
 */
class ServerHeartbeatSucceededEvent {
    /** @internal */
    constructor(connectionId, duration, reply) {
        this.connectionId = connectionId;
        this.duration = duration;
        this.reply = reply ?? {};
    }
}
exports.ServerHeartbeatSucceededEvent = ServerHeartbeatSucceededEvent;
/**
 * Emitted when the server monitor's hello fails, either with an "ok: 0" or a socket exception.
 * @public
 * @category Event
 */
class ServerHeartbeatFailedEvent {
    /** @internal */
    constructor(connectionId, duration, failure) {
        this.connectionId = connectionId;
        this.duration = duration;
        this.failure = failure;
    }
}
exports.ServerHeartbeatFailedEvent = ServerHeartbeatFailedEvent;
//# sourceMappingURL=events.js.map
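// A hedged usage sketch: these event classes back the SDAM heartbeat events
// that a MongoClient re-emits (event names as documented by the driver; the
// connection string here is illustrative).
const { MongoClient } = require('mongodb');
const client = new MongoClient('mongodb://localhost:27017');
client.on('serverHeartbeatSucceeded', (event) => {
  console.log(`heartbeat to ${event.connectionId} took ${event.duration}ms`);
});
client.on('serverHeartbeatFailed', (event) => {
  console.error(`heartbeat to ${event.connectionId} failed:`, event.failure);
});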
1 node_modules/mongodb/lib/sdam/events.js.map (generated, vendored) Normal file
@@ -0,0 +1 @@
{"version":3,"file":"events.js","sourceRoot":"","sources":["../../src/sdam/events.ts"],"names":[],"mappings":";;;AAIA;;;;GAIG;AACH,MAAa,6BAA6B;IAUxC,gBAAgB;IAChB,YACE,UAAkB,EAClB,OAAe,EACf,mBAAsC,EACtC,cAAiC;QAEjC,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;QACvB,IAAI,CAAC,mBAAmB,GAAG,mBAAmB,CAAC;QAC/C,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;IACvC,CAAC;CACF;AAtBD,sEAsBC;AAED;;;;GAIG;AACH,MAAa,kBAAkB;IAM7B,gBAAgB;IAChB,YAAY,UAAkB,EAAE,OAAe;QAC7C,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAXD,gDAWC;AAED;;;;GAIG;AACH,MAAa,iBAAiB;IAM5B,gBAAgB;IAChB,YAAY,UAAkB,EAAE,OAAe;QAC7C,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAXD,8CAWC;AAED;;;;GAIG;AACH,MAAa,+BAA+B;IAQ1C,gBAAgB;IAChB,YACE,UAAkB,EAClB,mBAAwC,EACxC,cAAmC;QAEnC,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;QAC7B,IAAI,CAAC,mBAAmB,GAAG,mBAAmB,CAAC;QAC/C,IAAI,CAAC,cAAc,GAAG,cAAc,CAAC;IACvC,CAAC;CACF;AAlBD,0EAkBC;AAED;;;;GAIG;AACH,MAAa,oBAAoB;IAI/B,gBAAgB;IAChB,YAAY,UAAkB;QAC5B,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;CACF;AARD,oDAQC;AAED;;;;GAIG;AACH,MAAa,mBAAmB;IAI9B,gBAAgB;IAChB,YAAY,UAAkB;QAC5B,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;CACF;AARD,kDAQC;AAED;;;;;;GAMG;AACH,MAAa,2BAA2B;IAItC,gBAAgB;IAChB,YAAY,YAAoB;QAC9B,IAAI,CAAC,YAAY,GAAG,YAAY,CAAC;IACnC,CAAC;CACF;AARD,kEAQC;AAED;;;;GAIG;AACH,MAAa,6BAA6B;IAQxC,gBAAgB;IAChB,YAAY,YAAoB,EAAE,QAAgB,EAAE,KAAsB;QACxE,IAAI,CAAC,YAAY,GAAG,YAAY,CAAC;QACjC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACzB,IAAI,CAAC,KAAK,GAAG,KAAK,IAAI,EAAE,CAAC;IAC3B,CAAC;CACF;AAdD,sEAcC;AAED;;;;GAIG;AACH,MAAa,0BAA0B;IAQrC,gBAAgB;IAChB,YAAY,YAAoB,EAAE,QAAgB,EAAE,OAAc;QAChE,IAAI,CAAC,YAAY,GAAG,YAAY,CAAC;QACjC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;QACzB,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC;IACzB,CAAC;CACF;AAdD,gEAcC"}
417 node_modules/mongodb/lib/sdam/monitor.js (generated, vendored) Normal file
@@ -0,0 +1,417 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.MonitorInterval = exports.RTTPinger = exports.Monitor = void 0;
const timers_1 = require("timers");
const bson_1 = require("../bson");
const connect_1 = require("../cmap/connect");
const connection_1 = require("../cmap/connection");
const constants_1 = require("../constants");
const error_1 = require("../error");
const mongo_types_1 = require("../mongo_types");
const utils_1 = require("../utils");
const common_1 = require("./common");
const events_1 = require("./events");
const server_1 = require("./server");
/** @internal */
const kServer = Symbol('server');
/** @internal */
const kMonitorId = Symbol('monitorId');
/** @internal */
const kConnection = Symbol('connection');
/** @internal */
const kCancellationToken = Symbol('cancellationToken');
/** @internal */
const kRTTPinger = Symbol('rttPinger');
/** @internal */
const kRoundTripTime = Symbol('roundTripTime');
const STATE_IDLE = 'idle';
const STATE_MONITORING = 'monitoring';
const stateTransition = (0, utils_1.makeStateMachine)({
    [common_1.STATE_CLOSING]: [common_1.STATE_CLOSING, STATE_IDLE, common_1.STATE_CLOSED],
    [common_1.STATE_CLOSED]: [common_1.STATE_CLOSED, STATE_MONITORING],
    [STATE_IDLE]: [STATE_IDLE, STATE_MONITORING, common_1.STATE_CLOSING],
    [STATE_MONITORING]: [STATE_MONITORING, STATE_IDLE, common_1.STATE_CLOSING]
});
const INVALID_REQUEST_CHECK_STATES = new Set([common_1.STATE_CLOSING, common_1.STATE_CLOSED, STATE_MONITORING]);
function isInCloseState(monitor) {
    return monitor.s.state === common_1.STATE_CLOSED || monitor.s.state === common_1.STATE_CLOSING;
}
/** @internal */
class Monitor extends mongo_types_1.TypedEventEmitter {
    get connection() {
        return this[kConnection];
    }
    constructor(server, options) {
        super();
        this[kServer] = server;
        this[kConnection] = undefined;
        this[kCancellationToken] = new mongo_types_1.CancellationToken();
        this[kCancellationToken].setMaxListeners(Infinity);
        this[kMonitorId] = undefined;
        this.s = {
            state: common_1.STATE_CLOSED
        };
        this.address = server.description.address;
        this.options = Object.freeze({
            connectTimeoutMS: options.connectTimeoutMS ?? 10000,
            heartbeatFrequencyMS: options.heartbeatFrequencyMS ?? 10000,
            minHeartbeatFrequencyMS: options.minHeartbeatFrequencyMS ?? 500
        });
        const cancellationToken = this[kCancellationToken];
        // TODO: refactor this to pull it directly from the pool, requires new ConnectionPool integration
        const connectOptions = Object.assign({
            id: '<monitor>',
            generation: server.pool.generation,
            connectionType: connection_1.Connection,
            cancellationToken,
            hostAddress: server.description.hostAddress
        }, options,
        // force BSON serialization options
        {
            raw: false,
            useBigInt64: false,
            promoteLongs: true,
            promoteValues: true,
            promoteBuffers: true
        });
        // ensure no authentication is used for monitoring
        delete connectOptions.credentials;
        if (connectOptions.autoEncrypter) {
            delete connectOptions.autoEncrypter;
        }
        this.connectOptions = Object.freeze(connectOptions);
    }
    connect() {
        if (this.s.state !== common_1.STATE_CLOSED) {
            return;
        }
        // start
        const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
        const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
        this[kMonitorId] = new MonitorInterval(monitorServer(this), {
            heartbeatFrequencyMS: heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: minHeartbeatFrequencyMS,
            immediate: true
        });
    }
    requestCheck() {
        if (INVALID_REQUEST_CHECK_STATES.has(this.s.state)) {
            return;
        }
        this[kMonitorId]?.wake();
    }
    reset() {
        const topologyVersion = this[kServer].description.topologyVersion;
        if (isInCloseState(this) || topologyVersion == null) {
            return;
        }
        stateTransition(this, common_1.STATE_CLOSING);
        resetMonitorState(this);
        // restart monitor
        stateTransition(this, STATE_IDLE);
        // restart monitoring
        const heartbeatFrequencyMS = this.options.heartbeatFrequencyMS;
        const minHeartbeatFrequencyMS = this.options.minHeartbeatFrequencyMS;
        this[kMonitorId] = new MonitorInterval(monitorServer(this), {
            heartbeatFrequencyMS: heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: minHeartbeatFrequencyMS
        });
    }
    close() {
        if (isInCloseState(this)) {
            return;
        }
        stateTransition(this, common_1.STATE_CLOSING);
        resetMonitorState(this);
        // close monitor
        this.emit('close');
        stateTransition(this, common_1.STATE_CLOSED);
    }
}
exports.Monitor = Monitor;
function resetMonitorState(monitor) {
    monitor[kMonitorId]?.stop();
    monitor[kMonitorId] = undefined;
    monitor[kRTTPinger]?.close();
    monitor[kRTTPinger] = undefined;
    monitor[kCancellationToken].emit('cancel');
    monitor[kConnection]?.destroy({ force: true });
    monitor[kConnection] = undefined;
}
function checkServer(monitor, callback) {
    let start = (0, utils_1.now)();
    monitor.emit(server_1.Server.SERVER_HEARTBEAT_STARTED, new events_1.ServerHeartbeatStartedEvent(monitor.address));
    function failureHandler(err) {
        monitor[kConnection]?.destroy({ force: true });
        monitor[kConnection] = undefined;
        monitor.emit(server_1.Server.SERVER_HEARTBEAT_FAILED, new events_1.ServerHeartbeatFailedEvent(monitor.address, (0, utils_1.calculateDurationInMs)(start), err));
        const error = !(err instanceof error_1.MongoError) ? new error_1.MongoError(err) : err;
        error.addErrorLabel(error_1.MongoErrorLabel.ResetPool);
        if (error instanceof error_1.MongoNetworkTimeoutError) {
            error.addErrorLabel(error_1.MongoErrorLabel.InterruptInUseConnections);
        }
        monitor.emit('resetServer', error);
        callback(err);
    }
    const connection = monitor[kConnection];
    if (connection && !connection.closed) {
        const { serverApi, helloOk } = connection;
        const connectTimeoutMS = monitor.options.connectTimeoutMS;
        const maxAwaitTimeMS = monitor.options.heartbeatFrequencyMS;
        const topologyVersion = monitor[kServer].description.topologyVersion;
        const isAwaitable = topologyVersion != null;
        const cmd = {
            [serverApi?.version || helloOk ? 'hello' : constants_1.LEGACY_HELLO_COMMAND]: 1,
            ...(isAwaitable && topologyVersion
                ? { maxAwaitTimeMS, topologyVersion: makeTopologyVersion(topologyVersion) }
                : {})
        };
        const options = isAwaitable
            ? {
                socketTimeoutMS: connectTimeoutMS ? connectTimeoutMS + maxAwaitTimeMS : 0,
                exhaustAllowed: true
            }
            : { socketTimeoutMS: connectTimeoutMS };
        if (isAwaitable && monitor[kRTTPinger] == null) {
            monitor[kRTTPinger] = new RTTPinger(monitor[kCancellationToken], Object.assign({ heartbeatFrequencyMS: monitor.options.heartbeatFrequencyMS }, monitor.connectOptions));
        }
        connection.command((0, utils_1.ns)('admin.$cmd'), cmd, options, (err, hello) => {
            if (err) {
                return failureHandler(err);
            }
            if (!('isWritablePrimary' in hello)) {
                // Provide hello-style response document.
                hello.isWritablePrimary = hello[constants_1.LEGACY_HELLO_COMMAND];
            }
            const rttPinger = monitor[kRTTPinger];
            const duration = isAwaitable && rttPinger ? rttPinger.roundTripTime : (0, utils_1.calculateDurationInMs)(start);
            monitor.emit(server_1.Server.SERVER_HEARTBEAT_SUCCEEDED, new events_1.ServerHeartbeatSucceededEvent(monitor.address, duration, hello));
            // if we are using the streaming protocol then we immediately issue another `started`
            // event, otherwise the "check" is complete and return to the main monitor loop
            if (isAwaitable && hello.topologyVersion) {
                monitor.emit(server_1.Server.SERVER_HEARTBEAT_STARTED, new events_1.ServerHeartbeatStartedEvent(monitor.address));
                start = (0, utils_1.now)();
            }
            else {
                monitor[kRTTPinger]?.close();
                monitor[kRTTPinger] = undefined;
                callback(undefined, hello);
            }
        });
        return;
    }
    // connecting does an implicit `hello`
    (0, connect_1.connect)(monitor.connectOptions, (err, conn) => {
        if (err) {
            monitor[kConnection] = undefined;
            failureHandler(err);
            return;
        }
        if (conn) {
            // Tell the connection that we are using the streaming protocol so that the
            // connection's message stream will only read the last hello on the buffer.
            conn.isMonitoringConnection = true;
            if (isInCloseState(monitor)) {
                conn.destroy({ force: true });
                return;
            }
            monitor[kConnection] = conn;
            monitor.emit(server_1.Server.SERVER_HEARTBEAT_SUCCEEDED, new events_1.ServerHeartbeatSucceededEvent(monitor.address, (0, utils_1.calculateDurationInMs)(start), conn.hello));
            callback(undefined, conn.hello);
        }
    });
}
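// A note on checkServer above: when the last hello carried a topologyVersion,
// the monitor switches to the streaming ("awaitable") hello protocol. The
// command gains maxAwaitTimeMS plus the last-seen topologyVersion, the server
// holds its reply until the topology changes, exhaustAllowed lets responses
// stream back, and socketTimeoutMS is padded by maxAwaitTimeMS so a quiet
// stream is not mistaken for a dead socket. Because a streamed reply measures
// server push time rather than a request/response round trip, the separate
// RTTPinger below supplies the roundTripTime instead.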
function monitorServer(monitor) {
    return (callback) => {
        if (monitor.s.state === STATE_MONITORING) {
            process.nextTick(callback);
            return;
        }
        stateTransition(monitor, STATE_MONITORING);
        function done() {
            if (!isInCloseState(monitor)) {
                stateTransition(monitor, STATE_IDLE);
            }
            callback();
        }
        checkServer(monitor, (err, hello) => {
            if (err) {
                // otherwise an error occurred on initial discovery, also bail
                if (monitor[kServer].description.type === common_1.ServerType.Unknown) {
                    return done();
                }
            }
            // if the check indicates streaming is supported, immediately reschedule monitoring
            if (hello && hello.topologyVersion) {
                (0, timers_1.setTimeout)(() => {
                    if (!isInCloseState(monitor)) {
                        monitor[kMonitorId]?.wake();
                    }
                }, 0);
            }
            done();
        });
    };
}
function makeTopologyVersion(tv) {
    return {
        processId: tv.processId,
        // tests mock counter as just number, but in a real situation counter should always be a Long
        // TODO(NODE-2674): Preserve int64 sent from MongoDB
        counter: bson_1.Long.isLong(tv.counter) ? tv.counter : bson_1.Long.fromNumber(tv.counter)
    };
}
/** @internal */
class RTTPinger {
    constructor(cancellationToken, options) {
        this[kConnection] = undefined;
        this[kCancellationToken] = cancellationToken;
        this[kRoundTripTime] = 0;
        this.closed = false;
        const heartbeatFrequencyMS = options.heartbeatFrequencyMS;
        this[kMonitorId] = (0, timers_1.setTimeout)(() => measureRoundTripTime(this, options), heartbeatFrequencyMS);
    }
    get roundTripTime() {
        return this[kRoundTripTime];
    }
    close() {
        this.closed = true;
        (0, timers_1.clearTimeout)(this[kMonitorId]);
        this[kConnection]?.destroy({ force: true });
        this[kConnection] = undefined;
    }
}
exports.RTTPinger = RTTPinger;
function measureRoundTripTime(rttPinger, options) {
    const start = (0, utils_1.now)();
    options.cancellationToken = rttPinger[kCancellationToken];
    const heartbeatFrequencyMS = options.heartbeatFrequencyMS;
    if (rttPinger.closed) {
        return;
    }
    function measureAndReschedule(conn) {
        if (rttPinger.closed) {
            conn?.destroy({ force: true });
            return;
        }
        if (rttPinger[kConnection] == null) {
            rttPinger[kConnection] = conn;
        }
        rttPinger[kRoundTripTime] = (0, utils_1.calculateDurationInMs)(start);
        rttPinger[kMonitorId] = (0, timers_1.setTimeout)(() => measureRoundTripTime(rttPinger, options), heartbeatFrequencyMS);
    }
    const connection = rttPinger[kConnection];
    if (connection == null) {
        (0, connect_1.connect)(options, (err, conn) => {
            if (err) {
                rttPinger[kConnection] = undefined;
                rttPinger[kRoundTripTime] = 0;
                return;
            }
            measureAndReschedule(conn);
        });
        return;
    }
    connection.command((0, utils_1.ns)('admin.$cmd'), { [constants_1.LEGACY_HELLO_COMMAND]: 1 }, undefined, err => {
        if (err) {
            rttPinger[kConnection] = undefined;
            rttPinger[kRoundTripTime] = 0;
            return;
        }
        measureAndReschedule();
    });
}
/**
 * @internal
 */
class MonitorInterval {
    constructor(fn, options = {}) {
        this.isExpeditedCallToFnScheduled = false;
        this.stopped = false;
        this.isExecutionInProgress = false;
        this.hasExecutedOnce = false;
        this._executeAndReschedule = () => {
            if (this.stopped)
                return;
            if (this.timerId) {
                (0, timers_1.clearTimeout)(this.timerId);
            }
            this.isExpeditedCallToFnScheduled = false;
            this.isExecutionInProgress = true;
            this.fn(() => {
                this.lastExecutionEnded = (0, utils_1.now)();
                this.isExecutionInProgress = false;
                this._reschedule(this.heartbeatFrequencyMS);
            });
        };
        this.fn = fn;
        this.lastExecutionEnded = -Infinity;
        this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 1000;
        this.minHeartbeatFrequencyMS = options.minHeartbeatFrequencyMS ?? 500;
        if (options.immediate) {
            this._executeAndReschedule();
        }
        else {
            this._reschedule(undefined);
        }
    }
    wake() {
        const currentTime = (0, utils_1.now)();
        const timeSinceLastCall = currentTime - this.lastExecutionEnded;
        // TODO(NODE-4674): Add error handling and logging to the monitor
        if (timeSinceLastCall < 0) {
            return this._executeAndReschedule();
        }
        if (this.isExecutionInProgress) {
            return;
        }
        // debounce multiple calls to wake within the `minInterval`
        if (this.isExpeditedCallToFnScheduled) {
            return;
        }
        // reschedule a call as soon as possible, ensuring the call never happens
        // faster than the `minInterval`
        if (timeSinceLastCall < this.minHeartbeatFrequencyMS) {
            this.isExpeditedCallToFnScheduled = true;
            this._reschedule(this.minHeartbeatFrequencyMS - timeSinceLastCall);
            return;
        }
        this._executeAndReschedule();
    }
    stop() {
        this.stopped = true;
        if (this.timerId) {
            (0, timers_1.clearTimeout)(this.timerId);
            this.timerId = undefined;
        }
        this.lastExecutionEnded = -Infinity;
        this.isExpeditedCallToFnScheduled = false;
    }
    toString() {
        return JSON.stringify(this);
    }
    toJSON() {
        const currentTime = (0, utils_1.now)();
        const timeSinceLastCall = currentTime - this.lastExecutionEnded;
        return {
            timerId: this.timerId != null ? 'set' : 'cleared',
            lastCallTime: this.lastExecutionEnded,
            isExpeditedCheckScheduled: this.isExpeditedCallToFnScheduled,
            stopped: this.stopped,
            heartbeatFrequencyMS: this.heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: this.minHeartbeatFrequencyMS,
            currentTime,
            timeSinceLastCall
        };
    }
    _reschedule(ms) {
        if (this.stopped)
            return;
        if (this.timerId) {
            (0, timers_1.clearTimeout)(this.timerId);
        }
        this.timerId = (0, timers_1.setTimeout)(this._executeAndReschedule, ms || this.heartbeatFrequencyMS);
    }
}
exports.MonitorInterval = MonitorInterval;
//# sourceMappingURL=monitor.js.map
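// A minimal sketch of MonitorInterval's contract (values illustrative): fn
// receives a callback it must invoke when one check finishes; wake() requests
// an expedited run but is debounced so runs never start sooner than
// minHeartbeatFrequencyMS after the previous run ended.
const interval = new MonitorInterval((done) => setTimeout(done, 25), {
    heartbeatFrequencyMS: 10000,
    minHeartbeatFrequencyMS: 500,
    immediate: true
});
interval.wake(); // expedited, but never faster than the 500ms floor
interval.stop();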
1 node_modules/mongodb/lib/sdam/monitor.js.map (generated, vendored) Normal file
File diff suppressed because one or more lines are too long
381 node_modules/mongodb/lib/sdam/server.js (generated, vendored) Normal file
@@ -0,0 +1,381 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Server = void 0;
const connection_1 = require("../cmap/connection");
const connection_pool_1 = require("../cmap/connection_pool");
const errors_1 = require("../cmap/errors");
const constants_1 = require("../constants");
const error_1 = require("../error");
const mongo_types_1 = require("../mongo_types");
const transactions_1 = require("../transactions");
const utils_1 = require("../utils");
const common_1 = require("./common");
const monitor_1 = require("./monitor");
const server_description_1 = require("./server_description");
const stateTransition = (0, utils_1.makeStateMachine)({
    [common_1.STATE_CLOSED]: [common_1.STATE_CLOSED, common_1.STATE_CONNECTING],
    [common_1.STATE_CONNECTING]: [common_1.STATE_CONNECTING, common_1.STATE_CLOSING, common_1.STATE_CONNECTED, common_1.STATE_CLOSED],
    [common_1.STATE_CONNECTED]: [common_1.STATE_CONNECTED, common_1.STATE_CLOSING, common_1.STATE_CLOSED],
    [common_1.STATE_CLOSING]: [common_1.STATE_CLOSING, common_1.STATE_CLOSED]
});
/** @internal */
const kMonitor = Symbol('monitor');
/** @internal */
class Server extends mongo_types_1.TypedEventEmitter {
    /**
     * Create a server
     */
    constructor(topology, description, options) {
        super();
        this.serverApi = options.serverApi;
        const poolOptions = { hostAddress: description.hostAddress, ...options };
        this.topology = topology;
        this.pool = new connection_pool_1.ConnectionPool(this, poolOptions);
        this.s = {
            description,
            options,
            state: common_1.STATE_CLOSED,
            operationCount: 0
        };
        for (const event of [...constants_1.CMAP_EVENTS, ...constants_1.APM_EVENTS]) {
            this.pool.on(event, (e) => this.emit(event, e));
        }
        this.pool.on(connection_1.Connection.CLUSTER_TIME_RECEIVED, (clusterTime) => {
            this.clusterTime = clusterTime;
        });
        if (this.loadBalanced) {
            this[kMonitor] = null;
            // monitoring is disabled in load balancing mode
            return;
        }
        // create the monitor
        // TODO(NODE-4144): Remove new variable for type narrowing
        const monitor = new monitor_1.Monitor(this, this.s.options);
        this[kMonitor] = monitor;
        for (const event of constants_1.HEARTBEAT_EVENTS) {
            monitor.on(event, (e) => this.emit(event, e));
        }
        monitor.on('resetServer', (error) => markServerUnknown(this, error));
        monitor.on(Server.SERVER_HEARTBEAT_SUCCEEDED, (event) => {
            this.emit(Server.DESCRIPTION_RECEIVED, new server_description_1.ServerDescription(this.description.hostAddress, event.reply, {
                roundTripTime: calculateRoundTripTime(this.description.roundTripTime, event.duration)
            }));
            if (this.s.state === common_1.STATE_CONNECTING) {
                stateTransition(this, common_1.STATE_CONNECTED);
                this.emit(Server.CONNECT, this);
            }
        });
    }
    get clusterTime() {
        return this.topology.clusterTime;
    }
    set clusterTime(clusterTime) {
        this.topology.clusterTime = clusterTime;
    }
    get description() {
        return this.s.description;
    }
    get name() {
        return this.s.description.address;
    }
    get autoEncrypter() {
        if (this.s.options && this.s.options.autoEncrypter) {
            return this.s.options.autoEncrypter;
        }
        return;
    }
    get loadBalanced() {
        return this.topology.description.type === common_1.TopologyType.LoadBalanced;
    }
    /**
     * Initiate server connect
     */
    connect() {
        if (this.s.state !== common_1.STATE_CLOSED) {
            return;
        }
        stateTransition(this, common_1.STATE_CONNECTING);
        // If in load balancer mode we automatically set the server to
        // a load balancer. It never transitions out of this state and
        // has no monitor.
        if (!this.loadBalanced) {
            this[kMonitor]?.connect();
        }
        else {
            stateTransition(this, common_1.STATE_CONNECTED);
            this.emit(Server.CONNECT, this);
        }
    }
    /** Destroy the server connection */
    destroy(options, callback) {
        if (typeof options === 'function') {
            callback = options;
            options = { force: false };
        }
        options = Object.assign({}, { force: false }, options);
        if (this.s.state === common_1.STATE_CLOSED) {
            if (typeof callback === 'function') {
                callback();
            }
            return;
        }
        stateTransition(this, common_1.STATE_CLOSING);
        if (!this.loadBalanced) {
            this[kMonitor]?.close();
        }
        this.pool.close(options, err => {
            stateTransition(this, common_1.STATE_CLOSED);
            this.emit('closed');
            if (typeof callback === 'function') {
                callback(err);
            }
        });
    }
    /**
     * Immediately schedule monitoring of this server. If there is already an attempt being made
     * this will be a no-op.
     */
    requestCheck() {
        if (!this.loadBalanced) {
            this[kMonitor]?.requestCheck();
        }
    }
    /**
     * Execute a command
     * @internal
     */
    command(ns, cmd, options, callback) {
        if (callback == null) {
            throw new error_1.MongoInvalidArgumentError('Callback must be provided');
        }
        if (ns.db == null || typeof ns === 'string') {
            throw new error_1.MongoInvalidArgumentError('Namespace must not be a string');
        }
        if (this.s.state === common_1.STATE_CLOSING || this.s.state === common_1.STATE_CLOSED) {
            callback(new error_1.MongoServerClosedError());
            return;
        }
        // Clone the options
        const finalOptions = Object.assign({}, options, { wireProtocolCommand: false });
        // There are cases where we need to flag the read preference not to get sent in
        // the command, such as pre-5.0 servers attempting to perform an aggregate write
        // with a non-primary read preference. In this case the effective read preference
        // (primary) is not the same as the provided and must be removed completely.
        if (finalOptions.omitReadPreference) {
            delete finalOptions.readPreference;
        }
        const session = finalOptions.session;
        const conn = session?.pinnedConnection;
        // NOTE: This is a hack! We can't retrieve the connections used for executing an operation
        // (and prevent them from being checked back in) at the point of operation execution.
        // This should be considered as part of the work for NODE-2882
        // NOTE:
        // When incrementing operation count, it's important that we increment it before we
        // attempt to check out a connection from the pool. This ensures that operations that
        // are waiting for a connection are included in the operation count. Load balanced
        // mode will only ever have a single server, so the operation count doesn't matter.
        // Incrementing the operation count above the logic to handle load balanced mode would
        // require special logic to decrement it again, or would double increment (the load
        // balanced code makes a recursive call). Instead, we increment the count after this
        // check.
        if (this.loadBalanced && session && conn == null && isPinnableCommand(cmd, session)) {
            this.pool.checkOut((err, checkedOut) => {
                if (err || checkedOut == null) {
                    if (callback)
                        return callback(err);
                    return;
                }
                session.pin(checkedOut);
                this.command(ns, cmd, finalOptions, callback);
            });
            return;
        }
        this.incrementOperationCount();
        this.pool.withConnection(conn, (err, conn, cb) => {
            if (err || !conn) {
                this.decrementOperationCount();
                if (!err) {
                    return cb(new error_1.MongoRuntimeError('Failed to create connection without error'));
                }
                if (!(err instanceof errors_1.PoolClearedError)) {
                    this.handleError(err);
                }
                return cb(err);
            }
            conn.command(ns, cmd, finalOptions, makeOperationHandler(this, conn, cmd, finalOptions, (error, response) => {
                this.decrementOperationCount();
                cb(error, response);
            }));
        }, callback);
    }
    /**
     * Handle SDAM error
     * @internal
     */
    handleError(error, connection) {
        if (!(error instanceof error_1.MongoError)) {
            return;
        }
        const isStaleError = error.connectionGeneration && error.connectionGeneration < this.pool.generation;
        if (isStaleError) {
            return;
        }
        const isNetworkNonTimeoutError = error instanceof error_1.MongoNetworkError && !(error instanceof error_1.MongoNetworkTimeoutError);
        const isNetworkTimeoutBeforeHandshakeError = (0, error_1.isNetworkErrorBeforeHandshake)(error);
        const isAuthHandshakeError = error.hasErrorLabel(error_1.MongoErrorLabel.HandshakeError);
        if (isNetworkNonTimeoutError || isNetworkTimeoutBeforeHandshakeError || isAuthHandshakeError) {
            // In load balanced mode we never mark the server as unknown and always
            // clear for the specific service id.
            if (!this.loadBalanced) {
                error.addErrorLabel(error_1.MongoErrorLabel.ResetPool);
                markServerUnknown(this, error);
            }
            else if (connection) {
                this.pool.clear({ serviceId: connection.serviceId });
            }
        }
        else {
            if ((0, error_1.isSDAMUnrecoverableError)(error)) {
                if (shouldHandleStateChangeError(this, error)) {
                    const shouldClearPool = (0, utils_1.maxWireVersion)(this) <= 7 || (0, error_1.isNodeShuttingDownError)(error);
                    if (this.loadBalanced && connection && shouldClearPool) {
                        this.pool.clear({ serviceId: connection.serviceId });
                    }
                    if (!this.loadBalanced) {
                        if (shouldClearPool) {
                            error.addErrorLabel(error_1.MongoErrorLabel.ResetPool);
                        }
                        markServerUnknown(this, error);
                        process.nextTick(() => this.requestCheck());
                    }
                }
            }
        }
    }
    /**
     * Decrement the operation count, returning the new count.
     */
    decrementOperationCount() {
        return (this.s.operationCount -= 1);
    }
    /**
     * Increment the operation count, returning the new count.
     */
    incrementOperationCount() {
        return (this.s.operationCount += 1);
    }
}
exports.Server = Server;
/** @event */
Server.SERVER_HEARTBEAT_STARTED = constants_1.SERVER_HEARTBEAT_STARTED;
/** @event */
Server.SERVER_HEARTBEAT_SUCCEEDED = constants_1.SERVER_HEARTBEAT_SUCCEEDED;
/** @event */
Server.SERVER_HEARTBEAT_FAILED = constants_1.SERVER_HEARTBEAT_FAILED;
/** @event */
Server.CONNECT = constants_1.CONNECT;
/** @event */
Server.DESCRIPTION_RECEIVED = constants_1.DESCRIPTION_RECEIVED;
/** @event */
Server.CLOSED = constants_1.CLOSED;
/** @event */
Server.ENDED = constants_1.ENDED;
function calculateRoundTripTime(oldRtt, duration) {
    if (oldRtt === -1) {
        return duration;
    }
    const alpha = 0.2;
    return alpha * duration + (1 - alpha) * oldRtt;
}
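// calculateRoundTripTime above is an exponentially weighted moving average
// with alpha = 0.2, so each heartbeat nudges the estimate by 20%:
//   rtt = 0.2 * duration + 0.8 * oldRtt
// e.g. an old estimate of 100ms and a 200ms sample give 0.2*200 + 0.8*100 = 120ms;
// a first sample (oldRtt === -1) is taken as-is.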
function markServerUnknown(server, error) {
    // Load balancer servers can never be marked unknown.
    if (server.loadBalanced) {
        return;
    }
    if (error instanceof error_1.MongoNetworkError && !(error instanceof error_1.MongoNetworkTimeoutError)) {
        server[kMonitor]?.reset();
    }
    server.emit(Server.DESCRIPTION_RECEIVED, new server_description_1.ServerDescription(server.description.hostAddress, undefined, { error }));
}
function isPinnableCommand(cmd, session) {
    if (session) {
        return (session.inTransaction() ||
            'aggregate' in cmd ||
            'find' in cmd ||
            'getMore' in cmd ||
            'listCollections' in cmd ||
            'listIndexes' in cmd);
    }
    return false;
}
function connectionIsStale(pool, connection) {
    if (connection.serviceId) {
        return (connection.generation !== pool.serviceGenerations.get(connection.serviceId.toHexString()));
    }
    return connection.generation !== pool.generation;
}
function shouldHandleStateChangeError(server, err) {
    const etv = err.topologyVersion;
    const stv = server.description.topologyVersion;
    return (0, server_description_1.compareTopologyVersion)(stv, etv) < 0;
}
function inActiveTransaction(session, cmd) {
    return session && session.inTransaction() && !(0, transactions_1.isTransactionCommand)(cmd);
}
/** this checks the retryWrites option passed down from the client options, it
 * does not check if the server supports retryable writes */
function isRetryableWritesEnabled(topology) {
    return topology.s.options.retryWrites !== false;
}
function makeOperationHandler(server, connection, cmd, options, callback) {
    const session = options?.session;
    return function handleOperationResult(error, result) {
        // We should not swallow an error if it is present.
        if (error == null && result != null) {
            return callback(undefined, result);
        }
        if (options != null && 'noResponse' in options && options.noResponse === true) {
            return callback(undefined, null);
        }
        if (!error) {
            return callback(new error_1.MongoUnexpectedServerResponseError('Empty response with no error'));
        }
        if (!(error instanceof error_1.MongoError)) {
            // Node.js or some other error we have no special handling for
            return callback(error);
        }
        if (connectionIsStale(server.pool, connection)) {
            return callback(error);
        }
        if (error instanceof error_1.MongoNetworkError) {
            if (session && !session.hasEnded && session.serverSession) {
                session.serverSession.isDirty = true;
            }
            // inActiveTransaction check handles commit and abort.
            if (inActiveTransaction(session, cmd) &&
                !error.hasErrorLabel(error_1.MongoErrorLabel.TransientTransactionError)) {
                error.addErrorLabel(error_1.MongoErrorLabel.TransientTransactionError);
            }
            if ((isRetryableWritesEnabled(server.topology) || (0, transactions_1.isTransactionCommand)(cmd)) &&
                (0, utils_1.supportsRetryableWrites)(server) &&
                !inActiveTransaction(session, cmd)) {
                error.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
            }
        }
        else {
            if ((isRetryableWritesEnabled(server.topology) || (0, transactions_1.isTransactionCommand)(cmd)) &&
                (0, error_1.needsRetryableWriteLabel)(error, (0, utils_1.maxWireVersion)(server)) &&
                !inActiveTransaction(session, cmd)) {
                error.addErrorLabel(error_1.MongoErrorLabel.RetryableWriteError);
            }
        }
        if (session &&
            session.isPinned &&
            error.hasErrorLabel(error_1.MongoErrorLabel.TransientTransactionError)) {
            session.unpin({ force: true });
        }
        server.handleError(error, connection);
        return callback(error);
    };
}
//# sourceMappingURL=server.js.map
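// Summary of makeOperationHandler above: network errors dirty the session and,
// inside an active (non-commit/abort) transaction, gain the
// TransientTransactionError label; RetryableWriteError is added when retryable
// writes are enabled (or the command is a transaction command), the server
// supports them, and no transaction is active. Non-network server errors get
// RetryableWriteError only when needsRetryableWriteLabel() applies for the
// server's wire version. Every labeled error is then routed through
// server.handleError() before the callback sees it.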
1 node_modules/mongodb/lib/sdam/server.js.map (generated, vendored) Normal file
File diff suppressed because one or more lines are too long
189 node_modules/mongodb/lib/sdam/server_description.js (generated, vendored) Normal file
@@ -0,0 +1,189 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.compareTopologyVersion = exports.parseServerType = exports.ServerDescription = void 0;
const bson_1 = require("../bson");
const error_1 = require("../error");
const utils_1 = require("../utils");
const common_1 = require("./common");
const WRITABLE_SERVER_TYPES = new Set([
    common_1.ServerType.RSPrimary,
    common_1.ServerType.Standalone,
    common_1.ServerType.Mongos,
    common_1.ServerType.LoadBalancer
]);
const DATA_BEARING_SERVER_TYPES = new Set([
    common_1.ServerType.RSPrimary,
    common_1.ServerType.RSSecondary,
    common_1.ServerType.Mongos,
    common_1.ServerType.Standalone,
    common_1.ServerType.LoadBalancer
]);
/**
 * The client's view of a single server, based on the most recent hello outcome.
 *
 * Internal type, not meant to be directly instantiated
 * @public
 */
class ServerDescription {
    /**
     * Create a ServerDescription
     * @internal
     *
     * @param address - The address of the server
     * @param hello - An optional hello response for this server
     */
    constructor(address, hello, options = {}) {
        if (address == null || address === '') {
            throw new error_1.MongoRuntimeError('ServerDescription must be provided with a non-empty address');
        }
        this.address =
            typeof address === 'string'
                ? utils_1.HostAddress.fromString(address).toString() // Use HostAddress to normalize
                : address.toString();
        this.type = parseServerType(hello, options);
        this.hosts = hello?.hosts?.map((host) => host.toLowerCase()) ?? [];
        this.passives = hello?.passives?.map((host) => host.toLowerCase()) ?? [];
        this.arbiters = hello?.arbiters?.map((host) => host.toLowerCase()) ?? [];
        this.tags = hello?.tags ?? {};
        this.minWireVersion = hello?.minWireVersion ?? 0;
        this.maxWireVersion = hello?.maxWireVersion ?? 0;
        this.roundTripTime = options?.roundTripTime ?? -1;
        this.lastUpdateTime = (0, utils_1.now)();
        this.lastWriteDate = hello?.lastWrite?.lastWriteDate ?? 0;
        this.error = options.error ?? null;
        // TODO(NODE-2674): Preserve int64 sent from MongoDB
        this.topologyVersion = this.error?.topologyVersion ?? hello?.topologyVersion ?? null;
        this.setName = hello?.setName ?? null;
        this.setVersion = hello?.setVersion ?? null;
        this.electionId = hello?.electionId ?? null;
        this.logicalSessionTimeoutMinutes = hello?.logicalSessionTimeoutMinutes ?? null;
        this.primary = hello?.primary ?? null;
        this.me = hello?.me?.toLowerCase() ?? null;
        this.$clusterTime = hello?.$clusterTime ?? null;
    }
    get hostAddress() {
        return utils_1.HostAddress.fromString(this.address);
    }
    get allHosts() {
        return this.hosts.concat(this.arbiters).concat(this.passives);
    }
    /** Is this server available for reads */
    get isReadable() {
        return this.type === common_1.ServerType.RSSecondary || this.isWritable;
    }
    /** Is this server data bearing */
    get isDataBearing() {
        return DATA_BEARING_SERVER_TYPES.has(this.type);
    }
    /** Is this server available for writes */
    get isWritable() {
        return WRITABLE_SERVER_TYPES.has(this.type);
    }
    get host() {
        const chopLength = `:${this.port}`.length;
        return this.address.slice(0, -chopLength);
    }
    get port() {
        const port = this.address.split(':').pop();
        return port ? Number.parseInt(port, 10) : 27017;
    }
    /**
     * Determines if another `ServerDescription` is equal to this one per the rules defined
     * in the {@link https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#serverdescription|SDAM spec}
     */
    equals(other) {
        // Despite using the comparator that would determine a nullish topologyVersion as greater than,
        // for equality we should always perform a direct equality comparison
        const topologyVersionsEqual = this.topologyVersion === other?.topologyVersion ||
            compareTopologyVersion(this.topologyVersion, other?.topologyVersion) === 0;
        const electionIdsEqual = this.electionId != null && other?.electionId != null
            ? (0, utils_1.compareObjectId)(this.electionId, other.electionId) === 0
            : this.electionId === other?.electionId;
        return (other != null &&
            (0, utils_1.errorStrictEqual)(this.error, other.error) &&
            this.type === other.type &&
            this.minWireVersion === other.minWireVersion &&
            (0, utils_1.arrayStrictEqual)(this.hosts, other.hosts) &&
            tagsStrictEqual(this.tags, other.tags) &&
            this.setName === other.setName &&
            this.setVersion === other.setVersion &&
            electionIdsEqual &&
            this.primary === other.primary &&
            this.logicalSessionTimeoutMinutes === other.logicalSessionTimeoutMinutes &&
            topologyVersionsEqual);
    }
}
exports.ServerDescription = ServerDescription;
// Parses a `hello` message and determines the server type
function parseServerType(hello, options) {
    if (options?.loadBalanced) {
        return common_1.ServerType.LoadBalancer;
    }
    if (!hello || !hello.ok) {
        return common_1.ServerType.Unknown;
    }
    if (hello.isreplicaset) {
        return common_1.ServerType.RSGhost;
    }
    if (hello.msg && hello.msg === 'isdbgrid') {
        return common_1.ServerType.Mongos;
    }
    if (hello.setName) {
        if (hello.hidden) {
            return common_1.ServerType.RSOther;
        }
        else if (hello.isWritablePrimary) {
            return common_1.ServerType.RSPrimary;
        }
        else if (hello.secondary) {
            return common_1.ServerType.RSSecondary;
        }
        else if (hello.arbiterOnly) {
            return common_1.ServerType.RSArbiter;
        }
        else {
            return common_1.ServerType.RSOther;
        }
    }
    return common_1.ServerType.Standalone;
}
exports.parseServerType = parseServerType;
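// parseServerType keys off hello fields; a few illustrative documents:
parseServerType({ ok: 1, msg: 'isdbgrid' });                         // 'Mongos'
parseServerType({ ok: 1, setName: 'rs0', isWritablePrimary: true }); // 'RSPrimary'
parseServerType({ ok: 1, setName: 'rs0', hidden: true });            // 'RSOther'
parseServerType({ ok: 0 });                                          // 'Unknown'
parseServerType(undefined, { loadBalanced: true });                  // 'LoadBalancer'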
function tagsStrictEqual(tags, tags2) {
    const tagsKeys = Object.keys(tags);
    const tags2Keys = Object.keys(tags2);
    return (tagsKeys.length === tags2Keys.length &&
        tagsKeys.every((key) => tags2[key] === tags[key]));
}
/**
 * Compares two topology versions.
 *
 * 1. If the response topologyVersion is unset or the ServerDescription's
 *    topologyVersion is null, the client MUST assume the response is more recent.
 * 1. If the response's topologyVersion.processId is not equal to the
 *    ServerDescription's, the client MUST assume the response is more recent.
 * 1. If the response's topologyVersion.processId is equal to the
 *    ServerDescription's, the client MUST use the counter field to determine
 *    which topologyVersion is more recent.
 *
 * ```ts
 * currentTv < newTv === -1
 * currentTv === newTv === 0
 * currentTv > newTv === 1
 * ```
 */
function compareTopologyVersion(currentTv, newTv) {
    if (currentTv == null || newTv == null) {
        return -1;
    }
    if (!currentTv.processId.equals(newTv.processId)) {
        return -1;
    }
    // TODO(NODE-2674): Preserve int64 sent from MongoDB
    const currentCounter = bson_1.Long.isLong(currentTv.counter)
        ? currentTv.counter
        : bson_1.Long.fromNumber(currentTv.counter);
    const newCounter = bson_1.Long.isLong(newTv.counter) ? newTv.counter : bson_1.Long.fromNumber(newTv.counter);
    return currentCounter.compare(newCounter);
}
exports.compareTopologyVersion = compareTopologyVersion;
//# sourceMappingURL=server_description.js.map
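// compareTopologyVersion in practice (illustrative values; counters may arrive
// as plain numbers in tests, hence the Long coercion above):
const { Long, ObjectId } = require('bson');
const pid = new ObjectId();
compareTopologyVersion({ processId: pid, counter: 1 }, { processId: pid, counter: Long.fromNumber(2) }); // -1: response newer
compareTopologyVersion(null, { processId: pid, counter: 0 });                                            // -1: unset assumed older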
1 node_modules/mongodb/lib/sdam/server_description.js.map (generated, vendored) Normal file
File diff suppressed because one or more lines are too long
226 node_modules/mongodb/lib/sdam/server_selection.js (generated, vendored) Normal file
@@ -0,0 +1,226 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.readPreferenceServerSelector = exports.secondaryWritableServerSelector = exports.sameServerSelector = exports.writableServerSelector = exports.MIN_SECONDARY_WRITE_WIRE_VERSION = void 0;
const error_1 = require("../error");
const read_preference_1 = require("../read_preference");
const common_1 = require("./common");
// max staleness constants
const IDLE_WRITE_PERIOD = 10000;
const SMALLEST_MAX_STALENESS_SECONDS = 90;
// Minimum version to try writes on secondaries.
exports.MIN_SECONDARY_WRITE_WIRE_VERSION = 13;
/**
 * Returns a server selector that selects for writable servers
 */
function writableServerSelector() {
    return (topologyDescription, servers) => latencyWindowReducer(topologyDescription, servers.filter((s) => s.isWritable));
}
exports.writableServerSelector = writableServerSelector;
/**
 * The purpose of this selector is to select the same server, only
 * if it is in a state that it can have commands sent to it.
 */
function sameServerSelector(description) {
    return (topologyDescription, servers) => {
        if (!description)
            return [];
        // Filter the servers to match the provided description only if
        // the type is not unknown.
        return servers.filter(sd => {
            return sd.address === description.address && sd.type !== common_1.ServerType.Unknown;
        });
    };
}
exports.sameServerSelector = sameServerSelector;
/**
 * Returns a server selector that uses a read preference to select a
 * server potentially for a write on a secondary.
 */
function secondaryWritableServerSelector(wireVersion, readPreference) {
    // If server version < 5.0, read preference always primary.
    // If server version >= 5.0...
    // - If read preference is supplied, use that.
    // - If no read preference is supplied, use primary.
    if (!readPreference ||
        !wireVersion ||
        (wireVersion && wireVersion < exports.MIN_SECONDARY_WRITE_WIRE_VERSION)) {
        return readPreferenceServerSelector(read_preference_1.ReadPreference.primary);
    }
    return readPreferenceServerSelector(readPreference);
}
exports.secondaryWritableServerSelector = secondaryWritableServerSelector;
/**
 * Reduces the passed in array of servers by the rules of the "Max Staleness" specification
 * found here: https://github.com/mongodb/specifications/blob/master/source/max-staleness/max-staleness.rst
 *
 * @param readPreference - The read preference providing max staleness guidance
 * @param topologyDescription - The topology description
 * @param servers - The list of server descriptions to be reduced
 * @returns The list of servers that satisfy the requirements of max staleness
 */
function maxStalenessReducer(readPreference, topologyDescription, servers) {
    if (readPreference.maxStalenessSeconds == null || readPreference.maxStalenessSeconds < 0) {
        return servers;
    }
    const maxStaleness = readPreference.maxStalenessSeconds;
    const maxStalenessVariance = (topologyDescription.heartbeatFrequencyMS + IDLE_WRITE_PERIOD) / 1000;
    if (maxStaleness < maxStalenessVariance) {
        throw new error_1.MongoInvalidArgumentError(`Option "maxStalenessSeconds" must be at least ${maxStalenessVariance} seconds`);
    }
    if (maxStaleness < SMALLEST_MAX_STALENESS_SECONDS) {
        throw new error_1.MongoInvalidArgumentError(`Option "maxStalenessSeconds" must be at least ${SMALLEST_MAX_STALENESS_SECONDS} seconds`);
    }
    if (topologyDescription.type === common_1.TopologyType.ReplicaSetWithPrimary) {
        const primary = Array.from(topologyDescription.servers.values()).filter(primaryFilter)[0];
        return servers.reduce((result, server) => {
            const stalenessMS = server.lastUpdateTime -
                server.lastWriteDate -
                (primary.lastUpdateTime - primary.lastWriteDate) +
                topologyDescription.heartbeatFrequencyMS;
            const staleness = stalenessMS / 1000;
            const maxStalenessSeconds = readPreference.maxStalenessSeconds ?? 0;
            if (staleness <= maxStalenessSeconds) {
                result.push(server);
            }
            return result;
        }, []);
    }
    if (topologyDescription.type === common_1.TopologyType.ReplicaSetNoPrimary) {
        if (servers.length === 0) {
            return servers;
        }
        const sMax = servers.reduce((max, s) => s.lastWriteDate > max.lastWriteDate ? s : max);
        return servers.reduce((result, server) => {
            const stalenessMS = sMax.lastWriteDate - server.lastWriteDate + topologyDescription.heartbeatFrequencyMS;
            const staleness = stalenessMS / 1000;
            const maxStalenessSeconds = readPreference.maxStalenessSeconds ?? 0;
            if (staleness <= maxStalenessSeconds) {
                result.push(server);
            }
            return result;
        }, []);
    }
    return servers;
}
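// Staleness math from the reducer above, for the with-primary case (S is the
// secondary, P the primary, times in ms):
//   staleness = ((S.lastUpdateTime - S.lastWriteDate)
//              - (P.lastUpdateTime - P.lastWriteDate)
//              + heartbeatFrequencyMS) / 1000
// e.g. a secondary lagging 40s behind the primary's write line with a 10s
// heartbeat scores 50s and is dropped under maxStalenessSeconds: 45.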
/**
|
||||
* Determines whether a server's tags match a given set of tags
|
||||
*
|
||||
* @param tagSet - The requested tag set to match
|
||||
* @param serverTags - The server's tags
|
||||
*/
|
||||
function tagSetMatch(tagSet, serverTags) {
|
||||
const keys = Object.keys(tagSet);
|
||||
const serverTagKeys = Object.keys(serverTags);
|
||||
for (let i = 0; i < keys.length; ++i) {
|
||||
const key = keys[i];
|
||||
if (serverTagKeys.indexOf(key) === -1 || serverTags[key] !== tagSet[key]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
/**
 * Reduces a set of server descriptions based on tags requested by the read preference
 *
 * @param readPreference - The read preference providing the requested tags
 * @param servers - The list of server descriptions to reduce
 * @returns The list of servers matching the requested tags
 */
function tagSetReducer(readPreference, servers) {
    if (readPreference.tags == null ||
        (Array.isArray(readPreference.tags) && readPreference.tags.length === 0)) {
        return servers;
    }
    for (let i = 0; i < readPreference.tags.length; ++i) {
        const tagSet = readPreference.tags[i];
        const serversMatchingTagset = servers.reduce((matched, server) => {
            if (tagSetMatch(tagSet, server.tags))
                matched.push(server);
            return matched;
        }, []);
        if (serversMatchingTagset.length) {
            return serversMatchingTagset;
        }
    }
    return [];
}
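// Illustrative sketch (not part of the generated file): tag sets are tried in the order
// given, and the first set with any match wins; an empty tag set {} matches every server.
// Tag values here are placeholders.
function exampleTagSetFallback() {
    const servers = [{ tags: { dc: 'east' } }, { tags: { dc: 'west' } }];
    const readPreference = { tags: [{ dc: 'north' }, { dc: 'west' }] };
    // { dc: 'north' } matches nothing, so the reducer falls through to { dc: 'west' }
    return tagSetReducer(readPreference, servers); // [{ tags: { dc: 'west' } }]
}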
/**
 * Reduces a list of servers to ensure they fall within an acceptable latency window. This is
 * further specified in the "Server Selection" specification, found here:
 * https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst
 *
 * @param topologyDescription - The topology description
 * @param servers - The list of servers to reduce
 * @returns The servers which fall within an acceptable latency window
 */
function latencyWindowReducer(topologyDescription, servers) {
    const low = servers.reduce((min, server) => min === -1 ? server.roundTripTime : Math.min(server.roundTripTime, min), -1);
    const high = low + topologyDescription.localThresholdMS;
    return servers.reduce((result, server) => {
        if (server.roundTripTime <= high && server.roundTripTime >= low)
            result.push(server);
        return result;
    }, []);
}
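// Illustrative sketch (not part of the generated file): with the driver's default
// localThresholdMS of 15 and measured round trip times of 10ms, 20ms and 40ms, the
// acceptable window is [10, 25], leaving only the first two servers eligible.
function exampleLatencyWindow() {
    const topologyDescription = { localThresholdMS: 15 };
    const servers = [{ roundTripTime: 10 }, { roundTripTime: 20 }, { roundTripTime: 40 }];
    return latencyWindowReducer(topologyDescription, servers); // first two servers only
}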
// filters
function primaryFilter(server) {
    return server.type === common_1.ServerType.RSPrimary;
}
function secondaryFilter(server) {
    return server.type === common_1.ServerType.RSSecondary;
}
function nearestFilter(server) {
    return server.type === common_1.ServerType.RSSecondary || server.type === common_1.ServerType.RSPrimary;
}
function knownFilter(server) {
    return server.type !== common_1.ServerType.Unknown;
}
function loadBalancerFilter(server) {
    return server.type === common_1.ServerType.LoadBalancer;
}
/**
 * Returns a function which selects servers based on a provided read preference
 *
 * @param readPreference - The read preference to select with
 */
function readPreferenceServerSelector(readPreference) {
    if (!readPreference.isValid()) {
        throw new error_1.MongoInvalidArgumentError('Invalid read preference specified');
    }
    return (topologyDescription, servers) => {
        const commonWireVersion = topologyDescription.commonWireVersion;
        if (commonWireVersion &&
            readPreference.minWireVersion &&
            readPreference.minWireVersion > commonWireVersion) {
            throw new error_1.MongoCompatibilityError(`Minimum wire version '${readPreference.minWireVersion}' required, but found '${commonWireVersion}'`);
        }
        if (topologyDescription.type === common_1.TopologyType.LoadBalanced) {
            return servers.filter(loadBalancerFilter);
        }
        if (topologyDescription.type === common_1.TopologyType.Unknown) {
            return [];
        }
        if (topologyDescription.type === common_1.TopologyType.Single ||
            topologyDescription.type === common_1.TopologyType.Sharded) {
            return latencyWindowReducer(topologyDescription, servers.filter(knownFilter));
        }
        const mode = readPreference.mode;
        if (mode === read_preference_1.ReadPreference.PRIMARY) {
            return servers.filter(primaryFilter);
        }
        if (mode === read_preference_1.ReadPreference.PRIMARY_PREFERRED) {
            const result = servers.filter(primaryFilter);
            if (result.length) {
                return result;
            }
        }
        const filter = mode === read_preference_1.ReadPreference.NEAREST ? nearestFilter : secondaryFilter;
        const selectedServers = latencyWindowReducer(topologyDescription, tagSetReducer(readPreference, maxStalenessReducer(readPreference, topologyDescription, servers.filter(filter))));
        if (mode === read_preference_1.ReadPreference.SECONDARY_PREFERRED && selectedServers.length === 0) {
            return servers.filter(primaryFilter);
        }
        return selectedServers;
    };
}
exports.readPreferenceServerSelector = readPreferenceServerSelector;
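// Illustrative sketch (not part of the generated file): a selector produced above is a
// pure function of (topologyDescription, serverDescriptions). For secondaryPreferred
// against a replica set it filters secondaries, then applies max staleness, tag sets,
// and the latency window, falling back to the primary when nothing qualifies:
//
//   const selector = readPreferenceServerSelector(read_preference_1.ReadPreference.secondaryPreferred);
//   const eligible = selector(topologyDescription, Array.from(topologyDescription.servers.values()));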
//# sourceMappingURL=server_selection.js.map
1
node_modules/mongodb/lib/sdam/server_selection.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
103
node_modules/mongodb/lib/sdam/srv_polling.js
generated
vendored
Normal file
@@ -0,0 +1,103 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.SrvPoller = exports.SrvPollingEvent = void 0;
const dns = require("dns");
const timers_1 = require("timers");
const error_1 = require("../error");
const mongo_types_1 = require("../mongo_types");
const utils_1 = require("../utils");
/**
 * @internal
 * @category Event
 */
class SrvPollingEvent {
    constructor(srvRecords) {
        this.srvRecords = srvRecords;
    }
    hostnames() {
        return new Set(this.srvRecords.map(r => utils_1.HostAddress.fromSrvRecord(r).toString()));
    }
}
exports.SrvPollingEvent = SrvPollingEvent;
/** @internal */
class SrvPoller extends mongo_types_1.TypedEventEmitter {
    constructor(options) {
        super();
        if (!options || !options.srvHost) {
            throw new error_1.MongoRuntimeError('Options for SrvPoller must exist and include srvHost');
        }
        this.srvHost = options.srvHost;
        this.srvMaxHosts = options.srvMaxHosts ?? 0;
        this.srvServiceName = options.srvServiceName ?? 'mongodb';
        this.rescanSrvIntervalMS = 60000;
        this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 10000;
        this.haMode = false;
        this.generation = 0;
        this._timeout = undefined;
    }
    get srvAddress() {
        return `_${this.srvServiceName}._tcp.${this.srvHost}`;
    }
    get intervalMS() {
        return this.haMode ? this.heartbeatFrequencyMS : this.rescanSrvIntervalMS;
    }
    start() {
        if (!this._timeout) {
            this.schedule();
        }
    }
    stop() {
        if (this._timeout) {
            (0, timers_1.clearTimeout)(this._timeout);
            this.generation += 1;
            this._timeout = undefined;
        }
    }
    // TODO(NODE-4994): implement new logging logic for SrvPoller failures
    schedule() {
        if (this._timeout) {
            (0, timers_1.clearTimeout)(this._timeout);
        }
        this._timeout = (0, timers_1.setTimeout)(() => {
            this._poll().catch(() => null);
        }, this.intervalMS);
    }
    success(srvRecords) {
        this.haMode = false;
        this.schedule();
        this.emit(SrvPoller.SRV_RECORD_DISCOVERY, new SrvPollingEvent(srvRecords));
    }
    failure() {
        this.haMode = true;
        this.schedule();
    }
    async _poll() {
        const generation = this.generation;
        let srvRecords;
        try {
            srvRecords = await dns.promises.resolveSrv(this.srvAddress);
        }
        catch (dnsError) {
            this.failure();
            return;
        }
        if (generation !== this.generation) {
            return;
        }
        const finalAddresses = [];
        for (const record of srvRecords) {
            if ((0, utils_1.matchesParentDomain)(record.name, this.srvHost)) {
                finalAddresses.push(record);
            }
        }
        if (!finalAddresses.length) {
            this.failure();
            return;
        }
        this.success(finalAddresses);
    }
}
exports.SrvPoller = SrvPoller;
/** @event */
SrvPoller.SRV_RECORD_DISCOVERY = 'srvRecordDiscovery';
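// Illustrative sketch (not part of the generated file): the poller resolves SRV records
// for `_mongodb._tcp.<srvHost>` every rescanSrvIntervalMS (60s), tightening to
// heartbeatFrequencyMS while in haMode after a failed or empty lookup. The host name
// below is a placeholder:
//
//   const poller = new SrvPoller({ srvHost: 'cluster0.example.net' });
//   poller.on(SrvPoller.SRV_RECORD_DISCOVERY, ev => console.log([...ev.hostnames()]));
//   poller.start();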
//# sourceMappingURL=srv_polling.js.map
1
node_modules/mongodb/lib/sdam/srv_polling.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"srv_polling.js","sourceRoot":"","sources":["../../src/sdam/srv_polling.ts"],"names":[],"mappings":";;;AAAA,2BAA2B;AAC3B,mCAAkD;AAElD,oCAA6C;AAC7C,gDAAmD;AACnD,oCAA4D;AAE5D;;;GAGG;AACH,MAAa,eAAe;IAE1B,YAAY,UAA2B;QACrC,IAAI,CAAC,UAAU,GAAG,UAAU,CAAC;IAC/B,CAAC;IAED,SAAS;QACP,OAAO,IAAI,GAAG,CAAC,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,CAAC,mBAAW,CAAC,aAAa,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAC,CAAC;IACpF,CAAC;CACF;AATD,0CASC;AAeD,gBAAgB;AAChB,MAAa,SAAU,SAAQ,+BAAkC;IAa/D,YAAY,OAAyB;QACnC,KAAK,EAAE,CAAC;QAER,IAAI,CAAC,OAAO,IAAI,CAAC,OAAO,CAAC,OAAO,EAAE;YAChC,MAAM,IAAI,yBAAiB,CAAC,sDAAsD,CAAC,CAAC;SACrF;QAED,IAAI,CAAC,OAAO,GAAG,OAAO,CAAC,OAAO,CAAC;QAC/B,IAAI,CAAC,WAAW,GAAG,OAAO,CAAC,WAAW,IAAI,CAAC,CAAC;QAC5C,IAAI,CAAC,cAAc,GAAG,OAAO,CAAC,cAAc,IAAI,SAAS,CAAC;QAC1D,IAAI,CAAC,mBAAmB,GAAG,KAAK,CAAC;QACjC,IAAI,CAAC,oBAAoB,GAAG,OAAO,CAAC,oBAAoB,IAAI,KAAK,CAAC;QAElE,IAAI,CAAC,MAAM,GAAG,KAAK,CAAC;QACpB,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC;QAEpB,IAAI,CAAC,QAAQ,GAAG,SAAS,CAAC;IAC5B,CAAC;IAED,IAAI,UAAU;QACZ,OAAO,IAAI,IAAI,CAAC,cAAc,SAAS,IAAI,CAAC,OAAO,EAAE,CAAC;IACxD,CAAC;IAED,IAAI,UAAU;QACZ,OAAO,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,IAAI,CAAC,oBAAoB,CAAC,CAAC,CAAC,IAAI,CAAC,mBAAmB,CAAC;IAC5E,CAAC;IAED,KAAK;QACH,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YAClB,IAAI,CAAC,QAAQ,EAAE,CAAC;SACjB;IACH,CAAC;IAED,IAAI;QACF,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,IAAA,qBAAY,EAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;YAC5B,IAAI,CAAC,UAAU,IAAI,CAAC,CAAC;YACrB,IAAI,CAAC,QAAQ,GAAG,SAAS,CAAC;SAC3B;IACH,CAAC;IAED,sEAAsE;IACtE,QAAQ;QACN,IAAI,IAAI,CAAC,QAAQ,EAAE;YACjB,IAAA,qBAAY,EAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;SAC7B;QAED,IAAI,CAAC,QAAQ,GAAG,IAAA,mBAAU,EAAC,GAAG,EAAE;YAC9B,IAAI,CAAC,KAAK,EAAE,CAAC,KAAK,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,CAAC;QACjC,CAAC,EAAE,IAAI,CAAC,UAAU,CAAC,CAAC;IACtB,CAAC;IAED,OAAO,CAAC,UAA2B;QACjC,IAAI,CAAC,MAAM,GAAG,KAAK,CAAC;QACpB,IAAI,CAAC,QAAQ,EAAE,CAAC;QAChB,IAAI,CAAC,IAAI,CAAC,SAAS,CAAC,oBAAoB,EAAE,IAAI,eAAe,CAAC,UAAU,CAAC,CAAC,CAAC;IAC7E,CAAC;IAED,OAAO;QACL,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC;QACnB,IAAI,CAAC,QAAQ,EAAE,CAAC;IAClB,CAAC;IAED,KAAK,CAAC,KAAK;QACT,MAAM,UAAU,GAAG,IAAI,CAAC,UAAU,CAAC;QACnC,IAAI,UAAU,CAAC;QAEf,IAAI;YACF,UAAU,GAAG,MAAM,GAAG,CAAC,QAAQ,CAAC,UAAU,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;SAC7D;QAAC,OAAO,QAAQ,EAAE;YACjB,IAAI,CAAC,OAAO,EAAE,CAAC;YACf,OAAO;SACR;QAED,IAAI,UAAU,KAAK,IAAI,CAAC,UAAU,EAAE;YAClC,OAAO;SACR;QAED,MAAM,cAAc,GAAoB,EAAE,CAAC;QAC3C,KAAK,MAAM,MAAM,IAAI,UAAU,EAAE;YAC/B,IAAI,IAAA,2BAAmB,EAAC,MAAM,CAAC,IAAI,EAAE,IAAI,CAAC,OAAO,CAAC,EAAE;gBAClD,cAAc,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;aAC7B;SACF;QAED,IAAI,CAAC,cAAc,CAAC,MAAM,EAAE;YAC1B,IAAI,CAAC,OAAO,EAAE,CAAC;YACf,OAAO;SACR;QAED,IAAI,CAAC,OAAO,CAAC,cAAc,CAAC,CAAC;IAC/B,CAAC;;AAxGH,8BAyGC;AA/FC,aAAa;AACG,8BAAoB,GAAG,oBAA6B,CAAC"}
621
node_modules/mongodb/lib/sdam/topology.js
generated
vendored
Normal file
@@ -0,0 +1,621 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ServerCapabilities = exports.Topology = void 0;
const timers_1 = require("timers");
const util_1 = require("util");
const connection_string_1 = require("../connection_string");
const constants_1 = require("../constants");
const error_1 = require("../error");
const mongo_types_1 = require("../mongo_types");
const read_preference_1 = require("../read_preference");
const utils_1 = require("../utils");
const common_1 = require("./common");
const events_1 = require("./events");
const server_1 = require("./server");
const server_description_1 = require("./server_description");
const server_selection_1 = require("./server_selection");
const srv_polling_1 = require("./srv_polling");
const topology_description_1 = require("./topology_description");
// Global state
let globalTopologyCounter = 0;
const stateTransition = (0, utils_1.makeStateMachine)({
    [common_1.STATE_CLOSED]: [common_1.STATE_CLOSED, common_1.STATE_CONNECTING],
    [common_1.STATE_CONNECTING]: [common_1.STATE_CONNECTING, common_1.STATE_CLOSING, common_1.STATE_CONNECTED, common_1.STATE_CLOSED],
    [common_1.STATE_CONNECTED]: [common_1.STATE_CONNECTED, common_1.STATE_CLOSING, common_1.STATE_CLOSED],
    [common_1.STATE_CLOSING]: [common_1.STATE_CLOSING, common_1.STATE_CLOSED]
});
/** @internal */
const kCancelled = Symbol('cancelled');
/** @internal */
const kWaitQueue = Symbol('waitQueue');
/**
 * A container of server instances representing a connection to a MongoDB topology.
 * @internal
 */
class Topology extends mongo_types_1.TypedEventEmitter {
    /**
     * @param seedlist - a list of HostAddress instances to connect to
     */
    constructor(client, seeds, options) {
        super();
        this.client = client;
        this.selectServerAsync = (0, util_1.promisify)((selector, options, callback) => this.selectServer(selector, options, callback));
        // Options should only be undefined in tests, MongoClient will always have defined options
        options = options ?? {
            hosts: [utils_1.HostAddress.fromString('localhost:27017')],
            ...Object.fromEntries(connection_string_1.DEFAULT_OPTIONS.entries()),
            ...Object.fromEntries(connection_string_1.FEATURE_FLAGS.entries())
        };
        if (typeof seeds === 'string') {
            seeds = [utils_1.HostAddress.fromString(seeds)];
        }
        else if (!Array.isArray(seeds)) {
            seeds = [seeds];
        }
        const seedlist = [];
        for (const seed of seeds) {
            if (typeof seed === 'string') {
                seedlist.push(utils_1.HostAddress.fromString(seed));
            }
            else if (seed instanceof utils_1.HostAddress) {
                seedlist.push(seed);
            }
            else {
                // FIXME(NODE-3483): May need to be a MongoParseError
                throw new error_1.MongoRuntimeError(`Topology cannot be constructed from ${JSON.stringify(seed)}`);
            }
        }
        const topologyType = topologyTypeFromOptions(options);
        const topologyId = globalTopologyCounter++;
        const selectedHosts = options.srvMaxHosts == null ||
            options.srvMaxHosts === 0 ||
            options.srvMaxHosts >= seedlist.length
            ? seedlist
            : (0, utils_1.shuffle)(seedlist, options.srvMaxHosts);
        const serverDescriptions = new Map();
        for (const hostAddress of selectedHosts) {
            serverDescriptions.set(hostAddress.toString(), new server_description_1.ServerDescription(hostAddress));
        }
        this[kWaitQueue] = new utils_1.List();
        this.s = {
            // the id of this topology
            id: topologyId,
            // passed in options
            options,
            // initial seedlist of servers to connect to
            seedlist,
            // initial state
            state: common_1.STATE_CLOSED,
            // the topology description
            description: new topology_description_1.TopologyDescription(topologyType, serverDescriptions, options.replicaSet, undefined, undefined, undefined, options),
            serverSelectionTimeoutMS: options.serverSelectionTimeoutMS,
            heartbeatFrequencyMS: options.heartbeatFrequencyMS,
            minHeartbeatFrequencyMS: options.minHeartbeatFrequencyMS,
            // a map of server instances to normalized addresses
            servers: new Map(),
            credentials: options?.credentials,
            clusterTime: undefined,
            // timer management
            connectionTimers: new Set(),
            detectShardedTopology: ev => this.detectShardedTopology(ev),
            detectSrvRecords: ev => this.detectSrvRecords(ev)
        };
        if (options.srvHost && !options.loadBalanced) {
            this.s.srvPoller =
                options.srvPoller ??
                    new srv_polling_1.SrvPoller({
                        heartbeatFrequencyMS: this.s.heartbeatFrequencyMS,
                        srvHost: options.srvHost,
                        srvMaxHosts: options.srvMaxHosts,
                        srvServiceName: options.srvServiceName
                    });
            this.on(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);
        }
    }
    detectShardedTopology(event) {
        const previousType = event.previousDescription.type;
        const newType = event.newDescription.type;
        const transitionToSharded = previousType !== common_1.TopologyType.Sharded && newType === common_1.TopologyType.Sharded;
        const srvListeners = this.s.srvPoller?.listeners(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY);
        const listeningToSrvPolling = !!srvListeners?.includes(this.s.detectSrvRecords);
        if (transitionToSharded && !listeningToSrvPolling) {
            this.s.srvPoller?.on(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
            this.s.srvPoller?.start();
        }
    }
    detectSrvRecords(ev) {
        const previousTopologyDescription = this.s.description;
        this.s.description = this.s.description.updateFromSrvPollingEvent(ev, this.s.options.srvMaxHosts);
        if (this.s.description === previousTopologyDescription) {
            // Nothing changed, so return
            return;
        }
        updateServers(this);
        this.emit(Topology.TOPOLOGY_DESCRIPTION_CHANGED, new events_1.TopologyDescriptionChangedEvent(this.s.id, previousTopologyDescription, this.s.description));
    }
    /**
     * @returns A `TopologyDescription` for this topology
     */
    get description() {
        return this.s.description;
    }
    get loadBalanced() {
        return this.s.options.loadBalanced;
    }
    get capabilities() {
        return new ServerCapabilities(this.lastHello());
    }
    connect(options, callback) {
        if (typeof options === 'function')
            (callback = options), (options = {});
        options = options ?? {};
        if (this.s.state === common_1.STATE_CONNECTED) {
            if (typeof callback === 'function') {
                callback();
            }
            return;
        }
        stateTransition(this, common_1.STATE_CONNECTING);
        // emit SDAM monitoring events
        this.emit(Topology.TOPOLOGY_OPENING, new events_1.TopologyOpeningEvent(this.s.id));
        // emit an event for the topology change
        this.emit(Topology.TOPOLOGY_DESCRIPTION_CHANGED, new events_1.TopologyDescriptionChangedEvent(this.s.id, new topology_description_1.TopologyDescription(common_1.TopologyType.Unknown), // initial is always Unknown
        this.s.description));
        // connect all known servers, then attempt server selection to connect
        const serverDescriptions = Array.from(this.s.description.servers.values());
        this.s.servers = new Map(serverDescriptions.map(serverDescription => [
            serverDescription.address,
            createAndConnectServer(this, serverDescription)
        ]));
        // In load balancer mode we need to fake a server description getting
        // emitted from the monitor, since the monitor doesn't exist.
        if (this.s.options.loadBalanced) {
            for (const description of serverDescriptions) {
                const newDescription = new server_description_1.ServerDescription(description.hostAddress, undefined, {
                    loadBalanced: this.s.options.loadBalanced
                });
                this.serverUpdateHandler(newDescription);
            }
        }
        const exitWithError = (error) => callback ? callback(error) : this.emit(Topology.ERROR, error);
        const readPreference = options.readPreference ?? read_preference_1.ReadPreference.primary;
        this.selectServer((0, server_selection_1.readPreferenceServerSelector)(readPreference), options, (err, server) => {
            if (err) {
                return this.close({ force: false }, () => exitWithError(err));
            }
            // TODO: NODE-2471
            const skipPingOnConnect = this.s.options[Symbol.for('@@mdb.skipPingOnConnect')] === true;
            if (!skipPingOnConnect && server && this.s.credentials) {
                server.command((0, utils_1.ns)('admin.$cmd'), { ping: 1 }, {}, err => {
                    if (err) {
                        return exitWithError(err);
                    }
                    stateTransition(this, common_1.STATE_CONNECTED);
                    this.emit(Topology.OPEN, this);
                    this.emit(Topology.CONNECT, this);
                    callback?.(undefined, this);
                });
                return;
            }
            stateTransition(this, common_1.STATE_CONNECTED);
            this.emit(Topology.OPEN, this);
            this.emit(Topology.CONNECT, this);
            callback?.(undefined, this);
        });
    }
    close(options, callback) {
        options = options ?? { force: false };
        if (this.s.state === common_1.STATE_CLOSED || this.s.state === common_1.STATE_CLOSING) {
            return callback?.();
        }
        const destroyedServers = Array.from(this.s.servers.values(), server => {
            return (0, util_1.promisify)(destroyServer)(server, this, { force: !!options?.force });
        });
        Promise.all(destroyedServers)
            .then(() => {
            this.s.servers.clear();
            stateTransition(this, common_1.STATE_CLOSING);
            drainWaitQueue(this[kWaitQueue], new error_1.MongoTopologyClosedError());
            (0, common_1.drainTimerQueue)(this.s.connectionTimers);
            if (this.s.srvPoller) {
                this.s.srvPoller.stop();
                this.s.srvPoller.removeListener(srv_polling_1.SrvPoller.SRV_RECORD_DISCOVERY, this.s.detectSrvRecords);
            }
            this.removeListener(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology);
            stateTransition(this, common_1.STATE_CLOSED);
            // emit an event for close
            this.emit(Topology.TOPOLOGY_CLOSED, new events_1.TopologyClosedEvent(this.s.id));
        })
            .finally(() => callback?.());
    }
    /**
     * Selects a server according to the selection predicate provided
     *
     * @param selector - An optional selector to select servers by, defaults to a random selection within a latency window
     * @param options - Optional settings related to server selection
     * @param callback - The callback used to indicate success or failure
     * @returns An instance of a `Server` meeting the criteria of the predicate provided
     */
    selectServer(selector, options, callback) {
        let serverSelector;
        if (typeof selector !== 'function') {
            if (typeof selector === 'string') {
                serverSelector = (0, server_selection_1.readPreferenceServerSelector)(read_preference_1.ReadPreference.fromString(selector));
            }
            else {
                let readPreference;
                if (selector instanceof read_preference_1.ReadPreference) {
                    readPreference = selector;
                }
                else {
                    read_preference_1.ReadPreference.translate(options);
                    readPreference = options.readPreference || read_preference_1.ReadPreference.primary;
                }
                serverSelector = (0, server_selection_1.readPreferenceServerSelector)(readPreference);
            }
        }
        else {
            serverSelector = selector;
        }
        options = Object.assign({}, { serverSelectionTimeoutMS: this.s.serverSelectionTimeoutMS }, options);
        const isSharded = this.description.type === common_1.TopologyType.Sharded;
        const session = options.session;
        const transaction = session && session.transaction;
        if (isSharded && transaction && transaction.server) {
            callback(undefined, transaction.server);
            return;
        }
        const waitQueueMember = {
            serverSelector,
            transaction,
            callback
        };
        const serverSelectionTimeoutMS = options.serverSelectionTimeoutMS;
        if (serverSelectionTimeoutMS) {
            waitQueueMember.timer = (0, timers_1.setTimeout)(() => {
                waitQueueMember[kCancelled] = true;
                waitQueueMember.timer = undefined;
                const timeoutError = new error_1.MongoServerSelectionError(`Server selection timed out after ${serverSelectionTimeoutMS} ms`, this.description);
                waitQueueMember.callback(timeoutError);
            }, serverSelectionTimeoutMS);
        }
        this[kWaitQueue].push(waitQueueMember);
        processWaitQueue(this);
    }
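    // Illustrative sketch (not part of the generated file): selectServer accepts a
    // selector function, a ReadPreference instance, or a mode string, and reports the
    // outcome through the callback once a matching server is found or
    // serverSelectionTimeoutMS elapses:
    //
    //   topology.selectServer('secondaryPreferred', { serverSelectionTimeoutMS: 5000 },
    //       (err, server) => { /* server, or a MongoServerSelectionError in err */ });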
    /**
     * Update the internal TopologyDescription with a ServerDescription
     *
     * @param serverDescription - The server to update in the internal list of server descriptions
     */
    serverUpdateHandler(serverDescription) {
        if (!this.s.description.hasServer(serverDescription.address)) {
            return;
        }
        // ignore this server update if it's from an outdated topologyVersion
        if (isStaleServerDescription(this.s.description, serverDescription)) {
            return;
        }
        // these will be used for monitoring events later
        const previousTopologyDescription = this.s.description;
        const previousServerDescription = this.s.description.servers.get(serverDescription.address);
        if (!previousServerDescription) {
            return;
        }
        // Driver Sessions Spec: "Whenever a driver receives a cluster time from
        // a server it MUST compare it to the current highest seen cluster time
        // for the deployment. If the new cluster time is higher than the
        // highest seen cluster time it MUST become the new highest seen cluster
        // time. Two cluster times are compared using only the BsonTimestamp
        // value of the clusterTime embedded field."
        const clusterTime = serverDescription.$clusterTime;
        if (clusterTime) {
            (0, common_1._advanceClusterTime)(this, clusterTime);
        }
        // If we already know all the information contained in this updated description, then
        // we don't need to emit SDAM events, but still need to update the description, in order
        // to keep client-tracked attributes like last update time and round trip time up to date
        const equalDescriptions = previousServerDescription && previousServerDescription.equals(serverDescription);
        // first update the TopologyDescription
        this.s.description = this.s.description.update(serverDescription);
        if (this.s.description.compatibilityError) {
            this.emit(Topology.ERROR, new error_1.MongoCompatibilityError(this.s.description.compatibilityError));
            return;
        }
        // emit monitoring events for this change
        if (!equalDescriptions) {
            const newDescription = this.s.description.servers.get(serverDescription.address);
            if (newDescription) {
                this.emit(Topology.SERVER_DESCRIPTION_CHANGED, new events_1.ServerDescriptionChangedEvent(this.s.id, serverDescription.address, previousServerDescription, newDescription));
            }
        }
        // update server list from updated descriptions
        updateServers(this, serverDescription);
        // attempt to resolve any outstanding server selection attempts
        if (this[kWaitQueue].length > 0) {
            processWaitQueue(this);
        }
        if (!equalDescriptions) {
            this.emit(Topology.TOPOLOGY_DESCRIPTION_CHANGED, new events_1.TopologyDescriptionChangedEvent(this.s.id, previousTopologyDescription, this.s.description));
        }
    }
    auth(credentials, callback) {
        if (typeof credentials === 'function')
            (callback = credentials), (credentials = undefined);
        if (typeof callback === 'function')
            callback(undefined, true);
    }
    get clientMetadata() {
        return this.s.options.metadata;
    }
    isConnected() {
        return this.s.state === common_1.STATE_CONNECTED;
    }
    isDestroyed() {
        return this.s.state === common_1.STATE_CLOSED;
    }
    // NOTE: There are many places in code where we explicitly check the last hello
    // to do feature support detection. This should be done another way, but for
    // now we will just return the first hello seen, which should suffice.
    lastHello() {
        const serverDescriptions = Array.from(this.description.servers.values());
        if (serverDescriptions.length === 0)
            return {};
        const sd = serverDescriptions.filter((sd) => sd.type !== common_1.ServerType.Unknown)[0];
        const result = sd || { maxWireVersion: this.description.commonWireVersion };
        return result;
    }
    get commonWireVersion() {
        return this.description.commonWireVersion;
    }
    get logicalSessionTimeoutMinutes() {
        return this.description.logicalSessionTimeoutMinutes;
    }
    get clusterTime() {
        return this.s.clusterTime;
    }
    set clusterTime(clusterTime) {
        this.s.clusterTime = clusterTime;
    }
}
exports.Topology = Topology;
/** @event */
Topology.SERVER_OPENING = constants_1.SERVER_OPENING;
/** @event */
Topology.SERVER_CLOSED = constants_1.SERVER_CLOSED;
/** @event */
Topology.SERVER_DESCRIPTION_CHANGED = constants_1.SERVER_DESCRIPTION_CHANGED;
/** @event */
Topology.TOPOLOGY_OPENING = constants_1.TOPOLOGY_OPENING;
/** @event */
Topology.TOPOLOGY_CLOSED = constants_1.TOPOLOGY_CLOSED;
/** @event */
Topology.TOPOLOGY_DESCRIPTION_CHANGED = constants_1.TOPOLOGY_DESCRIPTION_CHANGED;
/** @event */
Topology.ERROR = constants_1.ERROR;
/** @event */
Topology.OPEN = constants_1.OPEN;
/** @event */
Topology.CONNECT = constants_1.CONNECT;
/** @event */
Topology.CLOSE = constants_1.CLOSE;
/** @event */
Topology.TIMEOUT = constants_1.TIMEOUT;
/** Destroys a server, and removes all event listeners from the instance */
function destroyServer(server, topology, options, callback) {
    options = options ?? { force: false };
    for (const event of constants_1.LOCAL_SERVER_EVENTS) {
        server.removeAllListeners(event);
    }
    server.destroy(options, () => {
        topology.emit(Topology.SERVER_CLOSED, new events_1.ServerClosedEvent(topology.s.id, server.description.address));
        for (const event of constants_1.SERVER_RELAY_EVENTS) {
            server.removeAllListeners(event);
        }
        if (typeof callback === 'function') {
            callback();
        }
    });
}
/** Predicts the TopologyType from options */
function topologyTypeFromOptions(options) {
    if (options?.directConnection) {
        return common_1.TopologyType.Single;
    }
    if (options?.replicaSet) {
        return common_1.TopologyType.ReplicaSetNoPrimary;
    }
    if (options?.loadBalanced) {
        return common_1.TopologyType.LoadBalanced;
    }
    return common_1.TopologyType.Unknown;
}
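// Illustrative sketch (not part of the generated file): the precedence above is
// directConnection, then replicaSet, then loadBalanced, so combined options still
// start the topology as Single:
//
//   topologyTypeFromOptions({ directConnection: true, replicaSet: 'rs0' }); // 'Single'
//   topologyTypeFromOptions({ replicaSet: 'rs0' }); // 'ReplicaSetNoPrimary'
//   topologyTypeFromOptions({}); // 'Unknown'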
/**
 * Creates new server instances and attempts to connect them
 *
 * @param topology - The topology that this server belongs to
 * @param serverDescription - The description for the server to initialize and connect to
 */
function createAndConnectServer(topology, serverDescription) {
    topology.emit(Topology.SERVER_OPENING, new events_1.ServerOpeningEvent(topology.s.id, serverDescription.address));
    const server = new server_1.Server(topology, serverDescription, topology.s.options);
    for (const event of constants_1.SERVER_RELAY_EVENTS) {
        server.on(event, (e) => topology.emit(event, e));
    }
    server.on(server_1.Server.DESCRIPTION_RECEIVED, description => topology.serverUpdateHandler(description));
    server.connect();
    return server;
}
/**
 * @param topology - Topology to update.
 * @param incomingServerDescription - New server description.
 */
function updateServers(topology, incomingServerDescription) {
    // update the internal server's description
    if (incomingServerDescription && topology.s.servers.has(incomingServerDescription.address)) {
        const server = topology.s.servers.get(incomingServerDescription.address);
        if (server) {
            server.s.description = incomingServerDescription;
            if (incomingServerDescription.error instanceof error_1.MongoError &&
                incomingServerDescription.error.hasErrorLabel(error_1.MongoErrorLabel.ResetPool)) {
                const interruptInUseConnections = incomingServerDescription.error.hasErrorLabel(error_1.MongoErrorLabel.InterruptInUseConnections);
                server.pool.clear({ interruptInUseConnections });
            }
            else if (incomingServerDescription.error == null) {
                const newTopologyType = topology.s.description.type;
                const shouldMarkPoolReady = incomingServerDescription.isDataBearing ||
                    (incomingServerDescription.type !== common_1.ServerType.Unknown &&
                        newTopologyType === common_1.TopologyType.Single);
                if (shouldMarkPoolReady) {
                    server.pool.ready();
                }
            }
        }
    }
    // add new servers for all descriptions we currently don't know about locally
    for (const serverDescription of topology.description.servers.values()) {
        if (!topology.s.servers.has(serverDescription.address)) {
            const server = createAndConnectServer(topology, serverDescription);
            topology.s.servers.set(serverDescription.address, server);
        }
    }
    // for all servers no longer known, remove their descriptions and destroy their instances
    for (const entry of topology.s.servers) {
        const serverAddress = entry[0];
        if (topology.description.hasServer(serverAddress)) {
            continue;
        }
        if (!topology.s.servers.has(serverAddress)) {
            continue;
        }
        const server = topology.s.servers.get(serverAddress);
        topology.s.servers.delete(serverAddress);
        // prepare server for garbage collection
        if (server) {
            destroyServer(server, topology);
        }
    }
}
function drainWaitQueue(queue, err) {
    while (queue.length) {
        const waitQueueMember = queue.shift();
        if (!waitQueueMember) {
            continue;
        }
        if (waitQueueMember.timer) {
            (0, timers_1.clearTimeout)(waitQueueMember.timer);
        }
        if (!waitQueueMember[kCancelled]) {
            waitQueueMember.callback(err);
        }
    }
}
function processWaitQueue(topology) {
    if (topology.s.state === common_1.STATE_CLOSED) {
        drainWaitQueue(topology[kWaitQueue], new error_1.MongoTopologyClosedError());
        return;
    }
    const isSharded = topology.description.type === common_1.TopologyType.Sharded;
    const serverDescriptions = Array.from(topology.description.servers.values());
    const membersToProcess = topology[kWaitQueue].length;
    for (let i = 0; i < membersToProcess; ++i) {
        const waitQueueMember = topology[kWaitQueue].shift();
        if (!waitQueueMember) {
            continue;
        }
        if (waitQueueMember[kCancelled]) {
            continue;
        }
        let selectedDescriptions;
        try {
            const serverSelector = waitQueueMember.serverSelector;
            selectedDescriptions = serverSelector
                ? serverSelector(topology.description, serverDescriptions)
                : serverDescriptions;
        }
        catch (e) {
            if (waitQueueMember.timer) {
                (0, timers_1.clearTimeout)(waitQueueMember.timer);
            }
            waitQueueMember.callback(e);
            continue;
        }
        let selectedServer;
        if (selectedDescriptions.length === 0) {
            topology[kWaitQueue].push(waitQueueMember);
            continue;
        }
        else if (selectedDescriptions.length === 1) {
            selectedServer = topology.s.servers.get(selectedDescriptions[0].address);
        }
        else {
            const descriptions = (0, utils_1.shuffle)(selectedDescriptions, 2);
            const server1 = topology.s.servers.get(descriptions[0].address);
            const server2 = topology.s.servers.get(descriptions[1].address);
            selectedServer =
                server1 && server2 && server1.s.operationCount < server2.s.operationCount
                    ? server1
                    : server2;
        }
        if (!selectedServer) {
            waitQueueMember.callback(new error_1.MongoServerSelectionError('server selection returned a server description but the server was not found in the topology', topology.description));
            return;
        }
        const transaction = waitQueueMember.transaction;
        if (isSharded && transaction && transaction.isActive && selectedServer) {
            transaction.pinServer(selectedServer);
        }
        if (waitQueueMember.timer) {
            (0, timers_1.clearTimeout)(waitQueueMember.timer);
        }
        waitQueueMember.callback(undefined, selectedServer);
    }
    if (topology[kWaitQueue].length > 0) {
        // ensure all server monitors attempt monitoring soon
        for (const [, server] of topology.s.servers) {
            process.nextTick(function scheduleServerCheck() {
                return server.requestCheck();
            });
        }
    }
}
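// Illustrative sketch (not part of the generated file): when several descriptions survive
// the selector, the branch above applies the "power of two random choices" strategy:
// sample two candidates at random and take the one with fewer in-flight operations.
// A standalone rendering of that tie-break, assuming at least two candidates:
function exampleTwoRandomChoices(candidates) {
    // `candidates` is a hypothetical array of { address, operationCount } objects;
    // a biased toy shuffle stands in for the driver's utils shuffle.
    const shuffled = candidates.slice().sort(() => Math.random() - 0.5);
    const [a, b] = shuffled;
    return a.operationCount < b.operationCount ? a : b;
}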
function isStaleServerDescription(topologyDescription, incomingServerDescription) {
    const currentServerDescription = topologyDescription.servers.get(incomingServerDescription.address);
    const currentTopologyVersion = currentServerDescription?.topologyVersion;
    return ((0, server_description_1.compareTopologyVersion)(currentTopologyVersion, incomingServerDescription.topologyVersion) > 0);
}
/** @public */
class ServerCapabilities {
    constructor(hello) {
        this.minWireVersion = hello.minWireVersion || 0;
        this.maxWireVersion = hello.maxWireVersion || 0;
    }
    get hasAggregationCursor() {
        return this.maxWireVersion >= 1;
    }
    get hasWriteCommands() {
        return this.maxWireVersion >= 2;
    }
    get hasTextSearch() {
        return this.minWireVersion >= 0;
    }
    get hasAuthCommands() {
        return this.maxWireVersion >= 1;
    }
    get hasListCollectionsCommand() {
        return this.maxWireVersion >= 3;
    }
    get hasListIndexesCommand() {
        return this.maxWireVersion >= 3;
    }
    get supportsSnapshotReads() {
        return this.maxWireVersion >= 13;
    }
    get commandsTakeWriteConcern() {
        return this.maxWireVersion >= 5;
    }
    get commandsTakeCollation() {
        return this.maxWireVersion >= 5;
    }
}
exports.ServerCapabilities = ServerCapabilities;
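// Illustrative sketch (not part of the generated file): capabilities derive purely from
// the wire version range in a hello response, so a server advertising maxWireVersion 13
// (MongoDB 5.0) reports snapshot-read support:
//
//   const caps = new ServerCapabilities({ minWireVersion: 0, maxWireVersion: 13 });
//   caps.supportsSnapshotReads; // true
//   caps.commandsTakeCollation; // true (maxWireVersion >= 5)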
//# sourceMappingURL=topology.js.map
1
node_modules/mongodb/lib/sdam/topology.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
360
node_modules/mongodb/lib/sdam/topology_description.js
generated
vendored
Normal file
@@ -0,0 +1,360 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TopologyDescription = void 0;
const WIRE_CONSTANTS = require("../cmap/wire_protocol/constants");
const error_1 = require("../error");
const utils_1 = require("../utils");
const common_1 = require("./common");
const server_description_1 = require("./server_description");
// constants related to compatibility checks
const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION;
const MAX_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_SERVER_VERSION;
const MIN_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_WIRE_VERSION;
const MAX_SUPPORTED_WIRE_VERSION = WIRE_CONSTANTS.MAX_SUPPORTED_WIRE_VERSION;
const MONGOS_OR_UNKNOWN = new Set([common_1.ServerType.Mongos, common_1.ServerType.Unknown]);
const MONGOS_OR_STANDALONE = new Set([common_1.ServerType.Mongos, common_1.ServerType.Standalone]);
const NON_PRIMARY_RS_MEMBERS = new Set([
    common_1.ServerType.RSSecondary,
    common_1.ServerType.RSArbiter,
    common_1.ServerType.RSOther
]);
/**
 * Representation of a deployment of servers
 * @public
 */
class TopologyDescription {
    /**
     * Create a TopologyDescription
     */
    constructor(topologyType, serverDescriptions = null, setName = null, maxSetVersion = null, maxElectionId = null, commonWireVersion = null, options = null) {
        options = options ?? {};
        this.type = topologyType ?? common_1.TopologyType.Unknown;
        this.servers = serverDescriptions ?? new Map();
        this.stale = false;
        this.compatible = true;
        this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 0;
        this.localThresholdMS = options.localThresholdMS ?? 15;
        this.setName = setName ?? null;
        this.maxElectionId = maxElectionId ?? null;
        this.maxSetVersion = maxSetVersion ?? null;
        this.commonWireVersion = commonWireVersion ?? 0;
        // determine server compatibility
        for (const serverDescription of this.servers.values()) {
            // Load balancer mode is always compatible.
            if (serverDescription.type === common_1.ServerType.Unknown ||
                serverDescription.type === common_1.ServerType.LoadBalancer) {
                continue;
            }
            if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) {
                this.compatible = false;
                this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${serverDescription.minWireVersion}, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
            }
            if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) {
                this.compatible = false;
                this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${serverDescription.maxWireVersion}, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`;
                break;
            }
        }
        // Whenever a client updates the TopologyDescription from a hello response, it MUST set
        // TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes
        // value among ServerDescriptions of all data-bearing server types. If any have a null
        // logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be
        // set to null.
        this.logicalSessionTimeoutMinutes = null;
        for (const [, server] of this.servers) {
            if (server.isReadable) {
                if (server.logicalSessionTimeoutMinutes == null) {
                    // If any of the servers have a null logicalSessionsTimeout, then the whole topology does
                    this.logicalSessionTimeoutMinutes = null;
                    break;
                }
                if (this.logicalSessionTimeoutMinutes == null) {
                    // First server with a non null logicalSessionsTimeout
                    this.logicalSessionTimeoutMinutes = server.logicalSessionTimeoutMinutes;
                    continue;
                }
                // Always select the smaller of the
                // current server logicalSessionsTimeout and the topology's logicalSessionsTimeout
                this.logicalSessionTimeoutMinutes = Math.min(this.logicalSessionTimeoutMinutes, server.logicalSessionTimeoutMinutes);
            }
        }
    }
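    // Illustrative sketch (not part of the generated file): the loop above takes the
    // minimum logicalSessionTimeoutMinutes across readable servers, with null winning
    // outright (hypothetical values):
    //
    //   servers reporting 30, 20, 25   ->  logicalSessionTimeoutMinutes === 20
    //   servers reporting 30, null, 25 ->  logicalSessionTimeoutMinutes === null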
    /**
     * Returns a new TopologyDescription based on the SrvPollingEvent
     * @internal
     */
    updateFromSrvPollingEvent(ev, srvMaxHosts = 0) {
        /** The SRV addresses define the set of addresses we should be using */
        const incomingHostnames = ev.hostnames();
        const currentHostnames = new Set(this.servers.keys());
        const hostnamesToAdd = new Set(incomingHostnames);
        const hostnamesToRemove = new Set();
        for (const hostname of currentHostnames) {
            // filter hostnamesToAdd (made from incomingHostnames) down to what is *not* present in currentHostnames
            hostnamesToAdd.delete(hostname);
            if (!incomingHostnames.has(hostname)) {
                // If the SRV Records no longer include this hostname
                // we have to stop using it
                hostnamesToRemove.add(hostname);
            }
        }
        if (hostnamesToAdd.size === 0 && hostnamesToRemove.size === 0) {
            // No new hosts to add and none to remove
            return this;
        }
        const serverDescriptions = new Map(this.servers);
        for (const removedHost of hostnamesToRemove) {
            serverDescriptions.delete(removedHost);
        }
        if (hostnamesToAdd.size > 0) {
            if (srvMaxHosts === 0) {
                // Add all!
                for (const hostToAdd of hostnamesToAdd) {
                    serverDescriptions.set(hostToAdd, new server_description_1.ServerDescription(hostToAdd));
                }
            }
            else if (serverDescriptions.size < srvMaxHosts) {
                // Add only the amount needed to get us back to srvMaxHosts
                const selectedHosts = (0, utils_1.shuffle)(hostnamesToAdd, srvMaxHosts - serverDescriptions.size);
                for (const selectedHostToAdd of selectedHosts) {
                    serverDescriptions.set(selectedHostToAdd, new server_description_1.ServerDescription(selectedHostToAdd));
                }
            }
        }
        return new TopologyDescription(this.type, serverDescriptions, this.setName, this.maxSetVersion, this.maxElectionId, this.commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
    }
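    // Illustrative sketch (not part of the generated file): with current servers {a, b}
    // and SRV records now resolving to {b, c}, the set arithmetic above yields
    // hostnamesToAdd = {c} and hostnamesToRemove = {a}, so the returned description keeps
    // b, drops a, and adds c, subject to the srvMaxHosts cap. Host names are placeholders.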
    /**
     * Returns a copy of this description updated with a given ServerDescription
     * @internal
     */
    update(serverDescription) {
        const address = serverDescription.address;
        // potentially mutated values
        let { type: topologyType, setName, maxSetVersion, maxElectionId, commonWireVersion } = this;
        const serverType = serverDescription.type;
        const serverDescriptions = new Map(this.servers);
        // update common wire version
        if (serverDescription.maxWireVersion !== 0) {
            if (commonWireVersion == null) {
                commonWireVersion = serverDescription.maxWireVersion;
            }
            else {
                commonWireVersion = Math.min(commonWireVersion, serverDescription.maxWireVersion);
            }
        }
        if (typeof serverDescription.setName === 'string' &&
            typeof setName === 'string' &&
            serverDescription.setName !== setName) {
            if (topologyType === common_1.TopologyType.Single) {
                // "Single" Topology with setName mismatch is direct connection usage, mark unknown, do not remove
                serverDescription = new server_description_1.ServerDescription(address);
            }
            else {
                serverDescriptions.delete(address);
            }
        }
        // update the actual server description
        serverDescriptions.set(address, serverDescription);
        if (topologyType === common_1.TopologyType.Single) {
            // once we are defined as single, that never changes
            return new TopologyDescription(common_1.TopologyType.Single, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
        }
        if (topologyType === common_1.TopologyType.Unknown) {
            if (serverType === common_1.ServerType.Standalone && this.servers.size !== 1) {
                serverDescriptions.delete(address);
            }
            else {
                topologyType = topologyTypeForServerType(serverType);
            }
        }
        if (topologyType === common_1.TopologyType.Sharded) {
            if (!MONGOS_OR_UNKNOWN.has(serverType)) {
                serverDescriptions.delete(address);
            }
        }
        if (topologyType === common_1.TopologyType.ReplicaSetNoPrimary) {
            if (MONGOS_OR_STANDALONE.has(serverType)) {
                serverDescriptions.delete(address);
            }
            if (serverType === common_1.ServerType.RSPrimary) {
                const result = updateRsFromPrimary(serverDescriptions, serverDescription, setName, maxSetVersion, maxElectionId);
                topologyType = result[0];
                setName = result[1];
                maxSetVersion = result[2];
                maxElectionId = result[3];
            }
            else if (NON_PRIMARY_RS_MEMBERS.has(serverType)) {
                const result = updateRsNoPrimaryFromMember(serverDescriptions, serverDescription, setName);
                topologyType = result[0];
                setName = result[1];
            }
        }
        if (topologyType === common_1.TopologyType.ReplicaSetWithPrimary) {
            if (MONGOS_OR_STANDALONE.has(serverType)) {
                serverDescriptions.delete(address);
                topologyType = checkHasPrimary(serverDescriptions);
            }
            else if (serverType === common_1.ServerType.RSPrimary) {
                const result = updateRsFromPrimary(serverDescriptions, serverDescription, setName, maxSetVersion, maxElectionId);
                topologyType = result[0];
                setName = result[1];
                maxSetVersion = result[2];
                maxElectionId = result[3];
            }
            else if (NON_PRIMARY_RS_MEMBERS.has(serverType)) {
                topologyType = updateRsWithPrimaryFromMember(serverDescriptions, serverDescription, setName);
            }
            else {
                topologyType = checkHasPrimary(serverDescriptions);
            }
        }
        return new TopologyDescription(topologyType, serverDescriptions, setName, maxSetVersion, maxElectionId, commonWireVersion, { heartbeatFrequencyMS: this.heartbeatFrequencyMS, localThresholdMS: this.localThresholdMS });
    }
    get error() {
        const descriptionsWithError = Array.from(this.servers.values()).filter((sd) => sd.error);
        if (descriptionsWithError.length > 0) {
            return descriptionsWithError[0].error;
        }
        return null;
    }
    /**
     * Determines if the topology description has any known servers
     */
    get hasKnownServers() {
        return Array.from(this.servers.values()).some((sd) => sd.type !== common_1.ServerType.Unknown);
    }
    /**
     * Determines if this topology description has a data-bearing server available.
     */
    get hasDataBearingServers() {
        return Array.from(this.servers.values()).some((sd) => sd.isDataBearing);
    }
    /**
     * Determines if the topology has a definition for the provided address
     * @internal
     */
    hasServer(address) {
        return this.servers.has(address);
    }
}
exports.TopologyDescription = TopologyDescription;
function topologyTypeForServerType(serverType) {
    switch (serverType) {
        case common_1.ServerType.Standalone:
            return common_1.TopologyType.Single;
        case common_1.ServerType.Mongos:
            return common_1.TopologyType.Sharded;
        case common_1.ServerType.RSPrimary:
            return common_1.TopologyType.ReplicaSetWithPrimary;
        case common_1.ServerType.RSOther:
        case common_1.ServerType.RSSecondary:
            return common_1.TopologyType.ReplicaSetNoPrimary;
        default:
            return common_1.TopologyType.Unknown;
    }
}
function updateRsFromPrimary(serverDescriptions, serverDescription, setName = null, maxSetVersion = null, maxElectionId = null) {
    setName = setName || serverDescription.setName;
    if (setName !== serverDescription.setName) {
        serverDescriptions.delete(serverDescription.address);
        return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
    }
    if (serverDescription.maxWireVersion >= 17) {
        const electionIdComparison = (0, utils_1.compareObjectId)(maxElectionId, serverDescription.electionId);
        const maxElectionIdIsEqual = electionIdComparison === 0;
        const maxElectionIdIsLess = electionIdComparison === -1;
        const maxSetVersionIsLessOrEqual = (maxSetVersion ?? -1) <= (serverDescription.setVersion ?? -1);
        if (maxElectionIdIsLess || (maxElectionIdIsEqual && maxSetVersionIsLessOrEqual)) {
            // The reported electionId was greater
            // or the electionId was equal and reported setVersion was greater
            // Always update both values, they are a tuple
            maxElectionId = serverDescription.electionId;
            maxSetVersion = serverDescription.setVersion;
        }
        else {
            // Stale primary
            // replace serverDescription with a default ServerDescription of type "Unknown"
            serverDescriptions.set(serverDescription.address, new server_description_1.ServerDescription(serverDescription.address));
            return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
        }
    }
    else {
        const electionId = serverDescription.electionId ? serverDescription.electionId : null;
        if (serverDescription.setVersion && electionId) {
            if (maxSetVersion && maxElectionId) {
                if (maxSetVersion > serverDescription.setVersion ||
                    (0, utils_1.compareObjectId)(maxElectionId, electionId) > 0) {
                    // this primary is stale, we must remove it
                    serverDescriptions.set(serverDescription.address, new server_description_1.ServerDescription(serverDescription.address));
                    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
                }
            }
            maxElectionId = serverDescription.electionId;
        }
        if (serverDescription.setVersion != null &&
            (maxSetVersion == null || serverDescription.setVersion > maxSetVersion)) {
            maxSetVersion = serverDescription.setVersion;
        }
    }
    // We've heard from the primary. Is it the same primary as before?
    for (const [address, server] of serverDescriptions) {
        if (server.type === common_1.ServerType.RSPrimary && server.address !== serverDescription.address) {
            // Reset old primary's type to Unknown.
            serverDescriptions.set(address, new server_description_1.ServerDescription(server.address));
            // There can only be one primary
            break;
        }
    }
    // Discover new hosts from this primary's response.
    serverDescription.allHosts.forEach((address) => {
        if (!serverDescriptions.has(address)) {
            serverDescriptions.set(address, new server_description_1.ServerDescription(address));
        }
    });
    // Remove hosts not in the response.
    const currentAddresses = Array.from(serverDescriptions.keys());
    const responseAddresses = serverDescription.allHosts;
    currentAddresses
        .filter((addr) => responseAddresses.indexOf(addr) === -1)
        .forEach((address) => {
        serverDescriptions.delete(address);
    });
    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
}
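// Illustrative sketch (not part of the generated file): for maxWireVersion >= 17
// (MongoDB 6.0+) the (electionId, setVersion) pair is compared as a tuple, electionId
// first, so a claimed primary with an older electionId is stale even if its setVersion
// is higher (hypothetical values):
//
//   known max: electionId ...02, setVersion 1
//   incoming:  electionId ...01, setVersion 5  ->  stale; reset to Unknown
//   incoming:  electionId ...02, setVersion 1  ->  accepted; tuple updated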
function updateRsWithPrimaryFromMember(serverDescriptions, serverDescription, setName = null) {
    if (setName == null) {
        // TODO(NODE-3483): should be an appropriate runtime error
        throw new error_1.MongoRuntimeError('Argument "setName" is required if connected to a replica set');
    }
    if (setName !== serverDescription.setName ||
        (serverDescription.me && serverDescription.address !== serverDescription.me)) {
        serverDescriptions.delete(serverDescription.address);
    }
    return checkHasPrimary(serverDescriptions);
}
function updateRsNoPrimaryFromMember(serverDescriptions, serverDescription, setName = null) {
    const topologyType = common_1.TopologyType.ReplicaSetNoPrimary;
    setName = setName ?? serverDescription.setName;
    if (setName !== serverDescription.setName) {
        serverDescriptions.delete(serverDescription.address);
        return [topologyType, setName];
    }
    serverDescription.allHosts.forEach((address) => {
        if (!serverDescriptions.has(address)) {
            serverDescriptions.set(address, new server_description_1.ServerDescription(address));
        }
    });
    if (serverDescription.me && serverDescription.address !== serverDescription.me) {
        serverDescriptions.delete(serverDescription.address);
    }
    return [topologyType, setName];
}
function checkHasPrimary(serverDescriptions) {
    for (const serverDescription of serverDescriptions.values()) {
        if (serverDescription.type === common_1.ServerType.RSPrimary) {
            return common_1.TopologyType.ReplicaSetWithPrimary;
        }
    }
    return common_1.TopologyType.ReplicaSetNoPrimary;
}
//# sourceMappingURL=topology_description.js.map
1
node_modules/mongodb/lib/sdam/topology_description.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long