First commit
895 node_modules/mongodb/lib/bulk/common.js (generated, vendored, normal file)
@@ -0,0 +1,895 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.BulkOperationBase = exports.FindOperators = exports.MongoBulkWriteError = exports.mergeBatchResults = exports.WriteError = exports.WriteConcernError = exports.BulkWriteResult = exports.Batch = exports.BatchType = void 0;
const bson_1 = require("../bson");
const error_1 = require("../error");
const delete_1 = require("../operations/delete");
const execute_operation_1 = require("../operations/execute_operation");
const insert_1 = require("../operations/insert");
const operation_1 = require("../operations/operation");
const update_1 = require("../operations/update");
const utils_1 = require("../utils");
const write_concern_1 = require("../write_concern");
/** @internal */
const kServerError = Symbol('serverError');
/** @public */
exports.BatchType = Object.freeze({
    INSERT: 1,
    UPDATE: 2,
    DELETE: 3
});
/**
 * Keeps the state of an unordered batch so we can rewrite the results
 * correctly after command execution
 *
 * @public
 */
class Batch {
    constructor(batchType, originalZeroIndex) {
        this.originalZeroIndex = originalZeroIndex;
        this.currentIndex = 0;
        this.originalIndexes = [];
        this.batchType = batchType;
        this.operations = [];
        this.size = 0;
        this.sizeBytes = 0;
    }
}
exports.Batch = Batch;
/**
 * @public
 * The result of a bulk write.
 */
class BulkWriteResult {
    static generateIdMap(ids) {
        const idMap = {};
        for (const doc of ids) {
            idMap[doc.index] = doc._id;
        }
        return idMap;
    }
    /**
     * Create a new BulkWriteResult instance
     * @internal
     */
    constructor(bulkResult) {
        this.result = bulkResult;
        this.insertedCount = this.result.nInserted ?? 0;
        this.matchedCount = this.result.nMatched ?? 0;
        this.modifiedCount = this.result.nModified ?? 0;
        this.deletedCount = this.result.nRemoved ?? 0;
        this.upsertedCount = this.result.upserted.length ?? 0;
        this.upsertedIds = BulkWriteResult.generateIdMap(this.result.upserted);
        this.insertedIds = BulkWriteResult.generateIdMap(this.result.insertedIds);
        Object.defineProperty(this, 'result', { value: this.result, enumerable: false });
    }
    /** Evaluates to true if the bulk operation correctly executes */
    get ok() {
        return this.result.ok;
    }
    /**
     * The number of inserted documents
     * @deprecated Use insertedCount instead.
     */
    get nInserted() {
        return this.result.nInserted;
    }
    /**
     * Number of upserted documents
     * @deprecated Use upsertedCount instead.
     */
    get nUpserted() {
        return this.result.nUpserted;
    }
    /**
     * Number of matched documents
     * @deprecated Use matchedCount instead.
     */
    get nMatched() {
        return this.result.nMatched;
    }
    /**
     * Number of documents updated physically on disk
     * @deprecated Use modifiedCount instead.
     */
    get nModified() {
        return this.result.nModified;
    }
    /**
     * Number of removed documents
     * @deprecated Use deletedCount instead.
     */
    get nRemoved() {
        return this.result.nRemoved;
    }
    /**
     * Returns an array of all inserted ids
     * @deprecated Use insertedIds instead.
     */
    getInsertedIds() {
        return this.result.insertedIds;
    }
    /**
     * Returns an array of all upserted ids
     * @deprecated Use upsertedIds instead.
     */
    getUpsertedIds() {
        return this.result.upserted;
    }
    /** Returns the upserted id at the given index */
    getUpsertedIdAt(index) {
        return this.result.upserted[index];
    }
    /** Returns raw internal result */
    getRawResponse() {
        return this.result;
    }
    /** Returns true if the bulk operation contains a write error */
    hasWriteErrors() {
        return this.result.writeErrors.length > 0;
    }
    /** Returns the number of write errors in the bulk operation */
    getWriteErrorCount() {
        return this.result.writeErrors.length;
    }
    /** Returns a specific write error object */
    getWriteErrorAt(index) {
        return index < this.result.writeErrors.length ? this.result.writeErrors[index] : undefined;
    }
    /** Retrieve all write errors */
    getWriteErrors() {
        return this.result.writeErrors;
    }
    /** Retrieve the write concern error if one exists */
    getWriteConcernError() {
        if (this.result.writeConcernErrors.length === 0) {
            return;
        }
        else if (this.result.writeConcernErrors.length === 1) {
            // Return the error
            return this.result.writeConcernErrors[0];
        }
        else {
            // Combine the errors
            let errmsg = '';
            for (let i = 0; i < this.result.writeConcernErrors.length; i++) {
                const err = this.result.writeConcernErrors[i];
                errmsg = errmsg + err.errmsg;
                // TODO: Something better
                if (i === 0)
                    errmsg = errmsg + ' and ';
            }
            return new WriteConcernError({ errmsg, code: error_1.MONGODB_ERROR_CODES.WriteConcernFailed });
        }
    }
    toString() {
        return `BulkWriteResult(${this.result})`;
    }
    isOk() {
        return this.result.ok === 1;
    }
}
exports.BulkWriteResult = BulkWriteResult;
/**
 * An error representing a failure by the server to apply the requested write concern to the bulk operation.
 * @public
 * @category Error
 */
class WriteConcernError {
    constructor(error) {
        this[kServerError] = error;
    }
    /** Write concern error code. */
    get code() {
        return this[kServerError].code;
    }
    /** Write concern error message. */
    get errmsg() {
        return this[kServerError].errmsg;
    }
    /** Write concern error info. */
    get errInfo() {
        return this[kServerError].errInfo;
    }
    toJSON() {
        return this[kServerError];
    }
    toString() {
        return `WriteConcernError(${this.errmsg})`;
    }
}
exports.WriteConcernError = WriteConcernError;
/**
 * An error that occurred during a BulkWrite on the server.
 * @public
 * @category Error
 */
class WriteError {
    constructor(err) {
        this.err = err;
    }
    /** WriteError code. */
    get code() {
        return this.err.code;
    }
    /** WriteError original bulk operation index. */
    get index() {
        return this.err.index;
    }
    /** WriteError message. */
    get errmsg() {
        return this.err.errmsg;
    }
    /** WriteError details. */
    get errInfo() {
        return this.err.errInfo;
    }
    /** Returns the underlying operation that caused the error */
    getOperation() {
        return this.err.op;
    }
    toJSON() {
        return { code: this.err.code, index: this.err.index, errmsg: this.err.errmsg, op: this.err.op };
    }
    toString() {
        return `WriteError(${JSON.stringify(this.toJSON())})`;
    }
}
exports.WriteError = WriteError;
/** Merges results into shared data structure */
function mergeBatchResults(batch, bulkResult, err, result) {
    // If we have an error set the result to be the err object
    if (err) {
        result = err;
    }
    else if (result && result.result) {
        result = result.result;
    }
    if (result == null) {
        return;
    }
    // If we have a top level error, stop processing and return
    if (result.ok === 0 && bulkResult.ok === 1) {
        bulkResult.ok = 0;
        const writeError = {
            index: 0,
            code: result.code || 0,
            errmsg: result.message,
            errInfo: result.errInfo,
            op: batch.operations[0]
        };
        bulkResult.writeErrors.push(new WriteError(writeError));
        return;
    }
    else if (result.ok === 0 && bulkResult.ok === 0) {
        return;
    }
    // If we have an insert Batch type
    if (isInsertBatch(batch) && result.n) {
        bulkResult.nInserted = bulkResult.nInserted + result.n;
    }
    // If we have a delete Batch type
    if (isDeleteBatch(batch) && result.n) {
        bulkResult.nRemoved = bulkResult.nRemoved + result.n;
    }
    let nUpserted = 0;
    // We have an array of upserted values, we need to rewrite the indexes
    if (Array.isArray(result.upserted)) {
        nUpserted = result.upserted.length;
        for (let i = 0; i < result.upserted.length; i++) {
            bulkResult.upserted.push({
                index: result.upserted[i].index + batch.originalZeroIndex,
                _id: result.upserted[i]._id
            });
        }
    }
    else if (result.upserted) {
        nUpserted = 1;
        bulkResult.upserted.push({
            index: batch.originalZeroIndex,
            _id: result.upserted
        });
    }
    // If we have an update Batch type
    if (isUpdateBatch(batch) && result.n) {
        const nModified = result.nModified;
        bulkResult.nUpserted = bulkResult.nUpserted + nUpserted;
        bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted);
        if (typeof nModified === 'number') {
            bulkResult.nModified = bulkResult.nModified + nModified;
        }
        else {
            bulkResult.nModified = 0;
        }
    }
    if (Array.isArray(result.writeErrors)) {
        for (let i = 0; i < result.writeErrors.length; i++) {
            const writeError = {
                index: batch.originalIndexes[result.writeErrors[i].index],
                code: result.writeErrors[i].code,
                errmsg: result.writeErrors[i].errmsg,
                errInfo: result.writeErrors[i].errInfo,
                op: batch.operations[result.writeErrors[i].index]
            };
            bulkResult.writeErrors.push(new WriteError(writeError));
        }
    }
    if (result.writeConcernError) {
        bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError));
    }
}
exports.mergeBatchResults = mergeBatchResults;
function executeCommands(bulkOperation, options, callback) {
    if (bulkOperation.s.batches.length === 0) {
        return callback(undefined, new BulkWriteResult(bulkOperation.s.bulkResult));
    }
    const batch = bulkOperation.s.batches.shift();
    function resultHandler(err, result) {
        // Error is a driver related error not a bulk op error, return early
        if (err && 'message' in err && !(err instanceof error_1.MongoWriteConcernError)) {
            return callback(new MongoBulkWriteError(err, new BulkWriteResult(bulkOperation.s.bulkResult)));
        }
        if (err instanceof error_1.MongoWriteConcernError) {
            return handleMongoWriteConcernError(batch, bulkOperation.s.bulkResult, err, callback);
        }
        // Merge the results together
        mergeBatchResults(batch, bulkOperation.s.bulkResult, err, result);
        const writeResult = new BulkWriteResult(bulkOperation.s.bulkResult);
        if (bulkOperation.handleWriteError(callback, writeResult))
            return;
        // Execute the next command in line
        executeCommands(bulkOperation, options, callback);
    }
    const finalOptions = (0, utils_1.resolveOptions)(bulkOperation, {
        ...options,
        ordered: bulkOperation.isOrdered
    });
    if (finalOptions.bypassDocumentValidation !== true) {
        delete finalOptions.bypassDocumentValidation;
    }
    // Set an operationId if provided
    if (bulkOperation.operationId) {
        resultHandler.operationId = bulkOperation.operationId;
    }
    // Is the bypassDocumentValidation option specified
    if (bulkOperation.s.bypassDocumentValidation === true) {
        finalOptions.bypassDocumentValidation = true;
    }
    // Is the checkKeys option disabled
    if (bulkOperation.s.checkKeys === false) {
        finalOptions.checkKeys = false;
    }
    if (finalOptions.retryWrites) {
        if (isUpdateBatch(batch)) {
            finalOptions.retryWrites = finalOptions.retryWrites && !batch.operations.some(op => op.multi);
        }
        if (isDeleteBatch(batch)) {
            finalOptions.retryWrites =
                finalOptions.retryWrites && !batch.operations.some(op => op.limit === 0);
        }
    }
    try {
        if (isInsertBatch(batch)) {
            (0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, new insert_1.InsertOperation(bulkOperation.s.namespace, batch.operations, finalOptions), resultHandler);
        }
        else if (isUpdateBatch(batch)) {
            (0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, new update_1.UpdateOperation(bulkOperation.s.namespace, batch.operations, finalOptions), resultHandler);
        }
        else if (isDeleteBatch(batch)) {
            (0, execute_operation_1.executeOperation)(bulkOperation.s.collection.client, new delete_1.DeleteOperation(bulkOperation.s.namespace, batch.operations, finalOptions), resultHandler);
        }
    }
    catch (err) {
        // Force top level error
        err.ok = 0;
        // Merge top level error and return
        mergeBatchResults(batch, bulkOperation.s.bulkResult, err, undefined);
        callback();
    }
}
function handleMongoWriteConcernError(batch, bulkResult, err, callback) {
    mergeBatchResults(batch, bulkResult, undefined, err.result);
    callback(new MongoBulkWriteError({
        message: err.result?.writeConcernError.errmsg,
        code: err.result?.writeConcernError.result
    }, new BulkWriteResult(bulkResult)));
}
/**
 * An error indicating an unsuccessful Bulk Write
 * @public
 * @category Error
 */
class MongoBulkWriteError extends error_1.MongoServerError {
    /** Creates a new MongoBulkWriteError */
    constructor(error, result) {
        super(error);
        this.writeErrors = [];
        if (error instanceof WriteConcernError)
            this.err = error;
        else if (!(error instanceof Error)) {
            this.message = error.message;
            this.code = error.code;
            this.writeErrors = error.writeErrors ?? [];
        }
        this.result = result;
        Object.assign(this, error);
    }
    get name() {
        return 'MongoBulkWriteError';
    }
    /** Number of documents inserted. */
    get insertedCount() {
        return this.result.insertedCount;
    }
    /** Number of documents matched for update. */
    get matchedCount() {
        return this.result.matchedCount;
    }
    /** Number of documents modified. */
    get modifiedCount() {
        return this.result.modifiedCount;
    }
    /** Number of documents deleted. */
    get deletedCount() {
        return this.result.deletedCount;
    }
    /** Number of documents upserted. */
    get upsertedCount() {
        return this.result.upsertedCount;
    }
    /** Inserted document generated ids; hash key is the index of the originating operation */
    get insertedIds() {
        return this.result.insertedIds;
    }
    /** Upserted document generated ids; hash key is the index of the originating operation */
    get upsertedIds() {
        return this.result.upsertedIds;
    }
}
exports.MongoBulkWriteError = MongoBulkWriteError;
/**
 * A builder object that is returned from {@link BulkOperationBase#find}.
 * Is used to build a write operation that involves a query filter.
 *
 * @public
 */
class FindOperators {
    /**
     * Creates a new FindOperators object.
     * @internal
     */
    constructor(bulkOperation) {
        this.bulkOperation = bulkOperation;
    }
    /** Add a multiple update operation to the bulk operation */
    update(updateDocument) {
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.UPDATE, (0, update_1.makeUpdateStatement)(currentOp.selector, updateDocument, {
            ...currentOp,
            multi: true
        }));
    }
    /** Add a single update operation to the bulk operation */
    updateOne(updateDocument) {
        if (!(0, utils_1.hasAtomicOperators)(updateDocument)) {
            throw new error_1.MongoInvalidArgumentError('Update document requires atomic operators');
        }
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.UPDATE, (0, update_1.makeUpdateStatement)(currentOp.selector, updateDocument, { ...currentOp, multi: false }));
    }
    /** Add a replace one operation to the bulk operation */
    replaceOne(replacement) {
        if ((0, utils_1.hasAtomicOperators)(replacement)) {
            throw new error_1.MongoInvalidArgumentError('Replacement document must not use atomic operators');
        }
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.UPDATE, (0, update_1.makeUpdateStatement)(currentOp.selector, replacement, { ...currentOp, multi: false }));
    }
    /** Add a delete one operation to the bulk operation */
    deleteOne() {
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(currentOp.selector, { ...currentOp, limit: 1 }));
    }
    /** Add a delete many operation to the bulk operation */
    delete() {
        const currentOp = buildCurrentOp(this.bulkOperation);
        return this.bulkOperation.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(currentOp.selector, { ...currentOp, limit: 0 }));
    }
    /** Upsert modifier for update bulk operation, noting that this operation is an upsert. */
    upsert() {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.upsert = true;
        return this;
    }
    /** Specifies the collation for the query condition. */
    collation(collation) {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.collation = collation;
        return this;
    }
    /** Specifies arrayFilters for UpdateOne or UpdateMany bulk operations. */
    arrayFilters(arrayFilters) {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.arrayFilters = arrayFilters;
        return this;
    }
    /** Specifies hint for the bulk operation. */
    hint(hint) {
        if (!this.bulkOperation.s.currentOp) {
            this.bulkOperation.s.currentOp = {};
        }
        this.bulkOperation.s.currentOp.hint = hint;
        return this;
    }
}
exports.FindOperators = FindOperators;
/**
 * TODO(NODE-4063)
 * BulkWrites merge complexity is implemented in executeCommands
 * This provides a vehicle to treat bulkOperations like any other operation (hence "shim")
 * We would like this logic to simply live inside the BulkWriteOperation class
 * @internal
 */
class BulkWriteShimOperation extends operation_1.AbstractOperation {
    constructor(bulkOperation, options) {
        super(options);
        this.bulkOperation = bulkOperation;
    }
    execute(server, session, callback) {
        if (this.options.session == null) {
            // An implicit session could have been created by 'executeOperation'
            // So if we stick it on finalOptions here, each bulk operation
            // will use this same session, it'll be passed in the same way
            // an explicit session would be
            this.options.session = session;
        }
        return executeCommands(this.bulkOperation, this.options, callback);
    }
}
/** @public */
class BulkOperationBase {
    /**
     * Create a new OrderedBulkOperation or UnorderedBulkOperation instance
     * @internal
     */
    constructor(collection, options, isOrdered) {
        // determine whether bulkOperation is ordered or unordered
        this.isOrdered = isOrdered;
        const topology = (0, utils_1.getTopology)(collection);
        options = options == null ? {} : options;
        // TODO Bring from driver information in hello
        // Get the namespace for the write operations
        const namespace = collection.s.namespace;
        // Used to mark operation as executed
        const executed = false;
        // Current item
        const currentOp = undefined;
        // Set max byte size
        const hello = topology.lastHello();
        // If we have autoEncryption on, batch-splitting must be done on 2mb chunks, but single documents
        // over 2mb are still allowed
        const usingAutoEncryption = !!(topology.s.options && topology.s.options.autoEncrypter);
        const maxBsonObjectSize = hello && hello.maxBsonObjectSize ? hello.maxBsonObjectSize : 1024 * 1024 * 16;
        const maxBatchSizeBytes = usingAutoEncryption ? 1024 * 1024 * 2 : maxBsonObjectSize;
        const maxWriteBatchSize = hello && hello.maxWriteBatchSize ? hello.maxWriteBatchSize : 1000;
        // Calculates the largest possible size of an Array key, represented as a BSON string
        // element. This calculation:
        // 1 byte for BSON type
        // # of bytes = length of (string representation of (maxWriteBatchSize - 1))
        // + 1 byte for null terminator
        const maxKeySize = (maxWriteBatchSize - 1).toString(10).length + 2;
        // Final options for retryable writes
        let finalOptions = Object.assign({}, options);
        finalOptions = (0, utils_1.applyRetryableWrites)(finalOptions, collection.s.db);
        // Final results
        const bulkResult = {
            ok: 1,
            writeErrors: [],
            writeConcernErrors: [],
            insertedIds: [],
            nInserted: 0,
            nUpserted: 0,
            nMatched: 0,
            nModified: 0,
            nRemoved: 0,
            upserted: []
        };
        // Internal state
        this.s = {
            // Final result
            bulkResult,
            // Current batch state
            currentBatch: undefined,
            currentIndex: 0,
            // ordered specific
            currentBatchSize: 0,
            currentBatchSizeBytes: 0,
            // unordered specific
            currentInsertBatch: undefined,
            currentUpdateBatch: undefined,
            currentRemoveBatch: undefined,
            batches: [],
            // Write concern
            writeConcern: write_concern_1.WriteConcern.fromOptions(options),
            // Max batch size options
            maxBsonObjectSize,
            maxBatchSizeBytes,
            maxWriteBatchSize,
            maxKeySize,
            // Namespace
            namespace,
            // Topology
            topology,
            // Options
            options: finalOptions,
            // BSON options
            bsonOptions: (0, bson_1.resolveBSONOptions)(options),
            // Current operation
            currentOp,
            // Executed
            executed,
            // Collection
            collection,
            // Fundamental error
            err: undefined,
            // check keys
            checkKeys: typeof options.checkKeys === 'boolean' ? options.checkKeys : false
        };
        // bypass Validation
        if (options.bypassDocumentValidation === true) {
            this.s.bypassDocumentValidation = true;
        }
    }
    /**
     * Add a single insert document to the bulk operation
     *
     * @example
     * ```ts
     * const bulkOp = collection.initializeOrderedBulkOp();
     *
     * // Adds three inserts to the bulkOp.
     * bulkOp
     *   .insert({ a: 1 })
     *   .insert({ b: 2 })
     *   .insert({ c: 3 });
     * await bulkOp.execute();
     * ```
     */
    insert(document) {
        if (document._id == null && !shouldForceServerObjectId(this)) {
            document._id = new bson_1.ObjectId();
        }
        return this.addToOperationsList(exports.BatchType.INSERT, document);
    }
    /**
     * Builds a find operation for an update/updateOne/delete/deleteOne/replaceOne.
     * Returns a builder object used to complete the definition of the operation.
     *
     * @example
     * ```ts
     * const bulkOp = collection.initializeOrderedBulkOp();
     *
     * // Add an updateOne to the bulkOp
     * bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });
     *
     * // Add an updateMany to the bulkOp
     * bulkOp.find({ c: 3 }).update({ $set: { d: 4 } });
     *
     * // Add an upsert
     * bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });
     *
     * // Add a deletion
     * bulkOp.find({ g: 7 }).deleteOne();
     *
     * // Add a multi deletion
     * bulkOp.find({ h: 8 }).delete();
     *
     * // Add a replaceOne
     * bulkOp.find({ i: 9 }).replaceOne({writeConcern: { j: 10 }});
     *
     * // Update using a pipeline (requires MongoDB 4.2 or higher)
     * bulk.find({ k: 11, y: { $exists: true }, z: { $exists: true } }).updateOne([
     *   { $set: { total: { $sum: [ '$y', '$z' ] } } }
     * ]);
     *
     * // All of the ops will now be executed
     * await bulkOp.execute();
     * ```
     */
    find(selector) {
        if (!selector) {
            throw new error_1.MongoInvalidArgumentError('Bulk find operation must specify a selector');
        }
        // Save a current selector
        this.s.currentOp = {
            selector: selector
        };
        return new FindOperators(this);
    }
    /** Specifies a raw operation to perform in the bulk write. */
    raw(op) {
        if (op == null || typeof op !== 'object') {
            throw new error_1.MongoInvalidArgumentError('Operation must be an object with an operation key');
        }
        if ('insertOne' in op) {
            const forceServerObjectId = shouldForceServerObjectId(this);
            if (op.insertOne && op.insertOne.document == null) {
                // NOTE: provided for legacy support, but this is a malformed operation
                if (forceServerObjectId !== true && op.insertOne._id == null) {
                    op.insertOne._id = new bson_1.ObjectId();
                }
                return this.addToOperationsList(exports.BatchType.INSERT, op.insertOne);
            }
            if (forceServerObjectId !== true && op.insertOne.document._id == null) {
                op.insertOne.document._id = new bson_1.ObjectId();
            }
            return this.addToOperationsList(exports.BatchType.INSERT, op.insertOne.document);
        }
        if ('replaceOne' in op || 'updateOne' in op || 'updateMany' in op) {
            if ('replaceOne' in op) {
                if ('q' in op.replaceOne) {
                    throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
                }
                const updateStatement = (0, update_1.makeUpdateStatement)(op.replaceOne.filter, op.replaceOne.replacement, { ...op.replaceOne, multi: false });
                if ((0, utils_1.hasAtomicOperators)(updateStatement.u)) {
                    throw new error_1.MongoInvalidArgumentError('Replacement document must not use atomic operators');
                }
                return this.addToOperationsList(exports.BatchType.UPDATE, updateStatement);
            }
            if ('updateOne' in op) {
                if ('q' in op.updateOne) {
                    throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
                }
                const updateStatement = (0, update_1.makeUpdateStatement)(op.updateOne.filter, op.updateOne.update, {
                    ...op.updateOne,
                    multi: false
                });
                if (!(0, utils_1.hasAtomicOperators)(updateStatement.u)) {
                    throw new error_1.MongoInvalidArgumentError('Update document requires atomic operators');
                }
                return this.addToOperationsList(exports.BatchType.UPDATE, updateStatement);
            }
            if ('updateMany' in op) {
                if ('q' in op.updateMany) {
                    throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
                }
                const updateStatement = (0, update_1.makeUpdateStatement)(op.updateMany.filter, op.updateMany.update, {
                    ...op.updateMany,
                    multi: true
                });
                if (!(0, utils_1.hasAtomicOperators)(updateStatement.u)) {
                    throw new error_1.MongoInvalidArgumentError('Update document requires atomic operators');
                }
                return this.addToOperationsList(exports.BatchType.UPDATE, updateStatement);
            }
        }
        if ('deleteOne' in op) {
            if ('q' in op.deleteOne) {
                throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
            }
            return this.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(op.deleteOne.filter, { ...op.deleteOne, limit: 1 }));
        }
        if ('deleteMany' in op) {
            if ('q' in op.deleteMany) {
                throw new error_1.MongoInvalidArgumentError('Raw operations are not allowed');
            }
            return this.addToOperationsList(exports.BatchType.DELETE, (0, delete_1.makeDeleteStatement)(op.deleteMany.filter, { ...op.deleteMany, limit: 0 }));
        }
        // otherwise an unknown operation was provided
        throw new error_1.MongoInvalidArgumentError('bulkWrite only supports insertOne, updateOne, updateMany, deleteOne, deleteMany');
    }
    get bsonOptions() {
        return this.s.bsonOptions;
    }
    get writeConcern() {
        return this.s.writeConcern;
    }
    get batches() {
        const batches = [...this.s.batches];
        if (this.isOrdered) {
            if (this.s.currentBatch)
                batches.push(this.s.currentBatch);
        }
        else {
            if (this.s.currentInsertBatch)
                batches.push(this.s.currentInsertBatch);
            if (this.s.currentUpdateBatch)
                batches.push(this.s.currentUpdateBatch);
            if (this.s.currentRemoveBatch)
                batches.push(this.s.currentRemoveBatch);
        }
        return batches;
    }
    async execute(options = {}) {
        if (this.s.executed) {
            throw new error_1.MongoBatchReExecutionError();
        }
        const writeConcern = write_concern_1.WriteConcern.fromOptions(options);
        if (writeConcern) {
            this.s.writeConcern = writeConcern;
        }
        // If we have a current batch
        if (this.isOrdered) {
            if (this.s.currentBatch)
                this.s.batches.push(this.s.currentBatch);
        }
        else {
            if (this.s.currentInsertBatch)
                this.s.batches.push(this.s.currentInsertBatch);
            if (this.s.currentUpdateBatch)
                this.s.batches.push(this.s.currentUpdateBatch);
            if (this.s.currentRemoveBatch)
                this.s.batches.push(this.s.currentRemoveBatch);
        }
        // If we have no operations in the bulk, raise an error
        if (this.s.batches.length === 0) {
            throw new error_1.MongoInvalidArgumentError('Invalid BulkOperation, Batch cannot be empty');
        }
        this.s.executed = true;
        const finalOptions = { ...this.s.options, ...options };
        const operation = new BulkWriteShimOperation(this, finalOptions);
        return (0, execute_operation_1.executeOperation)(this.s.collection.client, operation);
    }
    /**
     * Handles the write error before executing commands
     * @internal
     */
    handleWriteError(callback, writeResult) {
        if (this.s.bulkResult.writeErrors.length > 0) {
            const msg = this.s.bulkResult.writeErrors[0].errmsg
                ? this.s.bulkResult.writeErrors[0].errmsg
                : 'write operation failed';
            callback(new MongoBulkWriteError({
                message: msg,
                code: this.s.bulkResult.writeErrors[0].code,
                writeErrors: this.s.bulkResult.writeErrors
            }, writeResult));
            return true;
        }
        const writeConcernError = writeResult.getWriteConcernError();
        if (writeConcernError) {
            callback(new MongoBulkWriteError(writeConcernError, writeResult));
            return true;
        }
        return false;
    }
}
exports.BulkOperationBase = BulkOperationBase;
Object.defineProperty(BulkOperationBase.prototype, 'length', {
    enumerable: true,
    get() {
        return this.s.currentIndex;
    }
});
function shouldForceServerObjectId(bulkOperation) {
    if (typeof bulkOperation.s.options.forceServerObjectId === 'boolean') {
        return bulkOperation.s.options.forceServerObjectId;
    }
    if (typeof bulkOperation.s.collection.s.db.options?.forceServerObjectId === 'boolean') {
        return bulkOperation.s.collection.s.db.options?.forceServerObjectId;
    }
    return false;
}
function isInsertBatch(batch) {
    return batch.batchType === exports.BatchType.INSERT;
}
function isUpdateBatch(batch) {
    return batch.batchType === exports.BatchType.UPDATE;
}
function isDeleteBatch(batch) {
    return batch.batchType === exports.BatchType.DELETE;
}
function buildCurrentOp(bulkOp) {
    let { currentOp } = bulkOp.s;
    bulkOp.s.currentOp = undefined;
    if (!currentOp)
        currentOp = {};
    return currentOp;
}
//# sourceMappingURL=common.js.map
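Taken together, the pieces above form the driver's bulk-write pipeline: `BulkOperationBase` queues operations into `Batch` objects, `executeCommands` drains the batches one server command at a time, and outcomes surface as a `BulkWriteResult` or a `MongoBulkWriteError`. For orientation, here is a minimal usage sketch assembled from the JSDoc examples in this file; the connection string, database name, and document shapes are hypothetical placeholders, not part of the vendored code.

```ts
import { MongoClient, MongoBulkWriteError } from 'mongodb';

// Hypothetical connection details; the bulk API calls mirror the
// JSDoc examples documented in common.js above.
const client = new MongoClient('mongodb://localhost:27017');
const coll = client.db('test').collection('items');

async function run() {
  const bulkOp = coll.initializeOrderedBulkOp();
  bulkOp.insert({ a: 1 });                                       // BatchType.INSERT
  bulkOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });           // BatchType.UPDATE
  bulkOp.find({ e: 5 }).upsert().updateOne({ $set: { f: 6 } });  // upsert modifier
  bulkOp.find({ g: 7 }).deleteOne();                             // BatchType.DELETE

  try {
    const result = await bulkOp.execute();
    console.log(result.insertedCount, result.modifiedCount, result.upsertedIds);
  } catch (error) {
    if (error instanceof MongoBulkWriteError) {
      // Per-operation failures and partial counts remain available on the error
      console.error(error.writeErrors, error.insertedCount);
    } else {
      throw error;
    }
  } finally {
    await client.close();
  }
}
```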
1 node_modules/mongodb/lib/bulk/common.js.map (generated, vendored, normal file)
File diff suppressed because one or more lines are too long
67 node_modules/mongodb/lib/bulk/ordered.js (generated, vendored, normal file)
@@ -0,0 +1,67 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.OrderedBulkOperation = void 0;
const BSON = require("../bson");
const error_1 = require("../error");
const common_1 = require("./common");
/** @public */
class OrderedBulkOperation extends common_1.BulkOperationBase {
    /** @internal */
    constructor(collection, options) {
        super(collection, options, true);
    }
    addToOperationsList(batchType, document) {
        // Get the bsonSize
        const bsonSize = BSON.calculateObjectSize(document, {
            checkKeys: false,
            // Since we don't know what the user selected for BSON options here,
            // err on the safe side, and check the size with ignoreUndefined: false.
            ignoreUndefined: false
        });
        // Throw error if the doc is bigger than the max BSON size
        if (bsonSize >= this.s.maxBsonObjectSize)
            // TODO(NODE-3483): Change this to MongoBSONError
            throw new error_1.MongoInvalidArgumentError(`Document is larger than the maximum size ${this.s.maxBsonObjectSize}`);
        // Create a new batch object if we don't have a current one
        if (this.s.currentBatch == null) {
            this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
        }
        const maxKeySize = this.s.maxKeySize;
        // Check if we need to create a new batch
        if (
        // New batch if we exceed the max batch op size
        this.s.currentBatchSize + 1 >= this.s.maxWriteBatchSize ||
            // New batch if we exceed the maxBatchSizeBytes. Only matters if batch already has a doc,
            // since we can't send an empty batch
            (this.s.currentBatchSize > 0 &&
                this.s.currentBatchSizeBytes + maxKeySize + bsonSize >= this.s.maxBatchSizeBytes) ||
            // New batch if the new op does not have the same op type as the current batch
            this.s.currentBatch.batchType !== batchType) {
            // Save the batch to the execution stack
            this.s.batches.push(this.s.currentBatch);
            // Create a new batch
            this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
            // Reset the current size trackers
            this.s.currentBatchSize = 0;
            this.s.currentBatchSizeBytes = 0;
        }
        if (batchType === common_1.BatchType.INSERT) {
            this.s.bulkResult.insertedIds.push({
                index: this.s.currentIndex,
                _id: document._id
            });
        }
        // We have an array of documents
        if (Array.isArray(document)) {
            throw new error_1.MongoInvalidArgumentError('Operation passed in cannot be an Array');
        }
        this.s.currentBatch.originalIndexes.push(this.s.currentIndex);
        this.s.currentBatch.operations.push(document);
        this.s.currentBatchSize += 1;
        this.s.currentBatchSizeBytes += maxKeySize + bsonSize;
        this.s.currentIndex += 1;
        return this;
    }
}
exports.OrderedBulkOperation = OrderedBulkOperation;
//# sourceMappingURL=ordered.js.map
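One consequence of the splitting rules above: because an ordered bulk must preserve execution order and a `Batch` holds a single `batchType`, interleaving operation types forces a new batch even when the size limits are nowhere near exceeded. A small illustration, reusing the hypothetical `coll` from the sketch after common.js:

```ts
const orderedOp = coll.initializeOrderedBulkOp();
orderedOp.insert({ a: 1 });                              // batch 1: INSERT
orderedOp.find({ a: 1 }).updateOne({ $set: { b: 2 } }); // batch 2: UPDATE
orderedOp.insert({ c: 3 });                              // batch 3: INSERT again

// The `batches` getter from common.js exposes the splits. Size-based splits
// charge each op currentBatchSizeBytes += maxKeySize + bsonSize, where
// maxKeySize = (maxWriteBatchSize - 1).toString(10).length + 2,
// e.g. '999'.length + 2 = 5 for the default maxWriteBatchSize of 1000.
console.log(orderedOp.batches.length); // 3
```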
1 node_modules/mongodb/lib/bulk/ordered.js.map (generated, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"ordered.js","sourceRoot":"","sources":["../../src/bulk/ordered.ts"],"names":[],"mappings":";;;AACA,gCAAgC;AAEhC,oCAAqD;AAGrD,qCAAiF;AAEjF,cAAc;AACd,MAAa,oBAAqB,SAAQ,0BAAiB;IACzD,gBAAgB;IAChB,YAAY,UAAsB,EAAE,OAAyB;QAC3D,KAAK,CAAC,UAAU,EAAE,OAAO,EAAE,IAAI,CAAC,CAAC;IACnC,CAAC;IAED,mBAAmB,CACjB,SAAoB,EACpB,QAAsD;QAEtD,mBAAmB;QACnB,MAAM,QAAQ,GAAG,IAAI,CAAC,mBAAmB,CAAC,QAAQ,EAAE;YAClD,SAAS,EAAE,KAAK;YAChB,oEAAoE;YACpE,wEAAwE;YACxE,eAAe,EAAE,KAAK;SAChB,CAAC,CAAC;QAEV,0DAA0D;QAC1D,IAAI,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB;YACtC,iDAAiD;YACjD,MAAM,IAAI,iCAAyB,CACjC,4CAA4C,IAAI,CAAC,CAAC,CAAC,iBAAiB,EAAE,CACvE,CAAC;QAEJ,2DAA2D;QAC3D,IAAI,IAAI,CAAC,CAAC,CAAC,YAAY,IAAI,IAAI,EAAE;YAC/B,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;SACjE;QAED,MAAM,UAAU,GAAG,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC;QAErC,yCAAyC;QACzC;QACE,+CAA+C;QAC/C,IAAI,CAAC,CAAC,CAAC,gBAAgB,GAAG,CAAC,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB;YACvD,yFAAyF;YACzF,qCAAqC;YACrC,CAAC,IAAI,CAAC,CAAC,CAAC,gBAAgB,GAAG,CAAC;gBAC1B,IAAI,CAAC,CAAC,CAAC,qBAAqB,GAAG,UAAU,GAAG,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB,CAAC;YACnF,8EAA8E;YAC9E,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,KAAK,SAAS,EAC3C;YACA,wCAAwC;YACxC,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;YAEzC,qBAAqB;YACrB,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;YAEhE,kCAAkC;YAClC,IAAI,CAAC,CAAC,CAAC,gBAAgB,GAAG,CAAC,CAAC;YAC5B,IAAI,CAAC,CAAC,CAAC,qBAAqB,GAAG,CAAC,CAAC;SAClC;QAED,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YAClC,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC;gBACjC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY;gBAC1B,GAAG,EAAG,QAAqB,CAAC,GAAG;aAChC,CAAC,CAAC;SACJ;QAED,gCAAgC;QAChC,IAAI,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE;YAC3B,MAAM,IAAI,iCAAyB,CAAC,wCAAwC,CAAC,CAAC;SAC/E;QAED,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,eAAe,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;QAC9D,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,UAAU,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAC9C,IAAI,CAAC,CAAC,CAAC,gBAAgB,IAAI,CAAC,CAAC;QAC7B,IAAI,CAAC,CAAC,CAAC,qBAAqB,IAAI,UAAU,GAAG,QAAQ,CAAC;QACtD,IAAI,CAAC,CAAC,CAAC,YAAY,IAAI,CAAC,CAAC;QACzB,OAAO,IAAI,CAAC;IACd,CAAC;CACF;AAzED,oDAyEC"}
92 node_modules/mongodb/lib/bulk/unordered.js (generated, vendored, normal file)
@@ -0,0 +1,92 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.UnorderedBulkOperation = void 0;
const BSON = require("../bson");
const error_1 = require("../error");
const common_1 = require("./common");
/** @public */
class UnorderedBulkOperation extends common_1.BulkOperationBase {
    /** @internal */
    constructor(collection, options) {
        super(collection, options, false);
    }
    handleWriteError(callback, writeResult) {
        if (this.s.batches.length) {
            return false;
        }
        return super.handleWriteError(callback, writeResult);
    }
    addToOperationsList(batchType, document) {
        // Get the bsonSize
        const bsonSize = BSON.calculateObjectSize(document, {
            checkKeys: false,
            // Since we don't know what the user selected for BSON options here,
            // err on the safe side, and check the size with ignoreUndefined: false.
            ignoreUndefined: false
        });
        // Throw error if the doc is bigger than the max BSON size
        if (bsonSize >= this.s.maxBsonObjectSize) {
            // TODO(NODE-3483): Change this to MongoBSONError
            throw new error_1.MongoInvalidArgumentError(`Document is larger than the maximum size ${this.s.maxBsonObjectSize}`);
        }
        // Holds the current batch
        this.s.currentBatch = undefined;
        // Get the right type of batch
        if (batchType === common_1.BatchType.INSERT) {
            this.s.currentBatch = this.s.currentInsertBatch;
        }
        else if (batchType === common_1.BatchType.UPDATE) {
            this.s.currentBatch = this.s.currentUpdateBatch;
        }
        else if (batchType === common_1.BatchType.DELETE) {
            this.s.currentBatch = this.s.currentRemoveBatch;
        }
        const maxKeySize = this.s.maxKeySize;
        // Create a new batch object if we don't have a current one
        if (this.s.currentBatch == null) {
            this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
        }
        // Check if we need to create a new batch
        if (
        // New batch if we exceed the max batch op size
        this.s.currentBatch.size + 1 >= this.s.maxWriteBatchSize ||
            // New batch if we exceed the maxBatchSizeBytes. Only matters if batch already has a doc,
            // since we can't send an empty batch
            (this.s.currentBatch.size > 0 &&
                this.s.currentBatch.sizeBytes + maxKeySize + bsonSize >= this.s.maxBatchSizeBytes) ||
            // New batch if the new op does not have the same op type as the current batch
            this.s.currentBatch.batchType !== batchType) {
            // Save the batch to the execution stack
            this.s.batches.push(this.s.currentBatch);
            // Create a new batch
            this.s.currentBatch = new common_1.Batch(batchType, this.s.currentIndex);
        }
        // We have an array of documents
        if (Array.isArray(document)) {
            throw new error_1.MongoInvalidArgumentError('Operation passed in cannot be an Array');
        }
        this.s.currentBatch.operations.push(document);
        this.s.currentBatch.originalIndexes.push(this.s.currentIndex);
        this.s.currentIndex = this.s.currentIndex + 1;
        // Save back the current Batch to the right type
        if (batchType === common_1.BatchType.INSERT) {
            this.s.currentInsertBatch = this.s.currentBatch;
            this.s.bulkResult.insertedIds.push({
                index: this.s.bulkResult.insertedIds.length,
                _id: document._id
            });
        }
        else if (batchType === common_1.BatchType.UPDATE) {
            this.s.currentUpdateBatch = this.s.currentBatch;
        }
        else if (batchType === common_1.BatchType.DELETE) {
            this.s.currentRemoveBatch = this.s.currentBatch;
        }
        // Update current batch size
        this.s.currentBatch.size += 1;
        this.s.currentBatch.sizeBytes += maxKeySize + bsonSize;
        return this;
    }
}
exports.UnorderedBulkOperation = UnorderedBulkOperation;
//# sourceMappingURL=unordered.js.map
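In contrast to the ordered variant, `UnorderedBulkOperation` keeps a separate current batch per operation type (`currentInsertBatch`, `currentUpdateBatch`, `currentRemoveBatch`), so same-type operations coalesce even when interleaved, and execution order across types is not guaranteed. The same interleaving as the ordered sketch, again with the hypothetical `coll`:

```ts
const unorderedOp = coll.initializeUnorderedBulkOp();
unorderedOp.insert({ a: 1 });
unorderedOp.find({ a: 1 }).updateOne({ $set: { b: 2 } });
unorderedOp.insert({ c: 3 }); // joins the existing INSERT batch

console.log(unorderedOp.batches.length); // 2, not 3
```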
1 node_modules/mongodb/lib/bulk/unordered.js.map (generated, vendored, normal file)
@@ -0,0 +1 @@
{"version":3,"file":"unordered.js","sourceRoot":"","sources":["../../src/bulk/unordered.ts"],"names":[],"mappings":";;;AACA,gCAAgC;AAEhC,oCAAqD;AAIrD,qCAAkG;AAElG,cAAc;AACd,MAAa,sBAAuB,SAAQ,0BAAiB;IAC3D,gBAAgB;IAChB,YAAY,UAAsB,EAAE,OAAyB;QAC3D,KAAK,CAAC,UAAU,EAAE,OAAO,EAAE,KAAK,CAAC,CAAC;IACpC,CAAC;IAEQ,gBAAgB,CAAC,QAAkB,EAAE,WAA4B;QACxE,IAAI,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,MAAM,EAAE;YACzB,OAAO,KAAK,CAAC;SACd;QAED,OAAO,KAAK,CAAC,gBAAgB,CAAC,QAAQ,EAAE,WAAW,CAAC,CAAC;IACvD,CAAC;IAED,mBAAmB,CACjB,SAAoB,EACpB,QAAsD;QAEtD,mBAAmB;QACnB,MAAM,QAAQ,GAAG,IAAI,CAAC,mBAAmB,CAAC,QAAQ,EAAE;YAClD,SAAS,EAAE,KAAK;YAEhB,oEAAoE;YACpE,wEAAwE;YACxE,eAAe,EAAE,KAAK;SAChB,CAAC,CAAC;QAEV,0DAA0D;QAC1D,IAAI,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB,EAAE;YACxC,iDAAiD;YACjD,MAAM,IAAI,iCAAyB,CACjC,4CAA4C,IAAI,CAAC,CAAC,CAAC,iBAAiB,EAAE,CACvE,CAAC;SACH;QAED,0BAA0B;QAC1B,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,SAAS,CAAC;QAChC,8BAA8B;QAC9B,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YAClC,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,kBAAkB,CAAC;SACjD;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,kBAAkB,CAAC;SACjD;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,kBAAkB,CAAC;SACjD;QAED,MAAM,UAAU,GAAG,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC;QAErC,2DAA2D;QAC3D,IAAI,IAAI,CAAC,CAAC,CAAC,YAAY,IAAI,IAAI,EAAE;YAC/B,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;SACjE;QAED,yCAAyC;QACzC;QACE,+CAA+C;QAC/C,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,GAAG,CAAC,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB;YACxD,yFAAyF;YACzF,qCAAqC;YACrC,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,GAAG,CAAC;gBAC3B,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,GAAG,UAAU,GAAG,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,iBAAiB,CAAC;YACpF,8EAA8E;YAC9E,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,KAAK,SAAS,EAC3C;YACA,wCAAwC;YACxC,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;YAEzC,qBAAqB;YACrB,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,cAAK,CAAC,SAAS,EAAE,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;SACjE;QAED,gCAAgC;QAChC,IAAI,KAAK,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE;YAC3B,MAAM,IAAI,iCAAyB,CAAC,wCAAwC,CAAC,CAAC;SAC/E;QAED,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,UAAU,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;QAC9C,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,eAAe,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,CAAC;QAC9D,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,GAAG,CAAC,CAAC;QAE9C,gDAAgD;QAChD,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YAClC,IAAI,CAAC,CAAC,CAAC,kBAAkB,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC;YAChD,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,WAAW,CAAC,IAAI,CAAC;gBACjC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,UAAU,CAAC,WAAW,CAAC,MAAM;gBAC3C,GAAG,EAAG,QAAqB,CAAC,GAAG;aAChC,CAAC,CAAC;SACJ;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,kBAAkB,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC;SACjD;aAAM,IAAI,SAAS,KAAK,kBAAS,CAAC,MAAM,EAAE;YACzC,IAAI,CAAC,CAAC,CAAC,kBAAkB,GAAG,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC;SACjD;QAED,4BAA4B;QAC5B,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,IAAI,IAAI,CAAC,CAAC;QAC9B,IAAI,CAAC,CAAC,CAAC,YAAY,CAAC,SAAS,IAAI,UAAU,GAAG,QAAQ,CAAC;QAEvD,OAAO,IAAI,CAAC;IACd,CAAC;CACF;AAnGD,wDAmGC"}