First commit

Author: Aravind142857
Date: 2023-06-05 20:06:13 -05:00
Commit: 4e3c08d1c4
672 changed files with 179969 additions and 0 deletions

node_modules/mongodb/lib/gridfs/download.js generated vendored Normal file

@@ -0,0 +1,313 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.GridFSBucketReadStream = void 0;
const stream_1 = require("stream");
const error_1 = require("../error");
/**
* A readable stream that enables you to read buffers from GridFS.
*
* Do not instantiate this class directly. Use `openDownloadStream()` instead.
* @public
*/
class GridFSBucketReadStream extends stream_1.Readable {
/**
* @param chunks - Handle for chunks collection
* @param files - Handle for files collection
* @param readPreference - The read preference to use
* @param filter - The filter to use to find the file document
* @internal
*/
constructor(chunks, files, readPreference, filter, options) {
super();
this.s = {
bytesToTrim: 0,
bytesToSkip: 0,
bytesRead: 0,
chunks,
expected: 0,
files,
filter,
init: false,
expectedEnd: 0,
options: {
start: 0,
end: 0,
...options
},
readPreference
};
}
/**
* Reads from the cursor and pushes to the stream.
* Private implementation; do not call directly.
* @internal
*/
_read() {
if (this.destroyed)
return;
waitForFile(this, () => doRead(this));
}
/**
* Sets the 0-based offset in bytes to start streaming from. Throws
* an error if this stream has entered flowing mode
* (e.g. if you've already called `on('data')`)
*
* @param start - 0-based offset in bytes to start streaming from
*/
start(start = 0) {
throwIfInitialized(this);
this.s.options.start = start;
return this;
}
/**
* Sets the 0-based offset in bytes to stop reading at. Throws
* an error if this stream has entered flowing mode
* (e.g. if you've already called `on('data')`)
*
* @param end - Offset in bytes to stop reading at
*/
end(end = 0) {
throwIfInitialized(this);
this.s.options.end = end;
return this;
}
/**
* Marks this stream as aborted (will never push another `data` event)
* and kills the underlying cursor. Will emit the 'end' event, and then
* the 'close' event once the cursor is successfully killed.
*/
async abort() {
this.push(null);
this.destroyed = true;
if (this.s.cursor) {
try {
await this.s.cursor.close();
}
finally {
this.emit(GridFSBucketReadStream.CLOSE);
}
}
else {
if (!this.s.init) {
// If not initialized, fire close event because we will never
// get a cursor
this.emit(GridFSBucketReadStream.CLOSE);
}
}
}
}
exports.GridFSBucketReadStream = GridFSBucketReadStream;
/**
* An error occurred
* @event
*/
GridFSBucketReadStream.ERROR = 'error';
/**
* Fires when the stream loaded the file document corresponding to the provided id.
* @event
*/
GridFSBucketReadStream.FILE = 'file';
/**
* Emitted when a chunk of data is available to be consumed.
* @event
*/
GridFSBucketReadStream.DATA = 'data';
/**
* Fired when the stream is exhausted (no more data events).
* @event
*/
GridFSBucketReadStream.END = 'end';
/**
* Fired when the stream is exhausted and the underlying cursor is killed
* @event
*/
GridFSBucketReadStream.CLOSE = 'close';
function throwIfInitialized(stream) {
if (stream.s.init) {
throw new error_1.MongoGridFSStreamError('Options cannot be changed after the stream is initialized');
}
}
function doRead(stream) {
if (stream.destroyed)
return;
if (!stream.s.cursor)
return;
if (!stream.s.file)
return;
const handleReadResult = ({ error, doc }) => {
if (stream.destroyed) {
return;
}
if (error) {
stream.emit(GridFSBucketReadStream.ERROR, error);
return;
}
if (!doc) {
stream.push(null);
stream.s.cursor?.close().then(() => {
stream.emit(GridFSBucketReadStream.CLOSE);
}, error => {
stream.emit(GridFSBucketReadStream.ERROR, error);
});
return;
}
if (!stream.s.file)
return;
const bytesRemaining = stream.s.file.length - stream.s.bytesRead;
const expectedN = stream.s.expected++;
const expectedLength = Math.min(stream.s.file.chunkSize, bytesRemaining);
if (doc.n > expectedN) {
return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ChunkIsMissing: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
}
if (doc.n < expectedN) {
return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
}
let buf = Buffer.isBuffer(doc.data) ? doc.data : doc.data.buffer;
if (buf.byteLength !== expectedLength) {
if (bytesRemaining <= 0) {
return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected file length ${stream.s.file.length} bytes but already read ${stream.s.bytesRead} bytes`));
}
return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ChunkIsWrongSize: Got unexpected length: ${buf.byteLength}, expected: ${expectedLength}`));
}
stream.s.bytesRead += buf.byteLength;
if (buf.byteLength === 0) {
return stream.push(null);
}
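// Honor byte-level range options: bytesToSkip (computed in handleStartOption)
// trims the front of the first chunk that is read, and bytesToTrim (computed
// in handleEndOption) trims the tail of the final expected chunk.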
let sliceStart = null;
let sliceEnd = null;
if (stream.s.bytesToSkip != null) {
sliceStart = stream.s.bytesToSkip;
stream.s.bytesToSkip = 0;
}
const atEndOfStream = expectedN === stream.s.expectedEnd - 1;
const bytesLeftToRead = stream.s.options.end - stream.s.bytesToSkip;
if (atEndOfStream && stream.s.bytesToTrim != null) {
sliceEnd = stream.s.file.chunkSize - stream.s.bytesToTrim;
}
else if (stream.s.options.end && bytesLeftToRead < doc.data.byteLength) {
sliceEnd = bytesLeftToRead;
}
if (sliceStart != null || sliceEnd != null) {
buf = buf.slice(sliceStart || 0, sliceEnd || buf.byteLength);
}
stream.push(buf);
return;
};
stream.s.cursor.next().then(doc => handleReadResult({ error: null, doc }), error => handleReadResult({ error, doc: null }));
}
function init(stream) {
const findOneOptions = {};
if (stream.s.readPreference) {
findOneOptions.readPreference = stream.s.readPreference;
}
if (stream.s.options && stream.s.options.sort) {
findOneOptions.sort = stream.s.options.sort;
}
if (stream.s.options && stream.s.options.skip) {
findOneOptions.skip = stream.s.options.skip;
}
const handleReadResult = ({ error, doc }) => {
if (error) {
return stream.emit(GridFSBucketReadStream.ERROR, error);
}
if (!doc) {
const identifier = stream.s.filter._id
? stream.s.filter._id.toString()
: stream.s.filter.filename;
const errmsg = `FileNotFound: file ${identifier} was not found`;
// TODO(NODE-3483)
const err = new error_1.MongoRuntimeError(errmsg);
err.code = 'ENOENT'; // TODO: NODE-3338 set property as part of constructor
return stream.emit(GridFSBucketReadStream.ERROR, err);
}
// If document is empty, kill the stream immediately and don't
// execute any reads
if (doc.length <= 0) {
stream.push(null);
return;
}
if (stream.destroyed) {
// If user destroys the stream before we have a cursor, wait
// until the query is done to say we're 'closed' because we can't
// cancel a query.
stream.emit(GridFSBucketReadStream.CLOSE);
return;
}
try {
stream.s.bytesToSkip = handleStartOption(stream, doc, stream.s.options);
}
catch (error) {
return stream.emit(GridFSBucketReadStream.ERROR, error);
}
const filter = { files_id: doc._id };
// Currently (MongoDB 3.4.4) the skip function does not use the index;
// it retrieves all the documents first and then skips them. (CS-25811)
// As a workaround we use $gte on the "n" field.
if (stream.s.options && stream.s.options.start != null) {
const skip = Math.floor(stream.s.options.start / doc.chunkSize);
if (skip > 0) {
filter['n'] = { $gte: skip };
}
}
stream.s.cursor = stream.s.chunks.find(filter).sort({ n: 1 });
if (stream.s.readPreference) {
stream.s.cursor.withReadPreference(stream.s.readPreference);
}
stream.s.expectedEnd = Math.ceil(doc.length / doc.chunkSize);
stream.s.file = doc;
try {
stream.s.bytesToTrim = handleEndOption(stream, doc, stream.s.cursor, stream.s.options);
}
catch (error) {
return stream.emit(GridFSBucketReadStream.ERROR, error);
}
stream.emit(GridFSBucketReadStream.FILE, doc);
return;
};
stream.s.files.findOne(stream.s.filter, findOneOptions).then(doc => handleReadResult({ error: null, doc }), error => handleReadResult({ error, doc: null }));
}
function waitForFile(stream, callback) {
if (stream.s.file) {
return callback();
}
if (!stream.s.init) {
init(stream);
stream.s.init = true;
}
stream.once('file', () => {
callback();
});
}
function handleStartOption(stream, doc, options) {
if (options && options.start != null) {
if (options.start > doc.length) {
throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be more than the length of the file (${doc.length})`);
}
if (options.start < 0) {
throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be negative`);
}
if (options.end != null && options.end < options.start) {
throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be greater than stream end (${options.end})`);
}
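// Align the read to the chunk containing `start`: whole chunks before it are
// counted as already read, and the leftover offset inside that chunk is
// returned as the number of bytes to skip on the first read.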
stream.s.bytesRead = Math.floor(options.start / doc.chunkSize) * doc.chunkSize;
stream.s.expected = Math.floor(options.start / doc.chunkSize);
return options.start - stream.s.bytesRead;
}
throw new error_1.MongoInvalidArgumentError('Start option must be defined');
}
function handleEndOption(stream, doc, cursor, options) {
if (options && options.end != null) {
if (options.end > doc.length) {
throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be more than the length of the file (${doc.length})`);
}
if (options.start == null || options.start < 0) {
throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be negative`);
}
const start = options.start != null ? Math.floor(options.start / doc.chunkSize) : 0;
cursor.limit(Math.ceil(options.end / doc.chunkSize) - start);
stream.s.expectedEnd = Math.ceil(options.end / doc.chunkSize);
return Math.ceil(options.end / doc.chunkSize) * doc.chunkSize - options.end;
}
throw new error_1.MongoInvalidArgumentError('End option must be defined');
}
//# sourceMappingURL=download.js.map
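
A minimal usage sketch of the read stream defined above, assuming a mongod at mongodb://localhost:27017, a database named test, and an already-stored file id (all illustrative placeholders):

"use strict";
const { MongoClient, GridFSBucket, ObjectId } = require("mongodb");

async function readFirstKilobyte(fileId) {
    // fileId, the URI, and the database name are placeholders for this sketch.
    const client = new MongoClient("mongodb://localhost:27017");
    await client.connect();
    try {
        const bucket = new GridFSBucket(client.db("test"));
        // start()/end() must be called before the stream enters flowing mode,
        // i.e. before any 'data' listener is attached (see throwIfInitialized).
        const stream = bucket.openDownloadStream(new ObjectId(fileId)).start(0).end(1024);
        const chunks = [];
        for await (const chunk of stream) {
            chunks.push(chunk);
        }
        return Buffer.concat(chunks); // bytes [0, 1024) of the stored file
    } finally {
        await client.close();
    }
}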

node_modules/mongodb/lib/gridfs/download.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

node_modules/mongodb/lib/gridfs/index.js generated vendored Normal file

@@ -0,0 +1,128 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.GridFSBucket = void 0;
const error_1 = require("../error");
const mongo_types_1 = require("../mongo_types");
const write_concern_1 = require("../write_concern");
const download_1 = require("./download");
const upload_1 = require("./upload");
const DEFAULT_GRIDFS_BUCKET_OPTIONS = {
bucketName: 'fs',
chunkSizeBytes: 255 * 1024
};
/**
* Constructor for a streaming GridFS interface
* @public
*/
class GridFSBucket extends mongo_types_1.TypedEventEmitter {
constructor(db, options) {
super();
this.setMaxListeners(0);
const privateOptions = {
...DEFAULT_GRIDFS_BUCKET_OPTIONS,
...options,
writeConcern: write_concern_1.WriteConcern.fromOptions(options)
};
this.s = {
db,
options: privateOptions,
_chunksCollection: db.collection(privateOptions.bucketName + '.chunks'),
_filesCollection: db.collection(privateOptions.bucketName + '.files'),
checkedIndexes: false,
calledOpenUploadStream: false
};
}
/**
* Returns a writable stream (GridFSBucketWriteStream) for writing
* buffers to GridFS. The stream's 'id' property contains the resulting
* file's id.
*
* @param filename - The value of the 'filename' key in the files doc
* @param options - Optional settings.
*/
openUploadStream(filename, options) {
return new upload_1.GridFSBucketWriteStream(this, filename, options);
}
/**
* Returns a writable stream (GridFSBucketWriteStream) for writing
* buffers to GridFS for a custom file id. The stream's 'id' property contains the resulting
* file's id.
*/
openUploadStreamWithId(id, filename, options) {
return new upload_1.GridFSBucketWriteStream(this, filename, { ...options, id });
}
/** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. */
openDownloadStream(id, options) {
return new download_1.GridFSBucketReadStream(this.s._chunksCollection, this.s._filesCollection, this.s.options.readPreference, { _id: id }, options);
}
/**
* Deletes a file with the given id
*
* @param id - The id of the file doc
*/
async delete(id) {
const { deletedCount } = await this.s._filesCollection.deleteOne({ _id: id });
// Delete orphaned chunks before returning FileNotFound
await this.s._chunksCollection.deleteMany({ files_id: id });
if (deletedCount === 0) {
// TODO(NODE-3483): Replace with more appropriate error
// Consider creating new error MongoGridFSFileNotFoundError
throw new error_1.MongoRuntimeError(`File not found for id ${id}`);
}
}
/** Convenience wrapper around find on the files collection */
find(filter = {}, options = {}) {
return this.s._filesCollection.find(filter, options);
}
/**
* Returns a readable stream (GridFSBucketReadStream) for streaming the
* file with the given name from GridFS. If there are multiple files with
* the same name, this will stream the most recent file with the given name
* (as determined by the `uploadDate` field). You can set the `revision`
* option to change this behavior.
*/
openDownloadStreamByName(filename, options) {
let sort = { uploadDate: -1 };
let skip = undefined;
if (options && options.revision != null) {
if (options.revision >= 0) {
sort = { uploadDate: 1 };
skip = options.revision;
}
else {
skip = -options.revision - 1;
}
}
return new download_1.GridFSBucketReadStream(this.s._chunksCollection, this.s._filesCollection, this.s.options.readPreference, { filename }, { ...options, sort, skip });
}
/**
* Renames the file with the given _id to the given string
*
* @param id - the id of the file to rename
* @param filename - new name for the file
*/
async rename(id, filename) {
const filter = { _id: id };
const update = { $set: { filename } };
const { matchedCount } = await this.s._filesCollection.updateOne(filter, update);
if (matchedCount === 0) {
throw new error_1.MongoRuntimeError(`File with id ${id} not found`);
}
}
/** Removes this bucket's files collection, followed by its chunks collection. */
async drop() {
await this.s._filesCollection.drop();
await this.s._chunksCollection.drop();
}
}
exports.GridFSBucket = GridFSBucket;
/**
* When the first call to openUploadStream is made, the upload stream will
* check to see if it needs to create the proper indexes on the chunks and
 * files collections. This event is fired either 1) when it determines that
 * no index creation is necessary, or 2) when it successfully creates the
 * necessary indexes.
* @event
*/
GridFSBucket.INDEX = 'index';
//# sourceMappingURL=index.js.map
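
A minimal usage sketch of the bucket API above, assuming a mongod at mongodb://localhost:27017, a database named test, and a local file ./avatar.png (all illustrative placeholders):

"use strict";
const fs = require("fs");
const { MongoClient, GridFSBucket } = require("mongodb");

async function demo() {
    const client = new MongoClient("mongodb://localhost:27017");
    await client.connect();
    try {
        const bucket = new GridFSBucket(client.db("test"), { bucketName: "photos", chunkSizeBytes: 255 * 1024 });
        // Upload: pipe a local file into a GridFSBucketWriteStream.
        const upload = bucket.openUploadStream("avatar.png", { metadata: { owner: "demo" } });
        await new Promise((resolve, reject) =>
            fs.createReadStream("./avatar.png").pipe(upload).on("finish", resolve).on("error", reject)
        );
        // Locate the files document that was just written.
        const [fileDoc] = await bucket.find({ filename: "avatar.png" }).toArray();
        // Stream back the most recent revision of that filename.
        for await (const chunk of bucket.openDownloadStreamByName("avatar.png", { revision: -1 })) {
            void chunk; // consume and discard
        }
        await bucket.rename(fileDoc._id, "avatar-old.png");
        await bucket.delete(fileDoc._id);
    } finally {
        await client.close();
    }
}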

node_modules/mongodb/lib/gridfs/index.js.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/gridfs/index.ts"],"names":[],"mappings":";;;AAIA,oCAA6C;AAC7C,gDAA2D;AAG3D,oDAAqE;AAErE,yCAKoB;AACpB,qCAAgG;AAEhG,MAAM,6BAA6B,GAG/B;IACF,UAAU,EAAE,IAAI;IAChB,cAAc,EAAE,GAAG,GAAG,IAAI;CAC3B,CAAC;AAgCF;;;GAGG;AACH,MAAa,YAAa,SAAQ,+BAAqC;IAcrE,YAAY,EAAM,EAAE,OAA6B;QAC/C,KAAK,EAAE,CAAC;QACR,IAAI,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC;QACxB,MAAM,cAAc,GAAG;YACrB,GAAG,6BAA6B;YAChC,GAAG,OAAO;YACV,YAAY,EAAE,4BAAY,CAAC,WAAW,CAAC,OAAO,CAAC;SAChD,CAAC;QACF,IAAI,CAAC,CAAC,GAAG;YACP,EAAE;YACF,OAAO,EAAE,cAAc;YACvB,iBAAiB,EAAE,EAAE,CAAC,UAAU,CAAc,cAAc,CAAC,UAAU,GAAG,SAAS,CAAC;YACpF,gBAAgB,EAAE,EAAE,CAAC,UAAU,CAAa,cAAc,CAAC,UAAU,GAAG,QAAQ,CAAC;YACjF,cAAc,EAAE,KAAK;YACrB,sBAAsB,EAAE,KAAK;SAC9B,CAAC;IACJ,CAAC;IAED;;;;;;;OAOG;IAEH,gBAAgB,CACd,QAAgB,EAChB,OAAwC;QAExC,OAAO,IAAI,gCAAuB,CAAC,IAAI,EAAE,QAAQ,EAAE,OAAO,CAAC,CAAC;IAC9D,CAAC;IAED;;;;OAIG;IACH,sBAAsB,CACpB,EAAY,EACZ,QAAgB,EAChB,OAAwC;QAExC,OAAO,IAAI,gCAAuB,CAAC,IAAI,EAAE,QAAQ,EAAE,EAAE,GAAG,OAAO,EAAE,EAAE,EAAE,CAAC,CAAC;IACzE,CAAC;IAED,8FAA8F;IAC9F,kBAAkB,CAChB,EAAY,EACZ,OAAuC;QAEvC,OAAO,IAAI,iCAAsB,CAC/B,IAAI,CAAC,CAAC,CAAC,iBAAiB,EACxB,IAAI,CAAC,CAAC,CAAC,gBAAgB,EACvB,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,cAAc,EAC7B,EAAE,GAAG,EAAE,EAAE,EAAE,EACX,OAAO,CACR,CAAC;IACJ,CAAC;IAED;;;;OAIG;IACH,KAAK,CAAC,MAAM,CAAC,EAAY;QACvB,MAAM,EAAE,YAAY,EAAE,GAAG,MAAM,IAAI,CAAC,CAAC,CAAC,gBAAgB,CAAC,SAAS,CAAC,EAAE,GAAG,EAAE,EAAE,EAAE,CAAC,CAAC;QAE9E,uDAAuD;QACvD,MAAM,IAAI,CAAC,CAAC,CAAC,iBAAiB,CAAC,UAAU,CAAC,EAAE,QAAQ,EAAE,EAAE,EAAE,CAAC,CAAC;QAE5D,IAAI,YAAY,KAAK,CAAC,EAAE;YACtB,uDAAuD;YACvD,2DAA2D;YAC3D,MAAM,IAAI,yBAAiB,CAAC,yBAAyB,EAAE,EAAE,CAAC,CAAC;SAC5D;IACH,CAAC;IAED,8DAA8D;IAC9D,IAAI,CAAC,SAA6B,EAAE,EAAE,UAAuB,EAAE;QAC7D,OAAO,IAAI,CAAC,CAAC,CAAC,gBAAgB,CAAC,IAAI,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACvD,CAAC;IAED;;;;;;OAMG;IACH,wBAAwB,CACtB,QAAgB,EAChB,OAAmD;QAEnD,IAAI,IAAI,GAAS,EAAE,UAAU,EAAE,CAAC,CAAC,EAAE,CAAC;QACpC,IAAI,IAAI,GAAG,SAAS,CAAC;QACrB,IAAI,OAAO,IAAI,OAAO,CAAC,QAAQ,IAAI,IAAI,EAAE;YACvC,IAAI,OAAO,CAAC,QAAQ,IAAI,CAAC,EAAE;gBACzB,IAAI,GAAG,EAAE,UAAU,EAAE,CAAC,EAAE,CAAC;gBACzB,IAAI,GAAG,OAAO,CAAC,QAAQ,CAAC;aACzB;iBAAM;gBACL,IAAI,GAAG,CAAC,OAAO,CAAC,QAAQ,GAAG,CAAC,CAAC;aAC9B;SACF;QACD,OAAO,IAAI,iCAAsB,CAC/B,IAAI,CAAC,CAAC,CAAC,iBAAiB,EACxB,IAAI,CAAC,CAAC,CAAC,gBAAgB,EACvB,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,cAAc,EAC7B,EAAE,QAAQ,EAAE,EACZ,EAAE,GAAG,OAAO,EAAE,IAAI,EAAE,IAAI,EAAE,CAC3B,CAAC;IACJ,CAAC;IAED;;;;;OAKG;IACH,KAAK,CAAC,MAAM,CAAC,EAAY,EAAE,QAAgB;QACzC,MAAM,MAAM,GAAG,EAAE,GAAG,EAAE,EAAE,EAAE,CAAC;QAC3B,MAAM,MAAM,GAAG,EAAE,IAAI,EAAE,EAAE,QAAQ,EAAE,EAAE,CAAC;QACtC,MAAM,EAAE,YAAY,EAAE,GAAG,MAAM,IAAI,CAAC,CAAC,CAAC,gBAAgB,CAAC,SAAS,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;QACjF,IAAI,YAAY,KAAK,CAAC,EAAE;YACtB,MAAM,IAAI,yBAAiB,CAAC,gBAAgB,EAAE,YAAY,CAAC,CAAC;SAC7D;IACH,CAAC;IAED,iFAAiF;IACjF,KAAK,CAAC,IAAI;QACR,MAAM,IAAI,CAAC,CAAC,CAAC,gBAAgB,CAAC,IAAI,EAAE,CAAC;QACrC,MAAM,IAAI,CAAC,CAAC,CAAC,iBAAiB,CAAC,IAAI,EAAE,CAAC;IACxC,CAAC;;AAnJH,oCAoJC;AAhJC;;;;;;;GAOG;AACa,kBAAK,GAAG,OAAgB,CAAC"}

node_modules/mongodb/lib/gridfs/upload.js generated vendored Normal file

@@ -0,0 +1,342 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.GridFSBucketWriteStream = void 0;
const stream_1 = require("stream");
const bson_1 = require("../bson");
const error_1 = require("../error");
const write_concern_1 = require("./../write_concern");
/**
* A writable stream that enables you to write buffers to GridFS.
*
* Do not instantiate this class directly. Use `openUploadStream()` instead.
* @public
*/
class GridFSBucketWriteStream extends stream_1.Writable {
/**
* @param bucket - Handle for this stream's corresponding bucket
* @param filename - The value of the 'filename' key in the files doc
* @param options - Optional settings.
* @internal
*/
constructor(bucket, filename, options) {
super();
options = options ?? {};
this.bucket = bucket;
this.chunks = bucket.s._chunksCollection;
this.filename = filename;
this.files = bucket.s._filesCollection;
this.options = options;
this.writeConcern = write_concern_1.WriteConcern.fromOptions(options) || bucket.s.options.writeConcern;
// Signals the write is all done
this.done = false;
this.id = options.id ? options.id : new bson_1.ObjectId();
// properly inherit the default chunksize from parent
this.chunkSizeBytes = options.chunkSizeBytes || this.bucket.s.options.chunkSizeBytes;
this.bufToStore = Buffer.alloc(this.chunkSizeBytes);
this.length = 0;
this.n = 0;
this.pos = 0;
this.state = {
streamEnd: false,
outstandingRequests: 0,
errored: false,
aborted: false
};
if (!this.bucket.s.calledOpenUploadStream) {
this.bucket.s.calledOpenUploadStream = true;
checkIndexes(this).then(() => {
this.bucket.s.checkedIndexes = true;
this.bucket.emit('index');
}, () => null);
}
}
write(chunk, encodingOrCallback, callback) {
const encoding = typeof encodingOrCallback === 'function' ? undefined : encodingOrCallback;
callback = typeof encodingOrCallback === 'function' ? encodingOrCallback : callback;
return waitForIndexes(this, () => doWrite(this, chunk, encoding, callback));
}
/**
* Places this write stream into an aborted state (all future writes fail)
* and deletes all chunks that have already been written.
*/
async abort() {
if (this.state.streamEnd) {
// TODO(NODE-3485): Replace with MongoGridFSStreamClosed
throw new error_1.MongoAPIError('Cannot abort a stream that has already completed');
}
if (this.state.aborted) {
// TODO(NODE-3485): Replace with MongoGridFSStreamClosed
throw new error_1.MongoAPIError('Cannot call abort() on a stream twice');
}
this.state.aborted = true;
await this.chunks.deleteMany({ files_id: this.id });
}
end(chunkOrCallback, encodingOrCallback, callback) {
const chunk = typeof chunkOrCallback === 'function' ? undefined : chunkOrCallback;
const encoding = typeof encodingOrCallback === 'function' ? undefined : encodingOrCallback;
callback =
typeof chunkOrCallback === 'function'
? chunkOrCallback
: typeof encodingOrCallback === 'function'
? encodingOrCallback
: callback;
if (this.state.streamEnd || checkAborted(this, callback))
return this;
this.state.streamEnd = true;
if (callback) {
this.once(GridFSBucketWriteStream.FINISH, (result) => {
if (callback)
callback(undefined, result);
});
}
if (!chunk) {
waitForIndexes(this, () => !!writeRemnant(this));
return this;
}
this.write(chunk, encoding, () => {
writeRemnant(this);
});
return this;
}
}
exports.GridFSBucketWriteStream = GridFSBucketWriteStream;
/** @event */
GridFSBucketWriteStream.CLOSE = 'close';
/** @event */
GridFSBucketWriteStream.ERROR = 'error';
/**
* `end()` was called and the write stream successfully wrote the file metadata and all the chunks to MongoDB.
* @event
*/
GridFSBucketWriteStream.FINISH = 'finish';
function __handleError(stream, error, callback) {
if (stream.state.errored) {
return;
}
stream.state.errored = true;
if (callback) {
return callback(error);
}
stream.emit(GridFSBucketWriteStream.ERROR, error);
}
function createChunkDoc(filesId, n, data) {
return {
_id: new bson_1.ObjectId(),
files_id: filesId,
n,
data
};
}
async function checkChunksIndex(stream) {
const index = { files_id: 1, n: 1 };
let indexes;
try {
indexes = await stream.chunks.listIndexes().toArray();
}
catch (error) {
if (error instanceof error_1.MongoError && error.code === error_1.MONGODB_ERROR_CODES.NamespaceNotFound) {
indexes = [];
}
else {
throw error;
}
}
const hasChunksIndex = !!indexes.find(index => {
const keys = Object.keys(index.key);
if (keys.length === 2 && index.key.files_id === 1 && index.key.n === 1) {
return true;
}
return false;
});
if (!hasChunksIndex) {
const writeConcernOptions = getWriteOptions(stream);
await stream.chunks.createIndex(index, {
...writeConcernOptions,
background: true,
unique: true
});
}
}
function checkDone(stream, callback) {
if (stream.done)
return true;
if (stream.state.streamEnd && stream.state.outstandingRequests === 0 && !stream.state.errored) {
// Set done so we do not trigger duplicate createFilesDoc
stream.done = true;
// Create a new files doc
const filesDoc = createFilesDoc(stream.id, stream.length, stream.chunkSizeBytes, stream.filename, stream.options.contentType, stream.options.aliases, stream.options.metadata);
if (checkAborted(stream, callback)) {
return false;
}
stream.files.insertOne(filesDoc, getWriteOptions(stream)).then(() => {
stream.emit(GridFSBucketWriteStream.FINISH, filesDoc);
stream.emit(GridFSBucketWriteStream.CLOSE);
}, error => {
return __handleError(stream, error, callback);
});
return true;
}
return false;
}
async function checkIndexes(stream) {
const doc = await stream.files.findOne({}, { projection: { _id: 1 } });
if (doc != null) {
// If at least one document exists assume the collection has the required index
return;
}
const index = { filename: 1, uploadDate: 1 };
let indexes;
try {
indexes = await stream.files.listIndexes().toArray();
}
catch (error) {
if (error instanceof error_1.MongoError && error.code === error_1.MONGODB_ERROR_CODES.NamespaceNotFound) {
indexes = [];
}
else {
throw error;
}
}
const hasFileIndex = !!indexes.find(index => {
const keys = Object.keys(index.key);
if (keys.length === 2 && index.key.filename === 1 && index.key.uploadDate === 1) {
return true;
}
return false;
});
if (!hasFileIndex) {
await stream.files.createIndex(index, { background: false });
}
await checkChunksIndex(stream);
}
function createFilesDoc(_id, length, chunkSize, filename, contentType, aliases, metadata) {
const ret = {
_id,
length,
chunkSize,
uploadDate: new Date(),
filename
};
if (contentType) {
ret.contentType = contentType;
}
if (aliases) {
ret.aliases = aliases;
}
if (metadata) {
ret.metadata = metadata;
}
return ret;
}
function doWrite(stream, chunk, encoding, callback) {
if (checkAborted(stream, callback)) {
return false;
}
const inputBuf = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk, encoding);
stream.length += inputBuf.length;
// Input is small enough to fit in our buffer
if (stream.pos + inputBuf.length < stream.chunkSizeBytes) {
inputBuf.copy(stream.bufToStore, stream.pos);
stream.pos += inputBuf.length;
callback && callback();
// Note that we reverse the typical semantics of write's return value
// to be compatible with node's `.pipe()` function.
// True means client can keep writing.
return true;
}
// Otherwise, buffer is too big for current chunk, so we need to flush
// to MongoDB.
let inputBufRemaining = inputBuf.length;
let spaceRemaining = stream.chunkSizeBytes - stream.pos;
let numToCopy = Math.min(spaceRemaining, inputBuf.length);
let outstandingRequests = 0;
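// Copy the input into the staging buffer; each time the buffer fills to
// chunkSizeBytes, flush it as one chunk document and continue until the
// whole input has been consumed.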
while (inputBufRemaining > 0) {
const inputBufPos = inputBuf.length - inputBufRemaining;
inputBuf.copy(stream.bufToStore, stream.pos, inputBufPos, inputBufPos + numToCopy);
stream.pos += numToCopy;
spaceRemaining -= numToCopy;
let doc;
if (spaceRemaining === 0) {
doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore));
++stream.state.outstandingRequests;
++outstandingRequests;
if (checkAborted(stream, callback)) {
return false;
}
stream.chunks.insertOne(doc, getWriteOptions(stream)).then(() => {
--stream.state.outstandingRequests;
--outstandingRequests;
if (!outstandingRequests) {
stream.emit('drain', doc);
callback && callback();
checkDone(stream);
}
}, error => {
return __handleError(stream, error);
});
spaceRemaining = stream.chunkSizeBytes;
stream.pos = 0;
++stream.n;
}
inputBufRemaining -= numToCopy;
numToCopy = Math.min(spaceRemaining, inputBufRemaining);
}
// Note that we reverse the typical semantics of write's return value
// to be compatible with node's `.pipe()` function.
// False means the client should wait for the 'drain' event.
return false;
}
function getWriteOptions(stream) {
const obj = {};
if (stream.writeConcern) {
obj.writeConcern = {
w: stream.writeConcern.w,
wtimeout: stream.writeConcern.wtimeout,
j: stream.writeConcern.j
};
}
return obj;
}
function waitForIndexes(stream, callback) {
if (stream.bucket.s.checkedIndexes) {
return callback(false);
}
stream.bucket.once('index', () => {
callback(true);
});
return true;
}
function writeRemnant(stream, callback) {
// Buffer is empty, so don't bother to insert
if (stream.pos === 0) {
return checkDone(stream, callback);
}
++stream.state.outstandingRequests;
// Create a new buffer to make sure the buffer isn't bigger than it needs
// to be.
const remnant = Buffer.alloc(stream.pos);
stream.bufToStore.copy(remnant, 0, 0, stream.pos);
const doc = createChunkDoc(stream.id, stream.n, remnant);
// If the stream was aborted, do not write remnant
if (checkAborted(stream, callback)) {
return false;
}
stream.chunks.insertOne(doc, getWriteOptions(stream)).then(() => {
--stream.state.outstandingRequests;
checkDone(stream);
}, error => {
return __handleError(stream, error);
});
return true;
}
function checkAborted(stream, callback) {
if (stream.state.aborted) {
if (typeof callback === 'function') {
// TODO(NODE-3485): Replace with MongoGridFSStreamClosedError
callback(new error_1.MongoAPIError('Stream has been aborted'));
}
return true;
}
return false;
}
//# sourceMappingURL=upload.js.map
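
A minimal usage sketch of the write stream above, writing an in-memory buffer; the URI and database name are illustrative placeholders:

"use strict";
const { MongoClient, GridFSBucket } = require("mongodb");

async function writeBuffer(data) {
    const client = new MongoClient("mongodb://localhost:27017");
    await client.connect();
    try {
        const bucket = new GridFSBucket(client.db("test"));
        const upload = bucket.openUploadStream("report.bin");
        // With the default chunkSizeBytes of 255 * 1024 (261120), a 1 MiB buffer
        // is split into Math.ceil(1048576 / 261120) = 5 chunk documents keyed by
        // (files_id, n), plus one files document written when the stream finishes.
        const filesDoc = await new Promise((resolve, reject) => {
            upload.once("error", reject);
            upload.once("finish", resolve); // emitted with the files collection document
            upload.end(data);
        });
        return filesDoc._id; // same value as upload.id, assigned before any write
    } finally {
        await client.close();
    }
}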

node_modules/mongodb/lib/gridfs/upload.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long