Host FlexSearch

Will Faught
2025-03-02 00:23:46 -08:00
parent 2e77b35940
commit 078157e62c
164 changed files with 23495 additions and 1 deletion

paige/node_modules/flexsearch/dist/module/async.js (generated, vendored, 51 lines)

@@ -0,0 +1,51 @@
import { IndexInterface, DocumentInterface } from "./type.js";
//import { promise as Promise } from "./polyfill.js";
import { is_function, is_object, is_string } from "./common.js";
export default function (prototype) {
register(prototype, "add");
register(prototype, "append");
register(prototype, "search");
register(prototype, "update");
register(prototype, "remove");
}
function register(prototype, key) {
prototype[key + "Async"] = function () {
/** @type {IndexInterface|DocumentInterface} */
const self = this,
args = /*[].slice.call*/arguments,
arg = args[args.length - 1];
let callback;
if (is_function(arg)) {
callback = arg;
delete args[args.length - 1];
}
const promise = new Promise(function (resolve) {
setTimeout(function () {
self.async = !0;
const res = self[key].apply(self, args);
self.async = !1;
resolve(res);
});
});
if (callback) {
promise.then(callback);
return this;
} else {
return promise;
}
};
}
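The wrapper above turns each synchronous method into an *Async variant that either resolves a promise or invokes a trailing callback. A minimal usage sketch, assuming an Index to which apply_async() has been applied (index.js does this); ids and contents are made up for illustration:

import Index from "./index.js";

const index = new Index();

// promise style: resolves with whatever the synchronous method returns
index.addAsync(1, "hello world")
    .then(() => index.searchAsync("hello"))
    .then(result => console.log(result)); // e.g. [1]

// callback style: the last argument is detected via is_function(), and `this` is returned instead of a promise
index.searchAsync("hello", function (result) {
    console.log(result); // e.g. [1]
});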

paige/node_modules/flexsearch/dist/module/cache.js (generated, vendored, 168 lines)

@@ -0,0 +1,168 @@
import { IndexInterface, DocumentInterface } from "./type.js";
import { create_object, is_object } from "./common.js";
/**
* @param {boolean|number=} limit
* @constructor
*/
function CacheClass(limit) {
/** @private */
this.limit = !0 !== limit && limit;
/** @private */
this.cache = create_object();
/** @private */
this.queue = [];
//this.clear();
}
export default CacheClass;
/**
* @param {string|Object} query
* @param {number|Object=} limit
* @param {Object=} options
* @this {IndexInterface}
* @returns {Array<number|string>}
*/
export function searchCache(query, limit, options) {
if (is_object(query)) {
query = query.query;
}
let cache = this.cache.get(query);
if (!cache) {
cache = this.search(query, limit, options);
this.cache.set(query, cache);
}
return cache;
}
// CacheClass.prototype.clear = function(){
//
// /** @private */
// this.cache = create_object();
//
// /** @private */
// this.queue = [];
// };
CacheClass.prototype.set = function (key, value) {
if (!this.cache[key]) {
// it is just a shame that native function array.shift() performs so bad
// const length = this.queue.length;
//
// this.queue[length] = key;
//
// if(length === this.limit){
//
// delete this.cache[this.queue.shift()];
// }
// the same bad performance
// this.queue.unshift(key);
//
// if(this.queue.length === this.limit){
//
// this.queue.pop();
// }
// fast implementation variant
// let length = this.queue.length;
//
// if(length === this.limit){
//
// length--;
//
// delete this.cache[this.queue[0]];
//
// for(let x = 0; x < length; x++){
//
// this.queue[x] = this.queue[x + 1];
// }
// }
//
// this.queue[length] = key;
// current fastest implementation variant
// theoretically that should not perform better compared to the example above
let length = this.queue.length;
if (length === this.limit) {
delete this.cache[this.queue[length - 1]];
} else {
length++;
}
for (let x = length - 1; 0 < x; x--) {
this.queue[x] = this.queue[x - 1];
}
this.queue[0] = key;
}
this.cache[key] = value;
};
CacheClass.prototype.get = function (key) {
const cache = this.cache[key];
if (this.limit && cache) {
// probably the indexOf() method performs faster when matched content is on front (left-to-right)
// using lastIndexOf() does not help, it performs almost slower
const pos = this.queue.indexOf(key);
// if(pos < this.queue.length - 1){
//
// const tmp = this.queue[pos];
// this.queue[pos] = this.queue[pos + 1];
// this.queue[pos + 1] = tmp;
// }
if (pos) {
const tmp = this.queue[pos - 1];
this.queue[pos - 1] = this.queue[pos];
this.queue[pos] = tmp;
}
}
return cache;
};
CacheClass.prototype.del = function (id) {
for (let i = 0, item, key; i < this.queue.length; i++) {
key = this.queue[i];
item = this.cache[key];
if (item.includes(id)) {
this.queue.splice(i--, 1);
delete this.cache[key];
}
}
};
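CacheClass is a small LRU-style cache: set() pushes the key to the front of the queue and evicts the tail entry once the limit is reached, get() swaps a hit one slot towards the front, and del() drops every cached result that still contains a removed id. searchCache() is meant to be bound as a method of an Index or Document (note the @this annotation) and memoizes search results per query string in this cache. A short sketch of the behaviour, with keys and ids invented for illustration:

import CacheClass from "./cache.js";

const cache = new CacheClass(2);   // keep at most 2 entries
cache.set("foo", [1, 2]);
cache.set("bar", [3]);
cache.get("foo");                  // [1, 2] and "foo" moves one slot towards the front
cache.set("baz", [4]);             // queue is full, so the tail entry ("bar") is evicted
cache.del(1);                      // drops the "foo" entry because its result contains id 1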

paige/node_modules/flexsearch/dist/module/common.js (generated, vendored, 78 lines)

@@ -0,0 +1,78 @@
export function parse_option(value, default_value) {
return "undefined" != typeof value ? value : default_value;
}
/**
* @param {!number} count
* @returns {Array<Object>}
*/
export function create_object_array(count) {
const array = Array(count);
for (let i = 0; i < count; i++) {
array[i] = create_object();
}
return array;
}
export function create_arrays(count) {
const array = Array(count);
for (let i = 0; i < count; i++) {
array[i] = [];
}
return array;
}
/**
* @param {!Object} obj
* @returns {Array<string>}
*/
export function get_keys(obj) {
return Object.keys(obj);
}
export function create_object() {
return Object.create(null);
}
export function concat(arrays) {
return [].concat.apply([], arrays);
}
export function sort_by_length_down(a, b) {
return b.length - a.length;
}
export function is_array(val) {
return val.constructor === Array;
}
export function is_string(val) {
return "string" == typeof val;
}
export function is_object(val) {
return "object" == typeof val;
}
export function is_function(val) {
return "function" == typeof val;
}
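These helpers mostly exist to avoid prototype pollution and to pre-size arrays. A tiny sketch:

import { create_object, create_object_array, get_keys, sort_by_length_down } from "./common.js";

const map = create_object();       // prototype-less, so keys like "constructor" are safe
map["constructor"] = 1;
get_keys(map);                     // ["constructor"]

create_object_array(3);            // three independent prototype-less buckets, one per score slot

["ab", "abcd", "abc"].sort(sort_by_length_down); // ["abcd", "abc", "ab"]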

paige/node_modules/flexsearch/dist/module/document.js (generated, vendored, 731 lines)

@@ -0,0 +1,731 @@
/**!
* FlexSearch.js
* Author and Copyright: Thomas Wilkerling
* Licence: Apache-2.0
* Hosted by Nextapps GmbH
* https://github.com/nextapps-de/flexsearch
*/
import Index from "./index.js";
import { DocumentInterface } from "./type.js";
import Cache, { searchCache } from "./cache.js";
import { create_object, is_array, is_string, is_object, parse_option, get_keys } from "./common.js";
import apply_async from "./async.js";
import { intersect, intersect_union } from "./intersect.js";
import { exportDocument, importDocument } from "./serialize.js";
import WorkerIndex from "./worker/index.js";
/**
* @constructor
* @implements {DocumentInterface}
* @param {Object=} options
* @return {Document}
*/
function Document(options) {
if (!(this instanceof Document)) {
return new Document(options);
}
const document = options.document || options.doc || options;
let opt;
this.tree = [];
this.field = [];
this.marker = [];
this.register = create_object();
this.key = (opt = document.key || document.id) && parse_tree(opt, this.marker) || "id";
this.fastupdate = parse_option(options.fastupdate, !0);
this.storetree = (opt = document.store) && !0 !== opt && [];
this.store = opt && create_object();
// TODO case-insensitive tags
this.tag = (opt = document.tag) && parse_tree(opt, this.marker);
this.tagindex = opt && create_object();
this.cache = (opt = options.cache) && new Cache(opt);
// do not apply cache again for the indexes
options.cache = !1;
this.worker = options.worker;
// this switch is used by recall of promise callbacks
this.async = !1;
/** @export */
this.index = parse_descriptor.call(this, options, document);
}
export default Document;
/**
* @this Document
*/
function parse_descriptor(options, document) {
const index = create_object();
let field = document.index || document.field || document;
if (is_string(field)) {
field = [field];
}
for (let i = 0, key, opt; i < field.length; i++) {
key = field[i];
if (!is_string(key)) {
opt = key;
key = key.field;
}
opt = is_object(opt) ? Object.assign({}, options, opt) : options;
if (this.worker) {
index[key] = new WorkerIndex(opt);
if (!index[key].worker) {
this.worker = !1;
}
}
if (!this.worker) {
index[key] = new Index(opt, this.register);
}
this.tree[i] = parse_tree(key, this.marker);
this.field[i] = key;
}
if (this.storetree) {
let store = document.store;
if (is_string(store)) {
store = [store];
}
for (let i = 0; i < store.length; i++) {
this.storetree[i] = parse_tree(store[i], this.marker);
}
}
return index;
}
function parse_tree(key, marker) {
const tree = key.split(":");
let count = 0;
for (let i = 0; i < tree.length; i++) {
key = tree[i];
if (0 <= key.indexOf("[]")) {
key = key.substring(0, key.length - 2);
if (key) {
marker[count] = !0;
}
}
if (key) {
tree[count++] = key;
}
}
if (count < tree.length) {
tree.length = count;
}
return 1 < count ? tree : tree[0];
}
// TODO support generic function created from string when tree depth > 1
function parse_simple(obj, tree) {
if (is_string(tree)) {
obj = obj[tree];
} else {
for (let i = 0; obj && i < tree.length; i++) {
obj = obj[tree[i]];
}
}
return obj;
}
// TODO support generic function created from string when tree depth > 1
function store_value(obj, store, tree, pos, key) {
obj = obj[key];
// reached target field
if (pos === tree.length - 1) {
// store target value
store[key] = obj;
} else if (obj) {
if (is_array(obj)) {
store = store[key] = Array(obj.length);
for (let i = 0; i < obj.length; i++) {
// do not increase pos (an array is not a field)
store_value(obj, store, tree, pos, i);
}
} else {
store = store[key] || (store[key] = create_object());
key = tree[++pos];
store_value(obj, store, tree, pos, key);
}
}
}
function add_index(obj, tree, marker, pos, index, id, key, _append) {
obj = obj[key];
if (obj) {
// reached target field
if (pos === tree.length - 1) {
// handle target value
if (is_array(obj)) {
// append array contents so each entry gets a new scoring context
if (marker[pos]) {
for (let i = 0; i < obj.length; i++) {
index.add(id, obj[i], !0, !0);
}
return;
}
// or join array contents and use one scoring context
obj = obj.join(" ");
}
index.add(id, obj, _append, !0);
} else {
if (is_array(obj)) {
for (let i = 0; i < obj.length; i++) {
// do not increase index, an array is not a field
add_index(obj, tree, marker, pos, index, id, i, _append);
}
} else {
key = tree[++pos];
add_index(obj, tree, marker, pos, index, id, key, _append);
}
}
}
}
/**
*
* @param id
* @param content
* @param {boolean=} _append
* @returns {Document|Promise}
*/
Document.prototype.add = function (id, content, _append) {
if (is_object(id)) {
content = id;
id = parse_simple(content, this.key);
}
if (content && (id || 0 === id)) {
if (!_append && this.register[id]) {
return this.update(id, content);
}
for (let i = 0, tree, field; i < this.field.length; i++) {
field = this.field[i];
tree = this.tree[i];
if (is_string(tree)) {
tree = [tree];
}
add_index(content, tree, this.marker, 0, this.index[field], id, tree[0], _append);
}
if (this.tag) {
let tag = parse_simple(content, this.tag),
dupes = create_object();
if (is_string(tag)) {
tag = [tag];
}
for (let i = 0, key, arr; i < tag.length; i++) {
key = tag[i];
if (!dupes[key]) {
dupes[key] = 1;
arr = this.tagindex[key] || (this.tagindex[key] = []);
if (!_append || !arr.includes(id)) {
arr[arr.length] = id;
// add a reference to the register for fast updates
if (this.fastupdate) {
const tmp = this.register[id] || (this.register[id] = []);
tmp[tmp.length] = arr;
}
}
}
}
}
// TODO: how to handle store when appending contents?
if (this.store && (!_append || !this.store[id])) {
let store;
if (this.storetree) {
store = create_object();
for (let i = 0, tree; i < this.storetree.length; i++) {
tree = this.storetree[i];
if (is_string(tree)) {
store[tree] = content[tree];
} else {
store_value(content, store, tree, 0, tree[0]);
}
}
}
this.store[id] = store || content;
}
}
return this;
};
Document.prototype.append = function (id, content) {
return this.add(id, content, !0);
};
Document.prototype.update = function (id, content) {
return this.remove(id).add(id, content);
};
Document.prototype.remove = function (id) {
if (is_object(id)) {
id = parse_simple(id, this.key);
}
if (this.register[id]) {
for (let i = 0; i < this.field.length; i++) {
// workers does not share the register
this.index[this.field[i]].remove(id, !this.worker);
if (this.fastupdate) {
// when fastupdate was enabled all ids are removed
break;
}
}
if (this.tag) {
// when fastupdate was enabled all ids are already removed
if (!this.fastupdate) {
for (let key in this.tagindex) {
const tag = this.tagindex[key],
pos = tag.indexOf(id);
if (-1 !== pos) {
if (1 < tag.length) {
tag.splice(pos, 1);
} else {
delete this.tagindex[key];
}
}
}
}
}
if (this.store) {
delete this.store[id];
}
delete this.register[id];
}
return this;
};
/**
* @param {!string|Object} query
* @param {number|Object=} limit
* @param {Object=} options
* @param {Array<Array>=} _resolve For internal use only.
* @returns {Promise|Array}
*/
Document.prototype.search = function (query, limit, options, _resolve) {
if (!options) {
if (!limit && is_object(query)) {
options = /** @type {Object} */query;
query = "";
} else if (is_object(limit)) {
options = /** @type {Object} */limit;
limit = 0;
}
}
let result = [],
result_field = [],
pluck,
enrich,
field,
tag,
bool,
offset,
count = 0;
if (options) {
if (is_array(options)) {
field = options;
options = null;
} else {
query = options.query || query;
pluck = options.pluck;
field = pluck || options.index || options.field /*|| (is_string(options) && [options])*/;
tag = options.tag;
enrich = this.store && options.enrich;
bool = "and" === options.bool;
limit = options.limit || limit || 100;
offset = options.offset || 0;
if (tag) {
if (is_string(tag)) {
tag = [tag];
}
// when tags is used and no query was set,
// then just return the tag indexes
if (!query) {
for (let i = 0, res; i < tag.length; i++) {
res = get_tag.call(this, tag[i], limit, offset, enrich);
if (res) {
result[result.length] = res;
count++;
}
}
return count ? result : [];
}
}
if (is_string(field)) {
field = [field];
}
}
}
field || (field = this.field);
bool = bool && (1 < field.length || tag && 1 < tag.length);
const promises = !_resolve && (this.worker || this.async) && [];
// TODO solve this in one loop below
for (let i = 0, res, key, len; i < field.length; i++) {
let field_options;
key = field[i];
if (!is_string(key)) {
field_options = key;
key = field_options.field;
query = field_options.query || query;
limit = field_options.limit || limit;
enrich = field_options.enrich || enrich;
}
if (promises) {
promises[i] = this.index[key].searchAsync(query, limit, field_options || options);
// just collect and continue
continue;
} else if (_resolve) {
res = _resolve[i];
} else {
// inherit options also when search? it is just for laziness, Object.assign() has a cost
res = this.index[key].search(query, limit, field_options || options);
}
len = res && res.length;
if (tag && len) {
const arr = [];
let count = 0;
if (bool) {
// prepare for intersection
arr[0] = [res];
}
for (let y = 0, key, res; y < tag.length; y++) {
key = tag[y];
res = this.tagindex[key];
len = res && res.length;
if (len) {
count++;
arr[arr.length] = bool ? [res] : res;
}
}
if (count) {
if (bool) {
res = intersect(arr, limit || 100, offset || 0);
} else {
res = intersect_union(res, arr);
}
len = res.length;
}
}
if (len) {
result_field[count] = key;
result[count++] = res;
} else if (bool) {
return [];
}
}
if (promises) {
const self = this;
// anyone knows a better workaround of optionally having async promises?
// the promise.all() needs to be wrapped into additional promise,
// otherwise the recursive callback wouldn't run before return
return new Promise(function (resolve) {
Promise.all(promises).then(function (result) {
resolve(self.search(query, limit, options, result));
});
});
}
if (!count) {
// fast path "not found"
return [];
}
if (pluck && (!enrich || !this.store)) {
// fast path optimization
return result[0];
}
for (let i = 0, res; i < result_field.length; i++) {
res = result[i];
if (res.length) {
if (enrich) {
res = apply_enrich.call(this, res);
}
}
if (pluck) {
return res;
}
result[i] = {
field: result_field[i],
result: res
};
}
return result;
};
/**
* @this Document
*/
function get_tag(key, limit, offset, enrich) {
let res = this.tagindex[key],
len = res && res.length - offset;
if (len && 0 < len) {
if (offset || len > limit) {
res = res.slice(offset, offset + limit);
}
if (enrich) {
res = apply_enrich.call(this, res);
}
return res;
}
}
/**
* @this Document
*/
function apply_enrich(res) {
const arr = Array(res.length);
for (let x = 0, id; x < res.length; x++) {
id = res[x];
arr[x] = {
id: id,
doc: this.store[id]
};
}
return arr;
}
Document.prototype.contain = function (id) {
return !!this.register[id];
};
Document.prototype.get = function (id) {
return this.store[id];
};
Document.prototype.set = function (id, data) {
this.store[id] = data;
return this;
};
Document.prototype.searchCache = searchCache;
Document.prototype.export = exportDocument;
Document.prototype.import = importDocument;
apply_async(Document.prototype);
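A minimal sketch of how this Document index is typically wired up; the field names, ids and return values below are illustrative only and assume the default latin encoder:

import Document from "./document.js";

const docs = new Document({
    document: {
        id: "id",
        index: ["title", "author:name"],   // "author:name" is parsed by parse_tree() into the nested path author.name
        tag: "genre",
        store: ["title"]
    }
});

docs.add({ id: 1, title: "Dune", author: { name: "Frank Herbert" }, genre: "scifi" });

docs.search("dune");                             // [{ field: "title", result: [1] }]
docs.search("frank", { pluck: "author:name" });  // [1], pluck returns the id list of a single field
docs.search({ tag: "scifi" });                   // tag-only lookup served from the tag index
docs.search("dune", { enrich: true });           // result entries carry { id, doc } taken from the store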

paige/node_modules/flexsearch/dist/module/engine.js (generated, vendored, 28 lines)

@@ -0,0 +1,28 @@
import { searchCache } from "./cache";
/**
* @constructor
* @abstract
*/
function Engine(index) {
index.prototype.searchCache = searchCache;
index.prototype.addAsync = addAsync;
index.prototype.appendAsync = appendAsync;
index.prototype.searchAsync = searchAsync;
index.prototype.updateAsync = updateAsync;
index.prototype.removeAsync = removeAsync;
}
Engine.prototype.searchCache = searchCache;
Engine.prototype.addAsync = addAsync;
Engine.prototype.appendAsync = appendAsync;
Engine.prototype.searchAsync = searchAsync;
Engine.prototype.updateAsync = updateAsync;
Engine.prototype.removeAsync = removeAsync;

paige/node_modules/flexsearch/dist/module/global.js (generated, vendored, 22 lines)

@@ -0,0 +1,22 @@
export const global_lang = {};
export const global_charset = {};
/**
* @param {!string} name
* @param {Object} charset
*/
export function registerCharset(name, charset) {
global_charset[name] = charset;
}
/**
* @param {!string} name
* @param {Object} lang
*/
export function registerLanguage(name, lang) {
global_lang[name] = lang;
}
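These two registries are what Index consults when charset or lang are given as strings. A sketch of registering and then referencing a pack by name; the module paths are assumed from the usual dist layout:

import { registerCharset, registerLanguage } from "./global.js";
import * as charset_latin_simple from "./lang/latin/simple.js";
import * as lang_de from "./lang/de.js";

registerCharset("latin:simple", charset_latin_simple);
registerLanguage("de", lang_de);

// afterwards an index can be configured by name:
// new Index({ charset: "latin:simple", lang: "de" })
// a charset string without a ":" gets ":default" appended by the Index constructor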

paige/node_modules/flexsearch/dist/module/index.js (generated, vendored, 784 lines)

@@ -0,0 +1,784 @@
/**!
* FlexSearch.js
* Author and Copyright: Thomas Wilkerling
* Licence: Apache-2.0
* Hosted by Nextapps GmbH
* https://github.com/nextapps-de/flexsearch
*/
import { IndexInterface } from "./type.js";
import { encode as default_encoder } from "./lang/latin/default.js";
import { create_object, create_object_array, concat, sort_by_length_down, is_array, is_string, is_object, parse_option } from "./common.js";
import { pipeline, init_stemmer_or_matcher, init_filter } from "./lang.js";
import { global_lang, global_charset } from "./global.js";
import apply_async from "./async.js";
import { intersect } from "./intersect.js";
import Cache, { searchCache } from "./cache.js";
import apply_preset from "./preset.js";
import { exportIndex, importIndex } from "./serialize.js";
/**
* @constructor
* @implements IndexInterface
* @param {Object=} options
* @param {Object=} _register
* @return {Index}
*/
function Index(options, _register) {
if (!(this instanceof Index)) {
return new Index(options);
}
let charset, lang, tmp;
if (options) {
options = apply_preset(options);
charset = options.charset;
lang = options.lang;
if (is_string(charset)) {
if (-1 === charset.indexOf(":")) {
charset += ":default";
}
charset = global_charset[charset];
}
if (is_string(lang)) {
lang = global_lang[lang];
}
} else {
options = {};
}
let resolution,
optimize,
context = options.context || {};
this.encode = options.encode || charset && charset.encode || default_encoder;
this.register = _register || create_object();
this.resolution = resolution = options.resolution || 9;
this.tokenize = tmp = charset && charset.tokenize || options.tokenize || "strict";
this.depth = "strict" === tmp && context.depth;
this.bidirectional = parse_option(context.bidirectional, !0);
this.optimize = optimize = parse_option(options.optimize, !0);
this.fastupdate = parse_option(options.fastupdate, !0);
this.minlength = options.minlength || 1;
this.boost = options.boost;
// when not using the memory strategy the score array should not pre-allocated to its full length
this.map = optimize ? create_object_array(resolution) : create_object();
this.resolution_ctx = resolution = context.resolution || 1;
this.ctx = optimize ? create_object_array(resolution) : create_object();
this.rtl = charset && charset.rtl || options.rtl;
this.matcher = (tmp = options.matcher || lang && lang.matcher) && init_stemmer_or_matcher(tmp, !1);
this.stemmer = (tmp = options.stemmer || lang && lang.stemmer) && init_stemmer_or_matcher(tmp, !0);
this.filter = (tmp = options.filter || lang && lang.filter) && init_filter(tmp);
this.cache = (tmp = options.cache) && new Cache(tmp);
}
export default Index;
//Index.prototype.pipeline = pipeline;
/**
* @param {!number|string} id
* @param {!string} content
*/
Index.prototype.append = function (id, content) {
return this.add(id, content, !0);
};
// TODO:
// string + number as text
// boolean, null, undefined as ?
/**
* @param {!number|string} id
* @param {!string} content
* @param {boolean=} _append
* @param {boolean=} _skip_update
*/
Index.prototype.add = function (id, content, _append, _skip_update) {
if (content && (id || 0 === id)) {
if (!_skip_update && !_append && this.register[id]) {
return this.update(id, content);
}
content = this.encode("" + content);
const length = content.length;
if (length) {
// check context dupes to skip all contextual redundancy along a document
const dupes_ctx = create_object(),
dupes = create_object(),
depth = this.depth,
resolution = this.resolution;
for (let i = 0; i < length; i++) {
let term = content[this.rtl ? length - 1 - i : i],
term_length = term.length;
// skip dupes will break the context chain
if (term && term_length >= this.minlength && (depth || !dupes[term])) {
let score = get_score(resolution, length, i),
token = "";
switch (this.tokenize) {
case "full":
if (2 < term_length) {
for (let x = 0; x < term_length; x++) {
for (let y = term_length; y > x; y--) {
if (y - x >= this.minlength) {
const partial_score = get_score(resolution, length, i, term_length, x);
token = term.substring(x, y);
this.push_index(dupes, token, partial_score, id, _append);
}
}
}
break;
}
// fallthrough to next case when term length < 3
case "reverse":
// skip last round (this token exist already in "forward")
if (1 < term_length) {
for (let x = term_length - 1; 0 < x; x--) {
token = term[x] + token;
if (token.length >= this.minlength) {
const partial_score = get_score(resolution, length, i, term_length, x);
this.push_index(dupes, token, partial_score, id, _append);
}
}
token = "";
}
// fallthrough to next case to apply forward also
case "forward":
if (1 < term_length) {
for (let x = 0; x < term_length; x++) {
token += term[x];
if (token.length >= this.minlength) {
this.push_index(dupes, token, score, id, _append);
}
}
break;
}
// fallthrough to next case when token has a length of 1
default:
// case "strict":
if (this.boost) {
score = Math.min(0 | score / this.boost(content, term, i), resolution - 1);
}
this.push_index(dupes, term, score, id, _append);
// context is just supported by tokenizer "strict"
if (depth) {
if (1 < length && i < length - 1) {
// check inner dupes to skip repeating words in the current context
const dupes_inner = create_object(),
resolution = this.resolution_ctx,
keyword = term,
size = Math.min(depth + 1, length - i);
dupes_inner[keyword] = 1;
for (let x = 1; x < size; x++) {
term = content[this.rtl ? length - 1 - i - x : i + x];
if (term && term.length >= this.minlength && !dupes_inner[term]) {
dupes_inner[term] = 1;
const context_score = get_score(resolution + (length / 2 > resolution ? 0 : 1), length, i, size - 1, x - 1),
swap = this.bidirectional && term > keyword;
this.push_index(dupes_ctx, swap ? keyword : term, context_score, id, _append, swap ? term : keyword);
}
}
}
}
}
}
}
this.fastupdate || (this.register[id] = 1);
}
}
return this;
};
/**
* @param {number} resolution
* @param {number} length
* @param {number} i
* @param {number=} term_length
* @param {number=} x
* @returns {number}
*/
function get_score(resolution, length, i, term_length, x) {
// console.log("resolution", resolution);
// console.log("length", length);
// console.log("term_length", term_length);
// console.log("i", i);
// console.log("x", x);
// console.log((resolution - 1) / (length + (term_length || 0)) * (i + (x || 0)) + 1);
// the first resolution slot is reserved for the best match,
// when a query matches the first word(s).
// also to stretch score to the whole range of resolution, the
// calculation is shift by one and cut the floating point.
// this needs the resolution "1" to be handled additionally.
// do not stretch the resolution more than the term length will
// improve performance and memory, also it improves scoring in
// most cases between a short document and a long document
return i && 1 < resolution ? length + (term_length || 0) <= resolution ? i + (x || 0) : 0 | (resolution - 1) / (length + (term_length || 0)) * (i + (x || 0)) + 1 : 0;
}
/**
* @private
* @param dupes
* @param value
* @param score
* @param id
* @param {boolean=} append
* @param {string=} keyword
*/
Index.prototype.push_index = function (dupes, value, score, id, append, keyword) {
let arr = keyword ? this.ctx : this.map;
if (!dupes[value] || keyword && !dupes[value][keyword]) {
if (this.optimize) {
arr = arr[score];
}
if (keyword) {
dupes = dupes[value] || (dupes[value] = create_object());
dupes[keyword] = 1;
arr = arr[keyword] || (arr[keyword] = create_object());
} else {
dupes[value] = 1;
}
arr = arr[value] || (arr[value] = []);
if (!this.optimize) {
arr = arr[score] || (arr[score] = []);
}
if (!append || !arr.includes(id)) {
arr[arr.length] = id;
// add a reference to the register for fast updates
if (this.fastupdate) {
const tmp = this.register[id] || (this.register[id] = []);
tmp[tmp.length] = arr;
}
}
}
};
/**
* @param {string|Object} query
* @param {number|Object=} limit
* @param {Object=} options
* @returns {Array<number|string>}
*/
Index.prototype.search = function (query, limit, options) {
if (!options) {
if (!limit && is_object(query)) {
options = /** @type {Object} */query;
query = options.query;
} else if (is_object(limit)) {
options = /** @type {Object} */limit;
}
}
let result = [],
length,
context,
suggest,
offset = 0;
if (options) {
query = options.query || query;
limit = options.limit;
offset = options.offset || 0;
context = options.context;
suggest = options.suggest;
}
if (query) {
query = /** @type {Array} */this.encode("" + query);
length = query.length;
// TODO: solve this in one single loop below
if (1 < length) {
const dupes = create_object(),
query_new = [];
for (let i = 0, count = 0, term; i < length; i++) {
term = query[i];
if (term && term.length >= this.minlength && !dupes[term]) {
// this fast path can just apply when not in memory-optimized mode
if (!this.optimize && !suggest && !this.map[term]) {
// fast path "not found"
return result;
} else {
query_new[count++] = term;
dupes[term] = 1;
}
}
}
query = query_new;
length = query.length;
}
}
if (!length) {
return result;
}
limit || (limit = 100);
let depth = this.depth && 1 < length && !1 !== context,
index = 0,
keyword;
if (depth) {
keyword = query[0];
index = 1;
} else {
if (1 < length) {
query.sort(sort_by_length_down);
}
}
for (let arr, term; index < length; index++) {
term = query[index];
// console.log(keyword);
// console.log(term);
// console.log("");
if (depth) {
arr = this.add_result(result, suggest, limit, offset, 2 === length, term, keyword);
// console.log(arr);
// console.log(result);
// when suggestion enabled just forward keyword if term was found
// as long as the result is empty forward the pointer also
if (!suggest || !1 !== arr || !result.length) {
keyword = term;
}
} else {
arr = this.add_result(result, suggest, limit, offset, 1 === length, term);
}
if (arr) {
return (/** @type {Array<number|string>} */arr);
}
// apply suggestions on last loop or fallback
if (suggest && index == length - 1) {
let length = result.length;
if (!length) {
if (depth) {
// fallback to non-contextual search when no result was found
depth = 0;
index = -1;
continue;
}
return result;
} else if (1 === length) {
// fast path optimization
return single_result(result[0], limit, offset);
}
}
}
return intersect(result, limit, offset, suggest);
};
/**
* Returns an array when the result is done (to stop the process immediately),
* returns false when suggestions is enabled and no result was found,
* or returns nothing when a set was pushed successfully to the results
*
* @private
* @param {Array} result
* @param {Array} suggest
* @param {number} limit
* @param {number} offset
* @param {boolean} single_term
* @param {string} term
* @param {string=} keyword
* @return {Array<Array<string|number>>|boolean|undefined}
*/
Index.prototype.add_result = function (result, suggest, limit, offset, single_term, term, keyword) {
let word_arr = [],
arr = keyword ? this.ctx : this.map;
if (!this.optimize) {
arr = get_array(arr, term, keyword, this.bidirectional);
}
if (arr) {
let count = 0;
const arr_len = Math.min(arr.length, keyword ? this.resolution_ctx : this.resolution);
// relevance:
for (let x = 0, size = 0, tmp, len; x < arr_len; x++) {
tmp = arr[x];
if (tmp) {
if (this.optimize) {
tmp = get_array(tmp, term, keyword, this.bidirectional);
}
if (offset) {
if (tmp && single_term) {
len = tmp.length;
if (len <= offset) {
offset -= len;
tmp = null;
} else {
tmp = tmp.slice(offset);
offset = 0;
}
}
}
if (tmp) {
// keep score (sparse array):
//word_arr[x] = tmp;
// simplified score order:
word_arr[count++] = tmp;
if (single_term) {
size += tmp.length;
if (size >= limit) {
// fast path optimization
break;
}
}
}
}
}
if (count) {
if (single_term) {
// fast path optimization
// offset was already applied at this point
return single_result(word_arr, limit, 0);
}
result[result.length] = word_arr;
return;
}
}
// return an empty array will stop the loop,
// to prevent stop when using suggestions return a false value
return !suggest && word_arr;
};
function single_result(result, limit, offset) {
if (1 === result.length) {
result = result[0];
} else {
result = concat(result);
}
return offset || result.length > limit ? result.slice(offset, offset + limit) : result;
}
function get_array(arr, term, keyword, bidirectional) {
if (keyword) {
// the frequency of the starting letter is slightly less
// on the last half of the alphabet (m-z) in almost every latin language,
// so we sort downwards (https://en.wikipedia.org/wiki/Letter_frequency)
const swap = bidirectional && term > keyword;
arr = arr[swap ? term : keyword];
arr = arr && arr[swap ? keyword : term];
} else {
arr = arr[term];
}
return arr;
}
Index.prototype.contain = function (id) {
return !!this.register[id];
};
Index.prototype.update = function (id, content) {
return this.remove(id).add(id, content);
};
/**
* @param {boolean=} _skip_deletion
*/
Index.prototype.remove = function (id, _skip_deletion) {
const refs = this.register[id];
if (refs) {
if (this.fastupdate) {
// fast updates performs really fast but did not fully cleanup the key entries
for (let i = 0, tmp; i < refs.length; i++) {
tmp = refs[i];
tmp.splice(tmp.indexOf(id), 1);
}
} else {
remove_index(this.map, id, this.resolution, this.optimize);
if (this.depth) {
remove_index(this.ctx, id, this.resolution_ctx, this.optimize);
}
}
_skip_deletion || delete this.register[id];
if (this.cache) {
this.cache.del(id);
}
}
return this;
};
/**
* @param map
* @param id
* @param res
* @param optimize
* @param {number=} resolution
* @return {number}
*/
function remove_index(map, id, res, optimize, resolution) {
let count = 0;
if (is_array(map)) {
// the first array is the score array in both strategies
if (!resolution) {
resolution = Math.min(map.length, res);
for (let x = 0, arr; x < resolution; x++) {
arr = map[x];
if (arr) {
count = remove_index(arr, id, res, optimize, resolution);
if (!optimize && !count) {
// when not memory optimized the score index should removed
delete map[x];
}
}
}
} else {
const pos = map.indexOf(id);
if (-1 !== pos) {
// fast path, when length is 1 or lower then the whole field gets deleted
if (1 < map.length) {
map.splice(pos, 1);
count++;
}
} else {
count++;
}
}
} else {
for (let key in map) {
count = remove_index(map[key], id, res, optimize, resolution);
if (!count) {
delete map[key];
}
}
}
return count;
}
Index.prototype.searchCache = searchCache;
Index.prototype.export = exportIndex;
Index.prototype.import = importIndex;
apply_async(Index.prototype);
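A minimal usage sketch of the Index above; option values and contents are chosen for illustration (note that context.depth is only honoured together with the default "strict" tokenizer):

import Index from "./index.js";

const index = new Index({
    tokenize: "forward",   // additionally indexes prefixes, so "mou" will match "mouse"
    resolution: 9,         // number of relevance slots fed by get_score()
    cache: 100             // enables the LRU cache behind searchCache()
});

index.add(1, "The quick brown mouse");
index.add(2, "The lazy cat");

index.search("mou");                 // [1]
index.search("cat", { limit: 10 });  // [2]
index.update(2, "The lazy dog");
index.remove(1);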

paige/node_modules/flexsearch/dist/module/intersect.js (generated, vendored, 394 lines)

@@ -0,0 +1,394 @@
import { create_object, concat } from "./common.js";
/**
* Implementation based on Array.includes() provides better performance,
* but it needs at least one word in the query which is less frequent.
* Also on large indexes it does not scale well performance-wise.
* This strategy also lacks of suggestion capabilities (matching & sorting).
*
* @param arrays
* @param limit
* @param offset
* @param {boolean|Array=} suggest
* @returns {Array}
*/
// export function intersect(arrays, limit, offset, suggest) {
//
// const length = arrays.length;
// let result = [];
// let check;
//
// // determine shortest array and collect results
// // from the sparse relevance arrays
//
// let smallest_size;
// let smallest_arr;
// let smallest_index;
//
// for(let x = 0; x < length; x++){
//
// const arr = arrays[x];
// const len = arr.length;
//
// let size = 0;
//
// for(let y = 0, tmp; y < len; y++){
//
// tmp = arr[y];
//
// if(tmp){
//
// size += tmp.length;
// }
// }
//
// if(!smallest_size || (size < smallest_size)){
//
// smallest_size = size;
// smallest_arr = arr;
// smallest_index = x;
// }
// }
//
// smallest_arr = smallest_arr.length === 1 ?
//
// smallest_arr[0]
// :
// concat(smallest_arr);
//
// if(suggest){
//
// suggest = [smallest_arr];
// check = create_object();
// }
//
// let size = 0;
// let steps = 0;
//
// // process terms in reversed order often results in better performance.
// // the outer loop must be the words array, using the
// // smallest array here disables the "fast fail" optimization.
//
// for(let x = length - 1; x >= 0; x--){
//
// if(x !== smallest_index){
//
// steps++;
//
// const word_arr = arrays[x];
// const word_arr_len = word_arr.length;
// const new_arr = [];
//
// let count = 0;
//
// for(let z = 0, id; z < smallest_arr.length; z++){
//
// id = smallest_arr[z];
//
// let found;
//
// // process relevance in forward order (direction is
// // important for adding IDs during the last round)
//
// for(let y = 0; y < word_arr_len; y++){
//
// const arr = word_arr[y];
//
// if(arr.length){
//
// found = arr.includes(id);
//
// if(found){
//
// // check if in last round
//
// if(steps === length - 1){
//
// if(offset){
//
// offset--;
// }
// else{
//
// result[size++] = id;
//
// if(size === limit){
//
// // fast path "end reached"
//
// return result;
// }
// }
//
// if(suggest){
//
// check[id] = 1;
// }
// }
//
// break;
// }
// }
// }
//
// if(found){
//
// new_arr[count++] = id;
// }
// }
//
// if(suggest){
//
// suggest[steps] = new_arr;
// }
// else if(!count){
//
// return [];
// }
//
// smallest_arr = new_arr;
// }
// }
//
// if(suggest){
//
// // needs to iterate in reverse direction
//
// for(let x = suggest.length - 1, arr, len; x >= 0; x--){
//
// arr = suggest[x];
// len = arr && arr.length;
//
// if(len){
//
// for(let y = 0, id; y < len; y++){
//
// id = arr[y];
//
// if(!check[id]){
//
// check[id] = 1;
//
// if(offset){
//
// offset--;
// }
// else{
//
// result[size++] = id;
//
// if(size === limit){
//
// // fast path "end reached"
//
// return result;
// }
// }
// }
// }
// }
// }
// }
//
// return result;
// }
/**
* Implementation based on Object[key] provides better suggestions
* capabilities and has less performance scaling issues on large indexes.
*
* @param arrays
* @param limit
* @param offset
* @param {boolean|Array=} suggest
* @returns {Array}
*/
export function intersect(arrays, limit, offset, suggest) {
const length = arrays.length;
let result = [],
check,
check_suggest,
size = 0;
if (suggest) {
suggest = [];
}
// process terms in reversed order often has advantage for the fast path "end reached".
// also a reversed order prioritize the order of words from a query.
for (let x = length - 1; 0 <= x; x--) {
const word_arr = arrays[x],
word_arr_len = word_arr.length,
check_new = create_object();
let found = !check;
// process relevance in forward order (direction is
// important for adding IDs during the last round)
for (let y = 0; y < word_arr_len; y++) {
const arr = word_arr[y],
arr_len = arr.length;
if (arr_len) {
// loop through IDs
for (let z = 0, check_idx, id; z < arr_len; z++) {
id = arr[z];
if (check) {
if (check[id]) {
// check if in last round
if (!x) {
if (offset) {
offset--;
} else {
result[size++] = id;
if (size === limit) {
// fast path "end reached"
return result;
}
}
}
if (x || suggest) {
check_new[id] = 1;
}
found = !0;
}
if (suggest) {
check_idx = (check_suggest[id] || 0) + 1;
check_suggest[id] = check_idx;
// do not adding IDs which are already included in the result (saves one loop)
// the first intersection match has the check index 2, so shift by -2
if (check_idx < length) {
const tmp = suggest[check_idx - 2] || (suggest[check_idx - 2] = []);
tmp[tmp.length] = id;
}
}
} else {
// pre-fill in first round
check_new[id] = 1;
}
}
}
}
if (suggest) {
// re-use the first pre-filled check for suggestions
check || (check_suggest = check_new);
} else if (!found) {
return [];
}
check = check_new;
}
if (suggest) {
// needs to iterate in reverse direction
for (let x = suggest.length - 1, arr, len; 0 <= x; x--) {
arr = suggest[x];
len = arr.length;
for (let y = 0, id; y < len; y++) {
id = arr[y];
if (!check[id]) {
if (offset) {
offset--;
} else {
result[size++] = id;
if (size === limit) {
// fast path "end reached"
return result;
}
}
check[id] = 1;
}
}
}
}
return result;
}
/**
* @param mandatory
* @param arrays
* @returns {Array}
*/
export function intersect_union(mandatory, arrays) {
const check = create_object(),
union = create_object(),
result = [];
for (let x = 0; x < mandatory.length; x++) {
check[mandatory[x]] = 1;
}
for (let x = 0, arr; x < arrays.length; x++) {
arr = arrays[x];
for (let y = 0, id; y < arr.length; y++) {
id = arr[y];
if (check[id]) {
if (!union[id]) {
union[id] = 1;
result[result.length] = id;
}
}
}
}
return result;
}
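Both helpers operate on plain id arrays. intersect() receives one entry per query term, each entry being an array of relevance buckets, and returns the ids shared by all terms; with the suggest flag, partial matches are appended behind full matches. intersect_union() restricts tag arrays to a mandatory id list. A hand-built sketch:

import { intersect, intersect_union } from "./intersect.js";

// ids matching term A (one bucket) and ids matching term B (one bucket)
intersect([[[1, 2, 3]], [[2, 3]]], 10, 0);   // [2, 3]

// mandatory ids from the field search, tag arrays from the tag index
intersect_union([1, 2, 3], [[2, 4], [3]]);   // [2, 3]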

paige/node_modules/flexsearch/dist/module/lang.js (generated, vendored, 321 lines)

@@ -0,0 +1,321 @@
import { IndexInterface } from "./type.js";
import { create_object, get_keys } from "./common.js";
/**
* @param {!string} str
* @param {boolean|Array<string|RegExp>=} normalize
* @param {boolean|string|RegExp=} split
* @param {boolean=} _collapse
* @returns {string|Array<string>}
* @this IndexInterface
*/
export function pipeline(str, normalize, split, _collapse) {
if (str) {
if (normalize) {
str = replace(str, /** @type {Array<string|RegExp>} */normalize);
}
if (this.matcher) {
str = replace(str, this.matcher);
}
if (this.stemmer && 1 < str.length) {
str = replace(str, this.stemmer);
}
if (_collapse && 1 < str.length) {
str = collapse(str);
}
if (split || "" === split) {
const words = str.split( /** @type {string|RegExp} */split);
return this.filter ? filter(words, this.filter) : words;
}
}
return str;
}
// TODO improve normalize + remove non-delimited chars like in "I'm" + split on whitespace+
export const regex_whitespace = /[\p{Z}\p{S}\p{P}\p{C}]+/u;
// https://github.com/nextapps-de/flexsearch/pull/414
//export const regex_whitespace = /[\s\xA0\u2000-\u200B\u2028\u2029\u3000\ufeff!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]/
const regex_normalize = /[\u0300-\u036f]/g;
export function normalize(str) {
if (str.normalize) {
str = str.normalize("NFD").replace(regex_normalize, "");
}
return str;
}
/**
* @param {!string} str
* @param {boolean|Array<string|RegExp>=} normalize
* @param {boolean|string|RegExp=} split
* @param {boolean=} _collapse
* @returns {string|Array<string>}
*/
// FlexSearch.prototype.pipeline = function(str, normalize, split, _collapse){
//
// if(str){
//
// if(normalize && str){
//
// str = replace(str, /** @type {Array<string|RegExp>} */ (normalize));
// }
//
// if(str && this.matcher){
//
// str = replace(str, this.matcher);
// }
//
// if(this.stemmer && str.length > 1){
//
// str = replace(str, this.stemmer);
// }
//
// if(_collapse && str.length > 1){
//
// str = collapse(str);
// }
//
// if(str){
//
// if(split || (split === "")){
//
// const words = str.split(/** @type {string|RegExp} */ (split));
//
// return this.filter ? filter(words, this.filter) : words;
// }
// }
// }
//
// return str;
// };
// export function pipeline(str, normalize, matcher, stemmer, split, _filter, _collapse){
//
// if(str){
//
// if(normalize && str){
//
// str = replace(str, normalize);
// }
//
// if(matcher && str){
//
// str = replace(str, matcher);
// }
//
// if(stemmer && str.length > 1){
//
// str = replace(str, stemmer);
// }
//
// if(_collapse && str.length > 1){
//
// str = collapse(str);
// }
//
// if(str){
//
// if(split !== false){
//
// str = str.split(split);
//
// if(_filter){
//
// str = filter(str, _filter);
// }
// }
// }
// }
//
// return str;
// }
/**
* @param {Array<string>} words
* @returns {Object<string, string>}
*/
export function init_filter(words) {
const filter = create_object();
for (let i = 0, length = words.length; i < length; i++) {
filter[words[i]] = 1;
}
return filter;
}
/**
* @param {!Object<string, string>} obj
* @param {boolean} is_stemmer
* @returns {Array}
*/
export function init_stemmer_or_matcher(obj, is_stemmer) {
const keys = get_keys(obj),
length = keys.length,
final = [];
let removal = "",
count = 0;
for (let i = 0, key, tmp; i < length; i++) {
key = keys[i];
tmp = obj[key];
if (tmp) {
final[count++] = regex(is_stemmer ? "(?!\\b)" + key + "(\\b|_)" : key);
final[count++] = tmp;
} else {
removal += (removal ? "|" : "") + key;
}
}
if (removal) {
final[count++] = regex(is_stemmer ? "(?!\\b)(" + removal + ")(\\b|_)" : "(" + removal + ")");
final[count] = "";
}
return final;
}
/**
* @param {!string} str
* @param {Array} regexp
* @returns {string}
*/
export function replace(str, regexp) {
for (let i = 0, len = regexp.length; i < len; i += 2) {
str = str.replace(regexp[i], regexp[i + 1]);
if (!str) {
break;
}
}
return str;
}
/**
* @param {!string} str
* @returns {RegExp}
*/
export function regex(str) {
return new RegExp(str, "g");
}
/**
* Regex: replace(/(?:(\w)(?:\1)*)/g, "$1")
* @param {!string} string
* @returns {string}
*/
export function collapse(string) {
let final = "",
prev = "";
for (let i = 0, len = string.length, char; i < len; i++) {
if ((char = string[i]) !== prev) {
final += prev = char;
}
}
return final;
}
// TODO using fast-swap
export function filter(words, map) {
const length = words.length,
filtered = [];
for (let i = 0, count = 0; i < length; i++) {
const word = words[i];
if (word && !map[word]) {
filtered[count++] = word;
}
}
return filtered;
}
// const chars = {a:1, e:1, i:1, o:1, u:1, y:1};
//
// function collapse_repeating_chars(string){
//
// let collapsed_string = "",
// char_prev = "",
// char_next = "";
//
// for(let i = 0; i < string.length; i++){
//
// const char = string[i];
//
// if(char !== char_prev){
//
// if(i && (char === "h")){
//
// if((chars[char_prev] && chars[char_next]) || (char_prev === " ")){
//
// collapsed_string += char;
// }
// }
// else{
//
// collapsed_string += char;
// }
// }
//
// char_next = (
//
// (i === (string.length - 1)) ?
//
// ""
// :
// string[i + 1]
// );
//
// char_prev = char;
// }
//
// return collapsed_string;
// }
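A short sketch of the helpers above in isolation (the word lists are made up):

import { collapse, filter, init_filter, init_stemmer_or_matcher, replace } from "./lang.js";

collapse("bloood");                                       // "blod", repeated characters collapsed
filter(["the", "quick", "fox"], init_filter(["the"]));    // ["quick", "fox"]

// a stemmer table becomes a flat [RegExp, replacement, ...] list consumed by replace()
const stemmer = init_stemmer_or_matcher({ ness: "", ful: "" }, true);
replace("awareness", stemmer);                            // "aware"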


@@ -0,0 +1,27 @@
import { IndexInterface } from "../../type.js";
import { pipeline } from "../../lang.js";
export const rtl = !0;
export const tokenize = "";
export default {
encode: encode,
rtl: !0
};
const regex = /[\x00-\x7F]+/g,
split = /\s+/;
/**
* @param {string|number} str
* @this IndexInterface
*/
export function encode(str) {
return pipeline.call(this,
/* string: */ ("" + str).replace(regex, " "),
/* normalize: */ !1,
/* split: */ split,
/* collapse: */ !1);
}

paige/node_modules/flexsearch/dist/module/lang/at.js (generated, vendored, 41 lines)

@@ -0,0 +1,41 @@
/**
* http://www.ranks.nl/stopwords
* @type {Array<string>}
*/
export const filter = ["aber", "als", "am", "an", "auch", "auf", "aus", "bei", "bin", "bis", "bist", "da", "dadurch", "daher", "darum", "das", "daß", "dass", "dein", "deine", "dem", "den", "der", "des", "dessen", "deshalb", "die", "dies", "dieser", "dieses", "doch", "dort", "du", "durch", "ein", "eine", "einem", "einen", "einer", "eines", "er", "es", "euer", "eure", "für", "hatte", "hatten", "hattest", "hattet", "hier", "hinter", "ich", "ihr", "ihre", "im", "in", "ist", "ja", "jede", "jedem", "jeden", "jeder", "jedes", "jener", "jenes", "jetzt", "kann", "kannst", "können", "könnt", "machen", "mein", "meine", "mit", "muß", "mußt", "musst", "müssen", "müßt", "nach", "nachdem", "nein", "nicht", "nun", "oder", "seid", "sein", "seine", "sich", "sie", "sind", "soll", "sollen", "sollst", "sollt", "sonst", "soweit", "sowie", "und", "unser", "unsere", "unter", "vom", "von", "vor", "wann", "warum", "was", "weiter", "weitere", "wenn", "wer", "werde", "werden", "werdet", "weshalb", "wie", "wieder", "wieso", "wir", "wird", "wirst", "wo", "woher", "wohin", "zu", "zum", "zur", "über"];
/**
* @type {Object<string, string>}
*/
export const stemmer = {
niss: "",
isch: "",
lich: "",
heit: "",
keit: "",
end: "",
ung: "",
est: "",
ern: "",
em: "",
er: "",
en: "",
es: "",
st: "",
ig: "",
ik: "",
e: "",
s: ""
};
export const matcher = {};
export default {
filter: filter,
stemmer: stemmer,
matcher: matcher
};


@@ -0,0 +1,26 @@
import { IndexInterface } from "../../type.js";
import { pipeline } from "../../lang.js";
export const rtl = !1;
export const tokenize = "strict";
export default {
encode: encode,
rtl: !1,
tokenize: "strict"
};
const regex = /[\x00-\x7F]+/g;
/**
* @param {string|number} str
* @this IndexInterface
*/
export function encode(str) {
return pipeline.call(this,
/* string: */("" + str).replace(regex, ""), !1,
/* split: */"", !1);
}


@@ -0,0 +1,27 @@
import { IndexInterface } from "../../type.js";
import { pipeline } from "../../lang.js";
export const rtl = !1;
export const tokenize = "";
export default {
encode: encode,
rtl: !1
};
const regex = /[\x00-\x7F]+/g,
split = /\s+/;
/**
* @param {string|number} str
* @this IndexInterface
*/
export function encode(str) {
return pipeline.call(this,
/* string: */("" + str).replace(regex, " "), !1,
/* split: */split, !1);
}

paige/node_modules/flexsearch/dist/module/lang/de.js (generated, vendored, 54 lines)

@@ -0,0 +1,54 @@
/**
* Filter are also known as "stopwords", they completely filter out words from being indexed.
* Source: http://www.ranks.nl/stopwords
* Object Definition: Just provide an array of words.
* @type {Array<string>}
*/
export const filter = ["aber", "als", "am", "an", "auch", "auf", "aus", "bei", "bin", "bis", "bist", "da", "dadurch", "daher", "darum", "das", "daß", "dass", "dein", "deine", "dem", "den", "der", "des", "dessen", "deshalb", "die", "dies", "dieser", "dieses", "doch", "dort", "du", "durch", "ein", "eine", "einem", "einen", "einer", "eines", "er", "es", "euer", "eure", "für", "hatte", "hatten", "hattest", "hattet", "hier", "hinter", "ich", "ihr", "ihre", "im", "in", "ist", "ja", "jede", "jedem", "jeden", "jeder", "jedes", "jener", "jenes", "jetzt", "kann", "kannst", "können", "könnt", "machen", "mein", "meine", "mit", "muß", "mußt", "musst", "müssen", "müßt", "nach", "nachdem", "nein", "nicht", "nun", "oder", "seid", "sein", "seine", "sich", "sie", "sind", "soll", "sollen", "sollst", "sollt", "sonst", "soweit", "sowie", "und", "unser", "unsere", "unter", "vom", "von", "vor", "wann", "warum", "was", "weiter", "weitere", "wenn", "wer", "werde", "werden", "werdet", "weshalb", "wie", "wieder", "wieso", "wir", "wird", "wirst", "wo", "woher", "wohin", "zu", "zum", "zur", "über"];
/**
* Stemmer removes word endings and is a kind of "partial normalization". A word ending just matched when the word length is bigger than the matched partial.
* Example: The word "correct" and "correctness" could be the same word, so you can define {"ness": ""} to normalize the ending.
* Object Definition: the key represents the word ending, the value contains the replacement (or empty string for removal).
* @type {Object<string, string>}
*/
export const stemmer = {
niss: "",
isch: "",
lich: "",
heit: "",
keit: "",
ell: "",
bar: "",
end: "",
ung: "",
est: "",
ern: "",
em: "",
er: "",
en: "",
es: "",
st: "",
ig: "",
ik: "",
e: "",
s: ""
};
/**
* Matcher replaces all occurrences of a given string regardless of its position and is also a kind of "partial normalization".
* Object Definition: the key represents the target term, the value contains the search string which should be replaced (could also be an array of multiple terms).
* @type {Object<string, Array<string>|string>}
*/
export const matcher = {};
export default {
filter: filter,
stemmer: stemmer,
matcher: matcher
};
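A sketch of feeding this language pack straight into an Index via its option keys; the relative import paths are assumed from the dist layout:

import Index from "../index.js";
import { filter, stemmer } from "./de.js";

const index = new Index({ filter: filter, stemmer: stemmer });
index.add(1, "Heiterkeit und Freude");
index.search("heiterkeit");   // [1]: "heiterkeit" is stemmed to "heiter" on both sides, "und" is dropped by the stopword filter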

paige/node_modules/flexsearch/dist/module/lang/en.js (generated, vendored, 100 lines)

@@ -0,0 +1,100 @@
/**
* http://www.ranks.nl/stopwords
* @type {Array<string>}
*/
export const filter = ["a", "about", "above", "after", "again", "against", "all", "also", "am", "an", "and", "any", "are", "aren't", "as", "at",
//"back",
"be", "because", "been", "before", "being", "below",
//"between",
"both", "but", "by", "can", "cannot", "can't", "come", "could", "couldn't",
//"day",
"did", "didn't", "do", "does", "doesn't", "doing", "dont", "down", "during", "each", "even", "few", "first", "for", "from", "further", "get",
//"give",
"go",
//"good",
"had", "hadn't", "has", "hasn't", "have", "haven't", "having", "he", "hed",
//"hell",
"her", "here", "here's", "hers", "herself", "hes", "him", "himself", "his", "how", "how's", "i", "id", "if", "ill", "im", "in", "into", "is", "isn't", "it", "it's", "itself", "i've", "just", "know", "let's", "like",
//"look",
"make", "me", "more", "most", "mustn't", "my", "myself", "new", "no", "nor", "not", "now", "of", "off", "on", "once",
//"one",
"only", "or", "other", "ought", "our", "our's", "ourselves", "out", "over", "own",
//"people",
"same", "say", "see", "shan't", "she", "she'd", "shell", "shes", "should", "shouldn't", "so", "some", "such",
//"take",
"than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then", "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've",
//"think",
"this", "those", "through", "time", "to", "too",
//"two",
//"under",
"until", "up", "us",
//"use",
"very", "want", "was", "wasn't", "way", "we", "wed", "well", "were", "weren't", "we've", "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "whom", "who's", "why", "why's", "will", "with", "won't",
//"work",
"would", "wouldn't",
//"year",
"you", "you'd", "you'll", "your", "you're", "your's", "yourself", "yourselves", "you've"];
/**
* @type {Object<string, string>}
*/
export const stemmer = {
ational: "ate",
iveness: "ive",
fulness: "ful",
ousness: "ous",
ization: "ize",
tional: "tion",
biliti: "ble",
icate: "ic",
ative: "",
alize: "al",
iciti: "ic",
entli: "ent",
ousli: "ous",
alism: "al",
ation: "ate",
aliti: "al",
iviti: "ive",
ement: "",
enci: "ence",
anci: "ance",
izer: "ize",
alli: "al",
ator: "ate",
logi: "log",
ical: "ic",
ance: "",
ence: "",
ness: "",
able: "",
ible: "",
ment: "",
eli: "e",
bli: "ble",
ful: "",
ant: "",
ent: "",
ism: "",
ate: "",
iti: "",
ous: "",
ive: "",
ize: "",
al: "",
ou: "",
er: "",
ic: ""
};
export const matcher = {};
export default {
filter: filter,
stemmer: stemmer,
matcher: matcher
};
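The stopword list can also be exercised on its own through the helpers from lang.js; paths are assumed from the dist layout:

import { init_filter, filter as strip } from "../lang.js";
import { filter as stopwords } from "./en.js";

const table = init_filter(stopwords);
strip(["what", "is", "flexsearch"], table);   // ["flexsearch"], "what" and "is" are stopwords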


@@ -0,0 +1,89 @@
import { IndexInterface } from "../../type.js";
import { regex, replace, collapse } from "../../lang.js";
import { encode as encode_balance } from "./balance.js";
export const rtl = !1;
export const tokenize = "";
export default {
encode: encode,
rtl: !1,
tokenize: ""
};
// Phonetic Normalization
const regex_ae = regex("ae"),
//regex_ai = regex("ai"),
//regex_ay = regex("ay"),
//regex_ey = regex("ey"),
regex_oe = regex("oe"),
//regex_ue = regex("ue"),
//regex_ie = regex("ie"),
//regex_sz = regex("sz"),
//regex_zs = regex("zs"),
//regex_ck = regex("ck"),
//regex_cc = regex("cc"),
regex_sh = regex("sh"),
regex_th = regex("th"),
//regex_dt = regex("dt"),
regex_ph = regex("ph"),
regex_pf = regex("pf"),
pairs = [regex_ae, "a",
// regex_ai, "ei",
// regex_ay, "ei",
// regex_ey, "ei",
regex_oe, "o",
// regex_ue, "u",
// regex_ie, "i",
// regex_sz, "s",
// regex_zs, "s",
regex_sh, "s",
// regex_ck, "k",
// regex_cc, "k",
regex_th, "t",
// regex_dt, "t",
regex_ph, "f", regex_pf, "f",
// regex_ou, "o",
// regex_uo, "u"
// regex("(?![aeiouy])h(?![aeiouy])"), "",
// regex("(?!^[aeiouy])h(?!^[aeiouy])"), ""
regex("(?![aeo])h(?![aeo])"), "", regex("(?!^[aeo])h(?!^[aeo])"), ""];
//regex_ou = regex("ou"),
//regex_uo = regex("uo");
/**
* @param {string|number} str
* @param {boolean=} _skip_postprocessing
* @this IndexInterface
*/
export function encode(str, _skip_postprocessing) {
if (str) {
str = encode_balance.call(this, str).join(" ");
if (2 < str.length) {
str = replace(str, pairs);
}
if (!_skip_postprocessing) {
if (1 < str.length) {
str = collapse(str);
}
if (str) {
str = str.split(" ");
}
}
}
return str || [];
}


@@ -0,0 +1,119 @@
import { IndexInterface } from "../../type.js";
import { encode as encode_simple } from "./simple.js";
// custom soundex implementation
export const rtl = !1;
export const tokenize = "strict";
export default {
encode: encode,
rtl: !1,
tokenize: "strict"
};
//const regex_whitespace = /[\W_]+/g;
const regex_strip = /[^a-z0-9]+/,
soundex = {
b: "p",
//"p": "p",
//"f": "f",
v: "f", w: "f",
//"s": "s",
z: "s",
x: "s",
ß: "s",
d: "t",
//"t": "t",
//"l": "l",
//"m": "m",
n: "m",
c: "k",
g: "k",
j: "k",
//"k": "k",
q: "k",
//"r": "r",
//"h": "h",
//"a": "a",
//"e": "e",
i: "e",
y: "e",
//"o": "o",
u: "o"
};
// const pairs = [
// regex_whitespace, " ",
// regex_strip, ""
// ];
// modified
/**
* @param {string|number} str
* @this IndexInterface
*/
export function encode(str) {
str = encode_simple.call(this, str).join(" ");
// str = this.pipeline(
//
// /* string: */ normalize("" + str).toLowerCase(),
// /* normalize: */ false,
// /* split: */ false,
// /* collapse: */ false
// );
const result = [];
if (str) {
const words = str.split(regex_strip),
length = words.length;
for (let x = 0, tmp, count = 0; x < length; x++) {
if ((str = words[x]) && ( /*&& (str.length > 2)*/!this.filter || !this.filter[str])) {
tmp = str[0];
let code = soundex[tmp] || tmp,
previous = code; //str[0];
//soundex[code] || code;
for (let i = 1; i < str.length; i++) {
tmp = str[i];
const current = soundex[tmp] || tmp;
if (current && current !== previous) {
code += current;
previous = current;
// if(code.length === 7){
//
// break;
// }
}
}
result[count++] = code; //(code + "0000").substring(0, 4);
}
}
}
return result;
}
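This phonetic encoder can be plugged into an Index directly through the encode option; a sketch, assuming this module is lang/latin/soundex.js as in the usual dist layout:

import Index from "../../index.js";
import { encode } from "./soundex.js";

const index = new Index({ encode: encode });
index.add(1, "Schmidt");
index.search("Schmitt");   // [1], both names collapse to the same phonetic code ("skhmet")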


@@ -0,0 +1,23 @@
import { IndexInterface } from "../../type.js";
import { pipeline, normalize, regex_whitespace } from "../../lang.js";
export const rtl = !1;
export const tokenize = "";
export default {
encode: encode,
rtl: !1,
tokenize: ""
};
/**
* @param {string|number} str
* @this IndexInterface
*/
export function encode(str) {
return pipeline.call(this,
/* string: */("" + str).toLowerCase(), !1, /* split: */regex_whitespace, !1);
}


@@ -0,0 +1,65 @@
import { IndexInterface } from "../../type.js";
import { regex, replace, collapse } from "../../lang.js";
import { encode as encode_advanced } from "./advanced.js";
export const rtl = !1;
export const tokenize = "";
export default {
encode: encode,
rtl: !1,
tokenize: ""
};
// Soundex Normalization
const prefix = "(?!\\b)",
//soundex_b = regex(prefix + "p"),
// soundex_s = regex(prefix + "z"),
// soundex_k = regex(prefix + "[cgq]"),
// soundex_m = regex(prefix + "n"),
// soundex_t = regex(prefix + "d"),
// soundex_f = regex(prefix + "[vw]"),
//regex_vowel = regex(prefix + "[aeiouy]");
regex_vowel = regex("(?!\\b)[aeo]"),
pairs = [
// soundex_b, "b",
// soundex_s, "s",
// soundex_k, "k",
// soundex_m, "m",
// soundex_t, "t",
// soundex_f, "f",
// regex("(?![aeiouy])h(?![aeiouy])"), "",
// regex("(?!^[aeiouy])h(?!^[aeiouy])"), "",
regex_vowel, ""];
/**
* @param {string|number} str
* @this IndexInterface
*/
export function encode(str) {
if (str) {
str = encode_advanced.call(this, str, /* skip post-processing: */!0);
if (1 < str.length) {
//str = replace(str, pairs);
str = str.replace(regex_vowel, "");
}
if (1 < str.length) {
str = collapse(str);
}
if (str) {
str = str.split(" ");
}
}
return str || [];
}
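
Editorial sketch: this encoder runs the advanced pass with post-processing skipped, then strips the remaining inner vowels [aeo] (regex_vowel is guarded by (?!\b), so word-initial vowels survive) and collapses repeats, so spellings that differ only in inner vowels tend to land on the same keys. Usage follows the same Index pattern; the charset key is assumed from the FlexSearch docs:

import Index from "./index.js"; // specifier illustrative
const index = new Index({ charset: "latin:extra", tokenize: "forward" });
index.add(1, "Michael");
index.search("Michal"); // inner-vowel differences are stripped, so variants like these are likely to converge (illustrative)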

View File

@@ -0,0 +1,45 @@
import { IndexInterface } from "../../type.js";
import { pipeline, normalize, regex_whitespace, regex } from "../../lang.js";
export const rtl = !1;
export const tokenize = "";
export default {
encode: encode,
rtl: !1,
tokenize: ""
};

// Charset Normalization
const //regex_whitespace = /\W+/,
//regex_strip = regex("[^a-z0-9 ]"),
regex_a = regex("[àáâãäå]"),
regex_e = regex("[èéêë]"),
regex_i = regex("[ìíîï]"),
regex_o = regex("[òóôõöő]"),
regex_u = regex("[ùúûüű]"),
regex_y = regex("[ýŷÿ]"),
regex_n = regex("ñ"),
regex_c = regex("[çc]"),
regex_s = regex("ß"),
regex_and = regex(" & "),
pairs = [regex_a, "a", regex_e, "e", regex_i, "i", regex_o, "o", regex_u, "u", regex_y, "y", regex_n, "n", regex_c, "k", regex_s, "s", regex_and, " and "
//regex_whitespace, " "
//regex_strip, ""
];
/**
* @param {string|number} str
* @this IndexInterface
*/
export function encode(str) {
str = "" + str;
return pipeline.call(this,
/* string: */normalize(str).toLowerCase(),
/* normalize: */!str.normalize && pairs,
/* split: */regex_whitespace, !1);
}
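
Editorial note on the encoder above: the pairs table is only a fallback. Because the normalize argument is `!str.normalize && pairs`, engines with native String.prototype.normalize strip diacritics via normalize() in lang.js and never apply the pairs, while older engines use the pairs, which additionally fold c/ç to k and " & " to " and ". A hedged sketch of the resulting difference (output shapes are illustrative only):

// with native normalize(): encode.call(index, "Café & Crème") -> roughly ["cafe", "creme"]
// with the fallback pairs:  the same input tends toward ["kafe", "and", "kreme"]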

100
paige/node_modules/flexsearch/dist/module/lang/us.js generated vendored Normal file
View File

@@ -0,0 +1,100 @@
/**
* http://www.ranks.nl/stopwords
* @type {Array<string>}
*/
export const filter = ["a", "about", "above", "after", "again", "against", "all", "also", "am", "an", "and", "any", "are", "aren't", "as", "at",
//"back",
"be", "because", "been", "before", "being", "below",
//"between",
"both", "but", "by", "can", "cannot", "can't", "come", "could", "couldn't",
//"day",
"did", "didn't", "do", "does", "doesn't", "doing", "dont", "down", "during", "each", "even", "few", "first", "for", "from", "further", "get",
//"give",
"go",
//"good",
"had", "hadn't", "has", "hasn't", "have", "haven't", "having", "he", "hed",
//"hell",
"her", "here", "here's", "hers", "herself", "hes", "him", "himself", "his", "how", "how's", "i", "id", "if", "ill", "im", "in", "into", "is", "isn't", "it", "it's", "itself", "i've", "just", "know", "let's", "like",
//"look",
"make", "me", "more", "most", "mustn't", "my", "myself", "new", "no", "nor", "not", "now", "of", "off", "on", "once",
//"one",
"only", "or", "other", "ought", "our", "our's", "ourselves", "out", "over", "own",
//"people",
"same", "say", "see", "shan't", "she", "she'd", "shell", "shes", "should", "shouldn't", "so", "some", "such",
//"take",
"than", "that", "that's", "the", "their", "theirs", "them", "themselves", "then", "there", "there's", "these", "they", "they'd", "they'll", "they're", "they've",
//"think",
"this", "those", "through", "time", "to", "too",
//"two",
//"under",
"until", "up", "us",
//"use",
"very", "want", "was", "wasn't", "way", "we", "wed", "well", "were", "weren't", "we've", "what", "what's", "when", "when's", "where", "where's", "which", "while", "who", "whom", "who's", "why", "why's", "will", "with", "won't",
//"work",
"would", "wouldn't",
//"year",
"you", "you'd", "you'll", "your", "you're", "your's", "yourself", "yourselves", "you've"];
/**
* @type {Object<string, string>}
*/
export const stemmer = {
ational: "ate",
iveness: "ive",
fulness: "ful",
ousness: "ous",
ization: "ize",
tional: "tion",
biliti: "ble",
icate: "ic",
ative: "",
alize: "al",
iciti: "ic",
entli: "ent",
ousli: "ous",
alism: "al",
ation: "ate",
aliti: "al",
iviti: "ive",
ement: "",
enci: "ence",
anci: "ance",
izer: "ize",
alli: "al",
ator: "ate",
logi: "log",
ical: "ic",
ance: "",
ence: "",
ness: "",
able: "",
ible: "",
ment: "",
eli: "e",
bli: "ble",
ful: "",
ant: "",
ent: "",
ism: "",
ate: "",
iti: "",
ous: "",
ive: "",
ize: "",
al: "",
ou: "",
er: "",
ic: ""
};
export const matcher = {};
export default {
filter: filter,
stemmer: stemmer,
matcher: matcher
};
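
Editorial usage sketch: the filter drops stop words before indexing and the stemmer folds suffixes onto their reduced forms (e.g. "ization" to "ize"). This assumes the documented filter/stemmer options and the Index constructor shipped in this bundle; both import specifiers are illustrative.

import Index from "./index.js";
import english from "./lang/us.js"; // this file
const index = new Index({ charset: "latin:extra", filter: english.filter, stemmer: english.stemmer });
index.add(1, "The nationalization of the railways"); // "the" and "of" are filtered out
index.search("nationalize"); // suffix folding normalizes inflected forms before indexing (illustrative)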

74
paige/node_modules/flexsearch/dist/module/polyfill.js generated vendored Normal file
View File

@@ -0,0 +1,74 @@
export let promise = Promise;
Object.assign || (Object.assign = function () {
const args = arguments,
size = args.length,
obj = args[0];
for (let x = 1, current, keys, length; x < size; x++) {
current = args[x];
keys = Object.keys(current);
length = keys.length;
for (let i = 0, key; i < length; i++) {
key = keys[i];
obj[key] = current[key];
}
}
return obj;
});
// Object.values || (Object.values = function(obj){
//
// const keys = Object.keys(obj);
// const length = keys.length;
// const values = new Array(length);
//
// for(let x = 0; x < length; x++){
//
// values[x] = obj[keys[x]];
// }
//
// return values;
// });
if (!promise) {
/**
* @param {Function} fn
* @constructor
*/
function SimplePromise(fn) {
this.callback = null;
const self = this;
fn(function (val) {
if (self.callback) {
self.callback(val);
// self.callback = null;
// self = null;
}
});
}
/**
* @param {Function} callback
*/
SimplePromise.prototype.then = function (callback) {
this.callback = callback;
};
promise = SimplePromise;
}
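
Editorial note: this fallback is deliberately minimal; it supports a single then() callback, no chaining and no catch(), which is exactly the shape the library relies on internally, roughly:

// new promise(resolve => setTimeout(() => resolve(result))).then(callback);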

87
paige/node_modules/flexsearch/dist/module/preset.js generated vendored Normal file
View File

@@ -0,0 +1,87 @@
import { is_string } from "./common.js";
/**
* @enum {Object}
* @const
*/
const preset = {
memory: {
charset: "latin:extra",
//tokenize: "strict",
resolution: 3,
//threshold: 0,
minlength: 4,
fastupdate: !1
},
performance: {
//charset: "latin",
//tokenize: "strict",
resolution: 3,
minlength: 3,
//fastupdate: true,
optimize: !1,
context: {
depth: 2, resolution: 1
//bidirectional: false
}
},
match: {
charset: "latin:extra",
tokenize: "reverse"
//resolution: 9,
//threshold: 0
},
score: {
charset: "latin:advanced",
//tokenize: "strict",
resolution: 20,
minlength: 3,
context: {
depth: 3,
resolution: 9
//bidirectional: true
}
},
default: {
// charset: "latin:default",
// tokenize: "strict",
// resolution: 3,
// threshold: 0,
// depth: 3
}
// "fast": {
// //charset: "latin",
// //tokenize: "strict",
// threshold: 8,
// resolution: 9,
// depth: 1
// }
};
export default function apply_preset(options) {
if (is_string(options)) {
options = preset[options];
} else {
// look up the named preset without shadowing the preset table above
const preset_name = options.preset;
if (preset_name) {
options = Object.assign({}, preset[preset_name], /** @type {Object} */options);
}
}
return options;
}
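
Editorial sketch of the two call shapes this resolver handles: a preset name as a plain string, or an options object carrying a preset key whose defaults are merged underneath the caller's own options (Object.assign lists the explicit options last, so they win). Import specifier illustrative:

import Index from "./index.js";
const a = new Index("match");                             // string form: looked up directly in the preset table
const b = new Index({ preset: "score", resolution: 15 }); // object form: preset defaults merged, the explicit resolution wins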

271
paige/node_modules/flexsearch/dist/module/serialize.js generated vendored Normal file
View File

@@ -0,0 +1,271 @@
// TODO return promises instead of inner await
import { IndexInterface, DocumentInterface } from "./type.js";
import { create_object, is_string } from "./common.js";
function async(callback, self, field, key, index_doc, index, data, on_done) {
setTimeout(function () {
const res = callback(field ? field + "." + key : key, JSON.stringify(data));
// await isn't supported by ES5
if (res && res.then) {
res.then(function () {
self.export(callback, self, field, index_doc, index + 1, on_done);
});
} else {
self.export(callback, self, field, index_doc, index + 1, on_done);
}
});
}
/**
* @this IndexInterface
*/
export function exportIndex(callback, self, field, index_doc, index, on_done) {
let return_value = !0;
if ('undefined' == typeof on_done) {
return_value = new Promise(resolve => {
on_done = resolve;
});
}
let key, data;
switch (index || (index = 0)) {
case 0:
key = "reg";
// fastupdate isn't supported by export
if (this.fastupdate) {
data = create_object();
for (let key in this.register) {
data[key] = 1;
}
} else {
data = this.register;
}
break;
case 1:
key = "cfg";
data = {
doc: 0,
opt: this.optimize ? 1 : 0
};
break;
case 2:
key = "map";
data = this.map;
break;
case 3:
key = "ctx";
data = this.ctx;
break;
default:
if ('undefined' == typeof field && on_done) {
on_done();
}
return;
}
async(callback, self || this, field, key, index_doc, index, data, on_done);
return return_value;
}
/**
* @this IndexInterface
*/
export function importIndex(key, data) {
if (!data) {
return;
}
if (is_string(data)) {
data = JSON.parse(data);
}
switch (key) {
case "cfg":
this.optimize = !!data.opt;
break;
case "reg":
// fastupdate isn't supported by import
this.fastupdate = !1;
this.register = data;
break;
case "map":
this.map = data;
break;
case "ctx":
this.ctx = data;
break;
}
}
/**
* @this DocumentInterface
*/
export function exportDocument(callback, self, field, index_doc, index, on_done) {
let return_value;
if ('undefined' == typeof on_done) {
return_value = new Promise(resolve => {
on_done = resolve;
});
}
index || (index = 0);
index_doc || (index_doc = 0);
if (index_doc < this.field.length) {
const field = this.field[index_doc],
idx = this.index[field];
self = this;
setTimeout(function () {
if (!idx.export(callback, self, index ? field /*.replace(":", "-")*/ : "", index_doc, index++, on_done)) {
index_doc++;
index = 1;
self.export(callback, self, field, index_doc, index, on_done);
}
});
} else {
let key, data;
switch (index) {
case 1:
key = "tag";
data = this.tagindex;
field = null;
break;
case 2:
key = "store";
data = this.store;
field = null;
break;
// case 3:
//
// key = "reg";
// data = this.register;
// break;
default:
on_done();
return;
}
async(callback, this, field, key, index_doc, index, data, on_done);
}
return return_value;
}
/**
* @this DocumentInterface
*/
export function importDocument(key, data) {
if (!data) {
return;
}
if (is_string(data)) {
data = JSON.parse(data);
}
switch (key) {
case "tag":
this.tagindex = data;
break;
case "reg":
// fastupdate isn't supported by import
this.fastupdate = !1;
this.register = data;
for (let i = 0, index; i < this.field.length; i++) {
index = this.index[this.field[i]];
index.register = data;
index.fastupdate = !1;
}
break;
case "store":
this.store = data;
break;
default:
key = key.split(".");
const field = key[0];
key = key[1];
if (field && key) {
this.index[field].import(key, data);
}
}
}
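
Editorial sketch of a round trip through these hooks, assuming they are wired up as the index's export/import methods: export resolves its Promise once every chunk has been handed to the callback, and import accepts either the parsed object or the JSON string the callback received. Names below are illustrative:

const dump = {};
await index.export((key, data) => { dump[key] = data; }); // keys: "reg", "cfg", "map", "ctx" (plus "tag", "store" and "field.key" for documents)
const restored = new Index(options);                      // same options the original index was built with
for (const key of Object.keys(dump)) restored.import(key, dump[key]);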

69
paige/node_modules/flexsearch/dist/module/type.js generated vendored Normal file
View File

@@ -0,0 +1,69 @@
/**
* @interface
*/
export function IndexInterface() {
this.cache = null;
this.matcher = null;
this.stemmer = null;
this.filter = null;
}
/**
* @param {!string} str
* @param {boolean|Array<string|RegExp>=} normalize
* @param {boolean|string|RegExp=} split
* @param {boolean=} collapse
* @returns {string|Array<string>}
*/
//IndexInterface.prototype.pipeline;
/**
* @param {!number|string} id
* @param {!string} content
*/
IndexInterface.prototype.add;
/**
* @param {!number|string} id
* @param {!string} content
*/
IndexInterface.prototype.append;
/**
* @param {!string|Object} query
* @param {number|Object=} limit
* @param {Object=} options
* @returns {Array<number|string>}
*/
IndexInterface.prototype.search;
/**
* @param {!number|string} id
* @param {!string} content
*/
IndexInterface.prototype.update;
/**
* @param {!number|string} id
*/
IndexInterface.prototype.remove;
/**
* @interface
*/
export function DocumentInterface() {
this.field = null;
/** @type IndexInterface */
this.index = null;
}

View File

@@ -0,0 +1,52 @@
import Index from "../index.js";
export default function (data) {
data = data.data;
/** @type Index */
const index = self._index,
args = data.args,
task = data.task;
switch (task) {
case "init":
const options = data.options || {},
factory = data.factory,
encode = options.encode;
options.cache = !1;
if (encode && 0 === encode.indexOf("function")) {
options.encode = Function("return " + encode)();
}
if (factory) {
// export the FlexSearch global payload to "self"
Function("return " + factory)()(self);
/** @type Index */
self._index = new self.FlexSearch.Index(options);
// destroy the exported payload
delete self.FlexSearch;
} else {
self._index = new Index(options);
}
break;
default:
const id = data.id,
message = index[task].apply(index, args);
postMessage("search" === task ? { id: id, msg: message } : { id: id });
}
}
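
Editorial sketch of the message shapes this handler expects: one "init" task carrying the serialized options (and optionally the factory source), then task messages whose args mirror the corresponding Index method; only "search" replies carry a msg payload. Values are illustrative:

worker.postMessage({ task: "init", factory: null, options: { tokenize: "forward" } });
worker.postMessage({ task: "add", id: 1, args: [1, "hello world"] }); // reply: { id: 1 }
worker.postMessage({ task: "search", id: 2, args: ["hello"] });       // reply: { id: 2, msg: [1] }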

View File

@@ -0,0 +1,136 @@
//import { promise as Promise } from "../polyfill.js";
import { create_object, is_function, is_object, is_string } from "../common.js";
import handler from "./handler.js";
let pid = 0;
/**
* @param {Object=} options
* @constructor
*/
function WorkerIndex(options) {
if (!(this instanceof WorkerIndex)) {
return new WorkerIndex(options);
}
let opt;
if (options) {
if (is_function(opt = options.encode)) {
options.encode = opt.toString();
}
} else {
options = {};
}
// the factory is the outer wrapper from the build
// we use "self" as a trap for node.js
let factory = (self || window)._factory;
if (factory) {
factory = factory.toString();
}
const is_node_js = "undefined" == typeof window && self.exports,
_self = this;
this.worker = create(factory, is_node_js, options.worker);
this.resolver = create_object();
if (!this.worker) {
return;
}
if (is_node_js) {
this.worker.on("message", function (msg) {
_self.resolver[msg.id](msg.msg);
delete _self.resolver[msg.id];
});
} else {
this.worker.onmessage = function (msg) {
msg = msg.data;
_self.resolver[msg.id](msg.msg);
delete _self.resolver[msg.id];
};
}
this.worker.postMessage({
task: "init",
factory: factory,
options: options
});
}
export default WorkerIndex;
register("add");
register("append");
register("search");
register("update");
register("remove");
function register(key) {
WorkerIndex.prototype[key] = WorkerIndex.prototype[key + "Async"] = function () {
const self = this,
args = [].slice.call(arguments),
arg = args[args.length - 1];
let callback;
if (is_function(arg)) {
callback = arg;
args.splice(args.length - 1, 1);
}
const promise = new Promise(function (resolve) {
setTimeout(function () {
self.resolver[++pid] = resolve;
self.worker.postMessage({
task: key,
id: pid,
args: args
});
});
});
if (callback) {
promise.then(callback);
return this;
} else {
return promise;
}
};
}
function create(factory, is_node_js, worker_path) {
let worker;
try {
worker = is_node_js ?
eval('new (require("worker_threads")["Worker"])(__dirname + "/node/node.js")') :
factory ?
new Worker(URL.createObjectURL(new Blob(["onmessage=" + handler.toString()], { type: "text/javascript" }))) :
new Worker(is_string(worker_path) ? worker_path : "worker/worker.js", { type: "module" });
} catch (e) {}
return worker;
}
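
Editorial usage sketch: each registered method posts its task to the worker and returns a Promise that resolves when the matching id comes back (or invokes a trailing callback instead), so the worker-backed index is used like the synchronous one, just awaited. Import specifier illustrative:

import WorkerIndex from "./worker/index.js";
const index = new WorkerIndex({ tokenize: "forward" });
await index.add(1, "hello world");           // resolves once the worker acknowledges this id
const results = await index.search("hello"); // resolves with the worker's msg payload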

View File

@@ -0,0 +1,36 @@
const { parentPort } = require("worker_threads"),
{ Index } = require("../flexsearch.bundle.min.js");
let index;
parentPort.on("message", function (data) {
/** @type Index */
const args = data.args,
task = data.task,
id = data.id;
switch (task) {
case "init":
const options = data.options || {},
encode = options.encode;
options.cache = !1;
if (encode && 0 === encode.indexOf("function")) {
options.encode = new Function("return " + encode)();
}
index = new Index(options);
break;
default:
const message = index[task].apply(index, args);
parentPort.postMessage("search" === task ? { id: id, msg: message } : { id: id });
}
});

View File

@@ -0,0 +1,2 @@
import handler from "./handler.js";
onmessage = handler;