'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
var tslib = require('tslib');
var app = require('@firebase/app');
var component = require('@firebase/component');
var util = require('@firebase/util');
var logger = require('@firebase/logger');
var util$1 = require('util');
var crypto = require('crypto');
var grpcJs = require('@grpc/grpc-js');
var package_json = require('@grpc/grpc-js/package.json');
var path = require('path');
var protoLoader = require('@grpc/proto-loader');
var name = "@firebase/firestore";
var version$1 = "2.3.10";
var version = "8.8.1";
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var SDK_VERSION = version;
function setSDKVersion(version) {
SDK_VERSION = version;
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* `ListenSequence` is a monotonic sequence. It is initialized with a minimum value to
* exceed. All subsequent calls to next will return increasing values. If provided with a
* `SequenceNumberSyncer`, it will additionally bump its next value when told of a new value, as
* well as write out sequence numbers that it produces via `next()`.
*/
var ListenSequence = /** @class */ (function () {
function ListenSequence(previousValue, sequenceNumberSyncer) {
var _this = this;
this.previousValue = previousValue;
if (sequenceNumberSyncer) {
sequenceNumberSyncer.sequenceNumberHandler = function (sequenceNumber) { return _this.setPreviousValue(sequenceNumber); };
this.writeNewSequenceNumber = function (sequenceNumber) { return sequenceNumberSyncer.writeSequenceNumber(sequenceNumber); };
}
}
ListenSequence.prototype.setPreviousValue = function (externalPreviousValue) {
this.previousValue = Math.max(externalPreviousValue, this.previousValue);
return this.previousValue;
};
ListenSequence.prototype.next = function () {
var nextValue = ++this.previousValue;
if (this.writeNewSequenceNumber) {
this.writeNewSequenceNumber(nextValue);
}
return nextValue;
};
return ListenSequence;
}());
ListenSequence.INVALID = -1;
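// Illustrative sketch (not part of the original bundle): a ListenSequence
// seeded with a previous value hands out strictly increasing sequence numbers,
// and setPreviousValue() only ever moves the floor forward.
//
//   var sequence = new ListenSequence(/* previousValue= */ 5);
//   sequence.next();               // 6
//   sequence.next();               // 7
//   sequence.setPreviousValue(10); // external bump, e.g. from another tab
//   sequence.next();               // 11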
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Formats an object as a JSON string, suitable for logging. */
function formatJSON(value) {
// util.inspect() results in much more readable output than JSON.stringify()
return util$1.inspect(value, { depth: 100 });
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var logClient = new logger.Logger('@firebase/firestore');
// Helper methods are needed because variables can't be exported as read/write
function getLogLevel() {
return logClient.logLevel;
}
/**
* Sets the verbosity of Cloud Firestore logs (debug, error, or silent).
*
* @param logLevel - The verbosity you set for activity and error logging. Can
* be any of the following values:
*
*
* - `debug` for the most verbose logging level, primarily for
* debugging.
* - `error` to log errors only.
* - `silent` to turn off logging.
*
*/
function setLogLevel(logLevel) {
logClient.setLogLevel(logLevel);
}
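// Illustrative usage sketch (assumes the log level strings documented above;
// not part of the original bundle):
//
//   setLogLevel('debug');  // most verbose, primarily for debugging
//   setLogLevel('error');  // log errors only
//   setLogLevel('silent'); // turn logging off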
function logDebug(msg) {
var obj = [];
for (var _i = 1; _i < arguments.length; _i++) {
obj[_i - 1] = arguments[_i];
}
if (logClient.logLevel <= logger.LogLevel.DEBUG) {
var args = obj.map(argToString);
logClient.debug.apply(logClient, tslib.__spreadArray(["Firestore (" + SDK_VERSION + "): " + msg], args));
}
}
function logError(msg) {
var obj = [];
for (var _i = 1; _i < arguments.length; _i++) {
obj[_i - 1] = arguments[_i];
}
if (logClient.logLevel <= logger.LogLevel.ERROR) {
var args = obj.map(argToString);
logClient.error.apply(logClient, tslib.__spreadArray(["Firestore (" + SDK_VERSION + "): " + msg], args));
}
}
function logWarn(msg) {
var obj = [];
for (var _i = 1; _i < arguments.length; _i++) {
obj[_i - 1] = arguments[_i];
}
if (logClient.logLevel <= logger.LogLevel.WARN) {
var args = obj.map(argToString);
logClient.warn.apply(logClient, tslib.__spreadArray(["Firestore (" + SDK_VERSION + "): " + msg], args));
}
}
/**
* Converts an additional log parameter to a string representation.
*/
function argToString(obj) {
if (typeof obj === 'string') {
return obj;
}
else {
try {
return formatJSON(obj);
}
catch (e) {
// Converting to JSON failed, just log the object directly
return obj;
}
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Unconditionally fails, throwing an Error with the given message.
* Messages are stripped in production builds.
*
* Returns `never` and can be used in expressions:
* @example
* let futureVar = fail('not implemented yet');
*/
function fail(failure) {
if (failure === void 0) { failure = 'Unexpected state'; }
// Log the failure in addition to throwing an exception, just in case the
// exception is swallowed.
var message = "FIRESTORE (" + SDK_VERSION + ") INTERNAL ASSERTION FAILED: " + failure;
logError(message);
// NOTE: We don't use FirestoreError here because these are internal failures
// that cannot be handled by the user. (Also it would create a circular
// dependency between the error and assert modules which doesn't work.)
throw new Error(message);
}
/**
* Fails if the given assertion condition is false, throwing an Error with the
* given message if it does.
*
* Messages are stripped in production builds.
*/
function hardAssert(assertion, message) {
if (!assertion) {
fail();
}
}
/**
* Casts `obj` to `T`. In non-production builds, verifies that `obj` is an
* instance of `T` before casting.
*/
function debugCast(obj,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
constructor) {
return obj;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var Code = {
// Causes are copied from:
// https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
/** Not an error; returned on success. */
OK: 'ok',
/** The operation was cancelled (typically by the caller). */
CANCELLED: 'cancelled',
/** Unknown error or an error from a different error domain. */
UNKNOWN: 'unknown',
/**
* Client specified an invalid argument. Note that this differs from
* FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments that are
* problematic regardless of the state of the system (e.g., a malformed file
* name).
*/
INVALID_ARGUMENT: 'invalid-argument',
/**
* Deadline expired before operation could complete. For operations that
* change the state of the system, this error may be returned even if the
* operation has completed successfully. For example, a successful response
* from a server could have been delayed long enough for the deadline to
* expire.
*/
DEADLINE_EXCEEDED: 'deadline-exceeded',
/** Some requested entity (e.g., file or directory) was not found. */
NOT_FOUND: 'not-found',
/**
* Some entity that we attempted to create (e.g., file or directory) already
* exists.
*/
ALREADY_EXISTS: 'already-exists',
/**
* The caller does not have permission to execute the specified operation.
* PERMISSION_DENIED must not be used for rejections caused by exhausting
* some resource (use RESOURCE_EXHAUSTED instead for those errors).
* PERMISSION_DENIED must not be used if the caller can not be identified
* (use UNAUTHENTICATED instead for those errors).
*/
PERMISSION_DENIED: 'permission-denied',
/**
* The request does not have valid authentication credentials for the
* operation.
*/
UNAUTHENTICATED: 'unauthenticated',
/**
* Some resource has been exhausted, perhaps a per-user quota, or perhaps the
* entire file system is out of space.
*/
RESOURCE_EXHAUSTED: 'resource-exhausted',
/**
* Operation was rejected because the system is not in a state required for
* the operation's execution. For example, the directory to be deleted may be
* non-empty, an rmdir operation is applied to a non-directory, etc.
*
* A litmus test that may help a service implementor in deciding
* between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
* (a) Use UNAVAILABLE if the client can retry just the failing call.
* (b) Use ABORTED if the client should retry at a higher-level
* (e.g., restarting a read-modify-write sequence).
* (c) Use FAILED_PRECONDITION if the client should not retry until
* the system state has been explicitly fixed. E.g., if an "rmdir"
* fails because the directory is non-empty, FAILED_PRECONDITION
* should be returned since the client should not retry unless
* they have first fixed up the directory by deleting files from it.
* (d) Use FAILED_PRECONDITION if the client performs conditional
* REST Get/Update/Delete on a resource and the resource on the
* server does not match the condition. E.g., conflicting
* read-modify-write on the same resource.
*/
FAILED_PRECONDITION: 'failed-precondition',
/**
* The operation was aborted, typically due to a concurrency issue like
* sequencer check failures, transaction aborts, etc.
*
* See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
* and UNAVAILABLE.
*/
ABORTED: 'aborted',
/**
* Operation was attempted past the valid range. E.g., seeking or reading
* past end of file.
*
* Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed
* if the system state changes. For example, a 32-bit file system will
* generate INVALID_ARGUMENT if asked to read at an offset that is not in the
* range [0,2^32-1], but it will generate OUT_OF_RANGE if asked to read from
* an offset past the current file size.
*
* There is a fair bit of overlap between FAILED_PRECONDITION and
* OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific error)
* when it applies so that callers who are iterating through a space can
* easily look for an OUT_OF_RANGE error to detect when they are done.
*/
OUT_OF_RANGE: 'out-of-range',
/** Operation is not implemented or not supported/enabled in this service. */
UNIMPLEMENTED: 'unimplemented',
/**
* Internal errors. This means some invariant expected by the underlying
* system has been broken. If you see one of these errors, something is very
* broken.
*/
INTERNAL: 'internal',
/**
* The service is currently unavailable. This is most likely a transient
* condition and may be corrected by retrying with a backoff.
*
* See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
* and UNAVAILABLE.
*/
UNAVAILABLE: 'unavailable',
/** Unrecoverable data loss or corruption. */
DATA_LOSS: 'data-loss'
};
/** An error returned by a Firestore operation. */
var FirestoreError = /** @class */ (function (_super) {
tslib.__extends(FirestoreError, _super);
/** @hideconstructor */
function FirestoreError(
/**
* The backend error code associated with this error.
*/
code,
/**
* A custom error description.
*/
message) {
var _this = _super.call(this, message) || this;
_this.code = code;
_this.message = message;
/** The custom name for all FirestoreErrors. */
_this.name = 'FirebaseError';
// HACK: We write a toString property directly because Error is not a real
// class and so inheritance does not work correctly. We could alternatively
// do the same "back-door inheritance" trick that FirebaseError does.
_this.toString = function () { return _this.name + ": [code=" + _this.code + "]: " + _this.message; };
return _this;
}
return FirestoreError;
}(Error));
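// Illustrative sketch (not part of the original bundle): a FirestoreError
// pairs a Code value with a message and reports the standard FirebaseError
// name.
//
//   var err = new FirestoreError(Code.INVALID_ARGUMENT, 'Bad document path');
//   err.code;       // 'invalid-argument'
//   err.toString(); // 'FirebaseError: [code=invalid-argument]: Bad document path'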
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var DOCUMENT_KEY_NAME = '__name__';
/**
* Path represents an ordered sequence of string segments.
*/
var BasePath = /** @class */ (function () {
function BasePath(segments, offset, length) {
if (offset === undefined) {
offset = 0;
}
else if (offset > segments.length) {
fail();
}
if (length === undefined) {
length = segments.length - offset;
}
else if (length > segments.length - offset) {
fail();
}
this.segments = segments;
this.offset = offset;
this.len = length;
}
Object.defineProperty(BasePath.prototype, "length", {
get: function () {
return this.len;
},
enumerable: false,
configurable: true
});
BasePath.prototype.isEqual = function (other) {
return BasePath.comparator(this, other) === 0;
};
BasePath.prototype.child = function (nameOrPath) {
var segments = this.segments.slice(this.offset, this.limit());
if (nameOrPath instanceof BasePath) {
nameOrPath.forEach(function (segment) {
segments.push(segment);
});
}
else {
segments.push(nameOrPath);
}
return this.construct(segments);
};
/** The index of one past the last segment of the path. */
BasePath.prototype.limit = function () {
return this.offset + this.length;
};
BasePath.prototype.popFirst = function (size) {
size = size === undefined ? 1 : size;
return this.construct(this.segments, this.offset + size, this.length - size);
};
BasePath.prototype.popLast = function () {
return this.construct(this.segments, this.offset, this.length - 1);
};
BasePath.prototype.firstSegment = function () {
return this.segments[this.offset];
};
BasePath.prototype.lastSegment = function () {
return this.get(this.length - 1);
};
BasePath.prototype.get = function (index) {
return this.segments[this.offset + index];
};
BasePath.prototype.isEmpty = function () {
return this.length === 0;
};
BasePath.prototype.isPrefixOf = function (other) {
if (other.length < this.length) {
return false;
}
for (var i = 0; i < this.length; i++) {
if (this.get(i) !== other.get(i)) {
return false;
}
}
return true;
};
BasePath.prototype.isImmediateParentOf = function (potentialChild) {
if (this.length + 1 !== potentialChild.length) {
return false;
}
for (var i = 0; i < this.length; i++) {
if (this.get(i) !== potentialChild.get(i)) {
return false;
}
}
return true;
};
BasePath.prototype.forEach = function (fn) {
for (var i = this.offset, end = this.limit(); i < end; i++) {
fn(this.segments[i]);
}
};
BasePath.prototype.toArray = function () {
return this.segments.slice(this.offset, this.limit());
};
BasePath.comparator = function (p1, p2) {
var len = Math.min(p1.length, p2.length);
for (var i = 0; i < len; i++) {
var left = p1.get(i);
var right = p2.get(i);
if (left < right) {
return -1;
}
if (left > right) {
return 1;
}
}
if (p1.length < p2.length) {
return -1;
}
if (p1.length > p2.length) {
return 1;
}
return 0;
};
return BasePath;
}());
/**
* A slash-separated path for navigating resources (documents and collections)
* within Firestore.
*/
var ResourcePath = /** @class */ (function (_super) {
tslib.__extends(ResourcePath, _super);
function ResourcePath() {
return _super !== null && _super.apply(this, arguments) || this;
}
ResourcePath.prototype.construct = function (segments, offset, length) {
return new ResourcePath(segments, offset, length);
};
ResourcePath.prototype.canonicalString = function () {
// NOTE: The client is ignorant of any path segments containing escape
// sequences (e.g. __id123__) and just passes them through raw (they exist
// for legacy reasons and should not be used frequently).
return this.toArray().join('/');
};
ResourcePath.prototype.toString = function () {
return this.canonicalString();
};
/**
* Creates a resource path from the given slash-delimited string. If multiple
* arguments are provided, all components are combined. Leading and trailing
* slashes from all components are ignored.
*/
ResourcePath.fromString = function () {
var pathComponents = [];
for (var _i = 0; _i < arguments.length; _i++) {
pathComponents[_i] = arguments[_i];
}
// NOTE: The client is ignorant of any path segments containing escape
// sequences (e.g. __id123__) and just passes them through raw (they exist
// for legacy reasons and should not be used frequently).
var segments = [];
for (var _d = 0, pathComponents_1 = pathComponents; _d < pathComponents_1.length; _d++) {
var path = pathComponents_1[_d];
if (path.indexOf('//') >= 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid segment (" + path + "). Paths must not contain // in them.");
}
// Strip leading and trailing slashes.
segments.push.apply(segments, path.split('/').filter(function (segment) { return segment.length > 0; }));
}
return new ResourcePath(segments);
};
ResourcePath.emptyPath = function () {
return new ResourcePath([]);
};
return ResourcePath;
}(BasePath));
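// Illustrative sketch (not part of the original bundle): fromString() combines
// its arguments and ignores leading/trailing slashes, while '//' is rejected.
//
//   var path = ResourcePath.fromString('rooms/', '/messages/msg1');
//   path.canonicalString();           // 'rooms/messages/msg1'
//   path.length;                      // 3
//   path.popLast().canonicalString(); // 'rooms/messages'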
var identifierRegExp = /^[_a-zA-Z][_a-zA-Z0-9]*$/;
/** A dot-separated path for navigating sub-objects within a document. */
var FieldPath$1 = /** @class */ (function (_super) {
tslib.__extends(FieldPath$1, _super);
function FieldPath$1() {
return _super !== null && _super.apply(this, arguments) || this;
}
FieldPath$1.prototype.construct = function (segments, offset, length) {
return new FieldPath$1(segments, offset, length);
};
/**
* Returns true if the string could be used as a segment in a field path
* without escaping.
*/
FieldPath$1.isValidIdentifier = function (segment) {
return identifierRegExp.test(segment);
};
FieldPath$1.prototype.canonicalString = function () {
return this.toArray()
.map(function (str) {
str = str.replace(/\\/g, '\\\\').replace(/`/g, '\\`');
if (!FieldPath$1.isValidIdentifier(str)) {
str = '`' + str + '`';
}
return str;
})
.join('.');
};
FieldPath$1.prototype.toString = function () {
return this.canonicalString();
};
/**
* Returns true if this field references the key of a document.
*/
FieldPath$1.prototype.isKeyField = function () {
return this.length === 1 && this.get(0) === DOCUMENT_KEY_NAME;
};
/**
* The field designating the key of a document.
*/
FieldPath$1.keyField = function () {
return new FieldPath$1([DOCUMENT_KEY_NAME]);
};
/**
* Parses a field string from the given server-formatted string.
*
* - Splitting the empty string is not allowed (for now at least).
* - Empty segments within the string (e.g. if there are two consecutive
* separators) are not allowed.
*
* TODO(b/37244157): we should make this more strict. Right now, it allows
* non-identifier path components, even if they aren't escaped.
*/
FieldPath$1.fromServerFormat = function (path) {
var segments = [];
var current = '';
var i = 0;
var addCurrentSegment = function () {
if (current.length === 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid field path (" + path + "). Paths must not be empty, begin " +
"with '.', end with '.', or contain '..'");
}
segments.push(current);
current = '';
};
var inBackticks = false;
while (i < path.length) {
var c = path[i];
if (c === '\\') {
if (i + 1 === path.length) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Path has trailing escape character: ' + path);
}
var next = path[i + 1];
if (!(next === '\\' || next === '.' || next === '`')) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Path has invalid escape sequence: ' + path);
}
current += next;
i += 2;
}
else if (c === '`') {
inBackticks = !inBackticks;
i++;
}
else if (c === '.' && !inBackticks) {
addCurrentSegment();
i++;
}
else {
current += c;
i++;
}
}
addCurrentSegment();
if (inBackticks) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Unterminated ` in path: ' + path);
}
return new FieldPath$1(segments);
};
FieldPath$1.emptyPath = function () {
return new FieldPath$1([]);
};
return FieldPath$1;
}(BasePath));
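// Illustrative sketch (not part of the original bundle): fromServerFormat()
// honors backtick-quoted segments and backslash escapes (inputs shown as
// JavaScript string literals).
//
//   FieldPath$1.fromServerFormat('foo.bar').toArray();       // ['foo', 'bar']
//   FieldPath$1.fromServerFormat('foo.`bar.baz`').toArray(); // ['foo', 'bar.baz']
//   FieldPath$1.fromServerFormat('a\\.b').toArray();         // ['a.b']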
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var escapeChar = '\u0001';
var encodedSeparatorChar = '\u0001';
var encodedNul = '\u0010';
var encodedEscape = '\u0011';
/**
* Encodes a resource path into a IndexedDb-compatible string form.
*/
function encodeResourcePath(path) {
var result = '';
for (var i = 0; i < path.length; i++) {
if (result.length > 0) {
result = encodeSeparator(result);
}
result = encodeSegment(path.get(i), result);
}
return encodeSeparator(result);
}
/** Encodes a single segment of a resource path into the given result */
function encodeSegment(segment, resultBuf) {
var result = resultBuf;
var length = segment.length;
for (var i = 0; i < length; i++) {
var c = segment.charAt(i);
switch (c) {
case '\0':
result += escapeChar + encodedNul;
break;
case escapeChar:
result += escapeChar + encodedEscape;
break;
default:
result += c;
}
}
return result;
}
/** Encodes a path separator into the given result */
function encodeSeparator(result) {
return result + escapeChar + encodedSeparatorChar;
}
/**
* Decodes the given IndexedDb-compatible string form of a resource path into
* a ResourcePath instance. Note that this method is not suitable for use with
* decoding resource names from the server; those are One Platform format
* strings.
*/
function decodeResourcePath(path) {
// Even the empty path must encode as a path of at least length 2. A path
// of exactly length 2 must be the empty path.
var length = path.length;
hardAssert(length >= 2);
if (length === 2) {
hardAssert(path.charAt(0) === escapeChar && path.charAt(1) === encodedSeparatorChar);
return ResourcePath.emptyPath();
}
// Escape characters cannot exist past the second-to-last position in the
// source value.
var lastReasonableEscapeIndex = length - 2;
var segments = [];
var segmentBuilder = '';
for (var start = 0; start < length;) {
// The last two characters of a valid encoded path must be a separator, so
// there must be an end to this segment.
var end = path.indexOf(escapeChar, start);
if (end < 0 || end > lastReasonableEscapeIndex) {
fail();
}
var next = path.charAt(end + 1);
switch (next) {
case encodedSeparatorChar:
var currentPiece = path.substring(start, end);
var segment = void 0;
if (segmentBuilder.length === 0) {
// Avoid copying for the common case of a segment that excludes \0
// and \001
segment = currentPiece;
}
else {
segmentBuilder += currentPiece;
segment = segmentBuilder;
segmentBuilder = '';
}
segments.push(segment);
break;
case encodedNul:
segmentBuilder += path.substring(start, end);
segmentBuilder += '\0';
break;
case encodedEscape:
// The escape character can be used in the output to encode itself.
segmentBuilder += path.substring(start, end + 1);
break;
default:
fail();
}
start = end + 2;
}
return new ResourcePath(segments);
}
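// Illustrative round-trip sketch (not part of the original bundle): each
// segment is terminated by the escapeChar + encodedSeparatorChar pair, and
// decoding recovers the original path.
//
//   var encoded = encodeResourcePath(ResourcePath.fromString('rooms/msg1'));
//   decodeResourcePath(encoded).canonicalString(); // 'rooms/msg1'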
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Schema Version for the Web client:
* 1. Initial version including Mutation Queue, Query Cache, and Remote
* Document Cache
* 2. Used to ensure a targetGlobal object exists and add targetCount to it. No
* longer required because migration 3 unconditionally clears it.
* 3. Dropped and re-created Query Cache to deal with cache corruption related
* to limbo resolution. Addresses
* https://github.com/firebase/firebase-ios-sdk/issues/1548
* 4. Multi-Tab Support.
* 5. Removal of held write acks.
* 6. Create document global for tracking document cache size.
* 7. Ensure every cached document has a sentinel row with a sequence number.
* 8. Add collection-parent index for Collection Group queries.
* 9. Change RemoteDocumentChanges store to be keyed by readTime rather than
* an auto-incrementing ID. This is required for Index-Free queries.
* 10. Rewrite the canonical IDs to the explicit Protobuf-based format.
* 11. Add bundles and named_queries for bundle support.
*/
var SCHEMA_VERSION = 11;
/**
* Wrapper class to store timestamps (seconds and nanos) in IndexedDb objects.
*/
var DbTimestamp = /** @class */ (function () {
function DbTimestamp(seconds, nanoseconds) {
this.seconds = seconds;
this.nanoseconds = nanoseconds;
}
return DbTimestamp;
}());
/**
* A singleton object to be stored in the 'owner' store in IndexedDb.
*
* A given database can have a single primary tab assigned at a given time. That
* tab must validate that it is still holding the primary lease before every
* operation that requires locked access. The primary tab should regularly
* write an updated timestamp to this lease to prevent other tabs from
* "stealing" the primary lease
*/
var DbPrimaryClient = /** @class */ (function () {
function DbPrimaryClient(ownerId,
/** Whether to allow shared access from multiple tabs. */
allowTabSynchronization, leaseTimestampMs) {
this.ownerId = ownerId;
this.allowTabSynchronization = allowTabSynchronization;
this.leaseTimestampMs = leaseTimestampMs;
}
return DbPrimaryClient;
}());
/**
* Name of the IndexedDb object store.
*
* Note that the name 'owner' is chosen to ensure backwards compatibility with
* older clients that only supported single locked access to the persistence
* layer.
*/
DbPrimaryClient.store = 'owner';
/**
* The key string used for the single object that exists in the
* DbPrimaryClient store.
*/
DbPrimaryClient.key = 'owner';
/**
* An object to be stored in the 'mutationQueues' store in IndexedDb.
*
* Each user gets a single queue of MutationBatches to apply to the server.
* DbMutationQueue tracks the metadata about the queue.
*/
var DbMutationQueue = /** @class */ (function () {
function DbMutationQueue(
/**
* The normalized user ID to which this queue belongs.
*/
userId,
/**
* An identifier for the highest numbered batch that has been acknowledged
* by the server. All MutationBatches in this queue with batchIds less
* than or equal to this value are considered to have been acknowledged by
* the server.
*
* NOTE: this is deprecated and no longer used by the code.
*/
lastAcknowledgedBatchId,
/**
* A stream token that was previously sent by the server.
*
* See StreamingWriteRequest in datastore.proto for more details about
* usage.
*
* After sending this token, earlier tokens may not be used anymore so
* only a single stream token is retained.
*
* NOTE: this is deprecated and no longer used by the code.
*/
lastStreamToken) {
this.userId = userId;
this.lastAcknowledgedBatchId = lastAcknowledgedBatchId;
this.lastStreamToken = lastStreamToken;
}
return DbMutationQueue;
}());
/** Name of the IndexedDb object store. */
DbMutationQueue.store = 'mutationQueues';
/** Keys are automatically assigned via the userId property. */
DbMutationQueue.keyPath = 'userId';
/**
* An object to be stored in the 'mutations' store in IndexedDb.
*
* Represents a batch of user-level mutations intended to be sent to the server
* in a single write. Each user-level batch gets a separate DbMutationBatch
* with a new batchId.
*/
var DbMutationBatch = /** @class */ (function () {
function DbMutationBatch(
/**
* The normalized user ID to which this batch belongs.
*/
userId,
/**
* An identifier for this batch, allocated using an auto-generated key.
*/
batchId,
/**
* The local write time of the batch, stored as milliseconds since the
* epoch.
*/
localWriteTimeMs,
/**
* A list of "mutations" that represent a partial base state from when this
* write batch was initially created. During local application of the write
* batch, these baseMutations are applied prior to the real writes in order
* to override certain document fields from the remote document cache. This
* is necessary in the case of non-idempotent writes (e.g. `increment()`
* transforms) to make sure that the local view of the modified documents
* doesn't flicker if the remote document cache receives the result of the
* non-idempotent write before the write is removed from the queue.
*
* These mutations are never sent to the backend.
*/
baseMutations,
/**
* A list of mutations to apply. All mutations will be applied atomically.
*
* Mutations are serialized via toMutation().
*/
mutations) {
this.userId = userId;
this.batchId = batchId;
this.localWriteTimeMs = localWriteTimeMs;
this.baseMutations = baseMutations;
this.mutations = mutations;
}
return DbMutationBatch;
}());
/** Name of the IndexedDb object store. */
DbMutationBatch.store = 'mutations';
/** Keys are automatically assigned via the userId, batchId properties. */
DbMutationBatch.keyPath = 'batchId';
/** The index name for lookup of mutations by user. */
DbMutationBatch.userMutationsIndex = 'userMutationsIndex';
/** The user mutations index is keyed by [userId, batchId] pairs. */
DbMutationBatch.userMutationsKeyPath = ['userId', 'batchId'];
/**
* An object to be stored in the 'documentMutations' store in IndexedDb.
*
* A manually maintained index of all the mutation batches that affect a given
* document key. The rows in this table are references based on the contents of
* DbMutationBatch.mutations.
*/
var DbDocumentMutation = /** @class */ (function () {
function DbDocumentMutation() {
}
/**
* Creates a [userId] key for use in the DbDocumentMutations index to iterate
* over all of a user's document mutations.
*/
DbDocumentMutation.prefixForUser = function (userId) {
return [userId];
};
/**
* Creates a [userId, encodedPath] key for use in the DbDocumentMutations
* index to iterate over all document mutations for a given path or lower.
*/
DbDocumentMutation.prefixForPath = function (userId, path) {
return [userId, encodeResourcePath(path)];
};
/**
* Creates a full index key of [userId, encodedPath, batchId] for inserting
* and deleting into the DbDocumentMutations index.
*/
DbDocumentMutation.key = function (userId, path, batchId) {
return [userId, encodeResourcePath(path), batchId];
};
return DbDocumentMutation;
}());
DbDocumentMutation.store = 'documentMutations';
/**
* Because we store all the useful information for this store in the key,
* there is no useful information to store as the value. The raw (unencoded)
* path cannot be stored because IndexedDb doesn't store prototype
* information.
*/
DbDocumentMutation.PLACEHOLDER = new DbDocumentMutation();
/**
* Represents the known absence of a document at a particular version.
* Stored in IndexedDb as part of a DbRemoteDocument object.
*/
var DbNoDocument = /** @class */ (function () {
function DbNoDocument(path, readTime) {
this.path = path;
this.readTime = readTime;
}
return DbNoDocument;
}());
/**
* Represents a document that is known to exist but whose data is unknown.
* Stored in IndexedDb as part of a DbRemoteDocument object.
*/
var DbUnknownDocument = /** @class */ (function () {
function DbUnknownDocument(path, version) {
this.path = path;
this.version = version;
}
return DbUnknownDocument;
}());
/**
* An object to be stored in the 'remoteDocuments' store in IndexedDb.
* It represents either:
*
* - A complete document.
* - A "no document" representing a document that is known not to exist (at
* some version).
* - An "unknown document" representing a document that is known to exist (at
* some version) but whose contents are unknown.
*
* Note: This is the persisted equivalent of a MaybeDocument and could perhaps
* be made more general if necessary.
*/
var DbRemoteDocument = /** @class */ (function () {
// TODO: We are currently storing full document keys almost three times
// (once as part of the primary key, once - partly - as `parentPath` and once
// inside the encoded documents). During our next migration, we should
// rewrite the primary key as parentPath + document ID which would allow us
// to drop one value.
function DbRemoteDocument(
/**
* Set to an instance of DbUnknownDocument if the data for a document is
* not known, but it is known that a document exists at the specified
* version (e.g. it had a successful update applied to it)
*/
unknownDocument,
/**
* Set to an instance of a DbNoDocument if it is known that no document
* exists.
*/
noDocument,
/**
* Set to an instance of a Document if there's a cached version of the
* document.
*/
document,
/**
* Documents that were written to the remote document store based on
* a write acknowledgment are marked with `hasCommittedMutations`. These
* documents are potentially inconsistent with the backend's copy and use
* the write's commit version as their document version.
*/
hasCommittedMutations,
/**
* When the document was read from the backend. Undefined for data written
* prior to schema version 9.
*/
readTime,
/**
* The path of the collection this document is part of. Undefined for data
* written prior to schema version 9.
*/
parentPath) {
this.unknownDocument = unknownDocument;
this.noDocument = noDocument;
this.document = document;
this.hasCommittedMutations = hasCommittedMutations;
this.readTime = readTime;
this.parentPath = parentPath;
}
return DbRemoteDocument;
}());
DbRemoteDocument.store = 'remoteDocuments';
/**
* An index that provides access to all entries sorted by read time (which
* corresponds to the last modification time of each row).
*
* This index is used to provide a changelog for Multi-Tab.
*/
DbRemoteDocument.readTimeIndex = 'readTimeIndex';
DbRemoteDocument.readTimeIndexPath = 'readTime';
/**
* An index that provides access to documents in a collection sorted by read
* time.
*
* This index is used to allow the RemoteDocumentCache to fetch newly changed
* documents in a collection.
*/
DbRemoteDocument.collectionReadTimeIndex = 'collectionReadTimeIndex';
DbRemoteDocument.collectionReadTimeIndexPath = ['parentPath', 'readTime'];
/**
* Contains a single entry that has metadata about the remote document cache.
*/
var DbRemoteDocumentGlobal = /** @class */ (function () {
/**
* @param byteSize - Approximately the total size in bytes of all the
* documents in the document cache.
*/
function DbRemoteDocumentGlobal(byteSize) {
this.byteSize = byteSize;
}
return DbRemoteDocumentGlobal;
}());
DbRemoteDocumentGlobal.store = 'remoteDocumentGlobal';
DbRemoteDocumentGlobal.key = 'remoteDocumentGlobalKey';
/**
* An object to be stored in the 'targets' store in IndexedDb.
*
* This is based on and should be kept in sync with the proto used in the iOS
* client.
*
* Each query the client listens to against the server is tracked on disk so
* that the query can be efficiently resumed on restart.
*/
var DbTarget = /** @class */ (function () {
function DbTarget(
/**
* An auto-generated sequential numeric identifier for the query.
*
* Queries are stored using their canonicalId as the key, but these
* canonicalIds can be quite long so we additionally assign a unique
* queryId which can be used by referenced data structures (e.g.
* indexes) to minimize the on-disk cost.
*/
targetId,
/**
* The canonical string representing this query. This is not unique.
*/
canonicalId,
/**
* The last readTime received from the Watch Service for this query.
*
* This is the same value as TargetChange.read_time in the protos.
*/
readTime,
/**
* An opaque, server-assigned token that allows watching a query to be
* resumed after disconnecting without retransmitting all the data
* that matches the query. The resume token essentially identifies a
* point in time from which the server should resume sending results.
*
* This is related to the snapshotVersion in that the resumeToken
* effectively also encodes that value, but the resumeToken is opaque
* and sometimes encodes additional information.
*
* A consequence of this is that the resumeToken should be used when
* asking the server to reason about where this client is in the watch
* stream, but the client should use the snapshotVersion for its own
* purposes.
*
* This is the same value as TargetChange.resume_token in the protos.
*/
resumeToken,
/**
* A sequence number representing the last time this query was
* listened to, used for garbage collection purposes.
*
* Conventionally this would be a timestamp value, but device-local
* clocks are unreliable and they must be able to create new listens
* even while disconnected. Instead this should be a monotonically
* increasing number that's incremented on each listen call.
*
* This is different from the queryId since the queryId is an
* immutable identifier assigned to the Query on first use while
* lastListenSequenceNumber is updated every time the query is
* listened to.
*/
lastListenSequenceNumber,
/**
* Denotes the maximum snapshot version at which the associated query view
* contained no limbo documents. Undefined for data written prior to
* schema version 9.
*/
lastLimboFreeSnapshotVersion,
/**
* The query for this target.
*
* Because canonical ids are not unique we must store the actual query. We
* use the proto to have an object we can persist without having to
* duplicate translation logic to and from a `Query` object.
*/
query) {
this.targetId = targetId;
this.canonicalId = canonicalId;
this.readTime = readTime;
this.resumeToken = resumeToken;
this.lastListenSequenceNumber = lastListenSequenceNumber;
this.lastLimboFreeSnapshotVersion = lastLimboFreeSnapshotVersion;
this.query = query;
}
return DbTarget;
}());
DbTarget.store = 'targets';
/** Keys are automatically assigned via the targetId property. */
DbTarget.keyPath = 'targetId';
/** The name of the queryTargets index. */
DbTarget.queryTargetsIndexName = 'queryTargetsIndex';
/**
* An index from canonicalIds to the targets that match them. This is not
* a unique mapping because canonicalId does not promise a unique name for all
* possible queries, so we append the targetId to make the mapping unique.
*/
DbTarget.queryTargetsKeyPath = ['canonicalId', 'targetId'];
/**
* An object representing an association between a target and a document, or a
* sentinel row marking the last sequence number at which a document was used.
* Each cached document must have a corresponding sentinel row before LRU
* garbage collection is enabled.
*
* The target associations and sentinel rows are co-located so that orphaned
* documents and their sequence numbers can be identified efficiently via a scan
* of this store.
*/
var DbTargetDocument = /** @class */ (function () {
function DbTargetDocument(
/**
* The targetId identifying a target or 0 for a sentinel row.
*/
targetId,
/**
* The path to the document, as encoded in the key.
*/
path,
/**
* If this is a sentinel row, this should be the sequence number of the last
* time the document specified by `path` was used. Otherwise, it should be
* `undefined`.
*/
sequenceNumber) {
this.targetId = targetId;
this.path = path;
this.sequenceNumber = sequenceNumber;
}
return DbTargetDocument;
}());
/** Name of the IndexedDb object store. */
DbTargetDocument.store = 'targetDocuments';
/** Keys are automatically assigned via the targetId, path properties. */
DbTargetDocument.keyPath = ['targetId', 'path'];
/** The index name for the reverse index. */
DbTargetDocument.documentTargetsIndex = 'documentTargetsIndex';
/** We also need to create the reverse index for these properties. */
DbTargetDocument.documentTargetsKeyPath = ['path', 'targetId'];
/**
* A record of global state tracked across all Targets, tracked separately
* to avoid the need for extra indexes.
*
* This should be kept in-sync with the proto used in the iOS client.
*/
var DbTargetGlobal = /** @class */ (function () {
function DbTargetGlobal(
/**
* The highest numbered target id across all targets.
*
* See DbTarget.targetId.
*/
highestTargetId,
/**
* The highest numbered lastListenSequenceNumber across all targets.
*
* See DbTarget.lastListenSequenceNumber.
*/
highestListenSequenceNumber,
/**
* A global snapshot version representing the last consistent snapshot we
* received from the backend. This is monotonically increasing and any
* snapshots received from the backend prior to this version (e.g. for
* targets resumed with a resumeToken) should be suppressed (buffered)
* until the backend has caught up to this snapshot version again. This
* prevents our cache from ever going backwards in time.
*/
lastRemoteSnapshotVersion,
/**
* The number of targets persisted.
*/
targetCount) {
this.highestTargetId = highestTargetId;
this.highestListenSequenceNumber = highestListenSequenceNumber;
this.lastRemoteSnapshotVersion = lastRemoteSnapshotVersion;
this.targetCount = targetCount;
}
return DbTargetGlobal;
}());
/**
* The key string used for the single object that exists in the
* DbTargetGlobal store.
*/
DbTargetGlobal.key = 'targetGlobalKey';
DbTargetGlobal.store = 'targetGlobal';
/**
* An object representing an association between a Collection id (e.g. 'messages')
* and a parent path (e.g. '/chats/123') that contains it as a (sub)collection.
* This is used to efficiently find all collections to query when performing
* a Collection Group query.
*/
var DbCollectionParent = /** @class */ (function () {
function DbCollectionParent(
/**
* The collectionId (e.g. 'messages')
*/
collectionId,
/**
* The path to the parent (either a document location or an empty path for
* a root-level collection).
*/
parent) {
this.collectionId = collectionId;
this.parent = parent;
}
return DbCollectionParent;
}());
/** Name of the IndexedDb object store. */
DbCollectionParent.store = 'collectionParents';
/** Keys are automatically assigned via the collectionId, parent properties. */
DbCollectionParent.keyPath = ['collectionId', 'parent'];
/**
* A record of the metadata state of each client.
*
* PORTING NOTE: This is used to synchronize multi-tab state and does not need
* to be ported to iOS or Android.
*/
var DbClientMetadata = /** @class */ (function () {
function DbClientMetadata(
// Note: Previous schema versions included a field
// "lastProcessedDocumentChangeId". Don't use anymore.
/** The auto-generated client id assigned at client startup. */
clientId,
/** The last time this state was updated. */
updateTimeMs,
/** Whether the client's network connection is enabled. */
networkEnabled,
/** Whether this client is running in a foreground tab. */
inForeground) {
this.clientId = clientId;
this.updateTimeMs = updateTimeMs;
this.networkEnabled = networkEnabled;
this.inForeground = inForeground;
}
return DbClientMetadata;
}());
/** Name of the IndexedDb object store. */
DbClientMetadata.store = 'clientMetadata';
/** Keys are automatically assigned via the clientId property. */
DbClientMetadata.keyPath = 'clientId';
/**
* An object representing a bundle loaded by the SDK.
*/
var DbBundle = /** @class */ (function () {
function DbBundle(
/** The ID of the loaded bundle. */
bundleId,
/** The create time of the loaded bundle. */
createTime,
/** The schema version of the loaded bundle. */
version) {
this.bundleId = bundleId;
this.createTime = createTime;
this.version = version;
}
return DbBundle;
}());
/** Name of the IndexedDb object store. */
DbBundle.store = 'bundles';
DbBundle.keyPath = 'bundleId';
/**
* An object representing a named query loaded by the SDK via a bundle.
*/
var DbNamedQuery = /** @class */ (function () {
function DbNamedQuery(
/** The name of the query. */
name,
/** The read time of the results saved in the bundle from the named query. */
readTime,
/** The query saved in the bundle. */
bundledQuery) {
this.name = name;
this.readTime = readTime;
this.bundledQuery = bundledQuery;
}
return DbNamedQuery;
}());
/** Name of the IndexedDb object store. */
DbNamedQuery.store = 'namedQueries';
DbNamedQuery.keyPath = 'name';
// Visible for testing
var V1_STORES = [
DbMutationQueue.store,
DbMutationBatch.store,
DbDocumentMutation.store,
DbRemoteDocument.store,
DbTarget.store,
DbPrimaryClient.store,
DbTargetGlobal.store,
DbTargetDocument.store
];
// V2 is no longer usable (see comment at top of file)
// Visible for testing
var V3_STORES = V1_STORES;
// Visible for testing
// Note: DbRemoteDocumentChanges is no longer used and dropped with v9.
var V4_STORES = tslib.__spreadArray(tslib.__spreadArray([], V3_STORES), [DbClientMetadata.store]);
// V5 does not change the set of stores.
var V6_STORES = tslib.__spreadArray(tslib.__spreadArray([], V4_STORES), [DbRemoteDocumentGlobal.store]);
// V7 does not change the set of stores.
var V8_STORES = tslib.__spreadArray(tslib.__spreadArray([], V6_STORES), [DbCollectionParent.store]);
// V9 does not change the set of stores.
// V10 does not change the set of stores.
var V11_STORES = tslib.__spreadArray(tslib.__spreadArray([], V8_STORES), [DbBundle.store, DbNamedQuery.store]);
/**
* The list of all default IndexedDB stores used throughout the SDK. This is
* used when creating transactions so that access across all stores is done
* atomically.
*/
var ALL_STORES = V11_STORES;
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var PRIMARY_LEASE_LOST_ERROR_MSG = 'The current tab is not in the required state to perform this operation. ' +
'It might be necessary to refresh the browser tab.';
/**
* A base class representing a persistence transaction, encapsulating both the
* transaction's sequence numbers as well as a list of onCommitted listeners.
*
* When you call Persistence.runTransaction(), it will create a transaction and
* pass it to your callback. You then pass it to any method that operates
* on persistence.
*/
var PersistenceTransaction = /** @class */ (function () {
function PersistenceTransaction() {
this.onCommittedListeners = [];
}
PersistenceTransaction.prototype.addOnCommittedListener = function (listener) {
this.onCommittedListeners.push(listener);
};
PersistenceTransaction.prototype.raiseOnCommittedEvent = function () {
this.onCommittedListeners.forEach(function (listener) { return listener(); });
};
return PersistenceTransaction;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var Deferred = /** @class */ (function () {
function Deferred() {
var _this = this;
this.promise = new Promise(function (resolve, reject) {
_this.resolve = resolve;
_this.reject = reject;
});
}
return Deferred;
}());
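// Illustrative sketch (not part of the original bundle): Deferred exposes the
// resolve/reject functions of its promise so completion can be signaled from
// outside the executor.
//
//   var deferred = new Deferred();
//   deferred.promise.then(function (value) { console.log(value); }); // 'done'
//   deferred.resolve('done');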
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* PersistencePromise is essentially a re-implementation of Promise, except that
* it has a .next() method instead of .then(), and its .next() and .catch()
* callbacks are executed synchronously when a PersistencePromise resolves rather
* than asynchronously (Promise implementations use setImmediate() or similar).
*
* This is necessary to interoperate with IndexedDB which will automatically
* commit transactions if control is returned to the event loop without
* synchronously initiating another operation on the transaction.
*
* NOTE: .then() and .catch() only allow a single consumer, unlike normal
* Promises.
*/
var PersistencePromise = /** @class */ (function () {
function PersistencePromise(callback) {
var _this = this;
// NOTE: next/catchCallback will always point to our own wrapper functions,
// not the user's raw next() or catch() callbacks.
this.nextCallback = null;
this.catchCallback = null;
// When the operation resolves, we'll set result or error and mark isDone.
this.result = undefined;
this.error = undefined;
this.isDone = false;
// Set to true when .then() or .catch() are called and prevents additional
// chaining.
this.callbackAttached = false;
callback(function (value) {
_this.isDone = true;
_this.result = value;
if (_this.nextCallback) {
// value should be defined unless T is Void, but we can't express
// that in the type system.
_this.nextCallback(value);
}
}, function (error) {
_this.isDone = true;
_this.error = error;
if (_this.catchCallback) {
_this.catchCallback(error);
}
});
}
PersistencePromise.prototype.catch = function (fn) {
return this.next(undefined, fn);
};
PersistencePromise.prototype.next = function (nextFn, catchFn) {
var _this = this;
if (this.callbackAttached) {
fail();
}
this.callbackAttached = true;
if (this.isDone) {
if (!this.error) {
return this.wrapSuccess(nextFn, this.result);
}
else {
return this.wrapFailure(catchFn, this.error);
}
}
else {
return new PersistencePromise(function (resolve, reject) {
_this.nextCallback = function (value) {
_this.wrapSuccess(nextFn, value).next(resolve, reject);
};
_this.catchCallback = function (error) {
_this.wrapFailure(catchFn, error).next(resolve, reject);
};
});
}
};
PersistencePromise.prototype.toPromise = function () {
var _this = this;
return new Promise(function (resolve, reject) {
_this.next(resolve, reject);
});
};
PersistencePromise.prototype.wrapUserFunction = function (fn) {
try {
var result = fn();
if (result instanceof PersistencePromise) {
return result;
}
else {
return PersistencePromise.resolve(result);
}
}
catch (e) {
return PersistencePromise.reject(e);
}
};
PersistencePromise.prototype.wrapSuccess = function (nextFn, value) {
if (nextFn) {
return this.wrapUserFunction(function () { return nextFn(value); });
}
else {
// If there's no nextFn, then R must be the same as T
return PersistencePromise.resolve(value);
}
};
PersistencePromise.prototype.wrapFailure = function (catchFn, error) {
if (catchFn) {
return this.wrapUserFunction(function () { return catchFn(error); });
}
else {
return PersistencePromise.reject(error);
}
};
PersistencePromise.resolve = function (result) {
return new PersistencePromise(function (resolve, reject) {
resolve(result);
});
};
PersistencePromise.reject = function (error) {
return new PersistencePromise(function (resolve, reject) {
reject(error);
});
};
PersistencePromise.waitFor = function (
// Accept all Promise types in waitFor().
// eslint-disable-next-line @typescript-eslint/no-explicit-any
all) {
return new PersistencePromise(function (resolve, reject) {
var expectedCount = 0;
var resolvedCount = 0;
var done = false;
all.forEach(function (element) {
++expectedCount;
element.next(function () {
++resolvedCount;
if (done && resolvedCount === expectedCount) {
resolve();
}
}, function (err) { return reject(err); });
});
done = true;
if (resolvedCount === expectedCount) {
resolve();
}
});
};
/**
* Given an array of predicate functions that asynchronously evaluate to a
* boolean, implements a short-circuiting `or` between the results. Predicates
* will be evaluated until one of them returns `true`, then stop. The final
* result will be whether any of them returned `true`.
*/
PersistencePromise.or = function (predicates) {
var p = PersistencePromise.resolve(false);
var _loop_1 = function (predicate) {
p = p.next(function (isTrue) {
if (isTrue) {
return PersistencePromise.resolve(isTrue);
}
else {
return predicate();
}
});
};
for (var _i = 0, predicates_1 = predicates; _i < predicates_1.length; _i++) {
var predicate = predicates_1[_i];
_loop_1(predicate);
}
return p;
};
PersistencePromise.forEach = function (collection, f) {
var _this = this;
var promises = [];
collection.forEach(function (r, s) {
promises.push(f.call(_this, r, s));
});
return this.waitFor(promises);
};
return PersistencePromise;
}());
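// Illustrative sketch (not part of the original bundle): chaining uses next()
// instead of then(), and callbacks run synchronously once a value is available,
// which keeps IndexedDB transactions from auto-committing between steps.
//
//   PersistencePromise.resolve(1)
//       .next(function (value) { return PersistencePromise.resolve(value + 1); })
//       .next(function (value) { console.log(value); }) // logs 2
//       .toPromise();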
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// References to `window` are guarded by SimpleDb.isAvailable()
/* eslint-disable no-restricted-globals */
var LOG_TAG$g = 'SimpleDb';
/**
* The maximum number of retry attempts for an IndexedDb transaction that fails
* with a DOMException.
*/
var TRANSACTION_RETRY_COUNT = 3;
/**
* Wraps an IDBTransaction and exposes a store() method to get a handle to a
* specific object store.
*/
var SimpleDbTransaction = /** @class */ (function () {
function SimpleDbTransaction(action, transaction) {
var _this = this;
this.action = action;
this.transaction = transaction;
this.aborted = false;
/**
* A promise that resolves with the result of the IndexedDb transaction.
*/
this.completionDeferred = new Deferred();
this.transaction.oncomplete = function () {
_this.completionDeferred.resolve();
};
this.transaction.onabort = function () {
if (transaction.error) {
_this.completionDeferred.reject(new IndexedDbTransactionError(action, transaction.error));
}
else {
_this.completionDeferred.resolve();
}
};
this.transaction.onerror = function (event) {
var error = checkForAndReportiOSError(event.target.error);
_this.completionDeferred.reject(new IndexedDbTransactionError(action, error));
};
}
SimpleDbTransaction.open = function (db, action, mode, objectStoreNames) {
try {
return new SimpleDbTransaction(action, db.transaction(objectStoreNames, mode));
}
catch (e) {
throw new IndexedDbTransactionError(action, e);
}
};
Object.defineProperty(SimpleDbTransaction.prototype, "completionPromise", {
get: function () {
return this.completionDeferred.promise;
},
enumerable: false,
configurable: true
});
SimpleDbTransaction.prototype.abort = function (error) {
if (error) {
this.completionDeferred.reject(error);
}
if (!this.aborted) {
logDebug(LOG_TAG$g, 'Aborting transaction:', error ? error.message : 'Client-initiated abort');
this.aborted = true;
this.transaction.abort();
}
};
/**
* Returns a SimpleDbStore for the specified store. All
* operations performed on the SimpleDbStore happen within the context of this
* transaction and it cannot be used anymore once the transaction is
* completed.
*
* Note that we can't actually enforce that the KeyType and ValueType are
* correct, but they allow type safety through the rest of the consuming code.
*/
SimpleDbTransaction.prototype.store = function (storeName) {
var store = this.transaction.objectStore(storeName);
return new SimpleDbStore(store);
};
return SimpleDbTransaction;
}());
/**
* Provides a wrapper around IndexedDb with a simplified interface that uses
* Promise-like return values to chain operations. Real promises cannot be used
* since .then() continuations are executed asynchronously (e.g. via
* .setImmediate), which would cause IndexedDB to end the transaction.
* See PersistencePromise for more details.
*/
var SimpleDb = /** @class */ (function () {
/*
* Creates a new SimpleDb wrapper for IndexedDb database `name`.
*
* Note that `version` must not be a downgrade. IndexedDB does not support
* downgrading the schema version. We currently do not support any way to do
* versioning outside of IndexedDB's versioning mechanism, as only
* version-upgrade transactions are allowed to do things like create
* object stores.
*/
function SimpleDb(name, version, schemaConverter) {
this.name = name;
this.version = version;
this.schemaConverter = schemaConverter;
var iOSVersion = SimpleDb.getIOSVersion(util.getUA());
// NOTE: According to https://bugs.webkit.org/show_bug.cgi?id=197050, the
// bug we're checking for should exist in iOS >= 12.2 and < 13, but for
// whatever reason it's much harder to hit after 12.2 so we only proactively
// log on 12.2.
if (iOSVersion === 12.2) {
logError('Firestore persistence suffers from a bug in iOS 12.2 ' +
'Safari that may cause your app to stop working. See ' +
'https://stackoverflow.com/q/56496296/110915 for details ' +
'and a potential workaround.');
}
}
/** Deletes the specified database. */
SimpleDb.delete = function (name) {
logDebug(LOG_TAG$g, 'Removing database:', name);
return wrapRequest(window.indexedDB.deleteDatabase(name)).toPromise();
};
/** Returns true if IndexedDB is available in the current environment. */
SimpleDb.isAvailable = function () {
if (typeof indexedDB === 'undefined') {
return false;
}
if (SimpleDb.isMockPersistence()) {
return true;
}
// We extensively use indexed array values and compound keys,
// which IE and Edge do not support. However, they still have indexedDB
// defined on the window, so we need to check for them here and make sure
// to return that persistence is not enabled for those browsers.
// For tracking support of this feature, see here:
// https://developer.microsoft.com/en-us/microsoft-edge/platform/status/indexeddbarraysandmultientrysupport/
// Check the UA string to find out the browser.
var ua = util.getUA();
// IE 10
// ua = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)';
// IE 11
// ua = 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko';
// Edge
// ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML,
// like Gecko) Chrome/39.0.2171.71 Safari/537.36 Edge/12.0';
// iOS Safari: Disable for users running iOS version < 10.
var iOSVersion = SimpleDb.getIOSVersion(ua);
var isUnsupportedIOS = 0 < iOSVersion && iOSVersion < 10;
// Android browser: Disable for users running version < 4.5.
var androidVersion = SimpleDb.getAndroidVersion(ua);
var isUnsupportedAndroid = 0 < androidVersion && androidVersion < 4.5;
if (ua.indexOf('MSIE ') > 0 ||
ua.indexOf('Trident/') > 0 ||
ua.indexOf('Edge/') > 0 ||
isUnsupportedIOS ||
isUnsupportedAndroid) {
return false;
}
else {
return true;
}
};
/**
* Returns true if the backing IndexedDB store is the Node IndexedDBShim
* (see https://github.com/axemclion/IndexedDBShim).
*/
SimpleDb.isMockPersistence = function () {
var _a;
return (typeof process !== 'undefined' &&
((_a = process.env) === null || _a === void 0 ? void 0 : _a.USE_MOCK_PERSISTENCE) === 'YES');
};
/** Helper to get a typed SimpleDbStore from a transaction. */
SimpleDb.getStore = function (txn, store) {
return txn.store(store);
};
// visible for testing
/** Parse User Agent to determine iOS version. Returns -1 if not found. */
SimpleDb.getIOSVersion = function (ua) {
var iOSVersionRegex = ua.match(/i(?:phone|pad|pod) os ([\d_]+)/i);
var version = iOSVersionRegex
? iOSVersionRegex[1].split('_').slice(0, 2).join('.')
: '-1';
return Number(version);
};
// visible for testing
/** Parse User Agent to determine Android version. Returns -1 if not found. */
SimpleDb.getAndroidVersion = function (ua) {
var androidVersionRegex = ua.match(/Android ([\d.]+)/i);
var version = androidVersionRegex
? androidVersionRegex[1].split('.').slice(0, 2).join('.')
: '-1';
return Number(version);
};
/**
* Opens the specified database, creating or upgrading it if necessary.
*/
SimpleDb.prototype.ensureDb = function (action) {
return tslib.__awaiter(this, void 0, void 0, function () {
var _d;
var _this = this;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
if (!!this.db) return [3 /*break*/, 2];
logDebug(LOG_TAG$g, 'Opening database:', this.name);
_d = this;
return [4 /*yield*/, new Promise(function (resolve, reject) {
// TODO(mikelehen): Investigate browser compatibility.
// https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API/Using_IndexedDB
// suggests IE9 and older WebKit browsers handle upgrade
// differently. They expect setVersion, as described here:
// https://developer.mozilla.org/en-US/docs/Web/API/IDBVersionChangeRequest/setVersion
var request = indexedDB.open(_this.name, _this.version);
request.onsuccess = function (event) {
var db = event.target.result;
resolve(db);
};
request.onblocked = function () {
reject(new IndexedDbTransactionError(action, 'Cannot upgrade IndexedDB schema while another tab is open. ' +
'Close all tabs that access Firestore and reload this page to proceed.'));
};
request.onerror = function (event) {
var error = event.target.error;
if (error.name === 'VersionError') {
reject(new FirestoreError(Code.FAILED_PRECONDITION, 'A newer version of the Firestore SDK was previously used and so the persisted ' +
'data is not compatible with the version of the SDK you are now using. The SDK ' +
'will operate with persistence disabled. If you need persistence, please ' +
're-upgrade to a newer version of the SDK or else clear the persisted IndexedDB ' +
'data for your app to start fresh.'));
}
else {
reject(new IndexedDbTransactionError(action, error));
}
};
request.onupgradeneeded = function (event) {
logDebug(LOG_TAG$g, 'Database "' + _this.name + '" requires upgrade from version:', event.oldVersion);
var db = event.target.result;
_this.schemaConverter
.createOrUpgrade(db, request.transaction, event.oldVersion, _this.version)
.next(function () {
logDebug(LOG_TAG$g, 'Database upgrade to version ' + _this.version + ' complete');
});
};
})];
case 1:
_d.db = _e.sent();
_e.label = 2;
case 2:
if (this.versionchangelistener) {
this.db.onversionchange = function (event) { return _this.versionchangelistener(event); };
}
return [2 /*return*/, this.db];
}
});
});
};
SimpleDb.prototype.setVersionChangeListener = function (versionChangeListener) {
this.versionchangelistener = versionChangeListener;
if (this.db) {
this.db.onversionchange = function (event) {
return versionChangeListener(event);
};
}
};
SimpleDb.prototype.runTransaction = function (action, mode, objectStores, transactionFn) {
return tslib.__awaiter(this, void 0, void 0, function () {
var readonly, attemptNumber, _loop_2, this_1, state_1;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
readonly = mode === 'readonly';
attemptNumber = 0;
_loop_2 = function () {
var transaction_1, transactionFnResult, error_1, retryable;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
++attemptNumber;
_e.label = 1;
case 1:
_e.trys.push([1, 4, , 5]);
return [4 /*yield*/, this_1.ensureDb(action)];
case 2:
this_1.db = _e.sent();
transaction_1 = SimpleDbTransaction.open(this_1.db, action, readonly ? 'readonly' : 'readwrite', objectStores);
transactionFnResult = transactionFn(transaction_1)
.catch(function (error) {
// Abort the transaction if there was an error.
transaction_1.abort(error);
// We cannot actually recover, and calling `abort()` will cause the transaction's
// completion promise to be rejected. This in turn means that we won't use
// `transactionFnResult` below. We return a rejection here so that we don't add the
// possibility of returning `void` to the type of `transactionFnResult`.
return PersistencePromise.reject(error);
})
.toPromise();
// As noted above, errors are propagated by aborting the transaction. So
// we swallow any error here to avoid the browser logging it as unhandled.
transactionFnResult.catch(function () { });
// Wait for the transaction to complete (i.e. IndexedDb's onsuccess event to
// fire), but still return the original transactionFnResult back to the
// caller.
return [4 /*yield*/, transaction_1.completionPromise];
case 3:
// Wait for the transaction to complete (i.e. IndexedDb's onsuccess event to
// fire), but still return the original transactionFnResult back to the
// caller.
_e.sent();
return [2 /*return*/, { value: transactionFnResult }];
case 4:
error_1 = _e.sent();
retryable = error_1.name !== 'FirebaseError' &&
attemptNumber < TRANSACTION_RETRY_COUNT;
logDebug(LOG_TAG$g, 'Transaction failed with error:', error_1.message, 'Retrying:', retryable);
this_1.close();
if (!retryable) {
return [2 /*return*/, { value: Promise.reject(error_1) }];
}
return [3 /*break*/, 5];
case 5: return [2 /*return*/];
}
});
};
this_1 = this;
_d.label = 1;
case 1:
return [5 /*yield**/, _loop_2()];
case 2:
state_1 = _d.sent();
if (typeof state_1 === "object")
return [2 /*return*/, state_1.value];
return [3 /*break*/, 1];
case 3: return [2 /*return*/];
}
});
});
};
SimpleDb.prototype.close = function () {
if (this.db) {
this.db.close();
}
this.db = undefined;
};
return SimpleDb;
}());
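/**
 * Illustrative usage sketch (not part of the original bundle): running a
 * read/write transaction through SimpleDb. The database name, store name,
 * action label and schema converter below are hypothetical.
 */
function exampleSimpleDbRoundTrip() {
var exampleSchemaConverter = {
createOrUpgrade: function (db, txn, fromVersion, toVersion) {
if (fromVersion < 1) {
db.createObjectStore('example-store');
}
return PersistencePromise.resolve();
}
};
var simpleDb = new SimpleDb('example-db', 1, exampleSchemaConverter);
return simpleDb.runTransaction('example write', 'readwrite', ['example-store'], function (txn) {
var store = txn.store('example-store');
return store
.put('some-key', { value: 42 })
.next(function () { return store.get('some-key'); });
});
}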
/**
* A controller for iterating over a key range or index. It allows an iterate
* callback to delete the currently-referenced object, or jump to a new key
* within the key range or index.
*/
var IterationController = /** @class */ (function () {
function IterationController(dbCursor) {
this.dbCursor = dbCursor;
this.shouldStop = false;
this.nextKey = null;
}
Object.defineProperty(IterationController.prototype, "isDone", {
get: function () {
return this.shouldStop;
},
enumerable: false,
configurable: true
});
Object.defineProperty(IterationController.prototype, "skipToKey", {
get: function () {
return this.nextKey;
},
enumerable: false,
configurable: true
});
Object.defineProperty(IterationController.prototype, "cursor", {
set: function (value) {
this.dbCursor = value;
},
enumerable: false,
configurable: true
});
/**
* This function can be called to stop iteration at any point.
*/
IterationController.prototype.done = function () {
this.shouldStop = true;
};
/**
* This function can be called to skip to the next key, which could be
* an index key or a primary key.
*/
IterationController.prototype.skip = function (key) {
this.nextKey = key;
};
/**
* Delete the current cursor value from the object store.
*
* NOTE: You CANNOT do this with a keysOnly query.
*/
IterationController.prototype.delete = function () {
return wrapRequest(this.dbCursor.delete());
};
return IterationController;
}());
/** An error that wraps exceptions thrown during IndexedDB execution. */
var IndexedDbTransactionError = /** @class */ (function (_super) {
tslib.__extends(IndexedDbTransactionError, _super);
function IndexedDbTransactionError(actionName, cause) {
var _this = _super.call(this, Code.UNAVAILABLE, "IndexedDB transaction '" + actionName + "' failed: " + cause) || this;
_this.name = 'IndexedDbTransactionError';
return _this;
}
return IndexedDbTransactionError;
}(FirestoreError));
/** Verifies whether `e` is an IndexedDbTransactionError. */
function isIndexedDbTransactionError(e) {
// Use name equality, as instanceof checks on errors don't work with errors
// that wrap other errors.
return e.name === 'IndexedDbTransactionError';
}
/**
* A wrapper around an IDBObjectStore providing an API that:
*
* 1) Has generic KeyType / ValueType parameters to provide strongly-typed
* methods for acting against the object store.
* 2) Deals with IndexedDB's onsuccess / onerror event callbacks, making every
* method return a PersistencePromise instead.
* 3) Provides a higher-level API to avoid needing to do excessive wrapping of
* intermediate IndexedDB types (IDBCursorWithValue, etc.)
*/
var SimpleDbStore = /** @class */ (function () {
function SimpleDbStore(store) {
this.store = store;
}
SimpleDbStore.prototype.put = function (keyOrValue, value) {
var request;
if (value !== undefined) {
logDebug(LOG_TAG$g, 'PUT', this.store.name, keyOrValue, value);
request = this.store.put(value, keyOrValue);
}
else {
logDebug(LOG_TAG$g, 'PUT', this.store.name, '', keyOrValue);
request = this.store.put(keyOrValue);
}
return wrapRequest(request);
};
/**
* Adds a new value into an Object Store and returns the new key. Similar to
* IndexedDb's `add()`, this method will fail on primary key collisions.
*
* @param value - The object to write.
* @returns The key of the value to add.
*/
SimpleDbStore.prototype.add = function (value) {
logDebug(LOG_TAG$g, 'ADD', this.store.name, value, value);
var request = this.store.add(value);
return wrapRequest(request);
};
/**
* Gets the object with the specified key from the specified store, or null
* if no object exists with the specified key.
*
* @param key - The key of the object to get.
* @returns The object with the specified key or null if no object exists.
*/
SimpleDbStore.prototype.get = function (key) {
var _this = this;
var request = this.store.get(key);
// We're doing an unsafe cast to ValueType.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return wrapRequest(request).next(function (result) {
// Normalize nonexistence to null.
if (result === undefined) {
result = null;
}
logDebug(LOG_TAG$g, 'GET', _this.store.name, key, result);
return result;
});
};
SimpleDbStore.prototype.delete = function (key) {
logDebug(LOG_TAG$g, 'DELETE', this.store.name, key);
var request = this.store.delete(key);
return wrapRequest(request);
};
/**
* If we ever need more of the count variants, we can add overloads. For now,
* all we need is to count everything in a store.
*
* Returns the number of rows in the store.
*/
SimpleDbStore.prototype.count = function () {
logDebug(LOG_TAG$g, 'COUNT', this.store.name);
var request = this.store.count();
return wrapRequest(request);
};
SimpleDbStore.prototype.loadAll = function (indexOrRange, range) {
var cursor = this.cursor(this.options(indexOrRange, range));
var results = [];
return this.iterateCursor(cursor, function (key, value) {
results.push(value);
}).next(function () {
return results;
});
};
SimpleDbStore.prototype.deleteAll = function (indexOrRange, range) {
logDebug(LOG_TAG$g, 'DELETE ALL', this.store.name);
var options = this.options(indexOrRange, range);
options.keysOnly = false;
var cursor = this.cursor(options);
return this.iterateCursor(cursor, function (key, value, control) {
// NOTE: Calling delete() on a cursor is documented as more efficient than
// calling delete() on an object store with a single key
// (https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/delete),
// however, this requires us *not* to use a keysOnly cursor
// (https://developer.mozilla.org/en-US/docs/Web/API/IDBCursor/delete). We
// may want to compare the performance of each method.
return control.delete();
});
};
SimpleDbStore.prototype.iterate = function (optionsOrCallback, callback) {
var options;
if (!callback) {
options = {};
callback = optionsOrCallback;
}
else {
options = optionsOrCallback;
}
var cursor = this.cursor(options);
return this.iterateCursor(cursor, callback);
};
/**
* Iterates over a store, but waits for the given callback to complete for
* each entry before iterating the next entry. This allows the callback to do
* asynchronous work to determine if this iteration should continue.
*
* The provided callback should return `true` to continue iteration, and
* `false` otherwise.
*/
SimpleDbStore.prototype.iterateSerial = function (callback) {
var cursorRequest = this.cursor({});
return new PersistencePromise(function (resolve, reject) {
cursorRequest.onerror = function (event) {
var error = checkForAndReportiOSError(event.target.error);
reject(error);
};
cursorRequest.onsuccess = function (event) {
var cursor = event.target.result;
if (!cursor) {
resolve();
return;
}
callback(cursor.primaryKey, cursor.value).next(function (shouldContinue) {
if (shouldContinue) {
cursor.continue();
}
else {
resolve();
}
});
};
});
};
SimpleDbStore.prototype.iterateCursor = function (cursorRequest, fn) {
var results = [];
return new PersistencePromise(function (resolve, reject) {
cursorRequest.onerror = function (event) {
reject(event.target.error);
};
cursorRequest.onsuccess = function (event) {
var cursor = event.target.result;
if (!cursor) {
resolve();
return;
}
var controller = new IterationController(cursor);
var userResult = fn(cursor.primaryKey, cursor.value, controller);
if (userResult instanceof PersistencePromise) {
var userPromise = userResult.catch(function (err) {
controller.done();
return PersistencePromise.reject(err);
});
results.push(userPromise);
}
if (controller.isDone) {
resolve();
}
else if (controller.skipToKey === null) {
cursor.continue();
}
else {
cursor.continue(controller.skipToKey);
}
};
}).next(function () {
return PersistencePromise.waitFor(results);
});
};
SimpleDbStore.prototype.options = function (indexOrRange, range) {
var indexName = undefined;
if (indexOrRange !== undefined) {
if (typeof indexOrRange === 'string') {
indexName = indexOrRange;
}
else {
range = indexOrRange;
}
}
return { index: indexName, range: range };
};
SimpleDbStore.prototype.cursor = function (options) {
var direction = 'next';
if (options.reverse) {
direction = 'prev';
}
if (options.index) {
var index = this.store.index(options.index);
if (options.keysOnly) {
return index.openKeyCursor(options.range, direction);
}
else {
return index.openCursor(options.range, direction);
}
}
else {
return this.store.openCursor(options.range, direction);
}
};
return SimpleDbStore;
}());
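/**
 * Illustrative usage sketch (not part of the original bundle): stopping an
 * iterate() pass early via the IterationController. The store is assumed to
 * come from a SimpleDbTransaction, e.g. `txn.store('example-store')`.
 */
function exampleIterateFirstTen(store) {
var seen = [];
return store
.iterate(function (key, value, control) {
seen.push(value);
if (seen.length >= 10) {
// Stops the cursor; remaining entries are never visited.
control.done();
}
})
.next(function () { return seen; });
}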
/**
* Wraps an IDBRequest in a PersistencePromise, using the onsuccess / onerror
* handlers to resolve / reject the PersistencePromise as appropriate.
*/
function wrapRequest(request) {
return new PersistencePromise(function (resolve, reject) {
request.onsuccess = function (event) {
var result = event.target.result;
resolve(result);
};
request.onerror = function (event) {
var error = checkForAndReportiOSError(event.target.error);
reject(error);
};
});
}
// Guard so we only report the error once.
var reportedIOSError = false;
function checkForAndReportiOSError(error) {
var iOSVersion = SimpleDb.getIOSVersion(util.getUA());
if (iOSVersion >= 12.2 && iOSVersion < 13) {
var IOS_ERROR = 'An internal error was encountered in the Indexed Database server';
if (error.message.indexOf(IOS_ERROR) >= 0) {
// Wrap error in a more descriptive one.
var newError_1 = new FirestoreError('internal', "IOS_INDEXEDDB_BUG1: IndexedDb has thrown '" + IOS_ERROR + "'. This is likely " +
"due to an unavoidable bug in iOS. See https://stackoverflow.com/q/56496296/110915 " +
"for details and a potential workaround.");
if (!reportedIOSError) {
reportedIOSError = true;
// Throw a global exception outside of this promise chain, for the user to
// potentially catch.
setTimeout(function () {
throw newError_1;
}, 0);
}
return newError_1;
}
}
return error;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var IndexedDbTransaction = /** @class */ (function (_super) {
tslib.__extends(IndexedDbTransaction, _super);
function IndexedDbTransaction(simpleDbTransaction, currentSequenceNumber) {
var _this = _super.call(this) || this;
_this.simpleDbTransaction = simpleDbTransaction;
_this.currentSequenceNumber = currentSequenceNumber;
return _this;
}
return IndexedDbTransaction;
}(PersistenceTransaction));
function getStore(txn, store) {
var indexedDbTransaction = debugCast(txn);
return SimpleDb.getStore(indexedDbTransaction.simpleDbTransaction, store);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Generates `nBytes` of random bytes.
*
* If `nBytes < 0`, an error will be thrown.
*/
function randomBytes(nBytes) {
return crypto.randomBytes(nBytes);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var AutoId = /** @class */ (function () {
function AutoId() {
}
AutoId.newId = function () {
// Alphanumeric characters
var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
// The largest byte value that is a multiple of `chars.length`.
var maxMultiple = Math.floor(256 / chars.length) * chars.length;
var autoId = '';
var targetLength = 20;
while (autoId.length < targetLength) {
var bytes = randomBytes(40);
for (var i = 0; i < bytes.length; ++i) {
// Only accept values in [0, maxMultiple); this ensures they can
// be evenly mapped to indices of `chars` via a modulo operation.
if (autoId.length < targetLength && bytes[i] < maxMultiple) {
autoId += chars.charAt(bytes[i] % chars.length);
}
}
}
return autoId;
};
return AutoId;
}());
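/**
 * Illustrative usage sketch (not part of the original bundle): AutoId.newId()
 * yields a 20-character alphanumeric ID. Rejecting byte values outside
 * [0, maxMultiple) avoids modulo bias when mapping bytes onto `chars`.
 */
function exampleAutoIdUsage() {
var id = AutoId.newId();
logDebug('ExampleAutoId', 'generated', id, 'of length', id.length);
return id;
}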
function primitiveComparator(left, right) {
if (left < right) {
return -1;
}
if (left > right) {
return 1;
}
return 0;
}
/** Helper to compare arrays using isEqual(). */
function arrayEquals(left, right, comparator) {
if (left.length !== right.length) {
return false;
}
return left.every(function (value, index) { return comparator(value, right[index]); });
}
/**
* Returns the immediate lexicographically-following string. This is useful to
* construct an inclusive range for indexeddb iterators.
*/
function immediateSuccessor(s) {
// Return the input string, with an additional NUL byte appended.
return s + '\0';
}
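/**
 * Illustrative usage sketch (not part of the original bundle): the comparison
 * helpers above compose into array comparisons, and immediateSuccessor()
 * returns the next string in lexicographic order, i.e. an exclusive upper
 * bound that still admits the original key.
 */
function exampleComparatorHelpers() {
var equal = arrayEquals([1, 2, 3], [1, 2, 3], function (l, r) { return primitiveComparator(l, r) === 0; });
var successor = immediateSuccessor('users/alice');
logDebug('ExampleComparators', 'arrays equal:', equal, 'successor sorts after original:', 'users/alice' < successor);
return successor;
}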
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The earliest date supported by Firestore timestamps (0001-01-01T00:00:00Z).
var MIN_SECONDS = -62135596800;
// Number of nanoseconds in a millisecond.
var MS_TO_NANOS = 1e6;
/**
* A `Timestamp` represents a point in time independent of any time zone or
* calendar, represented as seconds and fractions of seconds at nanosecond
* resolution in UTC Epoch time.
*
* It is encoded using the Proleptic Gregorian Calendar which extends the
* Gregorian calendar backwards to year one. It is encoded assuming all minutes
* are 60 seconds long, i.e. leap seconds are "smeared" so that no leap second
* table is needed for interpretation. Range is from 0001-01-01T00:00:00Z to
* 9999-12-31T23:59:59.999999999Z.
*
* For examples and further specifications, refer to the
* {@link https://github.com/google/protobuf/blob/master/src/google/protobuf/timestamp.proto | Timestamp definition}.
*/
var Timestamp = /** @class */ (function () {
/**
* Creates a new timestamp.
*
* @param seconds - The number of seconds of UTC time since Unix epoch
* 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
* 9999-12-31T23:59:59Z inclusive.
* @param nanoseconds - The non-negative fractions of a second at nanosecond
* resolution. Negative second values with fractions must still have
* non-negative nanoseconds values that count forward in time. Must be
* from 0 to 999,999,999 inclusive.
*/
function Timestamp(
/**
* The number of seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z.
*/
seconds,
/**
* The fractions of a second at nanosecond resolution.
*/
nanoseconds) {
this.seconds = seconds;
this.nanoseconds = nanoseconds;
if (nanoseconds < 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp nanoseconds out of range: ' + nanoseconds);
}
if (nanoseconds >= 1e9) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp nanoseconds out of range: ' + nanoseconds);
}
if (seconds < MIN_SECONDS) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp seconds out of range: ' + seconds);
}
// This will break in the year 10,000.
if (seconds >= 253402300800) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp seconds out of range: ' + seconds);
}
}
/**
* Creates a new timestamp with the current date, with millisecond precision.
*
* @returns a new timestamp representing the current date.
*/
Timestamp.now = function () {
return Timestamp.fromMillis(Date.now());
};
/**
* Creates a new timestamp from the given date.
*
* @param date - The date to initialize the `Timestamp` from.
* @returns A new `Timestamp` representing the same point in time as the given
* date.
*/
Timestamp.fromDate = function (date) {
return Timestamp.fromMillis(date.getTime());
};
/**
* Creates a new timestamp from the given number of milliseconds.
*
* @param milliseconds - Number of milliseconds since Unix epoch
* 1970-01-01T00:00:00Z.
* @returns A new `Timestamp` representing the same point in time as the given
* number of milliseconds.
*/
Timestamp.fromMillis = function (milliseconds) {
var seconds = Math.floor(milliseconds / 1000);
var nanos = Math.floor((milliseconds - seconds * 1000) * MS_TO_NANOS);
return new Timestamp(seconds, nanos);
};
/**
* Converts a `Timestamp` to a JavaScript `Date` object. This conversion
* causes a loss of precision since `Date` objects only support millisecond
* precision.
*
* @returns JavaScript `Date` object representing the same point in time as
* this `Timestamp`, with millisecond precision.
*/
Timestamp.prototype.toDate = function () {
return new Date(this.toMillis());
};
/**
* Converts a `Timestamp` to a numeric timestamp (in milliseconds since
* epoch). This operation causes a loss of precision.
*
* @returns The point in time corresponding to this timestamp, represented as
* the number of milliseconds since Unix epoch 1970-01-01T00:00:00Z.
*/
Timestamp.prototype.toMillis = function () {
return this.seconds * 1000 + this.nanoseconds / MS_TO_NANOS;
};
Timestamp.prototype._compareTo = function (other) {
if (this.seconds === other.seconds) {
return primitiveComparator(this.nanoseconds, other.nanoseconds);
}
return primitiveComparator(this.seconds, other.seconds);
};
/**
* Returns true if this `Timestamp` is equal to the provided one.
*
* @param other - The `Timestamp` to compare against.
* @returns true if this `Timestamp` is equal to the provided one.
*/
Timestamp.prototype.isEqual = function (other) {
return (other.seconds === this.seconds && other.nanoseconds === this.nanoseconds);
};
/** Returns a textual representation of this Timestamp. */
Timestamp.prototype.toString = function () {
return ('Timestamp(seconds=' +
this.seconds +
', nanoseconds=' +
this.nanoseconds +
')');
};
/** Returns a JSON-serializable representation of this Timestamp. */
Timestamp.prototype.toJSON = function () {
return { seconds: this.seconds, nanoseconds: this.nanoseconds };
};
/**
* Converts this object to a primitive string, which allows Timestamp objects
* to be compared using the `<`, `<=`, `>=` and `>` operators.
*/
Timestamp.prototype.valueOf = function () {
// This method returns a string of the form <seconds>.<nanoseconds>, where
// <seconds> is translated to have a non-negative value and both <seconds>
// and <nanoseconds> are left-padded with zeroes to be a consistent length.
// Strings with this format then have a lexicographical ordering that matches
// the expected ordering. The translation is done to avoid having
// a leading negative sign (i.e. a leading '-' character) in its string
// representation, which would affect its lexicographical ordering.
var adjustedSeconds = this.seconds - MIN_SECONDS;
// Note: Up to 12 decimal digits are required to represent all valid
// 'seconds' values.
var formattedSeconds = String(adjustedSeconds).padStart(12, '0');
var formattedNanoseconds = String(this.nanoseconds).padStart(9, '0');
return formattedSeconds + '.' + formattedNanoseconds;
};
return Timestamp;
}());
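/**
 * Illustrative usage sketch (not part of the original bundle): constructing
 * and comparing Timestamps. valueOf() makes relational operators follow
 * chronological order; _compareTo() is the internal comparator.
 */
function exampleTimestampUsage() {
var t1 = Timestamp.fromMillis(1500);
var t2 = Timestamp.fromDate(new Date(2000));
logDebug('ExampleTimestamp', t1.toString(), 'earlier than', t2.toString(), ':', t1 < t2, t1._compareTo(t2) < 0);
return t2;
}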
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A version of a document in Firestore. This corresponds to the version
* timestamp, such as update_time or read_time.
*/
var SnapshotVersion = /** @class */ (function () {
function SnapshotVersion(timestamp) {
this.timestamp = timestamp;
}
SnapshotVersion.fromTimestamp = function (value) {
return new SnapshotVersion(value);
};
SnapshotVersion.min = function () {
return new SnapshotVersion(new Timestamp(0, 0));
};
SnapshotVersion.prototype.compareTo = function (other) {
return this.timestamp._compareTo(other.timestamp);
};
SnapshotVersion.prototype.isEqual = function (other) {
return this.timestamp.isEqual(other.timestamp);
};
/** Returns a number representation of the version for use in spec tests. */
SnapshotVersion.prototype.toMicroseconds = function () {
// Convert to microseconds.
return this.timestamp.seconds * 1e6 + this.timestamp.nanoseconds / 1000;
};
SnapshotVersion.prototype.toString = function () {
return 'SnapshotVersion(' + this.timestamp.toString() + ')';
};
SnapshotVersion.prototype.toTimestamp = function () {
return this.timestamp;
};
return SnapshotVersion;
}());
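/**
 * Illustrative usage sketch (not part of the original bundle): SnapshotVersion
 * is a thin wrapper around Timestamp, ordered by the wrapped timestamp.
 */
function exampleSnapshotVersionUsage() {
var v1 = SnapshotVersion.fromTimestamp(new Timestamp(10, 0));
var oldest = SnapshotVersion.min();
// compareTo() behaves like a standard comparator (negative/zero/positive).
logDebug('ExampleSnapshotVersion', 'min <= v1:', oldest.compareTo(v1) <= 0, 'micros:', v1.toMicroseconds());
return v1;
}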
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function objectSize(obj) {
var count = 0;
for (var key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) {
count++;
}
}
return count;
}
function forEach(obj, fn) {
for (var key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) {
fn(key, obj[key]);
}
}
}
function isEmpty(obj) {
for (var key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) {
return false;
}
}
return true;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides a set of fields that can be used to partially patch a document.
* FieldMask is used in conjunction with ObjectValue.
* Examples:
* foo - Overwrites foo entirely with the provided value. If foo is not
* present in the companion ObjectValue, the field is deleted.
* foo.bar - Overwrites only the field bar of the object foo.
* If foo is not an object, foo is replaced with an object
* containing bar
*/
var FieldMask = /** @class */ (function () {
function FieldMask(fields) {
this.fields = fields;
// TODO(dimond): validation of FieldMask
// Sort the field mask to support `FieldMask.isEqual()` and assert below.
fields.sort(FieldPath$1.comparator);
}
/**
* Verifies that `fieldPath` is included by at least one field in this field
* mask.
*
* This is an O(n) operation, where `n` is the size of the field mask.
*/
FieldMask.prototype.covers = function (fieldPath) {
for (var _i = 0, _d = this.fields; _i < _d.length; _i++) {
var fieldMaskPath = _d[_i];
if (fieldMaskPath.isPrefixOf(fieldPath)) {
return true;
}
}
return false;
};
FieldMask.prototype.isEqual = function (other) {
return arrayEquals(this.fields, other.fields, function (l, r) { return l.isEqual(r); });
};
return FieldMask;
}());
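/**
 * Illustrative usage sketch (not part of the original bundle), assuming the
 * internal FieldPath$1 constructor accepts an array of segments as it does
 * elsewhere in this bundle: covers() matches a field or any of its children.
 */
function exampleFieldMaskCovers() {
var mask = new FieldMask([new FieldPath$1(['foo']), new FieldPath$1(['bar', 'baz'])]);
// 'foo.qux' is covered because 'foo' is a prefix of it; 'bar' alone is not.
return mask.covers(new FieldPath$1(['foo', 'qux'])) && !mask.covers(new FieldPath$1(['bar']));
}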
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function decodeBase64(encoded) {
// Node does not validate Base64 strings, so run a quick sanity check first.
// This check is not a fool-proof validation.
if (/[^-A-Za-z0-9+/=]/.test(encoded)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Not a valid Base64 string: ' + encoded);
}
return Buffer.from(encoded, 'base64').toString('binary');
}
/** Converts a binary string to a Base64 encoded string. */
function encodeBase64(raw) {
return Buffer.from(raw, 'binary').toString('base64');
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Immutable class that represents a "proto" byte string.
*
* Proto byte strings can either be Base64-encoded strings or Uint8Arrays when
* sent on the wire. This class abstracts away this differentiation by holding
* the proto byte string in a common class that must be converted into a string
* before being sent as a proto.
*/
var ByteString = /** @class */ (function () {
function ByteString(binaryString) {
this.binaryString = binaryString;
}
ByteString.fromBase64String = function (base64) {
var binaryString = decodeBase64(base64);
return new ByteString(binaryString);
};
ByteString.fromUint8Array = function (array) {
var binaryString = binaryStringFromUint8Array(array);
return new ByteString(binaryString);
};
ByteString.prototype.toBase64 = function () {
return encodeBase64(this.binaryString);
};
ByteString.prototype.toUint8Array = function () {
return uint8ArrayFromBinaryString(this.binaryString);
};
ByteString.prototype.approximateByteSize = function () {
return this.binaryString.length * 2;
};
ByteString.prototype.compareTo = function (other) {
return primitiveComparator(this.binaryString, other.binaryString);
};
ByteString.prototype.isEqual = function (other) {
return this.binaryString === other.binaryString;
};
return ByteString;
}());
ByteString.EMPTY_BYTE_STRING = new ByteString('');
/**
* Helper function to convert an Uint8array to a binary string.
*/
function binaryStringFromUint8Array(array) {
var binaryString = '';
for (var i = 0; i < array.length; ++i) {
binaryString += String.fromCharCode(array[i]);
}
return binaryString;
}
/**
* Helper function to convert a binary string to an Uint8Array.
*/
function uint8ArrayFromBinaryString(binaryString) {
var buffer = new Uint8Array(binaryString.length);
for (var i = 0; i < binaryString.length; i++) {
buffer[i] = binaryString.charCodeAt(i);
}
return buffer;
}
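/**
 * Illustrative usage sketch (not part of the original bundle): round-tripping
 * bytes through ByteString's Base64 and Uint8Array views.
 */
function exampleByteStringRoundTrip() {
var fromBytes = ByteString.fromUint8Array(new Uint8Array([104, 105]));
var fromBase64 = ByteString.fromBase64String(fromBytes.toBase64());
logDebug('ExampleByteString', 'round-trip equal:', fromBytes.isEqual(fromBase64), 'approx size:', fromBytes.approximateByteSize());
return fromBase64.toUint8Array();
}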
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// A RegExp matching ISO 8601 UTC timestamps with optional fraction.
var ISO_TIMESTAMP_REG_EXP = new RegExp(/^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.(\d+))?Z$/);
/**
* Converts the possible Proto values for a timestamp value into a "seconds and
* nanos" representation.
*/
function normalizeTimestamp(date) {
hardAssert(!!date);
// The json interface (for the browser) will return an iso timestamp string,
// while the proto js library (for node) will return a
// google.protobuf.Timestamp instance.
if (typeof date === 'string') {
// The date string can have higher precision (nanos) than the Date class
// (millis), so we do some custom parsing here.
// Parse the nanos right out of the string.
var nanos = 0;
var fraction = ISO_TIMESTAMP_REG_EXP.exec(date);
hardAssert(!!fraction);
if (fraction[1]) {
// Pad the fraction out to 9 digits (nanos).
var nanoStr = fraction[1];
nanoStr = (nanoStr + '000000000').substr(0, 9);
nanos = Number(nanoStr);
}
// Parse the date to get the seconds.
var parsedDate = new Date(date);
var seconds = Math.floor(parsedDate.getTime() / 1000);
return { seconds: seconds, nanos: nanos };
}
else {
// TODO(b/37282237): Use strings for Proto3 timestamps
// assert(!this.options.useProto3Json,
// 'The timestamp instance format requires Proto JS.');
var seconds = normalizeNumber(date.seconds);
var nanos = normalizeNumber(date.nanos);
return { seconds: seconds, nanos: nanos };
}
}
/**
* Converts the possible Proto types for numbers into a JavaScript number.
* Returns 0 if the value is not numeric.
*/
function normalizeNumber(value) {
// TODO(bjornick): Handle int64 greater than 53 bits.
if (typeof value === 'number') {
return value;
}
else if (typeof value === 'string') {
return Number(value);
}
else {
return 0;
}
}
/** Converts the possible Proto types for Blobs into a ByteString. */
function normalizeByteString(blob) {
if (typeof blob === 'string') {
return ByteString.fromBase64String(blob);
}
else {
return ByteString.fromUint8Array(blob);
}
}
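/**
 * Illustrative usage sketch (not part of the original bundle):
 * normalizeTimestamp() accepts either an ISO 8601 string (JSON protos) or a
 * {seconds, nanos} object (protobuf.js protos) and always returns
 * {seconds, nanos}.
 */
function exampleNormalizeTimestamp() {
var fromString = normalizeTimestamp('1970-01-01T00:00:01.000000500Z');
var fromObject = normalizeTimestamp({ seconds: '1', nanos: 500 });
// Both normalize to seconds=1, nanos=500.
logDebug('ExampleNormalize', fromString.seconds, fromString.nanos, fromObject.seconds, fromObject.nanos);
return fromString;
}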
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a locally-applied ServerTimestamp.
*
* Server Timestamps are backed by MapValues that contain an internal field
* `__type__` with a value of `server_timestamp`. The previous value and local
* write time are stored in its `__previous_value__` and `__local_write_time__`
* fields respectively.
*
* Notes:
* - ServerTimestampValue instances are created as the result of applying a
* transform. They can only exist in the local view of a document. Therefore
* they do not need to be parsed or serialized.
* - When evaluated locally (e.g. for snapshot.data()), they by default
* evaluate to `null`. This behavior can be configured by passing custom
* FieldValueOptions to value().
* - With respect to other ServerTimestampValues, they sort by their
* localWriteTime.
*/
var SERVER_TIMESTAMP_SENTINEL = 'server_timestamp';
var TYPE_KEY = '__type__';
var PREVIOUS_VALUE_KEY = '__previous_value__';
var LOCAL_WRITE_TIME_KEY = '__local_write_time__';
function isServerTimestamp(value) {
var _a, _b;
var type = (_b = (((_a = value === null || value === void 0 ? void 0 : value.mapValue) === null || _a === void 0 ? void 0 : _a.fields) || {})[TYPE_KEY]) === null || _b === void 0 ? void 0 : _b.stringValue;
return type === SERVER_TIMESTAMP_SENTINEL;
}
/**
* Creates a new ServerTimestamp proto value (using the internal format).
*/
function serverTimestamp$1(localWriteTime, previousValue) {
var _d;
var mapValue = {
fields: (_d = {},
_d[TYPE_KEY] = {
stringValue: SERVER_TIMESTAMP_SENTINEL
},
_d[LOCAL_WRITE_TIME_KEY] = {
timestampValue: {
seconds: localWriteTime.seconds,
nanos: localWriteTime.nanoseconds
}
},
_d)
};
if (previousValue) {
mapValue.fields[PREVIOUS_VALUE_KEY] = previousValue;
}
return { mapValue: mapValue };
}
/**
* Returns the value of the field before this ServerTimestamp was set.
*
* Preserving the previous values allows the user to display the last resolved
* value until the backend responds with the timestamp.
*/
function getPreviousValue(value) {
var previousValue = value.mapValue.fields[PREVIOUS_VALUE_KEY];
if (isServerTimestamp(previousValue)) {
return getPreviousValue(previousValue);
}
return previousValue;
}
/**
* Returns the local time at which this timestamp was first set.
*/
function getLocalWriteTime(value) {
var localWriteTime = normalizeTimestamp(value.mapValue.fields[LOCAL_WRITE_TIME_KEY].timestampValue);
return new Timestamp(localWriteTime.seconds, localWriteTime.nanos);
}
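/**
 * Illustrative usage sketch (not part of the original bundle): building a
 * server-timestamp sentinel locally and reading its parts back.
 */
function exampleServerTimestampSentinel() {
var sentinel = serverTimestamp$1(Timestamp.now(), { stringValue: 'before' });
logDebug('ExampleServerTimestamp', 'is sentinel:', isServerTimestamp(sentinel), 'local write time:', getLocalWriteTime(sentinel).toString(), 'previous:', getPreviousValue(sentinel).stringValue);
return sentinel;
}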
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Sentinel value that sorts before any Mutation Batch ID. */
var BATCHID_UNKNOWN = -1;
/**
* Returns whether a variable is either undefined or null.
*/
function isNullOrUndefined(value) {
return value === null || value === undefined;
}
/** Returns whether the value represents -0. */
function isNegativeZero(value) {
// Detect if the value is -0.0. Based on polyfill from
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is
return value === 0 && 1 / value === 1 / -0;
}
/**
* Returns whether a value is an integer and in the safe integer range
* @param value - The value to test for being an integer and in the safe range
*/
function isSafeInteger(value) {
return (typeof value === 'number' &&
Number.isInteger(value) &&
!isNegativeZero(value) &&
value <= Number.MAX_SAFE_INTEGER &&
value >= Number.MIN_SAFE_INTEGER);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var DocumentKey = /** @class */ (function () {
function DocumentKey(path) {
this.path = path;
}
DocumentKey.fromPath = function (path) {
return new DocumentKey(ResourcePath.fromString(path));
};
DocumentKey.fromName = function (name) {
return new DocumentKey(ResourcePath.fromString(name).popFirst(5));
};
/** Returns true if the document is in the specified collectionId. */
DocumentKey.prototype.hasCollectionId = function (collectionId) {
return (this.path.length >= 2 &&
this.path.get(this.path.length - 2) === collectionId);
};
DocumentKey.prototype.isEqual = function (other) {
return (other !== null && ResourcePath.comparator(this.path, other.path) === 0);
};
DocumentKey.prototype.toString = function () {
return this.path.toString();
};
DocumentKey.comparator = function (k1, k2) {
return ResourcePath.comparator(k1.path, k2.path);
};
DocumentKey.isDocumentKey = function (path) {
return path.length % 2 === 0;
};
/**
* Creates and returns a new document key with the given segments.
*
* @param segments - The segments of the path to the document
* @returns A new instance of DocumentKey
*/
DocumentKey.fromSegments = function (segments) {
return new DocumentKey(new ResourcePath(segments.slice()));
};
return DocumentKey;
}());
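/**
 * Illustrative usage sketch (not part of the original bundle): document keys
 * are resource paths with an even number of segments.
 */
function exampleDocumentKeyUsage() {
var key = DocumentKey.fromPath('rooms/eros/messages/1');
logDebug('ExampleDocumentKey', 'in messages collection:', key.hasCollectionId('messages'), 'is document key:', DocumentKey.isDocumentKey(key.path), 'canonical:', key.toString());
return key;
}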
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Extracts the backend's type order for the provided value. */
function typeOrder(value) {
if ('nullValue' in value) {
return 0 /* NullValue */;
}
else if ('booleanValue' in value) {
return 1 /* BooleanValue */;
}
else if ('integerValue' in value || 'doubleValue' in value) {
return 2 /* NumberValue */;
}
else if ('timestampValue' in value) {
return 3 /* TimestampValue */;
}
else if ('stringValue' in value) {
return 5 /* StringValue */;
}
else if ('bytesValue' in value) {
return 6 /* BlobValue */;
}
else if ('referenceValue' in value) {
return 7 /* RefValue */;
}
else if ('geoPointValue' in value) {
return 8 /* GeoPointValue */;
}
else if ('arrayValue' in value) {
return 9 /* ArrayValue */;
}
else if ('mapValue' in value) {
if (isServerTimestamp(value)) {
return 4 /* ServerTimestampValue */;
}
return 10 /* ObjectValue */;
}
else {
return fail();
}
}
/** Tests `left` and `right` for equality based on the backend semantics. */
function valueEquals(left, right) {
var leftType = typeOrder(left);
var rightType = typeOrder(right);
if (leftType !== rightType) {
return false;
}
switch (leftType) {
case 0 /* NullValue */:
return true;
case 1 /* BooleanValue */:
return left.booleanValue === right.booleanValue;
case 4 /* ServerTimestampValue */:
return getLocalWriteTime(left).isEqual(getLocalWriteTime(right));
case 3 /* TimestampValue */:
return timestampEquals(left, right);
case 5 /* StringValue */:
return left.stringValue === right.stringValue;
case 6 /* BlobValue */:
return blobEquals(left, right);
case 7 /* RefValue */:
return left.referenceValue === right.referenceValue;
case 8 /* GeoPointValue */:
return geoPointEquals(left, right);
case 2 /* NumberValue */:
return numberEquals(left, right);
case 9 /* ArrayValue */:
return arrayEquals(left.arrayValue.values || [], right.arrayValue.values || [], valueEquals);
case 10 /* ObjectValue */:
return objectEquals(left, right);
default:
return fail();
}
}
function timestampEquals(left, right) {
if (typeof left.timestampValue === 'string' &&
typeof right.timestampValue === 'string' &&
left.timestampValue.length === right.timestampValue.length) {
// Use string equality for ISO 8601 timestamps
return left.timestampValue === right.timestampValue;
}
var leftTimestamp = normalizeTimestamp(left.timestampValue);
var rightTimestamp = normalizeTimestamp(right.timestampValue);
return (leftTimestamp.seconds === rightTimestamp.seconds &&
leftTimestamp.nanos === rightTimestamp.nanos);
}
function geoPointEquals(left, right) {
return (normalizeNumber(left.geoPointValue.latitude) ===
normalizeNumber(right.geoPointValue.latitude) &&
normalizeNumber(left.geoPointValue.longitude) ===
normalizeNumber(right.geoPointValue.longitude));
}
function blobEquals(left, right) {
return normalizeByteString(left.bytesValue).isEqual(normalizeByteString(right.bytesValue));
}
function numberEquals(left, right) {
if ('integerValue' in left && 'integerValue' in right) {
return (normalizeNumber(left.integerValue) === normalizeNumber(right.integerValue));
}
else if ('doubleValue' in left && 'doubleValue' in right) {
var n1 = normalizeNumber(left.doubleValue);
var n2 = normalizeNumber(right.doubleValue);
if (n1 === n2) {
return isNegativeZero(n1) === isNegativeZero(n2);
}
else {
return isNaN(n1) && isNaN(n2);
}
}
return false;
}
function objectEquals(left, right) {
var leftMap = left.mapValue.fields || {};
var rightMap = right.mapValue.fields || {};
if (objectSize(leftMap) !== objectSize(rightMap)) {
return false;
}
for (var key in leftMap) {
if (leftMap.hasOwnProperty(key)) {
if (rightMap[key] === undefined ||
!valueEquals(leftMap[key], rightMap[key])) {
return false;
}
}
}
return true;
}
/** Returns true if the ArrayValue contains the specified element. */
function arrayValueContains(haystack, needle) {
return ((haystack.values || []).find(function (v) { return valueEquals(v, needle); }) !== undefined);
}
function valueCompare(left, right) {
var leftType = typeOrder(left);
var rightType = typeOrder(right);
if (leftType !== rightType) {
return primitiveComparator(leftType, rightType);
}
switch (leftType) {
case 0 /* NullValue */:
return 0;
case 1 /* BooleanValue */:
return primitiveComparator(left.booleanValue, right.booleanValue);
case 2 /* NumberValue */:
return compareNumbers(left, right);
case 3 /* TimestampValue */:
return compareTimestamps(left.timestampValue, right.timestampValue);
case 4 /* ServerTimestampValue */:
return compareTimestamps(getLocalWriteTime(left), getLocalWriteTime(right));
case 5 /* StringValue */:
return primitiveComparator(left.stringValue, right.stringValue);
case 6 /* BlobValue */:
return compareBlobs(left.bytesValue, right.bytesValue);
case 7 /* RefValue */:
return compareReferences(left.referenceValue, right.referenceValue);
case 8 /* GeoPointValue */:
return compareGeoPoints(left.geoPointValue, right.geoPointValue);
case 9 /* ArrayValue */:
return compareArrays(left.arrayValue, right.arrayValue);
case 10 /* ObjectValue */:
return compareMaps(left.mapValue, right.mapValue);
default:
throw fail();
}
}
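/**
 * Illustrative usage sketch (not part of the original bundle): valueCompare()
 * orders proto values first by type order, then within the type, so it can be
 * used directly as an Array.prototype.sort comparator.
 */
function exampleValueCompare() {
var values = [
{ stringValue: 'b' },
{ integerValue: '2' },
{ booleanValue: true },
{ doubleValue: 1.5 }
];
// Sorts to: true, 1.5, 2, 'b' (integers and doubles compare numerically).
return values.sort(valueCompare);
}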
function compareNumbers(left, right) {
var leftNumber = normalizeNumber(left.integerValue || left.doubleValue);
var rightNumber = normalizeNumber(right.integerValue || right.doubleValue);
if (leftNumber < rightNumber) {
return -1;
}
else if (leftNumber > rightNumber) {
return 1;
}
else if (leftNumber === rightNumber) {
return 0;
}
else {
// one or both are NaN.
if (isNaN(leftNumber)) {
return isNaN(rightNumber) ? 0 : -1;
}
else {
return 1;
}
}
}
function compareTimestamps(left, right) {
if (typeof left === 'string' &&
typeof right === 'string' &&
left.length === right.length) {
return primitiveComparator(left, right);
}
var leftTimestamp = normalizeTimestamp(left);
var rightTimestamp = normalizeTimestamp(right);
var comparison = primitiveComparator(leftTimestamp.seconds, rightTimestamp.seconds);
if (comparison !== 0) {
return comparison;
}
return primitiveComparator(leftTimestamp.nanos, rightTimestamp.nanos);
}
function compareReferences(leftPath, rightPath) {
var leftSegments = leftPath.split('/');
var rightSegments = rightPath.split('/');
for (var i = 0; i < leftSegments.length && i < rightSegments.length; i++) {
var comparison = primitiveComparator(leftSegments[i], rightSegments[i]);
if (comparison !== 0) {
return comparison;
}
}
return primitiveComparator(leftSegments.length, rightSegments.length);
}
function compareGeoPoints(left, right) {
var comparison = primitiveComparator(normalizeNumber(left.latitude), normalizeNumber(right.latitude));
if (comparison !== 0) {
return comparison;
}
return primitiveComparator(normalizeNumber(left.longitude), normalizeNumber(right.longitude));
}
function compareBlobs(left, right) {
var leftBytes = normalizeByteString(left);
var rightBytes = normalizeByteString(right);
return leftBytes.compareTo(rightBytes);
}
function compareArrays(left, right) {
var leftArray = left.values || [];
var rightArray = right.values || [];
for (var i = 0; i < leftArray.length && i < rightArray.length; ++i) {
var compare = valueCompare(leftArray[i], rightArray[i]);
if (compare) {
return compare;
}
}
return primitiveComparator(leftArray.length, rightArray.length);
}
function compareMaps(left, right) {
var leftMap = left.fields || {};
var leftKeys = Object.keys(leftMap);
var rightMap = right.fields || {};
var rightKeys = Object.keys(rightMap);
// Even though MapValues are likely sorted correctly based on their insertion
// order (e.g. when received from the backend), local modifications can bring
// elements out of order. We need to re-sort the elements to ensure that
// canonical IDs are independent of insertion order.
leftKeys.sort();
rightKeys.sort();
for (var i = 0; i < leftKeys.length && i < rightKeys.length; ++i) {
var keyCompare = primitiveComparator(leftKeys[i], rightKeys[i]);
if (keyCompare !== 0) {
return keyCompare;
}
var compare = valueCompare(leftMap[leftKeys[i]], rightMap[rightKeys[i]]);
if (compare !== 0) {
return compare;
}
}
return primitiveComparator(leftKeys.length, rightKeys.length);
}
/**
* Generates the canonical ID for the provided field value (as used in Target
* serialization).
*/
function canonicalId(value) {
return canonifyValue(value);
}
function canonifyValue(value) {
if ('nullValue' in value) {
return 'null';
}
else if ('booleanValue' in value) {
return '' + value.booleanValue;
}
else if ('integerValue' in value) {
return '' + value.integerValue;
}
else if ('doubleValue' in value) {
return '' + value.doubleValue;
}
else if ('timestampValue' in value) {
return canonifyTimestamp(value.timestampValue);
}
else if ('stringValue' in value) {
return value.stringValue;
}
else if ('bytesValue' in value) {
return canonifyByteString(value.bytesValue);
}
else if ('referenceValue' in value) {
return canonifyReference(value.referenceValue);
}
else if ('geoPointValue' in value) {
return canonifyGeoPoint(value.geoPointValue);
}
else if ('arrayValue' in value) {
return canonifyArray(value.arrayValue);
}
else if ('mapValue' in value) {
return canonifyMap(value.mapValue);
}
else {
return fail();
}
}
function canonifyByteString(byteString) {
return normalizeByteString(byteString).toBase64();
}
function canonifyTimestamp(timestamp) {
var normalizedTimestamp = normalizeTimestamp(timestamp);
return "time(" + normalizedTimestamp.seconds + "," + normalizedTimestamp.nanos + ")";
}
function canonifyGeoPoint(geoPoint) {
return "geo(" + geoPoint.latitude + "," + geoPoint.longitude + ")";
}
function canonifyReference(referenceValue) {
return DocumentKey.fromName(referenceValue).toString();
}
function canonifyMap(mapValue) {
// Iteration order in JavaScript is not guaranteed. To ensure that we generate
// matching canonical IDs for identical maps, we need to sort the keys.
var sortedKeys = Object.keys(mapValue.fields || {}).sort();
var result = '{';
var first = true;
for (var _i = 0, sortedKeys_1 = sortedKeys; _i < sortedKeys_1.length; _i++) {
var key = sortedKeys_1[_i];
if (!first) {
result += ',';
}
else {
first = false;
}
result += key + ":" + canonifyValue(mapValue.fields[key]);
}
return result + '}';
}
function canonifyArray(arrayValue) {
var result = '[';
var first = true;
for (var _i = 0, _d = arrayValue.values || []; _i < _d.length; _i++) {
var value = _d[_i];
if (!first) {
result += ',';
}
else {
first = false;
}
result += canonifyValue(value);
}
return result + ']';
}
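// Editor's illustrative sketch (not part of the SDK and never invoked): the
// canonical ID of a map sorts its keys, so two maps with the same entries in a
// different insertion order produce the same ID.
function exampleCanonicalId() {
    return canonicalId({
        mapValue: {
            fields: {
                b: { arrayValue: { values: [{ integerValue: '1' }, { nullValue: 'NULL_VALUE' }] } },
                a: { stringValue: 'x' }
            }
        }
    }); // expected: '{a:x,b:[1,null]}'
}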
/** Returns a reference value for the provided database and key. */
function refValue(databaseId, key) {
return {
referenceValue: "projects/" + databaseId.projectId + "/databases/" + databaseId.database + "/documents/" + key.path.canonicalString()
};
}
/** Returns true if `value` is an IntegerValue. */
function isInteger(value) {
return !!value && 'integerValue' in value;
}
/** Returns true if `value` is a DoubleValue. */
function isDouble(value) {
return !!value && 'doubleValue' in value;
}
/** Returns true if `value` is either an IntegerValue or a DoubleValue. */
function isNumber(value) {
return isInteger(value) || isDouble(value);
}
/** Returns true if `value` is an ArrayValue. */
function isArray(value) {
return !!value && 'arrayValue' in value;
}
/** Returns true if `value` is a NullValue. */
function isNullValue(value) {
return !!value && 'nullValue' in value;
}
/** Returns true if `value` is NaN. */
function isNanValue(value) {
return !!value && 'doubleValue' in value && isNaN(Number(value.doubleValue));
}
/** Returns true if `value` is a MapValue. */
function isMapValue(value) {
return !!value && 'mapValue' in value;
}
/** Creates a deep copy of `source`. */
function deepClone(source) {
if (source.geoPointValue) {
return { geoPointValue: Object.assign({}, source.geoPointValue) };
}
else if (source.timestampValue &&
typeof source.timestampValue === 'object') {
return { timestampValue: Object.assign({}, source.timestampValue) };
}
else if (source.mapValue) {
var target_1 = { mapValue: { fields: {} } };
forEach(source.mapValue.fields, function (key, val) { return (target_1.mapValue.fields[key] = deepClone(val)); });
return target_1;
}
else if (source.arrayValue) {
var target = { arrayValue: { values: [] } };
for (var i = 0; i < (source.arrayValue.values || []).length; ++i) {
target.arrayValue.values[i] = deepClone(source.arrayValue.values[i]);
}
return target;
}
else {
return Object.assign({}, source);
}
}
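// Editor's illustrative sketch (not part of the SDK and never invoked):
// deepClone copies nested maps and arrays, so mutating the clone leaves the
// source value untouched.
function exampleDeepClone() {
    var source = {
        mapValue: { fields: { tags: { arrayValue: { values: [{ stringValue: 'a' }] } } } }
    };
    var copy = deepClone(source);
    copy.mapValue.fields.tags.arrayValue.values.push({ stringValue: 'b' });
    return source.mapValue.fields.tags.arrayValue.values.length; // expected: 1
}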
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An ObjectValue represents a MapValue in the Firestore Proto and offers the
* ability to add and remove fields (via `set()`, `setAll()` and `delete()`).
*/
var ObjectValue = /** @class */ (function () {
function ObjectValue(value) {
this.value = value;
}
ObjectValue.empty = function () {
return new ObjectValue({ mapValue: {} });
};
/**
* Returns the value at the given path or null.
*
* @param path - the path to search
* @returns The value at the path or null if the path is not set.
*/
ObjectValue.prototype.field = function (path) {
if (path.isEmpty()) {
return this.value;
}
else {
var currentLevel = this.value;
for (var i = 0; i < path.length - 1; ++i) {
currentLevel = (currentLevel.mapValue.fields || {})[path.get(i)];
if (!isMapValue(currentLevel)) {
return null;
}
}
currentLevel = (currentLevel.mapValue.fields || {})[path.lastSegment()];
return currentLevel || null;
}
};
/**
* Sets the field to the provided value.
*
* @param path - The field path to set.
* @param value - The value to set.
*/
ObjectValue.prototype.set = function (path, value) {
var fieldsMap = this.getFieldsMap(path.popLast());
fieldsMap[path.lastSegment()] = deepClone(value);
};
/**
* Sets the provided fields to the provided values.
*
* @param data - A map of fields to values (or null for deletes).
*/
ObjectValue.prototype.setAll = function (data) {
var _this = this;
var parent = FieldPath$1.emptyPath();
var upserts = {};
var deletes = [];
data.forEach(function (value, path) {
if (!parent.isImmediateParentOf(path)) {
// Insert the accumulated changes at this parent location
var fieldsMap_1 = _this.getFieldsMap(parent);
_this.applyChanges(fieldsMap_1, upserts, deletes);
upserts = {};
deletes = [];
parent = path.popLast();
}
if (value) {
upserts[path.lastSegment()] = deepClone(value);
}
else {
deletes.push(path.lastSegment());
}
});
var fieldsMap = this.getFieldsMap(parent);
this.applyChanges(fieldsMap, upserts, deletes);
};
/**
* Removes the field at the specified path. If there is no field at the
* specified path, nothing is changed.
*
* @param path - The field path to remove.
*/
ObjectValue.prototype.delete = function (path) {
var nestedValue = this.field(path.popLast());
if (isMapValue(nestedValue) && nestedValue.mapValue.fields) {
delete nestedValue.mapValue.fields[path.lastSegment()];
}
};
ObjectValue.prototype.isEqual = function (other) {
return valueEquals(this.value, other.value);
};
/**
* Returns the map that contains the leaf element of `path`. If the parent
* entry does not yet exist, or if it is not a map, a new map will be created.
*/
ObjectValue.prototype.getFieldsMap = function (path) {
var current = this.value;
if (!current.mapValue.fields) {
current.mapValue = { fields: {} };
}
for (var i = 0; i < path.length; ++i) {
var next = current.mapValue.fields[path.get(i)];
if (!isMapValue(next) || !next.mapValue.fields) {
next = { mapValue: { fields: {} } };
current.mapValue.fields[path.get(i)] = next;
}
current = next;
}
return current.mapValue.fields;
};
/**
* Modifies `fieldsMap` by adding, replacing or deleting the specified
* entries.
*/
ObjectValue.prototype.applyChanges = function (fieldsMap, inserts, deletes) {
forEach(inserts, function (key, val) { return (fieldsMap[key] = val); });
for (var _i = 0, deletes_1 = deletes; _i < deletes_1.length; _i++) {
var field = deletes_1[_i];
delete fieldsMap[field];
}
};
ObjectValue.prototype.clone = function () {
return new ObjectValue(deepClone(this.value));
};
return ObjectValue;
}());
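// Editor's illustrative sketch (not part of the SDK and never invoked): setting
// a nested field creates the intermediate maps on demand. FieldPath$1 is the
// internal field-path class defined earlier in this bundle; the segment-array
// constructor used here is inferred from its usage in extractFieldMask below.
function exampleObjectValue() {
    var objectValue = ObjectValue.empty();
    objectValue.set(new FieldPath$1(['a', 'b']), { stringValue: 'hello' });
    objectValue.set(new FieldPath$1(['a', 'c']), { integerValue: '7' });
    objectValue.delete(new FieldPath$1(['a', 'c']));
    return canonicalId(objectValue.field(new FieldPath$1(['a']))); // expected: '{b:hello}'
}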
/**
* Returns a FieldMask built from all fields in a MapValue.
*/
function extractFieldMask(value) {
var fields = [];
forEach(value.fields, function (key, value) {
var currentPath = new FieldPath$1([key]);
if (isMapValue(value)) {
var nestedMask = extractFieldMask(value.mapValue);
var nestedFields = nestedMask.fields;
if (nestedFields.length === 0) {
// Preserve the empty map by adding it to the FieldMask.
fields.push(currentPath);
}
else {
// For nested and non-empty ObjectValues, add the FieldPath of the
// leaf nodes.
for (var _i = 0, nestedFields_1 = nestedFields; _i < nestedFields_1.length; _i++) {
var nestedPath = nestedFields_1[_i];
fields.push(currentPath.child(nestedPath));
}
}
}
else {
// For a non-map leaf value (e.g. a string or number), add the FieldPath of
// the field itself.
fields.push(currentPath);
}
});
return new FieldMask(fields);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a document in Firestore with a key, version, data and whether it
* has local mutations applied to it.
*
* Documents can transition between states via `convertToFoundDocument()`,
* `convertToNoDocument()` and `convertToUnknownDocument()`. If a document does
* not transition to one of these states even after all mutations have been
* applied, `isValidDocument()` returns false and the document should be removed
* from all views.
*/
var MutableDocument = /** @class */ (function () {
function MutableDocument(key, documentType, version, data, documentState) {
this.key = key;
this.documentType = documentType;
this.version = version;
this.data = data;
this.documentState = documentState;
}
/**
* Creates a document with no known version or data, but which can serve as a
* base document for mutations.
*/
MutableDocument.newInvalidDocument = function (documentKey) {
return new MutableDocument(documentKey, 0 /* INVALID */, SnapshotVersion.min(), ObjectValue.empty(), 0 /* SYNCED */);
};
/**
* Creates a new document that is known to exist with the given data at the
* given version.
*/
MutableDocument.newFoundDocument = function (documentKey, version, value) {
return new MutableDocument(documentKey, 1 /* FOUND_DOCUMENT */, version, value, 0 /* SYNCED */);
};
/** Creates a new document that is known to not exist at the given version. */
MutableDocument.newNoDocument = function (documentKey, version) {
return new MutableDocument(documentKey, 2 /* NO_DOCUMENT */, version, ObjectValue.empty(), 0 /* SYNCED */);
};
/**
* Creates a new document that is known to exist at the given version but
* whose data is not known (e.g. a document that was updated without a known
* base document).
*/
MutableDocument.newUnknownDocument = function (documentKey, version) {
return new MutableDocument(documentKey, 3 /* UNKNOWN_DOCUMENT */, version, ObjectValue.empty(), 2 /* HAS_COMMITTED_MUTATIONS */);
};
/**
* Changes the document type to indicate that it exists and that its version
* and data are known.
*/
MutableDocument.prototype.convertToFoundDocument = function (version, value) {
this.version = version;
this.documentType = 1 /* FOUND_DOCUMENT */;
this.data = value;
this.documentState = 0 /* SYNCED */;
return this;
};
/**
* Changes the document type to indicate that it doesn't exist at the given
* version.
*/
MutableDocument.prototype.convertToNoDocument = function (version) {
this.version = version;
this.documentType = 2 /* NO_DOCUMENT */;
this.data = ObjectValue.empty();
this.documentState = 0 /* SYNCED */;
return this;
};
/**
* Changes the document type to indicate that it exists at a given version but
* that its data is not known (e.g. a document that was updated without a known
* base document).
*/
MutableDocument.prototype.convertToUnknownDocument = function (version) {
this.version = version;
this.documentType = 3 /* UNKNOWN_DOCUMENT */;
this.data = ObjectValue.empty();
this.documentState = 2 /* HAS_COMMITTED_MUTATIONS */;
return this;
};
MutableDocument.prototype.setHasCommittedMutations = function () {
this.documentState = 2 /* HAS_COMMITTED_MUTATIONS */;
return this;
};
MutableDocument.prototype.setHasLocalMutations = function () {
this.documentState = 1 /* HAS_LOCAL_MUTATIONS */;
return this;
};
Object.defineProperty(MutableDocument.prototype, "hasLocalMutations", {
get: function () {
return this.documentState === 1 /* HAS_LOCAL_MUTATIONS */;
},
enumerable: false,
configurable: true
});
Object.defineProperty(MutableDocument.prototype, "hasCommittedMutations", {
get: function () {
return this.documentState === 2 /* HAS_COMMITTED_MUTATIONS */;
},
enumerable: false,
configurable: true
});
Object.defineProperty(MutableDocument.prototype, "hasPendingWrites", {
get: function () {
return this.hasLocalMutations || this.hasCommittedMutations;
},
enumerable: false,
configurable: true
});
MutableDocument.prototype.isValidDocument = function () {
return this.documentType !== 0 /* INVALID */;
};
MutableDocument.prototype.isFoundDocument = function () {
return this.documentType === 1 /* FOUND_DOCUMENT */;
};
MutableDocument.prototype.isNoDocument = function () {
return this.documentType === 2 /* NO_DOCUMENT */;
};
MutableDocument.prototype.isUnknownDocument = function () {
return this.documentType === 3 /* UNKNOWN_DOCUMENT */;
};
MutableDocument.prototype.isEqual = function (other) {
return (other instanceof MutableDocument &&
this.key.isEqual(other.key) &&
this.version.isEqual(other.version) &&
this.documentType === other.documentType &&
this.documentState === other.documentState &&
this.data.isEqual(other.data));
};
MutableDocument.prototype.clone = function () {
return new MutableDocument(this.key, this.documentType, this.version, this.data.clone(), this.documentState);
};
MutableDocument.prototype.toString = function () {
return ("Document(" + this.key + ", " + this.version + ", " + JSON.stringify(this.data.value) + ", " +
("{documentType: " + this.documentType + "}), ") +
("{documentState: " + this.documentState + "})"));
};
return MutableDocument;
}());
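// Editor's illustrative sketch (not part of the SDK and never invoked): a
// typical lifecycle for a document the local store first learns about and then
// mutates locally. `documentKey`, `remoteVersion` and `remoteData` are assumed
// to be a DocumentKey, a SnapshotVersion and an ObjectValue created elsewhere
// in this module.
function exampleMutableDocumentLifecycle(documentKey, remoteVersion, remoteData) {
    var doc = MutableDocument.newInvalidDocument(documentKey);
    doc.isValidDocument(); // false: neither the data nor the absence of the document is known yet
    doc.convertToFoundDocument(remoteVersion, remoteData).setHasLocalMutations();
    return [doc.isFoundDocument(), doc.hasPendingWrites]; // expected: [true, true]
}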
/**
* Compares the value for field `field` in the provided documents. Throws if
* the field does not exist in both documents.
*/
function compareDocumentsByField(field, d1, d2) {
var v1 = d1.data.field(field);
var v2 = d2.data.field(field);
if (v1 !== null && v2 !== null) {
return valueCompare(v1, v2);
}
else {
return fail();
}
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Visible for testing
var TargetImpl = /** @class */ (function () {
function TargetImpl(path, collectionGroup, orderBy, filters, limit, startAt, endAt) {
if (collectionGroup === void 0) { collectionGroup = null; }
if (orderBy === void 0) { orderBy = []; }
if (filters === void 0) { filters = []; }
if (limit === void 0) { limit = null; }
if (startAt === void 0) { startAt = null; }
if (endAt === void 0) { endAt = null; }
this.path = path;
this.collectionGroup = collectionGroup;
this.orderBy = orderBy;
this.filters = filters;
this.limit = limit;
this.startAt = startAt;
this.endAt = endAt;
this.memoizedCanonicalId = null;
}
return TargetImpl;
}());
/**
* Initializes a Target with a path and optional additional query constraints.
* Path must currently be empty if this is a collection group query.
*
* NOTE: you should always construct `Target` from `Query.toTarget` instead of
* using this factory method, because `Query` provides an implicit `orderBy`
* property.
*/
function newTarget(path, collectionGroup, orderBy, filters, limit, startAt, endAt) {
if (collectionGroup === void 0) { collectionGroup = null; }
if (orderBy === void 0) { orderBy = []; }
if (filters === void 0) { filters = []; }
if (limit === void 0) { limit = null; }
if (startAt === void 0) { startAt = null; }
if (endAt === void 0) { endAt = null; }
return new TargetImpl(path, collectionGroup, orderBy, filters, limit, startAt, endAt);
}
function canonifyTarget(target) {
var targetImpl = debugCast(target);
if (targetImpl.memoizedCanonicalId === null) {
var canonicalId_1 = targetImpl.path.canonicalString();
if (targetImpl.collectionGroup !== null) {
canonicalId_1 += '|cg:' + targetImpl.collectionGroup;
}
canonicalId_1 += '|f:';
canonicalId_1 += targetImpl.filters.map(function (f) { return canonifyFilter(f); }).join(',');
canonicalId_1 += '|ob:';
canonicalId_1 += targetImpl.orderBy.map(function (o) { return canonifyOrderBy(o); }).join(',');
if (!isNullOrUndefined(targetImpl.limit)) {
canonicalId_1 += '|l:';
canonicalId_1 += targetImpl.limit;
}
if (targetImpl.startAt) {
canonicalId_1 += '|lb:';
canonicalId_1 += canonifyBound(targetImpl.startAt);
}
if (targetImpl.endAt) {
canonicalId_1 += '|ub:';
canonicalId_1 += canonifyBound(targetImpl.endAt);
}
targetImpl.memoizedCanonicalId = canonicalId_1;
}
return targetImpl.memoizedCanonicalId;
}
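// Editor's illustrative sketch (not part of the SDK and never invoked): the
// canonical ID of a bare collection-group target. ResourcePath is defined
// earlier in this bundle; an empty path is assumed to canonicalize to ''.
function exampleCanonifyTarget() {
    var target = newTarget(ResourcePath.emptyPath(), 'rooms');
    // The result is memoized on the target after the first call.
    return canonifyTarget(target); // expected: '|cg:rooms|f:|ob:'
}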
function stringifyTarget(target) {
var str = target.path.canonicalString();
if (target.collectionGroup !== null) {
str += ' collectionGroup=' + target.collectionGroup;
}
if (target.filters.length > 0) {
str += ", filters: [" + target.filters
.map(function (f) { return stringifyFilter(f); })
.join(', ') + "]";
}
if (!isNullOrUndefined(target.limit)) {
str += ', limit: ' + target.limit;
}
if (target.orderBy.length > 0) {
str += ", orderBy: [" + target.orderBy
.map(function (o) { return stringifyOrderBy(o); })
.join(', ') + "]";
}
if (target.startAt) {
str += ', startAt: ' + canonifyBound(target.startAt);
}
if (target.endAt) {
str += ', endAt: ' + canonifyBound(target.endAt);
}
return "Target(" + str + ")";
}
function targetEquals(left, right) {
if (left.limit !== right.limit) {
return false;
}
if (left.orderBy.length !== right.orderBy.length) {
return false;
}
for (var i = 0; i < left.orderBy.length; i++) {
if (!orderByEquals(left.orderBy[i], right.orderBy[i])) {
return false;
}
}
if (left.filters.length !== right.filters.length) {
return false;
}
for (var i = 0; i < left.filters.length; i++) {
if (!filterEquals(left.filters[i], right.filters[i])) {
return false;
}
}
if (left.collectionGroup !== right.collectionGroup) {
return false;
}
if (!left.path.isEqual(right.path)) {
return false;
}
if (!boundEquals(left.startAt, right.startAt)) {
return false;
}
return boundEquals(left.endAt, right.endAt);
}
function isDocumentTarget(target) {
return (DocumentKey.isDocumentKey(target.path) &&
target.collectionGroup === null &&
target.filters.length === 0);
}
var Filter = /** @class */ (function () {
function Filter() {
}
return Filter;
}());
var FieldFilter = /** @class */ (function (_super) {
tslib.__extends(FieldFilter, _super);
function FieldFilter(field, op, value) {
var _this = _super.call(this) || this;
_this.field = field;
_this.op = op;
_this.value = value;
return _this;
}
/**
* Creates a filter based on the provided arguments.
*/
FieldFilter.create = function (field, op, value) {
if (field.isKeyField()) {
if (op === "in" /* IN */ || op === "not-in" /* NOT_IN */) {
return this.createKeyFieldInFilter(field, op, value);
}
else {
return new KeyFieldFilter(field, op, value);
}
}
else if (op === "array-contains" /* ARRAY_CONTAINS */) {
return new ArrayContainsFilter(field, value);
}
else if (op === "in" /* IN */) {
return new InFilter(field, value);
}
else if (op === "not-in" /* NOT_IN */) {
return new NotInFilter(field, value);
}
else if (op === "array-contains-any" /* ARRAY_CONTAINS_ANY */) {
return new ArrayContainsAnyFilter(field, value);
}
else {
return new FieldFilter(field, op, value);
}
};
FieldFilter.createKeyFieldInFilter = function (field, op, value) {
return op === "in" /* IN */
? new KeyFieldInFilter(field, value)
: new KeyFieldNotInFilter(field, value);
};
FieldFilter.prototype.matches = function (doc) {
var other = doc.data.field(this.field);
// Types do not have to match in NOT_EQUAL filters.
if (this.op === "!=" /* NOT_EQUAL */) {
return (other !== null &&
this.matchesComparison(valueCompare(other, this.value)));
}
// Only compare types with matching backend order (such as double and int).
return (other !== null &&
typeOrder(this.value) === typeOrder(other) &&
this.matchesComparison(valueCompare(other, this.value)));
};
FieldFilter.prototype.matchesComparison = function (comparison) {
switch (this.op) {
case "<" /* LESS_THAN */:
return comparison < 0;
case "<=" /* LESS_THAN_OR_EQUAL */:
return comparison <= 0;
case "==" /* EQUAL */:
return comparison === 0;
case "!=" /* NOT_EQUAL */:
return comparison !== 0;
case ">" /* GREATER_THAN */:
return comparison > 0;
case ">=" /* GREATER_THAN_OR_EQUAL */:
return comparison >= 0;
default:
return fail();
}
};
FieldFilter.prototype.isInequality = function () {
return ([
"<" /* LESS_THAN */,
"<=" /* LESS_THAN_OR_EQUAL */,
">" /* GREATER_THAN */,
">=" /* GREATER_THAN_OR_EQUAL */,
"!=" /* NOT_EQUAL */,
"not-in" /* NOT_IN */
].indexOf(this.op) >= 0);
};
return FieldFilter;
}(Filter));
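// Editor's illustrative sketch (not part of the SDK and never invoked):
// FieldFilter.create picks an operator-specific subclass (defined below) so
// that matches() can implement each operator's semantics.
function exampleFieldFilterCreate() {
    var field = new FieldPath$1(['tags']);
    var filter = FieldFilter.create(field, "array-contains" /* ARRAY_CONTAINS */, { stringValue: 'a' });
    return filter instanceof ArrayContainsFilter; // expected: true
}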
function canonifyFilter(filter) {
// TODO(b/29183165): Technically, this won't be unique if two values have
// the same description, such as the int 3 and the string "3". So we should
// add the types in here somehow, too.
return (filter.field.canonicalString() +
filter.op.toString() +
canonicalId(filter.value));
}
function filterEquals(f1, f2) {
return (f1.op === f2.op &&
f1.field.isEqual(f2.field) &&
valueEquals(f1.value, f2.value));
}
/** Returns a debug description for `filter`. */
function stringifyFilter(filter) {
return filter.field.canonicalString() + " " + filter.op + " " + canonicalId(filter.value);
}
/** Filter that matches on key fields (i.e. '__name__'). */
var KeyFieldFilter = /** @class */ (function (_super) {
tslib.__extends(KeyFieldFilter, _super);
function KeyFieldFilter(field, op, value) {
var _this = _super.call(this, field, op, value) || this;
_this.key = DocumentKey.fromName(value.referenceValue);
return _this;
}
KeyFieldFilter.prototype.matches = function (doc) {
var comparison = DocumentKey.comparator(doc.key, this.key);
return this.matchesComparison(comparison);
};
return KeyFieldFilter;
}(FieldFilter));
/** Filter that matches on key fields within an array. */
var KeyFieldInFilter = /** @class */ (function (_super) {
tslib.__extends(KeyFieldInFilter, _super);
function KeyFieldInFilter(field, value) {
var _this = _super.call(this, field, "in" /* IN */, value) || this;
_this.keys = extractDocumentKeysFromArrayValue("in" /* IN */, value);
return _this;
}
KeyFieldInFilter.prototype.matches = function (doc) {
return this.keys.some(function (key) { return key.isEqual(doc.key); });
};
return KeyFieldInFilter;
}(FieldFilter));
/** Filter that matches on key fields not present within an array. */
var KeyFieldNotInFilter = /** @class */ (function (_super) {
tslib.__extends(KeyFieldNotInFilter, _super);
function KeyFieldNotInFilter(field, value) {
var _this = _super.call(this, field, "not-in" /* NOT_IN */, value) || this;
_this.keys = extractDocumentKeysFromArrayValue("not-in" /* NOT_IN */, value);
return _this;
}
KeyFieldNotInFilter.prototype.matches = function (doc) {
return !this.keys.some(function (key) { return key.isEqual(doc.key); });
};
return KeyFieldNotInFilter;
}(FieldFilter));
function extractDocumentKeysFromArrayValue(op, value) {
var _a;
return (((_a = value.arrayValue) === null || _a === void 0 ? void 0 : _a.values) || []).map(function (v) {
return DocumentKey.fromName(v.referenceValue);
});
}
/** A Filter that implements the array-contains operator. */
var ArrayContainsFilter = /** @class */ (function (_super) {
tslib.__extends(ArrayContainsFilter, _super);
function ArrayContainsFilter(field, value) {
return _super.call(this, field, "array-contains" /* ARRAY_CONTAINS */, value) || this;
}
ArrayContainsFilter.prototype.matches = function (doc) {
var other = doc.data.field(this.field);
return isArray(other) && arrayValueContains(other.arrayValue, this.value);
};
return ArrayContainsFilter;
}(FieldFilter));
/** A Filter that implements the IN operator. */
var InFilter = /** @class */ (function (_super) {
tslib.__extends(InFilter, _super);
function InFilter(field, value) {
return _super.call(this, field, "in" /* IN */, value) || this;
}
InFilter.prototype.matches = function (doc) {
var other = doc.data.field(this.field);
return other !== null && arrayValueContains(this.value.arrayValue, other);
};
return InFilter;
}(FieldFilter));
/** A Filter that implements the not-in operator. */
var NotInFilter = /** @class */ (function (_super) {
tslib.__extends(NotInFilter, _super);
function NotInFilter(field, value) {
return _super.call(this, field, "not-in" /* NOT_IN */, value) || this;
}
NotInFilter.prototype.matches = function (doc) {
if (arrayValueContains(this.value.arrayValue, { nullValue: 'NULL_VALUE' })) {
return false;
}
var other = doc.data.field(this.field);
return other !== null && !arrayValueContains(this.value.arrayValue, other);
};
return NotInFilter;
}(FieldFilter));
/** A Filter that implements the array-contains-any operator. */
var ArrayContainsAnyFilter = /** @class */ (function (_super) {
tslib.__extends(ArrayContainsAnyFilter, _super);
function ArrayContainsAnyFilter(field, value) {
return _super.call(this, field, "array-contains-any" /* ARRAY_CONTAINS_ANY */, value) || this;
}
ArrayContainsAnyFilter.prototype.matches = function (doc) {
var _this = this;
var other = doc.data.field(this.field);
if (!isArray(other) || !other.arrayValue.values) {
return false;
}
return other.arrayValue.values.some(function (val) { return arrayValueContains(_this.value.arrayValue, val); });
};
return ArrayContainsAnyFilter;
}(FieldFilter));
/**
* Represents a bound of a query.
*
* The bound is specified with the given components representing a position and
* whether it's just before or just after the position (relative to whatever the
* query order is).
*
* The position represents a logical index position for a query. It's a prefix
* of values for the (potentially implicit) order by clauses of a query.
*
* Bound provides a function to determine whether a document comes before or
* after a bound. This is influenced by whether the position is just before or
* just after the provided values.
*/
var Bound = /** @class */ (function () {
function Bound(position, before) {
this.position = position;
this.before = before;
}
return Bound;
}());
function canonifyBound(bound) {
// TODO(b/29183165): Make this collision robust.
return (bound.before ? 'b' : 'a') + ":" + bound.position
.map(function (p) { return canonicalId(p); })
.join(',');
}
/**
* An ordering on a field, in some Direction. Direction defaults to ASCENDING.
*/
var OrderBy = /** @class */ (function () {
function OrderBy(field, dir /* ASCENDING */) {
if (dir === void 0) { dir = "asc"; }
this.field = field;
this.dir = dir;
}
return OrderBy;
}());
function canonifyOrderBy(orderBy) {
// TODO(b/29183165): Make this collision robust.
return orderBy.field.canonicalString() + orderBy.dir;
}
function stringifyOrderBy(orderBy) {
return orderBy.field.canonicalString() + " (" + orderBy.dir + ")";
}
function orderByEquals(left, right) {
return left.dir === right.dir && left.field.isEqual(right.field);
}
/**
* Returns true if a document sorts before a bound using the provided sort
* order.
*/
function sortsBeforeDocument(bound, orderBy, doc) {
var comparison = 0;
for (var i = 0; i < bound.position.length; i++) {
var orderByComponent = orderBy[i];
var component = bound.position[i];
if (orderByComponent.field.isKeyField()) {
comparison = DocumentKey.comparator(DocumentKey.fromName(component.referenceValue), doc.key);
}
else {
var docValue = doc.data.field(orderByComponent.field);
comparison = valueCompare(component, docValue);
}
if (orderByComponent.dir === "desc" /* DESCENDING */) {
comparison = comparison * -1;
}
if (comparison !== 0) {
break;
}
}
return bound.before ? comparison <= 0 : comparison < 0;
}
function boundEquals(left, right) {
if (left === null) {
return right === null;
}
else if (right === null) {
return false;
}
if (left.before !== right.before ||
left.position.length !== right.position.length) {
return false;
}
for (var i = 0; i < left.position.length; i++) {
var leftPosition = left.position[i];
var rightPosition = right.position[i];
if (!valueEquals(leftPosition, rightPosition)) {
return false;
}
}
return true;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Query encapsulates all the query attributes we support in the SDK. It can
* be run against the LocalStore, as well as be converted to a `Target` to
* query the RemoteStore results.
*
* Visible for testing.
*/
var QueryImpl = /** @class */ (function () {
/**
* Initializes a Query with a path and optional additional query constraints.
* Path must currently be empty if this is a collection group query.
*/
function QueryImpl(path, collectionGroup, explicitOrderBy, filters, limit, limitType /* First */, startAt, endAt) {
if (collectionGroup === void 0) { collectionGroup = null; }
if (explicitOrderBy === void 0) { explicitOrderBy = []; }
if (filters === void 0) { filters = []; }
if (limit === void 0) { limit = null; }
if (limitType === void 0) { limitType = "F"; }
if (startAt === void 0) { startAt = null; }
if (endAt === void 0) { endAt = null; }
this.path = path;
this.collectionGroup = collectionGroup;
this.explicitOrderBy = explicitOrderBy;
this.filters = filters;
this.limit = limit;
this.limitType = limitType;
this.startAt = startAt;
this.endAt = endAt;
this.memoizedOrderBy = null;
// The corresponding `Target` of this `Query` instance.
this.memoizedTarget = null;
}
return QueryImpl;
}());
/** Creates a new Query instance with the options provided. */
function newQuery(path, collectionGroup, explicitOrderBy, filters, limit, limitType, startAt, endAt) {
return new QueryImpl(path, collectionGroup, explicitOrderBy, filters, limit, limitType, startAt, endAt);
}
/** Creates a new Query for a query that matches all documents at `path` */
function newQueryForPath(path) {
return new QueryImpl(path);
}
/**
* Helper to convert a collection group query into a collection query at a
* specific path. This is used when executing collection group queries, since
* we have to split the query into a set of collection queries at multiple
* paths.
*/
function asCollectionQueryAtPath(query, path) {
return new QueryImpl(path,
/*collectionGroup=*/ null, query.explicitOrderBy.slice(), query.filters.slice(), query.limit, query.limitType, query.startAt, query.endAt);
}
/**
* Returns true if this query does not specify any query constraints that
* could remove results.
*/
function matchesAllDocuments(query) {
return (query.filters.length === 0 &&
query.limit === null &&
query.startAt == null &&
query.endAt == null &&
(query.explicitOrderBy.length === 0 ||
(query.explicitOrderBy.length === 1 &&
query.explicitOrderBy[0].field.isKeyField())));
}
function hasLimitToFirst(query) {
return !isNullOrUndefined(query.limit) && query.limitType === "F" /* First */;
}
function hasLimitToLast(query) {
return !isNullOrUndefined(query.limit) && query.limitType === "L" /* Last */;
}
function getFirstOrderByField(query) {
return query.explicitOrderBy.length > 0
? query.explicitOrderBy[0].field
: null;
}
function getInequalityFilterField(query) {
for (var _i = 0, _d = query.filters; _i < _d.length; _i++) {
var filter = _d[_i];
if (filter.isInequality()) {
return filter.field;
}
}
return null;
}
/**
* Checks if any of the provided Operators are included in the query and
* returns the first one that is, or null if none are.
*/
function findFilterOperator(query, operators) {
for (var _i = 0, _d = query.filters; _i < _d.length; _i++) {
var filter = _d[_i];
if (operators.indexOf(filter.op) >= 0) {
return filter.op;
}
}
return null;
}
/**
* Creates a new Query for a collection group query that matches all documents
* within the provided collection group.
*/
function newQueryForCollectionGroup(collectionId) {
return new QueryImpl(ResourcePath.emptyPath(), collectionId);
}
/**
* Returns whether the query matches a single document by path (rather than a
* collection).
*/
function isDocumentQuery$1(query) {
return (DocumentKey.isDocumentKey(query.path) &&
query.collectionGroup === null &&
query.filters.length === 0);
}
/**
* Returns whether the query matches a collection group rather than a specific
* collection.
*/
function isCollectionGroupQuery(query) {
return query.collectionGroup !== null;
}
/**
* Returns the implicit order by constraint that is used to execute the Query,
* which can be different from the order by constraints the user provided (e.g.
* the SDK and backend always order by `__name__`).
*/
function queryOrderBy(query) {
var queryImpl = debugCast(query);
if (queryImpl.memoizedOrderBy === null) {
queryImpl.memoizedOrderBy = [];
var inequalityField = getInequalityFilterField(queryImpl);
var firstOrderByField = getFirstOrderByField(queryImpl);
if (inequalityField !== null && firstOrderByField === null) {
// In order to implicitly add key ordering, we must also add the
// inequality filter field for it to be a valid query.
// Note that the default inequality field and key ordering is ascending.
if (!inequalityField.isKeyField()) {
queryImpl.memoizedOrderBy.push(new OrderBy(inequalityField));
}
queryImpl.memoizedOrderBy.push(new OrderBy(FieldPath$1.keyField(), "asc" /* ASCENDING */));
}
else {
var foundKeyOrdering = false;
for (var _i = 0, _d = queryImpl.explicitOrderBy; _i < _d.length; _i++) {
var orderBy_1 = _d[_i];
queryImpl.memoizedOrderBy.push(orderBy_1);
if (orderBy_1.field.isKeyField()) {
foundKeyOrdering = true;
}
}
if (!foundKeyOrdering) {
// The order of the implicit key ordering always matches the last
// explicit order by
var lastDirection = queryImpl.explicitOrderBy.length > 0
? queryImpl.explicitOrderBy[queryImpl.explicitOrderBy.length - 1]
.dir
: "asc" /* ASCENDING */;
queryImpl.memoizedOrderBy.push(new OrderBy(FieldPath$1.keyField(), lastDirection));
}
}
}
return queryImpl.memoizedOrderBy;
}
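// Editor's illustrative sketch (not part of the SDK and never invoked): an
// inequality filter without an explicit orderBy yields an implicit ordering on
// the filtered field followed by the key field. The expected strings assume
// FieldPath$1.canonicalString() joins segments with '.'.
function exampleQueryOrderBy() {
    var base = newQueryForCollectionGroup('rooms');
    var query = queryWithAddedFilter(base, FieldFilter.create(new FieldPath$1(['size']), ">" /* GREATER_THAN */, { integerValue: '10' }));
    return queryOrderBy(query).map(function (o) { return stringifyOrderBy(o); });
    // expected: ['size (asc)', '__name__ (asc)']
}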
/**
* Converts this `Query` instance to its corresponding `Target` representation.
*/
function queryToTarget(query) {
var queryImpl = debugCast(query);
if (!queryImpl.memoizedTarget) {
if (queryImpl.limitType === "F" /* First */) {
queryImpl.memoizedTarget = newTarget(queryImpl.path, queryImpl.collectionGroup, queryOrderBy(queryImpl), queryImpl.filters, queryImpl.limit, queryImpl.startAt, queryImpl.endAt);
}
else {
// Flip the orderBy directions since we want the last results
var orderBys = [];
for (var _i = 0, _d = queryOrderBy(queryImpl); _i < _d.length; _i++) {
var orderBy_2 = _d[_i];
var dir = orderBy_2.dir === "desc" /* DESCENDING */
? "asc" /* ASCENDING */
: "desc" /* DESCENDING */;
orderBys.push(new OrderBy(orderBy_2.field, dir));
}
// We need to swap the cursors to match the now-flipped query ordering.
var startAt_1 = queryImpl.endAt
? new Bound(queryImpl.endAt.position, !queryImpl.endAt.before)
: null;
var endAt_1 = queryImpl.startAt
? new Bound(queryImpl.startAt.position, !queryImpl.startAt.before)
: null;
// Now return as a LimitType.First query.
queryImpl.memoizedTarget = newTarget(queryImpl.path, queryImpl.collectionGroup, orderBys, queryImpl.filters, queryImpl.limit, startAt_1, endAt_1);
}
}
return queryImpl.memoizedTarget;
}
function queryWithAddedFilter(query, filter) {
var newFilters = query.filters.concat([filter]);
return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), newFilters, query.limit, query.limitType, query.startAt, query.endAt);
}
function queryWithAddedOrderBy(query, orderBy) {
// TODO(dimond): validate that orderBy does not list the same key twice.
var newOrderBy = query.explicitOrderBy.concat([orderBy]);
return new QueryImpl(query.path, query.collectionGroup, newOrderBy, query.filters.slice(), query.limit, query.limitType, query.startAt, query.endAt);
}
function queryWithLimit(query, limit, limitType) {
return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), query.filters.slice(), limit, limitType, query.startAt, query.endAt);
}
function queryWithStartAt(query, bound) {
return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), query.filters.slice(), query.limit, query.limitType, bound, query.endAt);
}
function queryWithEndAt(query, bound) {
return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), query.filters.slice(), query.limit, query.limitType, query.startAt, bound);
}
function queryEquals(left, right) {
return (targetEquals(queryToTarget(left), queryToTarget(right)) &&
left.limitType === right.limitType);
}
// TODO(b/29183165): This is used to get a unique string from a query to, for
// example, use as a dictionary key, but the implementation is subject to
// collisions. Make it collision-free.
function canonifyQuery(query) {
return canonifyTarget(queryToTarget(query)) + "|lt:" + query.limitType;
}
function stringifyQuery(query) {
return "Query(target=" + stringifyTarget(queryToTarget(query)) + "; limitType=" + query.limitType + ")";
}
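// Editor's illustrative sketch (not part of the SDK and never invoked):
// canonifyQuery appends the limit type to the target's canonical ID, which is
// how otherwise-identical limitToFirst and limitToLast queries stay distinct.
// The expected string assumes an empty ResourcePath canonicalizes to ''.
function exampleCanonifyQuery() {
    var query = queryWithLimit(newQueryForCollectionGroup('rooms'), 10, "F" /* First */);
    return canonifyQuery(query); // expected: '|cg:rooms|f:|ob:__name__asc|l:10|lt:F'
}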
/** Returns whether `doc` matches the constraints of `query`. */
function queryMatches(query, doc) {
return (doc.isFoundDocument() &&
queryMatchesPathAndCollectionGroup(query, doc) &&
queryMatchesOrderBy(query, doc) &&
queryMatchesFilters(query, doc) &&
queryMatchesBounds(query, doc));
}
function queryMatchesPathAndCollectionGroup(query, doc) {
var docPath = doc.key.path;
if (query.collectionGroup !== null) {
// NOTE: this.path is currently always empty since we don't expose Collection
// Group queries rooted at a document path yet.
return (doc.key.hasCollectionId(query.collectionGroup) &&
query.path.isPrefixOf(docPath));
}
else if (DocumentKey.isDocumentKey(query.path)) {
// exact match for document queries
return query.path.isEqual(docPath);
}
else {
// shallow ancestor queries by default
return query.path.isImmediateParentOf(docPath);
}
}
/**
* A document must have a value for every ordering clause in order to show up
* in the results.
*/
function queryMatchesOrderBy(query, doc) {
for (var _i = 0, _d = query.explicitOrderBy; _i < _d.length; _i++) {
var orderBy_3 = _d[_i];
// order by key always matches
if (!orderBy_3.field.isKeyField() && doc.data.field(orderBy_3.field) === null) {
return false;
}
}
return true;
}
function queryMatchesFilters(query, doc) {
for (var _i = 0, _d = query.filters; _i < _d.length; _i++) {
var filter = _d[_i];
if (!filter.matches(doc)) {
return false;
}
}
return true;
}
/** Makes sure a document is within the bounds, if provided. */
function queryMatchesBounds(query, doc) {
if (query.startAt &&
!sortsBeforeDocument(query.startAt, queryOrderBy(query), doc)) {
return false;
}
if (query.endAt &&
sortsBeforeDocument(query.endAt, queryOrderBy(query), doc)) {
return false;
}
return true;
}
/**
* Returns a new comparator function that can be used to compare two documents
* based on the Query's ordering constraint.
*/
function newQueryComparator(query) {
return function (d1, d2) {
var comparedOnKeyField = false;
for (var _i = 0, _d = queryOrderBy(query); _i < _d.length; _i++) {
var orderBy_4 = _d[_i];
var comp = compareDocs(orderBy_4, d1, d2);
if (comp !== 0) {
return comp;
}
comparedOnKeyField = comparedOnKeyField || orderBy_4.field.isKeyField();
}
return 0;
};
}
function compareDocs(orderBy, d1, d2) {
var comparison = orderBy.field.isKeyField()
? DocumentKey.comparator(d1.key, d2.key)
: compareDocumentsByField(orderBy.field, d1, d2);
switch (orderBy.dir) {
case "asc" /* ASCENDING */:
return comparison;
case "desc" /* DESCENDING */:
return -1 * comparison;
default:
return fail();
}
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// An immutable sorted map implementation, based on a Left-leaning Red-Black
// tree.
var SortedMap = /** @class */ (function () {
function SortedMap(comparator, root) {
this.comparator = comparator;
this.root = root ? root : LLRBNode.EMPTY;
}
// Returns a copy of the map, with the specified key/value added or replaced.
SortedMap.prototype.insert = function (key, value) {
return new SortedMap(this.comparator, this.root
.insert(key, value, this.comparator)
.copy(null, null, LLRBNode.BLACK, null, null));
};
// Returns a copy of the map, with the specified key removed.
SortedMap.prototype.remove = function (key) {
return new SortedMap(this.comparator, this.root
.remove(key, this.comparator)
.copy(null, null, LLRBNode.BLACK, null, null));
};
// Returns the value of the node with the given key, or null.
SortedMap.prototype.get = function (key) {
var node = this.root;
while (!node.isEmpty()) {
var cmp = this.comparator(key, node.key);
if (cmp === 0) {
return node.value;
}
else if (cmp < 0) {
node = node.left;
}
else if (cmp > 0) {
node = node.right;
}
}
return null;
};
// Returns the index of the element in this sorted map, or -1 if it doesn't
// exist.
SortedMap.prototype.indexOf = function (key) {
// Number of nodes that were pruned when descending right
var prunedNodes = 0;
var node = this.root;
while (!node.isEmpty()) {
var cmp = this.comparator(key, node.key);
if (cmp === 0) {
return prunedNodes + node.left.size;
}
else if (cmp < 0) {
node = node.left;
}
else {
// Count all nodes left of the node plus the node itself
prunedNodes += node.left.size + 1;
node = node.right;
}
}
// Node not found
return -1;
};
SortedMap.prototype.isEmpty = function () {
return this.root.isEmpty();
};
Object.defineProperty(SortedMap.prototype, "size", {
// Returns the total number of nodes in the map.
get: function () {
return this.root.size;
},
enumerable: false,
configurable: true
});
// Returns the minimum key in the map.
SortedMap.prototype.minKey = function () {
return this.root.minKey();
};
// Returns the maximum key in the map.
SortedMap.prototype.maxKey = function () {
return this.root.maxKey();
};
// Traverses the map in key order and calls the specified action function
// for each key/value pair. If action returns true, traversal is aborted.
// Returns the first truthy value returned by action, or the last falsey
// value returned by action.
SortedMap.prototype.inorderTraversal = function (action) {
return this.root.inorderTraversal(action);
};
SortedMap.prototype.forEach = function (fn) {
this.inorderTraversal(function (k, v) {
fn(k, v);
return false;
});
};
SortedMap.prototype.toString = function () {
var descriptions = [];
this.inorderTraversal(function (k, v) {
descriptions.push(k + ":" + v);
return false;
});
return "{" + descriptions.join(', ') + "}";
};
// Traverses the map in reverse key order and calls the specified action
// function for each key/value pair. If action returns true, traversal is
// aborted.
// Returns the first truthy value returned by action, or the last falsey
// value returned by action.
SortedMap.prototype.reverseTraversal = function (action) {
return this.root.reverseTraversal(action);
};
// Returns an iterator over the SortedMap.
SortedMap.prototype.getIterator = function () {
return new SortedMapIterator(this.root, null, this.comparator, false);
};
SortedMap.prototype.getIteratorFrom = function (key) {
return new SortedMapIterator(this.root, key, this.comparator, false);
};
SortedMap.prototype.getReverseIterator = function () {
return new SortedMapIterator(this.root, null, this.comparator, true);
};
SortedMap.prototype.getReverseIteratorFrom = function (key) {
return new SortedMapIterator(this.root, key, this.comparator, true);
};
return SortedMap;
}()); // end SortedMap
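// Editor's illustrative sketch (not part of the SDK and never invoked): the
// map is copy-on-write, so every insert returns a new map and the original is
// left untouched.
function exampleSortedMap() {
    var empty = new SortedMap(primitiveComparator);
    var map = empty.insert('b', 2).insert('a', 1).insert('c', 3);
    return [map.get('b'), map.indexOf('c'), map.size, map.minKey(), empty.size];
    // expected: [2, 2, 3, 'a', 0]
}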
// An iterator over an LLRBNode.
var SortedMapIterator = /** @class */ (function () {
function SortedMapIterator(node, startKey, comparator, isReverse) {
this.isReverse = isReverse;
this.nodeStack = [];
var cmp = 1;
while (!node.isEmpty()) {
cmp = startKey ? comparator(node.key, startKey) : 1;
// flip the comparison if we're going in reverse
if (isReverse) {
cmp *= -1;
}
if (cmp < 0) {
// This node is less than our start key. Ignore it.
if (this.isReverse) {
node = node.left;
}
else {
node = node.right;
}
}
else if (cmp === 0) {
// This node is exactly equal to our start key. Push it on the stack,
// but stop iterating.
this.nodeStack.push(node);
break;
}
else {
// This node is greater than our start key, add it to the stack and move
// to the next one
this.nodeStack.push(node);
if (this.isReverse) {
node = node.right;
}
else {
node = node.left;
}
}
}
}
SortedMapIterator.prototype.getNext = function () {
var node = this.nodeStack.pop();
var result = { key: node.key, value: node.value };
if (this.isReverse) {
node = node.left;
while (!node.isEmpty()) {
this.nodeStack.push(node);
node = node.right;
}
}
else {
node = node.right;
while (!node.isEmpty()) {
this.nodeStack.push(node);
node = node.left;
}
}
return result;
};
SortedMapIterator.prototype.hasNext = function () {
return this.nodeStack.length > 0;
};
SortedMapIterator.prototype.peek = function () {
if (this.nodeStack.length === 0) {
return null;
}
var node = this.nodeStack[this.nodeStack.length - 1];
return { key: node.key, value: node.value };
};
return SortedMapIterator;
}()); // end SortedMapIterator
// Represents a node in a Left-leaning Red-Black tree.
var LLRBNode = /** @class */ (function () {
function LLRBNode(key, value, color, left, right) {
this.key = key;
this.value = value;
this.color = color != null ? color : LLRBNode.RED;
this.left = left != null ? left : LLRBNode.EMPTY;
this.right = right != null ? right : LLRBNode.EMPTY;
this.size = this.left.size + 1 + this.right.size;
}
// Returns a copy of the current node, optionally replacing pieces of it.
LLRBNode.prototype.copy = function (key, value, color, left, right) {
return new LLRBNode(key != null ? key : this.key, value != null ? value : this.value, color != null ? color : this.color, left != null ? left : this.left, right != null ? right : this.right);
};
LLRBNode.prototype.isEmpty = function () {
return false;
};
// Traverses the tree in key order and calls the specified action function
// for each node. If action returns true, traversal is aborted.
// Returns the first truthy value returned by action, or the last falsey
// value returned by action.
LLRBNode.prototype.inorderTraversal = function (action) {
return (this.left.inorderTraversal(action) ||
action(this.key, this.value) ||
this.right.inorderTraversal(action));
};
// Traverses the tree in reverse key order and calls the specified action
// function for each node. If action returns true, traversal is aborted.
// Returns the first truthy value returned by action, or the last falsey
// value returned by action.
LLRBNode.prototype.reverseTraversal = function (action) {
return (this.right.reverseTraversal(action) ||
action(this.key, this.value) ||
this.left.reverseTraversal(action));
};
// Returns the minimum node in the tree.
LLRBNode.prototype.min = function () {
if (this.left.isEmpty()) {
return this;
}
else {
return this.left.min();
}
};
// Returns the minimum key in the tree.
LLRBNode.prototype.minKey = function () {
return this.min().key;
};
// Returns the maximum key in the tree.
LLRBNode.prototype.maxKey = function () {
if (this.right.isEmpty()) {
return this.key;
}
else {
return this.right.maxKey();
}
};
// Returns new tree, with the key/value added.
LLRBNode.prototype.insert = function (key, value, comparator) {
var n = this;
var cmp = comparator(key, n.key);
if (cmp < 0) {
n = n.copy(null, null, null, n.left.insert(key, value, comparator), null);
}
else if (cmp === 0) {
n = n.copy(null, value, null, null, null);
}
else {
n = n.copy(null, null, null, null, n.right.insert(key, value, comparator));
}
return n.fixUp();
};
LLRBNode.prototype.removeMin = function () {
if (this.left.isEmpty()) {
return LLRBNode.EMPTY;
}
var n = this;
if (!n.left.isRed() && !n.left.left.isRed()) {
n = n.moveRedLeft();
}
n = n.copy(null, null, null, n.left.removeMin(), null);
return n.fixUp();
};
// Returns new tree, with the specified item removed.
LLRBNode.prototype.remove = function (key, comparator) {
var smallest;
var n = this;
if (comparator(key, n.key) < 0) {
if (!n.left.isEmpty() && !n.left.isRed() && !n.left.left.isRed()) {
n = n.moveRedLeft();
}
n = n.copy(null, null, null, n.left.remove(key, comparator), null);
}
else {
if (n.left.isRed()) {
n = n.rotateRight();
}
if (!n.right.isEmpty() && !n.right.isRed() && !n.right.left.isRed()) {
n = n.moveRedRight();
}
if (comparator(key, n.key) === 0) {
if (n.right.isEmpty()) {
return LLRBNode.EMPTY;
}
else {
smallest = n.right.min();
n = n.copy(smallest.key, smallest.value, null, null, n.right.removeMin());
}
}
n = n.copy(null, null, null, null, n.right.remove(key, comparator));
}
return n.fixUp();
};
LLRBNode.prototype.isRed = function () {
return this.color;
};
// Returns new tree after performing any needed rotations.
LLRBNode.prototype.fixUp = function () {
var n = this;
if (n.right.isRed() && !n.left.isRed()) {
n = n.rotateLeft();
}
if (n.left.isRed() && n.left.left.isRed()) {
n = n.rotateRight();
}
if (n.left.isRed() && n.right.isRed()) {
n = n.colorFlip();
}
return n;
};
LLRBNode.prototype.moveRedLeft = function () {
var n = this.colorFlip();
if (n.right.left.isRed()) {
n = n.copy(null, null, null, null, n.right.rotateRight());
n = n.rotateLeft();
n = n.colorFlip();
}
return n;
};
LLRBNode.prototype.moveRedRight = function () {
var n = this.colorFlip();
if (n.left.left.isRed()) {
n = n.rotateRight();
n = n.colorFlip();
}
return n;
};
LLRBNode.prototype.rotateLeft = function () {
var nl = this.copy(null, null, LLRBNode.RED, null, this.right.left);
return this.right.copy(null, null, this.color, nl, null);
};
LLRBNode.prototype.rotateRight = function () {
var nr = this.copy(null, null, LLRBNode.RED, this.left.right, null);
return this.left.copy(null, null, this.color, null, nr);
};
LLRBNode.prototype.colorFlip = function () {
var left = this.left.copy(null, null, !this.left.color, null, null);
var right = this.right.copy(null, null, !this.right.color, null, null);
return this.copy(null, null, !this.color, left, right);
};
// For testing.
LLRBNode.prototype.checkMaxDepth = function () {
var blackDepth = this.check();
if (Math.pow(2.0, blackDepth) <= this.size + 1) {
return true;
}
else {
return false;
}
};
// In a balanced RB tree, the black-depth (number of black nodes) from root to
// leaves is equal on both sides. This function verifies that or asserts.
LLRBNode.prototype.check = function () {
if (this.isRed() && this.left.isRed()) {
throw fail();
}
if (this.right.isRed()) {
throw fail();
}
var blackDepth = this.left.check();
if (blackDepth !== this.right.check()) {
throw fail();
}
else {
return blackDepth + (this.isRed() ? 0 : 1);
}
};
return LLRBNode;
}()); // end LLRBNode
// Empty node is shared between all LLRB trees.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
LLRBNode.EMPTY = null;
LLRBNode.RED = true;
LLRBNode.BLACK = false;
// Represents an empty node (a leaf node in the Red-Black Tree).
var LLRBEmptyNode = /** @class */ (function () {
function LLRBEmptyNode() {
this.size = 0;
}
Object.defineProperty(LLRBEmptyNode.prototype, "key", {
get: function () {
throw fail();
},
enumerable: false,
configurable: true
});
Object.defineProperty(LLRBEmptyNode.prototype, "value", {
get: function () {
throw fail();
},
enumerable: false,
configurable: true
});
Object.defineProperty(LLRBEmptyNode.prototype, "color", {
get: function () {
throw fail();
},
enumerable: false,
configurable: true
});
Object.defineProperty(LLRBEmptyNode.prototype, "left", {
get: function () {
throw fail();
},
enumerable: false,
configurable: true
});
Object.defineProperty(LLRBEmptyNode.prototype, "right", {
get: function () {
throw fail();
},
enumerable: false,
configurable: true
});
// Returns a copy of the current node.
LLRBEmptyNode.prototype.copy = function (key, value, color, left, right) {
return this;
};
// Returns a copy of the tree, with the specified key/value added.
LLRBEmptyNode.prototype.insert = function (key, value, comparator) {
return new LLRBNode(key, value);
};
// Returns a copy of the tree, with the specified key removed.
LLRBEmptyNode.prototype.remove = function (key, comparator) {
return this;
};
LLRBEmptyNode.prototype.isEmpty = function () {
return true;
};
LLRBEmptyNode.prototype.inorderTraversal = function (action) {
return false;
};
LLRBEmptyNode.prototype.reverseTraversal = function (action) {
return false;
};
LLRBEmptyNode.prototype.minKey = function () {
return null;
};
LLRBEmptyNode.prototype.maxKey = function () {
return null;
};
LLRBEmptyNode.prototype.isRed = function () {
return false;
};
// For testing.
LLRBEmptyNode.prototype.checkMaxDepth = function () {
return true;
};
LLRBEmptyNode.prototype.check = function () {
return 0;
};
return LLRBEmptyNode;
}()); // end LLRBEmptyNode
LLRBNode.EMPTY = new LLRBEmptyNode();
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* SortedSet is an immutable (copy-on-write) collection that holds elements
* in order specified by the provided comparator.
*
* NOTE: if the provided comparator returns 0 for two elements, we consider them to
* be equal!
*/
var SortedSet = /** @class */ (function () {
function SortedSet(comparator) {
this.comparator = comparator;
this.data = new SortedMap(this.comparator);
}
SortedSet.prototype.has = function (elem) {
return this.data.get(elem) !== null;
};
SortedSet.prototype.first = function () {
return this.data.minKey();
};
SortedSet.prototype.last = function () {
return this.data.maxKey();
};
Object.defineProperty(SortedSet.prototype, "size", {
get: function () {
return this.data.size;
},
enumerable: false,
configurable: true
});
SortedSet.prototype.indexOf = function (elem) {
return this.data.indexOf(elem);
};
/** Iterates elements in order defined by "comparator" */
SortedSet.prototype.forEach = function (cb) {
this.data.inorderTraversal(function (k, v) {
cb(k);
return false;
});
};
/** Iterates over `elem`s such that: range[0] <= elem < range[1]. */
SortedSet.prototype.forEachInRange = function (range, cb) {
var iter = this.data.getIteratorFrom(range[0]);
while (iter.hasNext()) {
var elem = iter.getNext();
if (this.comparator(elem.key, range[1]) >= 0) {
return;
}
cb(elem.key);
}
};
/**
* Iterates over `elem`s such that: start <= elem until false is returned.
*/
SortedSet.prototype.forEachWhile = function (cb, start) {
var iter;
if (start !== undefined) {
iter = this.data.getIteratorFrom(start);
}
else {
iter = this.data.getIterator();
}
while (iter.hasNext()) {
var elem = iter.getNext();
var result = cb(elem.key);
if (!result) {
return;
}
}
};
/** Finds the least element greater than or equal to `elem`. */
SortedSet.prototype.firstAfterOrEqual = function (elem) {
var iter = this.data.getIteratorFrom(elem);
return iter.hasNext() ? iter.getNext().key : null;
};
SortedSet.prototype.getIterator = function () {
return new SortedSetIterator(this.data.getIterator());
};
SortedSet.prototype.getIteratorFrom = function (key) {
return new SortedSetIterator(this.data.getIteratorFrom(key));
};
/** Inserts or updates an element */
SortedSet.prototype.add = function (elem) {
return this.copy(this.data.remove(elem).insert(elem, true));
};
/** Deletes an element */
SortedSet.prototype.delete = function (elem) {
if (!this.has(elem)) {
return this;
}
return this.copy(this.data.remove(elem));
};
SortedSet.prototype.isEmpty = function () {
return this.data.isEmpty();
};
SortedSet.prototype.unionWith = function (other) {
var result = this;
// Make sure `result` always refers to the larger one of the two sets.
if (result.size < other.size) {
result = other;
other = this;
}
other.forEach(function (elem) {
result = result.add(elem);
});
return result;
};
SortedSet.prototype.isEqual = function (other) {
if (!(other instanceof SortedSet)) {
return false;
}
if (this.size !== other.size) {
return false;
}
var thisIt = this.data.getIterator();
var otherIt = other.data.getIterator();
while (thisIt.hasNext()) {
var thisElem = thisIt.getNext().key;
var otherElem = otherIt.getNext().key;
if (this.comparator(thisElem, otherElem) !== 0) {
return false;
}
}
return true;
};
SortedSet.prototype.toArray = function () {
var res = [];
this.forEach(function (targetId) {
res.push(targetId);
});
return res;
};
SortedSet.prototype.toString = function () {
var result = [];
this.forEach(function (elem) { return result.push(elem); });
return 'SortedSet(' + result.toString() + ')';
};
SortedSet.prototype.copy = function (data) {
var result = new SortedSet(this.comparator);
result.data = data;
return result;
};
return SortedSet;
}());
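/*
 * Illustrative usage sketch (editor's note, not executed by the SDK): SortedSet
 * is copy-on-write, so every `add`/`delete` returns a new set and never mutates
 * the receiver. `primitiveComparator` is the number/string comparator defined
 * elsewhere in this file.
 *
 *   var empty = new SortedSet(primitiveComparator);
 *   var set = empty.add(3).add(1).add(2);  // `empty` is left untouched
 *   set.has(2);                            // true
 *   set.toArray();                         // [1, 2, 3] (comparator order)
 *   set.forEachInRange([1, 3], function (elem) {
 *     // visits 1 and 2; the upper bound is exclusive
 *   });
 *   set.delete(1).size;                    // 2, while set.size is still 3
 */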
var SortedSetIterator = /** @class */ (function () {
function SortedSetIterator(iter) {
this.iter = iter;
}
SortedSetIterator.prototype.getNext = function () {
return this.iter.getNext().key;
};
SortedSetIterator.prototype.hasNext = function () {
return this.iter.hasNext();
};
return SortedSetIterator;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var EMPTY_MUTABLE_DOCUMENT_MAP = new SortedMap(DocumentKey.comparator);
function mutableDocumentMap() {
return EMPTY_MUTABLE_DOCUMENT_MAP;
}
var EMPTY_DOCUMENT_MAP = new SortedMap(DocumentKey.comparator);
function documentMap() {
return EMPTY_DOCUMENT_MAP;
}
var EMPTY_DOCUMENT_VERSION_MAP = new SortedMap(DocumentKey.comparator);
function documentVersionMap() {
return EMPTY_DOCUMENT_VERSION_MAP;
}
var EMPTY_DOCUMENT_KEY_SET = new SortedSet(DocumentKey.comparator);
function documentKeySet() {
var keys = [];
for (var _i = 0; _i < arguments.length; _i++) {
keys[_i] = arguments[_i];
}
var set = EMPTY_DOCUMENT_KEY_SET;
for (var _d = 0, keys_1 = keys; _d < keys_1.length; _d++) {
var key = keys_1[_d];
set = set.add(key);
}
return set;
}
var EMPTY_TARGET_ID_SET = new SortedSet(primitiveComparator);
function targetIdSet() {
return EMPTY_TARGET_ID_SET;
}
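/*
 * Illustrative sketch (editor's note): the factories above hand out shared empty
 * immutable collections; because every mutation returns a new instance, the
 * shared empties are never modified. `key` is assumed to be an existing
 * DocumentKey instance.
 *
 *   var keys = documentKeySet();        // the shared EMPTY_DOCUMENT_KEY_SET
 *   keys = keys.add(key);               // a new set; the shared empty is untouched
 *   documentKeySet().size;              // still 0
 *   documentKeySet(key).has(key);       // true (keys can be passed as varargs)
 *   targetIdSet().add(42).has(42);      // true
 */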
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Returns a DoubleValue for `value` that is encoded based on the serializer's
* `useProto3Json` setting.
*/
function toDouble(serializer, value) {
if (serializer.useProto3Json) {
if (isNaN(value)) {
return { doubleValue: 'NaN' };
}
else if (value === Infinity) {
return { doubleValue: 'Infinity' };
}
else if (value === -Infinity) {
return { doubleValue: '-Infinity' };
}
}
return { doubleValue: isNegativeZero(value) ? '-0' : value };
}
/**
* Returns an IntegerValue for `value`.
*/
function toInteger(value) {
return { integerValue: '' + value };
}
/**
* Returns a value for a number that's appropriate to put into a proto.
* The return value is an IntegerValue if it can safely represent the value,
* otherwise a DoubleValue is returned.
*/
function toNumber(serializer, value) {
return isSafeInteger(value) ? toInteger(value) : toDouble(serializer, value);
}
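/*
 * Illustrative sketch (editor's note): how these helpers encode numbers. The
 * serializer argument is only read for its `useProto3Json` flag, so a plain
 * object is used here as a stand-in.
 *
 *   var serializer = { useProto3Json: true };
 *   toNumber(serializer, 42);         // { integerValue: '42' }   (safe integer)
 *   toNumber(serializer, 0.5);        // { doubleValue: 0.5 }
 *   toDouble(serializer, NaN);        // { doubleValue: 'NaN' }   (proto3 JSON string form)
 *   toDouble(serializer, -0);         // { doubleValue: '-0' }
 */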
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Used to represent a field transform on a mutation. */
var TransformOperation = /** @class */ (function () {
function TransformOperation() {
// Make sure that the structural type of `TransformOperation` is unique.
// See https://github.com/microsoft/TypeScript/issues/5451
this._ = undefined;
}
return TransformOperation;
}());
/**
* Computes the local transform result against the provided `previousValue`,
* optionally using the provided localWriteTime.
*/
function applyTransformOperationToLocalView(transform, previousValue, localWriteTime) {
if (transform instanceof ServerTimestampTransform) {
return serverTimestamp$1(localWriteTime, previousValue);
}
else if (transform instanceof ArrayUnionTransformOperation) {
return applyArrayUnionTransformOperation(transform, previousValue);
}
else if (transform instanceof ArrayRemoveTransformOperation) {
return applyArrayRemoveTransformOperation(transform, previousValue);
}
else {
return applyNumericIncrementTransformOperationToLocalView(transform, previousValue);
}
}
/**
* Computes a final transform result after the transform has been acknowledged
* by the server, potentially using the server-provided transformResult.
*/
function applyTransformOperationToRemoteDocument(transform, previousValue, transformResult) {
// The server just sends null as the transform result for array operations,
// so we have to calculate a result the same as we do for local
// applications.
if (transform instanceof ArrayUnionTransformOperation) {
return applyArrayUnionTransformOperation(transform, previousValue);
}
else if (transform instanceof ArrayRemoveTransformOperation) {
return applyArrayRemoveTransformOperation(transform, previousValue);
}
return transformResult;
}
/**
* If this transform operation is not idempotent, returns the base value to
* persist for this transform. If a base value is returned, the transform
* operation is always applied to this base value, even if the document has
* already been updated.
*
* Base values provide consistent behavior for non-idempotent transforms and
* allow us to return the same latency-compensated value even if the backend
* has already applied the transform operation. The base value is null for
* idempotent transforms, as they can be re-played even if the backend has
* already applied them.
*
* @returns a base value to store along with the mutation, or null for
* idempotent transforms.
*/
function computeTransformOperationBaseValue(transform, previousValue) {
if (transform instanceof NumericIncrementTransformOperation) {
return isNumber(previousValue) ? previousValue : { integerValue: 0 };
}
return null;
}
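/*
 * Illustrative sketch (editor's note): only numeric increments are
 * non-idempotent, so only they produce a base value. The serializer argument is
 * a plain-object stand-in as above.
 *
 *   var inc = new NumericIncrementTransformOperation({ useProto3Json: true }, toInteger(1));
 *   computeTransformOperationBaseValue(inc, { integerValue: '7' });   // { integerValue: '7' }
 *   computeTransformOperationBaseValue(inc, { stringValue: 'x' });    // { integerValue: 0 }
 *   computeTransformOperationBaseValue(new ServerTimestampTransform(), null); // null
 */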
function transformOperationEquals(left, right) {
if (left instanceof ArrayUnionTransformOperation &&
right instanceof ArrayUnionTransformOperation) {
return arrayEquals(left.elements, right.elements, valueEquals);
}
else if (left instanceof ArrayRemoveTransformOperation &&
right instanceof ArrayRemoveTransformOperation) {
return arrayEquals(left.elements, right.elements, valueEquals);
}
else if (left instanceof NumericIncrementTransformOperation &&
right instanceof NumericIncrementTransformOperation) {
return valueEquals(left.operand, right.operand);
}
return (left instanceof ServerTimestampTransform &&
right instanceof ServerTimestampTransform);
}
/** Transforms a value into a server-generated timestamp. */
var ServerTimestampTransform = /** @class */ (function (_super) {
tslib.__extends(ServerTimestampTransform, _super);
function ServerTimestampTransform() {
return _super !== null && _super.apply(this, arguments) || this;
}
return ServerTimestampTransform;
}(TransformOperation));
/** Transforms an array value via a union operation. */
var ArrayUnionTransformOperation = /** @class */ (function (_super) {
tslib.__extends(ArrayUnionTransformOperation, _super);
function ArrayUnionTransformOperation(elements) {
var _this = _super.call(this) || this;
_this.elements = elements;
return _this;
}
return ArrayUnionTransformOperation;
}(TransformOperation));
function applyArrayUnionTransformOperation(transform, previousValue) {
var values = coercedFieldValuesArray(previousValue);
var _loop_3 = function (toUnion) {
if (!values.some(function (element) { return valueEquals(element, toUnion); })) {
values.push(toUnion);
}
};
for (var _i = 0, _d = transform.elements; _i < _d.length; _i++) {
var toUnion = _d[_i];
_loop_3(toUnion);
}
return { arrayValue: { values: values } };
}
/** Transforms an array value via a remove operation. */
var ArrayRemoveTransformOperation = /** @class */ (function (_super) {
tslib.__extends(ArrayRemoveTransformOperation, _super);
function ArrayRemoveTransformOperation(elements) {
var _this = _super.call(this) || this;
_this.elements = elements;
return _this;
}
return ArrayRemoveTransformOperation;
}(TransformOperation));
function applyArrayRemoveTransformOperation(transform, previousValue) {
var values = coercedFieldValuesArray(previousValue);
var _loop_4 = function (toRemove) {
values = values.filter(function (element) { return !valueEquals(element, toRemove); });
};
for (var _i = 0, _d = transform.elements; _i < _d.length; _i++) {
var toRemove = _d[_i];
_loop_4(toRemove);
}
return { arrayValue: { values: values } };
}
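/*
 * Illustrative sketch (editor's note): array transforms operate on proto Value
 * maps and use `valueEquals` for membership checks, so a union never adds
 * duplicates and a remove drops every matching element.
 *
 *   var prev = { arrayValue: { values: [{ stringValue: 'a' }, { stringValue: 'b' }] } };
 *   applyArrayUnionTransformOperation(
 *     new ArrayUnionTransformOperation([{ stringValue: 'b' }, { stringValue: 'c' }]), prev);
 *   // -> { arrayValue: { values: [{stringValue:'a'}, {stringValue:'b'}, {stringValue:'c'}] } }
 *
 *   applyArrayRemoveTransformOperation(
 *     new ArrayRemoveTransformOperation([{ stringValue: 'a' }]), prev);
 *   // -> { arrayValue: { values: [{ stringValue: 'b' }] } }
 */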
/**
* Implements the backend semantics for locally computed NUMERIC_ADD (increment)
* transforms. Converts all field values to integers or doubles, but unlike the
* backend does not cap integer values at 2^63. Instead, JavaScript number
* arithmetic is used and precision loss can occur for values greater than 2^53.
*/
var NumericIncrementTransformOperation = /** @class */ (function (_super) {
tslib.__extends(NumericIncrementTransformOperation, _super);
function NumericIncrementTransformOperation(serializer, operand) {
var _this = _super.call(this) || this;
_this.serializer = serializer;
_this.operand = operand;
return _this;
}
return NumericIncrementTransformOperation;
}(TransformOperation));
function applyNumericIncrementTransformOperationToLocalView(transform, previousValue) {
// PORTING NOTE: Since JavaScript's integer arithmetic is limited to 53 bit
// precision and resolves overflows by reducing precision, we do not
// manually cap overflows at 2^63.
var baseValue = computeTransformOperationBaseValue(transform, previousValue);
var sum = asNumber(baseValue) + asNumber(transform.operand);
if (isInteger(baseValue) && isInteger(transform.operand)) {
return toInteger(sum);
}
else {
return toDouble(transform.serializer, sum);
}
}
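/*
 * Illustrative sketch (editor's note): a local increment sums the operand with
 * the (coerced) previous value using plain JavaScript arithmetic, and the
 * result stays an integer only when both sides are integers.
 *
 *   var serializer = { useProto3Json: true };
 *   var incBy5 = new NumericIncrementTransformOperation(serializer, toInteger(5));
 *   applyNumericIncrementTransformOperationToLocalView(incBy5, { integerValue: '10' });
 *   // -> { integerValue: '15' }
 *   applyNumericIncrementTransformOperationToLocalView(incBy5, { doubleValue: 0.5 });
 *   // -> { doubleValue: 5.5 }
 *   applyNumericIncrementTransformOperationToLocalView(incBy5, { stringValue: 'x' });
 *   // -> { integerValue: '5' } (non-numeric previous values fall back to a base of 0)
 */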
function asNumber(value) {
return normalizeNumber(value.integerValue || value.doubleValue);
}
function coercedFieldValuesArray(value) {
return isArray(value) && value.arrayValue.values
? value.arrayValue.values.slice()
: [];
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A field path and the TransformOperation to perform upon it. */
var FieldTransform = /** @class */ (function () {
function FieldTransform(field, transform) {
this.field = field;
this.transform = transform;
}
return FieldTransform;
}());
function fieldTransformEquals(left, right) {
return (left.field.isEqual(right.field) &&
transformOperationEquals(left.transform, right.transform));
}
function fieldTransformsAreEqual(left, right) {
if (left === undefined && right === undefined) {
return true;
}
if (left && right) {
return arrayEquals(left, right, function (l, r) { return fieldTransformEquals(l, r); });
}
return false;
}
/** The result of successfully applying a mutation to the backend. */
var MutationResult = /** @class */ (function () {
function MutationResult(
/**
* The version at which the mutation was committed:
*
* - For most operations, this is the updateTime in the WriteResult.
* - For deletes, the commitTime of the WriteResponse (because deletes are
* not stored and have no updateTime).
*
* Note that these versions can be different: No-op writes will not change
* the updateTime even though the commitTime advances.
*/
version,
/**
* The resulting fields returned from the backend after a mutation
* containing field transforms has been committed. Contains one FieldValue
* for each FieldTransform that was in the mutation.
*
* Will be empty if the mutation did not contain any field transforms.
*/
transformResults) {
this.version = version;
this.transformResults = transformResults;
}
return MutationResult;
}());
/**
* Encodes a precondition for a mutation. This follows the model that the
* backend accepts with the special case of an explicit "empty" precondition
* (meaning no precondition).
*/
var Precondition = /** @class */ (function () {
function Precondition(updateTime, exists) {
this.updateTime = updateTime;
this.exists = exists;
}
/** Creates a new empty Precondition. */
Precondition.none = function () {
return new Precondition();
};
/** Creates a new Precondition with an exists flag. */
Precondition.exists = function (exists) {
return new Precondition(undefined, exists);
};
/** Creates a new Precondition based on a version that a document exists at. */
Precondition.updateTime = function (version) {
return new Precondition(version);
};
Object.defineProperty(Precondition.prototype, "isNone", {
/** Returns whether this Precondition is empty. */
get: function () {
return this.updateTime === undefined && this.exists === undefined;
},
enumerable: false,
configurable: true
});
Precondition.prototype.isEqual = function (other) {
return (this.exists === other.exists &&
(this.updateTime
? !!other.updateTime && this.updateTime.isEqual(other.updateTime)
: !other.updateTime));
};
return Precondition;
}());
/** Returns true if the preconditions is valid for the given document. */
function preconditionIsValidForDocument(precondition, document) {
if (precondition.updateTime !== undefined) {
return (document.isFoundDocument() &&
document.version.isEqual(precondition.updateTime));
}
else if (precondition.exists !== undefined) {
return precondition.exists === document.isFoundDocument();
}
else {
return true;
}
}
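/*
 * Illustrative sketch (editor's note): how preconditions evaluate against a
 * document that is known not to exist. `key` is assumed to be an existing
 * DocumentKey instance.
 *
 *   var missing = MutableDocument.newNoDocument(key, SnapshotVersion.min());
 *   preconditionIsValidForDocument(Precondition.none(), missing);          // true
 *   preconditionIsValidForDocument(Precondition.exists(false), missing);   // true
 *   preconditionIsValidForDocument(Precondition.exists(true), missing);    // false
 */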
/**
* A mutation describes a self-contained change to a document. Mutations can
* create, replace, delete, and update subsets of documents.
*
* Mutations not only act on the value of the document but also its version.
*
* For local mutations (mutations that haven't been committed yet), we preserve
* the existing version for Set and Patch mutations. For Delete mutations, we
* reset the version to 0.
*
* Here's the expected transition table.
*
* MUTATION           APPLIED TO            RESULTS IN
*
* SetMutation        Document(v3)          Document(v3)
* SetMutation        NoDocument(v3)        Document(v0)
* SetMutation        InvalidDocument(v0)   Document(v0)
* PatchMutation      Document(v3)          Document(v3)
* PatchMutation      NoDocument(v3)        NoDocument(v3)
* PatchMutation      InvalidDocument(v0)   UnknownDocument(v3)
* DeleteMutation     Document(v3)          NoDocument(v0)
* DeleteMutation     NoDocument(v3)        NoDocument(v0)
* DeleteMutation     InvalidDocument(v0)   NoDocument(v0)
*
* For acknowledged mutations, we use the updateTime of the WriteResponse as
* the resulting version for Set and Patch mutations. As deletes have no
* explicit update time, we use the commitTime of the WriteResponse for
* Delete mutations.
*
* If a mutation is acknowledged by the backend but fails the precondition check
* locally, we transition to an `UnknownDocument` and rely on Watch to send us
* the updated version.
*
* Field transforms are used only with Patch and Set Mutations. We use the
* `updateTransforms` message to store transforms, rather than the `transforms`
* messages.
*
* ## Subclassing Notes
*
* Every type of mutation needs to implement its own applyToRemoteDocument() and
* applyToLocalView() to implement the actual behavior of applying the mutation
* to some source document (see `applySetMutationToRemoteDocument()` for an
* example).
*/
var Mutation = /** @class */ (function () {
function Mutation() {
}
return Mutation;
}());
/**
* Applies this mutation to the given document for the purposes of computing a
* new remote document. If the input document doesn't match the expected state
* (e.g. it is invalid or outdated), the document type may transition to
* unknown.
*
* @param mutation - The mutation to apply.
* @param document - The document to mutate. The input document can be an
* invalid document if the client has no knowledge of the pre-mutation state
* of the document.
* @param mutationResult - The result of applying the mutation from the backend.
*/
function applyMutationToRemoteDocument(mutation, document, mutationResult) {
if (mutation instanceof SetMutation) {
applySetMutationToRemoteDocument(mutation, document, mutationResult);
}
else if (mutation instanceof PatchMutation) {
applyPatchMutationToRemoteDocument(mutation, document, mutationResult);
}
else {
applyDeleteMutationToRemoteDocument(mutation, document, mutationResult);
}
}
/**
* Applies this mutation to the given document for the purposes of computing
* the new local view of a document. If the input document doesn't match the
* expected state, the document is not modified.
*
* @param mutation - The mutation to apply.
* @param document - The document to mutate. The input document can be an
* invalid document if the client has no knowledge of the pre-mutation state
* of the document.
* @param localWriteTime - A timestamp indicating the local write time of the
* batch this mutation is a part of.
*/
function applyMutationToLocalView(mutation, document, localWriteTime) {
if (mutation instanceof SetMutation) {
applySetMutationToLocalView(mutation, document, localWriteTime);
}
else if (mutation instanceof PatchMutation) {
applyPatchMutationToLocalView(mutation, document, localWriteTime);
}
else {
applyDeleteMutationToLocalView(mutation, document);
}
}
/**
* If this mutation is not idempotent, returns the base value to persist with
* this mutation. If a base value is returned, the mutation is always applied
* to this base value, even if the document has already been updated.
*
* The base value is a sparse object that consists of only the document
* fields for which this mutation contains a non-idempotent transformation
* (e.g. a numeric increment). The provided value guarantees consistent
* behavior for non-idempotent transforms and allows us to return the same
* latency-compensated value even if the backend has already applied the
* mutation. The base value is null for idempotent mutations, as they can be
* re-played even if the backend has already applied them.
*
* @returns a base value to store along with the mutation, or null for
* idempotent mutations.
*/
function extractMutationBaseValue(mutation, document) {
var baseObject = null;
for (var _i = 0, _d = mutation.fieldTransforms; _i < _d.length; _i++) {
var fieldTransform = _d[_i];
var existingValue = document.data.field(fieldTransform.field);
var coercedValue = computeTransformOperationBaseValue(fieldTransform.transform, existingValue || null);
if (coercedValue != null) {
if (baseObject == null) {
baseObject = ObjectValue.empty();
}
baseObject.set(fieldTransform.field, coercedValue);
}
}
return baseObject ? baseObject : null;
}
function mutationEquals(left, right) {
if (left.type !== right.type) {
return false;
}
if (!left.key.isEqual(right.key)) {
return false;
}
if (!left.precondition.isEqual(right.precondition)) {
return false;
}
if (!fieldTransformsAreEqual(left.fieldTransforms, right.fieldTransforms)) {
return false;
}
if (left.type === 0 /* Set */) {
return left.value.isEqual(right.value);
}
if (left.type === 1 /* Patch */) {
return (left.data.isEqual(right.data) &&
left.fieldMask.isEqual(right.fieldMask));
}
return true;
}
/**
* Returns the version from the given document for use as the result of a
* mutation. Mutations are defined to return the version of the base document
* only if it is an existing document. Deleted and unknown documents have a
* post-mutation version of SnapshotVersion.min().
*/
function getPostMutationVersion(document) {
return document.isFoundDocument() ? document.version : SnapshotVersion.min();
}
/**
* A mutation that creates or replaces the document at the given key with the
* object value contents.
*/
var SetMutation = /** @class */ (function (_super) {
tslib.__extends(SetMutation, _super);
function SetMutation(key, value, precondition, fieldTransforms) {
if (fieldTransforms === void 0) { fieldTransforms = []; }
var _this = _super.call(this) || this;
_this.key = key;
_this.value = value;
_this.precondition = precondition;
_this.fieldTransforms = fieldTransforms;
_this.type = 0 /* Set */;
return _this;
}
return SetMutation;
}(Mutation));
function applySetMutationToRemoteDocument(mutation, document, mutationResult) {
// Unlike applySetMutationToLocalView, if we're applying a mutation to a
// remote document the server has accepted the mutation so the precondition
// must have held.
var newData = mutation.value.clone();
var transformResults = serverTransformResults(mutation.fieldTransforms, document, mutationResult.transformResults);
newData.setAll(transformResults);
document
.convertToFoundDocument(mutationResult.version, newData)
.setHasCommittedMutations();
}
function applySetMutationToLocalView(mutation, document, localWriteTime) {
if (!preconditionIsValidForDocument(mutation.precondition, document)) {
// The mutation failed to apply (e.g. a document ID created with add()
// caused a name collision).
return;
}
var newData = mutation.value.clone();
var transformResults = localTransformResults(mutation.fieldTransforms, localWriteTime, document);
newData.setAll(transformResults);
document
.convertToFoundDocument(getPostMutationVersion(document), newData)
.setHasLocalMutations();
}
/**
* A mutation that modifies fields of the document at the given key with the
* given values. The values are applied through a field mask:
*
* * When a field is in both the mask and the values, the corresponding field
* is updated.
* * When a field is in neither the mask nor the values, the corresponding
* field is unmodified.
* * When a field is in the mask but not in the values, the corresponding field
* is deleted.
* * When a field is not in the mask but is in the values, the values map is
* ignored.
*/
var PatchMutation = /** @class */ (function (_super) {
tslib.__extends(PatchMutation, _super);
function PatchMutation(key, data, fieldMask, precondition, fieldTransforms) {
if (fieldTransforms === void 0) { fieldTransforms = []; }
var _this = _super.call(this) || this;
_this.key = key;
_this.data = data;
_this.fieldMask = fieldMask;
_this.precondition = precondition;
_this.fieldTransforms = fieldTransforms;
_this.type = 1 /* Patch */;
return _this;
}
return PatchMutation;
}(Mutation));
function applyPatchMutationToRemoteDocument(mutation, document, mutationResult) {
if (!preconditionIsValidForDocument(mutation.precondition, document)) {
// Since the mutation was not rejected, we know that the precondition
// matched on the backend. We therefore must not have the expected version
// of the document in our cache and convert to an UnknownDocument with a
// known updateTime.
document.convertToUnknownDocument(mutationResult.version);
return;
}
var transformResults = serverTransformResults(mutation.fieldTransforms, document, mutationResult.transformResults);
var newData = document.data;
newData.setAll(getPatch(mutation));
newData.setAll(transformResults);
document
.convertToFoundDocument(mutationResult.version, newData)
.setHasCommittedMutations();
}
function applyPatchMutationToLocalView(mutation, document, localWriteTime) {
if (!preconditionIsValidForDocument(mutation.precondition, document)) {
return;
}
var transformResults = localTransformResults(mutation.fieldTransforms, localWriteTime, document);
var newData = document.data;
newData.setAll(getPatch(mutation));
newData.setAll(transformResults);
document
.convertToFoundDocument(getPostMutationVersion(document), newData)
.setHasLocalMutations();
}
/**
* Returns a FieldPath/Value map with the content of the PatchMutation.
*/
function getPatch(mutation) {
var result = new Map();
mutation.fieldMask.fields.forEach(function (fieldPath) {
if (!fieldPath.isEmpty()) {
var newValue = mutation.data.field(fieldPath);
result.set(fieldPath, newValue);
}
});
return result;
}
/**
* Creates a list of "transform results" (a transform result is a field value
* representing the result of applying a transform) for use after a mutation
* containing transforms has been acknowledged by the server.
*
* @param fieldTransforms - The field transforms to apply the result to.
* @param mutableDocument - The current state of the document after applying all
* previous mutations.
* @param serverTransformResults - The transform results received by the server.
* @returns The transform results list.
*/
function serverTransformResults(fieldTransforms, mutableDocument, serverTransformResults) {
var transformResults = new Map();
hardAssert(fieldTransforms.length === serverTransformResults.length);
for (var i = 0; i < serverTransformResults.length; i++) {
var fieldTransform = fieldTransforms[i];
var transform = fieldTransform.transform;
var previousValue = mutableDocument.data.field(fieldTransform.field);
transformResults.set(fieldTransform.field, applyTransformOperationToRemoteDocument(transform, previousValue, serverTransformResults[i]));
}
return transformResults;
}
/**
* Creates a list of "transform results" (a transform result is a field value
* representing the result of applying a transform) for use when applying a
* transform locally.
*
* @param fieldTransforms - The field transforms to apply the result to.
* @param localWriteTime - The local time of the mutation (used to
* generate ServerTimestampValues).
* @param mutableDocument - The current state of the document after applying all
* previous mutations.
* @returns The transform results list.
*/
function localTransformResults(fieldTransforms, localWriteTime, mutableDocument) {
var transformResults = new Map();
for (var _i = 0, fieldTransforms_1 = fieldTransforms; _i < fieldTransforms_1.length; _i++) {
var fieldTransform = fieldTransforms_1[_i];
var transform = fieldTransform.transform;
var previousValue = mutableDocument.data.field(fieldTransform.field);
transformResults.set(fieldTransform.field, applyTransformOperationToLocalView(transform, previousValue, localWriteTime));
}
return transformResults;
}
/** A mutation that deletes the document at the given key. */
var DeleteMutation = /** @class */ (function (_super) {
tslib.__extends(DeleteMutation, _super);
function DeleteMutation(key, precondition) {
var _this = _super.call(this) || this;
_this.key = key;
_this.precondition = precondition;
_this.type = 2 /* Delete */;
_this.fieldTransforms = [];
return _this;
}
return DeleteMutation;
}(Mutation));
function applyDeleteMutationToRemoteDocument(mutation, document, mutationResult) {
// Unlike applyToLocalView, if we're applying a mutation to a remote
// document the server has accepted the mutation so the precondition must
// have held.
document
.convertToNoDocument(mutationResult.version)
.setHasCommittedMutations();
}
function applyDeleteMutationToLocalView(mutation, document) {
if (preconditionIsValidForDocument(mutation.precondition, document)) {
// We don't call `setHasLocalMutations()` since we want to be backwards
// compatible with the existing SDK behavior.
document.convertToNoDocument(SnapshotVersion.min());
}
}
/**
* A mutation that verifies the existence of the document at the given key with
* the provided precondition.
*
* The `verify` operation is only used in Transactions, and this class serves
* primarily to facilitate serialization into protos.
*/
var VerifyMutation = /** @class */ (function (_super) {
tslib.__extends(VerifyMutation, _super);
function VerifyMutation(key, precondition) {
var _this = _super.call(this) || this;
_this.key = key;
_this.precondition = precondition;
_this.type = 3 /* Verify */;
_this.fieldTransforms = [];
return _this;
}
return VerifyMutation;
}(Mutation));
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A batch of mutations that will be sent as one unit to the backend.
*/
var MutationBatch = /** @class */ (function () {
/**
* @param batchId - The unique ID of this mutation batch.
* @param localWriteTime - The original write time of this mutation.
* @param baseMutations - Mutations that are used to populate the base
* values when this mutation is applied locally. This can be used to locally
* overwrite values that are persisted in the remote document cache. Base
* mutations are never sent to the backend.
* @param mutations - The user-provided mutations in this mutation batch.
* User-provided mutations are applied both locally and remotely on the
* backend.
*/
function MutationBatch(batchId, localWriteTime, baseMutations, mutations) {
this.batchId = batchId;
this.localWriteTime = localWriteTime;
this.baseMutations = baseMutations;
this.mutations = mutations;
}
/**
* Applies all the mutations in this MutationBatch to the specified document
* to compute the state of the remote document.
*
* @param document - The document to apply mutations to.
* @param batchResult - The result of applying the MutationBatch to the
* backend.
*/
MutationBatch.prototype.applyToRemoteDocument = function (document, batchResult) {
var mutationResults = batchResult.mutationResults;
for (var i = 0; i < this.mutations.length; i++) {
var mutation = this.mutations[i];
if (mutation.key.isEqual(document.key)) {
var mutationResult = mutationResults[i];
applyMutationToRemoteDocument(mutation, document, mutationResult);
}
}
};
/**
* Computes the local view of a document given all the mutations in this
* batch.
*
* @param document - The document to apply mutations to.
*/
MutationBatch.prototype.applyToLocalView = function (document) {
// First, apply the base state. This allows us to apply non-idempotent
// transforms against a consistent set of values.
for (var _i = 0, _d = this.baseMutations; _i < _d.length; _i++) {
var mutation = _d[_i];
if (mutation.key.isEqual(document.key)) {
applyMutationToLocalView(mutation, document, this.localWriteTime);
}
}
// Second, apply all user-provided mutations.
for (var _e = 0, _f = this.mutations; _e < _f.length; _e++) {
var mutation = _f[_e];
if (mutation.key.isEqual(document.key)) {
applyMutationToLocalView(mutation, document, this.localWriteTime);
}
}
};
/**
* Computes the local view for all provided documents given the mutations in
* this batch.
*/
MutationBatch.prototype.applyToLocalDocumentSet = function (documentMap) {
var _this = this;
// TODO(mrschmidt): This implementation is O(n^2). If we apply the mutations
// directly (as done in `applyToLocalView()`), we can reduce the complexity
// to O(n).
this.mutations.forEach(function (m) {
var document = documentMap.get(m.key);
// TODO(mutabledocuments): This method should take a MutableDocumentMap
// and we should remove this cast.
var mutableDocument = document;
_this.applyToLocalView(mutableDocument);
if (!document.isValidDocument()) {
mutableDocument.convertToNoDocument(SnapshotVersion.min());
}
});
};
MutationBatch.prototype.keys = function () {
return this.mutations.reduce(function (keys, m) { return keys.add(m.key); }, documentKeySet());
};
MutationBatch.prototype.isEqual = function (other) {
return (this.batchId === other.batchId &&
arrayEquals(this.mutations, other.mutations, function (l, r) { return mutationEquals(l, r); }) &&
arrayEquals(this.baseMutations, other.baseMutations, function (l, r) { return mutationEquals(l, r); }));
};
return MutationBatch;
}());
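/*
 * Illustrative sketch (editor's note, assumptions flagged inline): a trivial
 * batch built just to show `keys()`. `key` is assumed to be an existing
 * DocumentKey; the localWriteTime is irrelevant for key extraction, so null is
 * passed here purely as a placeholder (a real batch uses a Timestamp).
 *
 *   var batch = new MutationBatch(
 *     1,    // batchId
 *     null, // localWriteTime placeholder
 *     [],   // baseMutations
 *     [new DeleteMutation(key, Precondition.none())]);
 *   batch.keys().has(key); // true
 */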
/** The result of applying a mutation batch to the backend. */
var MutationBatchResult = /** @class */ (function () {
function MutationBatchResult(batch, commitVersion, mutationResults,
/**
* A pre-computed mapping from each mutated document to the resulting
* version.
*/
docVersions) {
this.batch = batch;
this.commitVersion = commitVersion;
this.mutationResults = mutationResults;
this.docVersions = docVersions;
}
/**
* Creates a new MutationBatchResult for the given batch and results. There
* must be one result for each mutation in the batch. This static factory
* caches a document=>version mapping (docVersions).
*/
MutationBatchResult.from = function (batch, commitVersion, results) {
hardAssert(batch.mutations.length === results.length);
var versionMap = documentVersionMap();
var mutations = batch.mutations;
for (var i = 0; i < mutations.length; i++) {
versionMap = versionMap.insert(mutations[i].key, results[i].version);
}
return new MutationBatchResult(batch, commitVersion, results, versionMap);
};
return MutationBatchResult;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var ExistenceFilter = /** @class */ (function () {
// TODO(b/33078163): just use simplest form of existence filter for now
function ExistenceFilter(count) {
this.count = count;
}
return ExistenceFilter;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Error Codes describing the different ways GRPC can fail. These are copied
* directly from GRPC's sources here:
*
* https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
*
* Important! The names of these identifiers matter because the string forms
* are used for reverse lookups from the webchannel stream. Do NOT change the
* names of these identifiers or change this into a const enum.
*/
var RpcCode;
(function (RpcCode) {
RpcCode[RpcCode["OK"] = 0] = "OK";
RpcCode[RpcCode["CANCELLED"] = 1] = "CANCELLED";
RpcCode[RpcCode["UNKNOWN"] = 2] = "UNKNOWN";
RpcCode[RpcCode["INVALID_ARGUMENT"] = 3] = "INVALID_ARGUMENT";
RpcCode[RpcCode["DEADLINE_EXCEEDED"] = 4] = "DEADLINE_EXCEEDED";
RpcCode[RpcCode["NOT_FOUND"] = 5] = "NOT_FOUND";
RpcCode[RpcCode["ALREADY_EXISTS"] = 6] = "ALREADY_EXISTS";
RpcCode[RpcCode["PERMISSION_DENIED"] = 7] = "PERMISSION_DENIED";
RpcCode[RpcCode["UNAUTHENTICATED"] = 16] = "UNAUTHENTICATED";
RpcCode[RpcCode["RESOURCE_EXHAUSTED"] = 8] = "RESOURCE_EXHAUSTED";
RpcCode[RpcCode["FAILED_PRECONDITION"] = 9] = "FAILED_PRECONDITION";
RpcCode[RpcCode["ABORTED"] = 10] = "ABORTED";
RpcCode[RpcCode["OUT_OF_RANGE"] = 11] = "OUT_OF_RANGE";
RpcCode[RpcCode["UNIMPLEMENTED"] = 12] = "UNIMPLEMENTED";
RpcCode[RpcCode["INTERNAL"] = 13] = "INTERNAL";
RpcCode[RpcCode["UNAVAILABLE"] = 14] = "UNAVAILABLE";
RpcCode[RpcCode["DATA_LOSS"] = 15] = "DATA_LOSS";
})(RpcCode || (RpcCode = {}));
/**
* Determines whether an error code represents a permanent error when received
* in response to a non-write operation.
*
* See isPermanentWriteError for classifying write errors.
*/
function isPermanentError(code) {
switch (code) {
case Code.OK:
return fail();
case Code.CANCELLED:
case Code.UNKNOWN:
case Code.DEADLINE_EXCEEDED:
case Code.RESOURCE_EXHAUSTED:
case Code.INTERNAL:
case Code.UNAVAILABLE:
// Unauthenticated means something went wrong with our token and we need
// to retry with new credentials which will happen automatically.
case Code.UNAUTHENTICATED:
return false;
case Code.INVALID_ARGUMENT:
case Code.NOT_FOUND:
case Code.ALREADY_EXISTS:
case Code.PERMISSION_DENIED:
case Code.FAILED_PRECONDITION:
// Aborted might be retried in some scenarios, but that is dependent on
// the context and should be handled individually by the calling code.
// See https://cloud.google.com/apis/design/errors.
case Code.ABORTED:
case Code.OUT_OF_RANGE:
case Code.UNIMPLEMENTED:
case Code.DATA_LOSS:
return true;
default:
return fail();
}
}
/**
* Determines whether an error code represents a permanent error when received
* in response to a write operation.
*
* Write operations must be handled specially because as of b/119437764, ABORTED
* errors on the write stream should be retried too (even though ABORTED errors
* are not generally retryable).
*
* Note that during the initial handshake on the write stream an ABORTED error
* signals that we should discard our stream token (i.e. it is permanent). This
* means a handshake error should be classified with isPermanentError, above.
*/
function isPermanentWriteError(code) {
return isPermanentError(code) && code !== Code.ABORTED;
}
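/*
 * Illustrative sketch (editor's note): ABORTED is the one code the two
 * classifiers disagree on, which is why the write stream retries it while
 * other streams treat it as permanent.
 *
 *   isPermanentError(Code.UNAVAILABLE);            // false (retryable)
 *   isPermanentError(Code.ABORTED);                // true
 *   isPermanentWriteError(Code.ABORTED);           // false (writes retry ABORTED)
 *   isPermanentWriteError(Code.PERMISSION_DENIED); // true
 */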
/**
* Maps an error Code from a GRPC status code number, like 0, 1, or 14. These
* are not the same as HTTP status codes.
*
* @returns The Code equivalent to the given GRPC status code. Fails if there
* is no match.
*/
function mapCodeFromRpcCode(code) {
if (code === undefined) {
// This shouldn't normally happen, but in certain error cases (like trying
// to send invalid proto messages) we may get an error with no GRPC code.
logError('GRPC error has no .code');
return Code.UNKNOWN;
}
switch (code) {
case RpcCode.OK:
return Code.OK;
case RpcCode.CANCELLED:
return Code.CANCELLED;
case RpcCode.UNKNOWN:
return Code.UNKNOWN;
case RpcCode.DEADLINE_EXCEEDED:
return Code.DEADLINE_EXCEEDED;
case RpcCode.RESOURCE_EXHAUSTED:
return Code.RESOURCE_EXHAUSTED;
case RpcCode.INTERNAL:
return Code.INTERNAL;
case RpcCode.UNAVAILABLE:
return Code.UNAVAILABLE;
case RpcCode.UNAUTHENTICATED:
return Code.UNAUTHENTICATED;
case RpcCode.INVALID_ARGUMENT:
return Code.INVALID_ARGUMENT;
case RpcCode.NOT_FOUND:
return Code.NOT_FOUND;
case RpcCode.ALREADY_EXISTS:
return Code.ALREADY_EXISTS;
case RpcCode.PERMISSION_DENIED:
return Code.PERMISSION_DENIED;
case RpcCode.FAILED_PRECONDITION:
return Code.FAILED_PRECONDITION;
case RpcCode.ABORTED:
return Code.ABORTED;
case RpcCode.OUT_OF_RANGE:
return Code.OUT_OF_RANGE;
case RpcCode.UNIMPLEMENTED:
return Code.UNIMPLEMENTED;
case RpcCode.DATA_LOSS:
return Code.DATA_LOSS;
default:
return fail();
}
}
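/*
 * Illustrative sketch (editor's note):
 *
 *   mapCodeFromRpcCode(RpcCode.UNAVAILABLE);   // Code.UNAVAILABLE
 *   mapCodeFromRpcCode(undefined);             // Code.UNKNOWN (and logs an error)
 */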
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An event from the RemoteStore. It is split into targetChanges (changes to the
* state or the set of documents in our watched targets) and documentUpdates
* (changes to the actual documents).
*/
var RemoteEvent = /** @class */ (function () {
function RemoteEvent(
/**
* The snapshot version this event brings us up to, or MIN if not set.
*/
snapshotVersion,
/**
* A map from target to changes to the target. See TargetChange.
*/
targetChanges,
/**
* A set of targets that are known to be inconsistent. Listens for these
* targets should be re-established without resume tokens.
*/
targetMismatches,
/**
* A map of the documents that have changed or been deleted, along with each
* doc's new value (if not deleted).
*/
documentUpdates,
/**
* The set of document keys whose updates are due only to limbo resolution targets.
*/
resolvedLimboDocuments) {
this.snapshotVersion = snapshotVersion;
this.targetChanges = targetChanges;
this.targetMismatches = targetMismatches;
this.documentUpdates = documentUpdates;
this.resolvedLimboDocuments = resolvedLimboDocuments;
}
/**
* HACK: Views require RemoteEvents in order to determine whether the view is
* CURRENT, but secondary tabs don't receive remote events. So this method is
* used to create a synthesized RemoteEvent that can be used to apply a
* CURRENT status change to a View, for queries executed in a different tab.
*/
// PORTING NOTE: Multi-tab only
RemoteEvent.createSynthesizedRemoteEventForCurrentChange = function (targetId, current) {
var targetChanges = new Map();
targetChanges.set(targetId, TargetChange.createSynthesizedTargetChangeForCurrentChange(targetId, current));
return new RemoteEvent(SnapshotVersion.min(), targetChanges, targetIdSet(), mutableDocumentMap(), documentKeySet());
};
return RemoteEvent;
}());
/**
* A TargetChange specifies the set of changes for a specific target as part of
* a RemoteEvent. These changes track which documents are added, modified or
* removed, as well as the target's resume token and whether the target is
* marked CURRENT.
* The actual changes *to* documents are not part of the TargetChange since
* documents may be part of multiple targets.
*/
var TargetChange = /** @class */ (function () {
function TargetChange(
/**
* An opaque, server-assigned token that allows watching a query to be resumed
* after disconnecting without retransmitting all the data that matches the
* query. The resume token essentially identifies a point in time from which
* the server should resume sending results.
*/
resumeToken,
/**
* The "current" (synced) status of this target. Note that "current"
* has special meaning in the RPC protocol that implies that a target is
* both up-to-date and consistent with the rest of the watch stream.
*/
current,
/**
* The set of documents that were newly assigned to this target as part of
* this remote event.
*/
addedDocuments,
/**
* The set of documents that were already assigned to this target but received
* an update during this remote event.
*/
modifiedDocuments,
/**
* The set of documents that were removed from this target as part of this
* remote event.
*/
removedDocuments) {
this.resumeToken = resumeToken;
this.current = current;
this.addedDocuments = addedDocuments;
this.modifiedDocuments = modifiedDocuments;
this.removedDocuments = removedDocuments;
}
/**
* This method is used to create a synthesized TargetChange that can be used to
* apply a CURRENT status change to a View (for queries executed in a different
* tab) or for new queries (to raise snapshots with correct CURRENT status).
*/
TargetChange.createSynthesizedTargetChangeForCurrentChange = function (targetId, current) {
return new TargetChange(ByteString.EMPTY_BYTE_STRING, current, documentKeySet(), documentKeySet(), documentKeySet());
};
return TargetChange;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents a changed document and a list of target ids to which this change
* applies.
*
* If the document has been deleted, a NoDocument will be provided.
*/
var DocumentWatchChange = /** @class */ (function () {
function DocumentWatchChange(
/** The new document applies to all of these targets. */
updatedTargetIds,
/** The new document is removed from all of these targets. */
removedTargetIds,
/** The key of the document for this change. */
key,
/**
* The new document or NoDocument if it was deleted. Is null if the
* document went out of view without the server sending a new document.
*/
newDoc) {
this.updatedTargetIds = updatedTargetIds;
this.removedTargetIds = removedTargetIds;
this.key = key;
this.newDoc = newDoc;
}
return DocumentWatchChange;
}());
var ExistenceFilterChange = /** @class */ (function () {
function ExistenceFilterChange(targetId, existenceFilter) {
this.targetId = targetId;
this.existenceFilter = existenceFilter;
}
return ExistenceFilterChange;
}());
var WatchTargetChange = /** @class */ (function () {
function WatchTargetChange(
/** What kind of change occurred to the watch target. */
state,
/** The target IDs that were added/removed/set. */
targetIds,
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/
resumeToken,
/** An RPC error indicating why the watch failed. */
cause) {
if (resumeToken === void 0) { resumeToken = ByteString.EMPTY_BYTE_STRING; }
if (cause === void 0) { cause = null; }
this.state = state;
this.targetIds = targetIds;
this.resumeToken = resumeToken;
this.cause = cause;
}
return WatchTargetChange;
}());
/** Tracks the internal state of a Watch target. */
var TargetState = /** @class */ (function () {
function TargetState() {
/**
* The number of pending responses (adds or removes) that we are waiting on.
* We only consider targets active that have no pending responses.
*/
this.pendingResponses = 0;
/**
* Keeps track of the document changes since the last raised snapshot.
*
* These changes are continuously updated as we receive document updates and
* always reflect the current set of changes against the last issued snapshot.
*/
this.documentChanges = snapshotChangesMap();
/** See public getters for explanations of these fields. */
this._resumeToken = ByteString.EMPTY_BYTE_STRING;
this._current = false;
/**
* Whether this target state should be included in the next snapshot. We
* initialize to true so that newly-added targets are included in the next
* RemoteEvent.
*/
this._hasPendingChanges = true;
}
Object.defineProperty(TargetState.prototype, "current", {
/**
* Whether this target has been marked 'current'.
*
* 'Current' has special meaning in the RPC protocol: It implies that the
* Watch backend has sent us all changes up to the point at which the target
* was added and that the target is consistent with the rest of the watch
* stream.
*/
get: function () {
return this._current;
},
enumerable: false,
configurable: true
});
Object.defineProperty(TargetState.prototype, "resumeToken", {
/** The last resume token sent to us for this target. */
get: function () {
return this._resumeToken;
},
enumerable: false,
configurable: true
});
Object.defineProperty(TargetState.prototype, "isPending", {
/** Whether this target has pending target adds or target removes. */
get: function () {
return this.pendingResponses !== 0;
},
enumerable: false,
configurable: true
});
Object.defineProperty(TargetState.prototype, "hasPendingChanges", {
/** Whether we have modified any state that should trigger a snapshot. */
get: function () {
return this._hasPendingChanges;
},
enumerable: false,
configurable: true
});
/**
* Applies the resume token to the TargetChange, but only when it has a new
* value. Empty resumeTokens are discarded.
*/
TargetState.prototype.updateResumeToken = function (resumeToken) {
if (resumeToken.approximateByteSize() > 0) {
this._hasPendingChanges = true;
this._resumeToken = resumeToken;
}
};
/**
* Creates a target change from the current set of changes.
*
* To reset the document changes after raising this snapshot, call
* `clearPendingChanges()`.
*/
TargetState.prototype.toTargetChange = function () {
var addedDocuments = documentKeySet();
var modifiedDocuments = documentKeySet();
var removedDocuments = documentKeySet();
this.documentChanges.forEach(function (key, changeType) {
switch (changeType) {
case 0 /* Added */:
addedDocuments = addedDocuments.add(key);
break;
case 2 /* Modified */:
modifiedDocuments = modifiedDocuments.add(key);
break;
case 1 /* Removed */:
removedDocuments = removedDocuments.add(key);
break;
default:
fail();
}
});
return new TargetChange(this._resumeToken, this._current, addedDocuments, modifiedDocuments, removedDocuments);
};
/**
* Resets the document changes and sets `hasPendingChanges` to false.
*/
TargetState.prototype.clearPendingChanges = function () {
this._hasPendingChanges = false;
this.documentChanges = snapshotChangesMap();
};
TargetState.prototype.addDocumentChange = function (key, changeType) {
this._hasPendingChanges = true;
this.documentChanges = this.documentChanges.insert(key, changeType);
};
TargetState.prototype.removeDocumentChange = function (key) {
this._hasPendingChanges = true;
this.documentChanges = this.documentChanges.remove(key);
};
TargetState.prototype.recordPendingTargetRequest = function () {
this.pendingResponses += 1;
};
TargetState.prototype.recordTargetResponse = function () {
this.pendingResponses -= 1;
};
TargetState.prototype.markCurrent = function () {
this._hasPendingChanges = true;
this._current = true;
};
return TargetState;
}());
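/*
 * Illustrative sketch (editor's note): the lifecycle of a TargetState between
 * two raised snapshots. `key` is assumed to be an existing DocumentKey; the
 * numeric change type mirrors the inlined enum annotations used in this file
 * (0 = Added).
 *
 *   var state = new TargetState();
 *   state.addDocumentChange(key, 0);    // ChangeType.Added
 *   state.markCurrent();
 *   state.hasPendingChanges;            // true
 *   var change = state.toTargetChange();
 *   change.addedDocuments.has(key);     // true
 *   state.clearPendingChanges();
 *   state.hasPendingChanges;            // false
 */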
var LOG_TAG$f = 'WatchChangeAggregator';
/**
* A helper class to accumulate watch changes into a RemoteEvent.
*/
var WatchChangeAggregator = /** @class */ (function () {
function WatchChangeAggregator(metadataProvider) {
this.metadataProvider = metadataProvider;
/** The internal state of all tracked targets. */
this.targetStates = new Map();
/** Keeps track of the documents to update since the last raised snapshot. */
this.pendingDocumentUpdates = mutableDocumentMap();
/** A mapping of document keys to their set of target IDs. */
this.pendingDocumentTargetMapping = documentTargetMap();
/**
* A list of targets with existence filter mismatches. These targets are
* known to be inconsistent and their listens need to be re-established by
* RemoteStore.
*/
this.pendingTargetResets = new SortedSet(primitiveComparator);
}
/**
* Processes and adds the DocumentWatchChange to the current set of changes.
*/
WatchChangeAggregator.prototype.handleDocumentChange = function (docChange) {
for (var _i = 0, _d = docChange.updatedTargetIds; _i < _d.length; _i++) {
var targetId = _d[_i];
if (docChange.newDoc && docChange.newDoc.isFoundDocument()) {
this.addDocumentToTarget(targetId, docChange.newDoc);
}
else {
this.removeDocumentFromTarget(targetId, docChange.key, docChange.newDoc);
}
}
for (var _e = 0, _f = docChange.removedTargetIds; _e < _f.length; _e++) {
var targetId = _f[_e];
this.removeDocumentFromTarget(targetId, docChange.key, docChange.newDoc);
}
};
/** Processes and adds the WatchTargetChange to the current set of changes. */
WatchChangeAggregator.prototype.handleTargetChange = function (targetChange) {
var _this = this;
this.forEachTarget(targetChange, function (targetId) {
var targetState = _this.ensureTargetState(targetId);
switch (targetChange.state) {
case 0 /* NoChange */:
if (_this.isActiveTarget(targetId)) {
targetState.updateResumeToken(targetChange.resumeToken);
}
break;
case 1 /* Added */:
// We need to decrement the number of pending acks needed from watch
// for this targetId.
targetState.recordTargetResponse();
if (!targetState.isPending) {
// We have a freshly added target, so we need to reset any state
// that we had previously. This can happen e.g. when we remove and
// re-add a target due to an existence filter mismatch.
targetState.clearPendingChanges();
}
targetState.updateResumeToken(targetChange.resumeToken);
break;
case 2 /* Removed */:
// We need to keep track of removed targets so we can post-filter and
// remove any target changes.
// We need to decrement the number of pending acks needed from watch
// for this targetId.
targetState.recordTargetResponse();
if (!targetState.isPending) {
_this.removeTarget(targetId);
}
break;
case 3 /* Current */:
if (_this.isActiveTarget(targetId)) {
targetState.markCurrent();
targetState.updateResumeToken(targetChange.resumeToken);
}
break;
case 4 /* Reset */:
if (_this.isActiveTarget(targetId)) {
// Reset the target and synthesizes removes for all existing
// documents. The backend will re-add any documents that still
// match the target before it sends the next global snapshot.
_this.resetTarget(targetId);
targetState.updateResumeToken(targetChange.resumeToken);
}
break;
default:
fail();
}
});
};
/**
* Iterates over all targetIds that the watch change applies to: either the
* targetIds explicitly listed in the change or the targetIds of all currently
* active targets.
*/
WatchChangeAggregator.prototype.forEachTarget = function (targetChange, fn) {
var _this = this;
if (targetChange.targetIds.length > 0) {
targetChange.targetIds.forEach(fn);
}
else {
this.targetStates.forEach(function (_, targetId) {
if (_this.isActiveTarget(targetId)) {
fn(targetId);
}
});
}
};
/**
* Handles existence filters and synthesizes deletes for filter mismatches.
* Targets that are invalidated by filter mismatches are added to
* `pendingTargetResets`.
*/
WatchChangeAggregator.prototype.handleExistenceFilter = function (watchChange) {
var targetId = watchChange.targetId;
var expectedCount = watchChange.existenceFilter.count;
var targetData = this.targetDataForActiveTarget(targetId);
if (targetData) {
var target = targetData.target;
if (isDocumentTarget(target)) {
if (expectedCount === 0) {
// The existence filter told us the document does not exist, so we
// apply a deleted document to our updates. Without this deleted
// document there might be
// another query that will raise this document as part of a snapshot
// until it is resolved, essentially exposing inconsistency between
// queries.
var key = new DocumentKey(target.path);
this.removeDocumentFromTarget(targetId, key, MutableDocument.newNoDocument(key, SnapshotVersion.min()));
}
else {
hardAssert(expectedCount === 1);
}
}
else {
var currentSize = this.getCurrentDocumentCountForTarget(targetId);
if (currentSize !== expectedCount) {
// Existence filter mismatch: We reset the mapping and raise a new
// snapshot with `isFromCache:true`.
this.resetTarget(targetId);
this.pendingTargetResets = this.pendingTargetResets.add(targetId);
}
}
}
};
/**
* Converts the currently accumulated state into a remote event at the
* provided snapshot version. Resets the accumulated changes before returning.
*/
WatchChangeAggregator.prototype.createRemoteEvent = function (snapshotVersion) {
var _this = this;
var targetChanges = new Map();
this.targetStates.forEach(function (targetState, targetId) {
var targetData = _this.targetDataForActiveTarget(targetId);
if (targetData) {
if (targetState.current && isDocumentTarget(targetData.target)) {
// Document queries for documents that don't exist can produce an empty
// result set. To update our local cache, we synthesize a document
// delete if we have not previously received the document. This
// resolves the limbo state of the document, removing it from
// limboDocumentRefs.
//
// TODO(dimond): Ideally we would have an explicit lookup target
// instead, resulting in an explicit delete message, and we could
// remove this special logic.
var key = new DocumentKey(targetData.target.path);
if (_this.pendingDocumentUpdates.get(key) === null &&
!_this.targetContainsDocument(targetId, key)) {
_this.removeDocumentFromTarget(targetId, key, MutableDocument.newNoDocument(key, snapshotVersion));
}
}
if (targetState.hasPendingChanges) {
targetChanges.set(targetId, targetState.toTargetChange());
targetState.clearPendingChanges();
}
}
});
var resolvedLimboDocuments = documentKeySet();
// We extract the set of limbo-only document updates as the GC logic
// special-cases documents that do not appear in the target cache.
//
// TODO(gsoltis): Expand on this comment once GC is available in the JS
// client.
this.pendingDocumentTargetMapping.forEach(function (key, targets) {
var isOnlyLimboTarget = true;
targets.forEachWhile(function (targetId) {
var targetData = _this.targetDataForActiveTarget(targetId);
if (targetData &&
targetData.purpose !== 2 /* LimboResolution */) {
isOnlyLimboTarget = false;
return false;
}
return true;
});
if (isOnlyLimboTarget) {
resolvedLimboDocuments = resolvedLimboDocuments.add(key);
}
});
var remoteEvent = new RemoteEvent(snapshotVersion, targetChanges, this.pendingTargetResets, this.pendingDocumentUpdates, resolvedLimboDocuments);
this.pendingDocumentUpdates = mutableDocumentMap();
this.pendingDocumentTargetMapping = documentTargetMap();
this.pendingTargetResets = new SortedSet(primitiveComparator);
return remoteEvent;
};
/**
* Adds the provided document to the internal list of document updates and
* its document key to the given target's mapping.
*/
// Visible for testing.
WatchChangeAggregator.prototype.addDocumentToTarget = function (targetId, document) {
if (!this.isActiveTarget(targetId)) {
return;
}
var changeType = this.targetContainsDocument(targetId, document.key)
? 2 /* Modified */
: 0 /* Added */;
var targetState = this.ensureTargetState(targetId);
targetState.addDocumentChange(document.key, changeType);
this.pendingDocumentUpdates = this.pendingDocumentUpdates.insert(document.key, document);
this.pendingDocumentTargetMapping = this.pendingDocumentTargetMapping.insert(document.key, this.ensureDocumentTargetMapping(document.key).add(targetId));
};
/**
* Removes the provided document from the target mapping. If the
* document no longer matches the target, but the document's state is still
* known (e.g. we know that the document was deleted or we received the change
* that caused the filter mismatch), the new document can be provided
* to update the remote document cache.
*/
// Visible for testing.
WatchChangeAggregator.prototype.removeDocumentFromTarget = function (targetId, key, updatedDocument) {
if (!this.isActiveTarget(targetId)) {
return;
}
var targetState = this.ensureTargetState(targetId);
if (this.targetContainsDocument(targetId, key)) {
targetState.addDocumentChange(key, 1 /* Removed */);
}
else {
// The document may have entered and left the target before we raised a
// snapshot, so we can just ignore the change.
targetState.removeDocumentChange(key);
}
this.pendingDocumentTargetMapping = this.pendingDocumentTargetMapping.insert(key, this.ensureDocumentTargetMapping(key).delete(targetId));
if (updatedDocument) {
this.pendingDocumentUpdates = this.pendingDocumentUpdates.insert(key, updatedDocument);
}
};
WatchChangeAggregator.prototype.removeTarget = function (targetId) {
this.targetStates.delete(targetId);
};
/**
* Returns the current count of documents in the target. This includes both
* the number of documents that the LocalStore considers to be part of the
* target as well as any accumulated changes.
*/
WatchChangeAggregator.prototype.getCurrentDocumentCountForTarget = function (targetId) {
var targetState = this.ensureTargetState(targetId);
var targetChange = targetState.toTargetChange();
return (this.metadataProvider.getRemoteKeysForTarget(targetId).size +
targetChange.addedDocuments.size -
targetChange.removedDocuments.size);
};
/**
* Increment the number of acks needed from watch before we can consider the
* server to be 'in-sync' with the client's active targets.
*/
WatchChangeAggregator.prototype.recordPendingTargetRequest = function (targetId) {
// For each request we get, we need to record that we expect a response for it.
var targetState = this.ensureTargetState(targetId);
targetState.recordPendingTargetRequest();
};
WatchChangeAggregator.prototype.ensureTargetState = function (targetId) {
var result = this.targetStates.get(targetId);
if (!result) {
result = new TargetState();
this.targetStates.set(targetId, result);
}
return result;
};
WatchChangeAggregator.prototype.ensureDocumentTargetMapping = function (key) {
var targetMapping = this.pendingDocumentTargetMapping.get(key);
if (!targetMapping) {
targetMapping = new SortedSet(primitiveComparator);
this.pendingDocumentTargetMapping = this.pendingDocumentTargetMapping.insert(key, targetMapping);
}
return targetMapping;
};
/**
* Verifies that the user is still interested in this target (by calling
* `getTargetDataForTarget()`) and that we are not waiting for pending ADDs
* from watch.
*/
WatchChangeAggregator.prototype.isActiveTarget = function (targetId) {
var targetActive = this.targetDataForActiveTarget(targetId) !== null;
if (!targetActive) {
logDebug(LOG_TAG$f, 'Detected inactive target', targetId);
}
return targetActive;
};
/**
* Returns the TargetData for an active target (i.e. a target that the user
* is still interested in and that has no outstanding target change requests).
*/
WatchChangeAggregator.prototype.targetDataForActiveTarget = function (targetId) {
var targetState = this.targetStates.get(targetId);
return targetState && targetState.isPending
? null
: this.metadataProvider.getTargetDataForTarget(targetId);
};
/**
* Resets the state of a Watch target to its initial state (e.g. sets
* 'current' to false, clears the resume token and removes its target mapping
* from all documents).
*/
WatchChangeAggregator.prototype.resetTarget = function (targetId) {
var _this = this;
this.targetStates.set(targetId, new TargetState());
// Trigger removal for any documents currently mapped to this target.
// These removals will be part of the initial snapshot if Watch does not
// resend these documents.
var existingKeys = this.metadataProvider.getRemoteKeysForTarget(targetId);
existingKeys.forEach(function (key) {
_this.removeDocumentFromTarget(targetId, key, /*updatedDocument=*/ null);
});
};
/**
* Returns whether the LocalStore considers the document to be part of the
* specified target.
*/
WatchChangeAggregator.prototype.targetContainsDocument = function (targetId, key) {
var existingKeys = this.metadataProvider.getRemoteKeysForTarget(targetId);
return existingKeys.has(key);
};
return WatchChangeAggregator;
}());
function documentTargetMap() {
return new SortedMap(DocumentKey.comparator);
}
function snapshotChangesMap() {
return new SortedMap(DocumentKey.comparator);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var DIRECTIONS = (function () {
var dirs = {};
dirs["asc" /* ASCENDING */] = 'ASCENDING';
dirs["desc" /* DESCENDING */] = 'DESCENDING';
return dirs;
})();
var OPERATORS = (function () {
var ops = {};
ops["<" /* LESS_THAN */] = 'LESS_THAN';
ops["<=" /* LESS_THAN_OR_EQUAL */] = 'LESS_THAN_OR_EQUAL';
ops[">" /* GREATER_THAN */] = 'GREATER_THAN';
ops[">=" /* GREATER_THAN_OR_EQUAL */] = 'GREATER_THAN_OR_EQUAL';
ops["==" /* EQUAL */] = 'EQUAL';
ops["!=" /* NOT_EQUAL */] = 'NOT_EQUAL';
ops["array-contains" /* ARRAY_CONTAINS */] = 'ARRAY_CONTAINS';
ops["in" /* IN */] = 'IN';
ops["not-in" /* NOT_IN */] = 'NOT_IN';
ops["array-contains-any" /* ARRAY_CONTAINS_ANY */] = 'ARRAY_CONTAINS_ANY';
return ops;
})();
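/**
* Presence assertion for decoded protos. The assert body is compiled out of
* this production build, leaving a no-op; in debug builds it is expected to
* assert that `value` is defined.
*/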
function assertPresent(value, description) {
}
/**
* This class generates JsonObject values for the Datastore API suitable for
* sending to either GRPC stub methods or via the JSON/HTTP REST API.
*
* The serializer supports both Protobuf.js and Proto3 JSON formats. By
* setting `useProto3Json` to true, the serializer will use the Proto3 JSON
* format.
*
* For a description of the Proto3 JSON format check
* https://developers.google.com/protocol-buffers/docs/proto3#json
*
* TODO(klimt): We can remove the databaseId argument if we keep the full
* resource name in documents.
*/
var JsonProtoSerializer = /** @class */ (function () {
function JsonProtoSerializer(databaseId, useProto3Json) {
this.databaseId = databaseId;
this.useProto3Json = useProto3Json;
}
return JsonProtoSerializer;
}());
function fromRpcStatus(status) {
var code = status.code === undefined ? Code.UNKNOWN : mapCodeFromRpcCode(status.code);
return new FirestoreError(code, status.message || '');
}
/**
* Returns a value for a number (or null) that's appropriate to put into
* a google.protobuf.Int32Value proto.
* DO NOT USE THIS FOR ANYTHING ELSE.
* This method cheats. It's typed as returning "number" because that's what
* our generated proto interfaces say Int32Value must be. But GRPC actually
* expects a { value: } struct.
*/
function toInt32Proto(serializer, val) {
if (serializer.useProto3Json || isNullOrUndefined(val)) {
return val;
}
else {
return { value: val };
}
}
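// Illustrative sketch, not part of the SDK and never invoked: the two
// encodings produced by `toInt32Proto`. A plain object with `useProto3Json`
// stands in for the JsonProtoSerializer.
function exampleInt32ProtoEncodings() {
var proto3Json = toInt32Proto({ useProto3Json: true }, 10); // => 10
var grpc = toInt32Proto({ useProto3Json: false }, 10); // => { value: 10 }
var empty = toInt32Proto({ useProto3Json: false }, null); // => null
return { proto3Json: proto3Json, grpc: grpc, empty: empty };
}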
/**
* Returns a number (or null) from a google.protobuf.Int32Value proto.
*/
function fromInt32Proto(val) {
var result;
if (typeof val === 'object') {
result = val.value;
}
else {
result = val;
}
return isNullOrUndefined(result) ? null : result;
}
/**
* Returns a value for a Date that's appropriate to put into a proto.
*/
function toTimestamp(serializer, timestamp) {
if (serializer.useProto3Json) {
// Serialize to ISO-8601 date format, but with full nano resolution.
// Since JS Date has only millis, let's only use it for the seconds and
// then manually add the fractions to the end.
var jsDateStr = new Date(timestamp.seconds * 1000).toISOString();
// Remove the .xxx fractional part and the trailing Z.
var strUntilSeconds = jsDateStr.replace(/\.\d*/, '').replace('Z', '');
// Pad the fraction out to 9 digits (nanos).
var nanoStr = ('000000000' + timestamp.nanoseconds).slice(-9);
return strUntilSeconds + "." + nanoStr + "Z";
}
else {
return {
seconds: '' + timestamp.seconds,
nanos: timestamp.nanoseconds
// eslint-disable-next-line @typescript-eslint/no-explicit-any
};
}
}
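// Illustrative sketch, not part of the SDK and never invoked: the two
// timestamp encodings produced by `toTimestamp`. A plain object with
// `useProto3Json` stands in for the JsonProtoSerializer.
function exampleTimestampEncodings() {
var ts = new Timestamp(1609459200, 500);
// Proto3 JSON: ISO-8601 with the fraction padded to nanosecond precision.
var proto3Json = toTimestamp({ useProto3Json: true }, ts); // => '2021-01-01T00:00:00.000000500Z'
// Protobuf.js: seconds as a string plus a separate nanos field.
var grpc = toTimestamp({ useProto3Json: false }, ts); // => { seconds: '1609459200', nanos: 500 }
return { proto3Json: proto3Json, grpc: grpc };
}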
function fromTimestamp(date) {
var timestamp = normalizeTimestamp(date);
return new Timestamp(timestamp.seconds, timestamp.nanos);
}
/**
* Returns a value for bytes that's appropriate to put in a proto.
*
* Visible for testing.
*/
function toBytes(serializer, bytes) {
if (serializer.useProto3Json) {
return bytes.toBase64();
}
else {
return bytes.toUint8Array();
}
}
/**
* Returns a ByteString based on the proto string value.
*/
function fromBytes(serializer, value) {
if (serializer.useProto3Json) {
hardAssert(value === undefined || typeof value === 'string');
return ByteString.fromBase64String(value ? value : '');
}
else {
hardAssert(value === undefined || value instanceof Uint8Array);
return ByteString.fromUint8Array(value ? value : new Uint8Array());
}
}
function toVersion(serializer, version) {
return toTimestamp(serializer, version.toTimestamp());
}
function fromVersion(version) {
hardAssert(!!version);
return SnapshotVersion.fromTimestamp(fromTimestamp(version));
}
function toResourceName(databaseId, path) {
return fullyQualifiedPrefixPath(databaseId)
.child('documents')
.child(path)
.canonicalString();
}
function fromResourceName(name) {
var resource = ResourcePath.fromString(name);
hardAssert(isValidResourceName(resource));
return resource;
}
function toName(serializer, key) {
return toResourceName(serializer.databaseId, key.path);
}
function fromName(serializer, name) {
var resource = fromResourceName(name);
if (resource.get(1) !== serializer.databaseId.projectId) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different project: ' +
resource.get(1) +
' vs ' +
serializer.databaseId.projectId);
}
if (resource.get(3) !== serializer.databaseId.database) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different database: ' +
resource.get(3) +
' vs ' +
serializer.databaseId.database);
}
return new DocumentKey(extractLocalPathFromResourceName(resource));
}
function toQueryPath(serializer, path) {
return toResourceName(serializer.databaseId, path);
}
function fromQueryPath(name) {
var resourceName = fromResourceName(name);
// In v1beta1 queries for collections at the root did not have a trailing
// "/documents". In v1 all resource paths contain "/documents". Preserve the
// ability to read the v1beta1 form for compatibility with queries persisted
// in the local target cache.
if (resourceName.length === 4) {
return ResourcePath.emptyPath();
}
return extractLocalPathFromResourceName(resourceName);
}
function getEncodedDatabaseId(serializer) {
var path = new ResourcePath([
'projects',
serializer.databaseId.projectId,
'databases',
serializer.databaseId.database
]);
return path.canonicalString();
}
function fullyQualifiedPrefixPath(databaseId) {
return new ResourcePath([
'projects',
databaseId.projectId,
'databases',
databaseId.database
]);
}
function extractLocalPathFromResourceName(resourceName) {
hardAssert(resourceName.length > 4 && resourceName.get(4) === 'documents');
return resourceName.popFirst(5);
}
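// Illustrative sketch, not part of the SDK and never invoked: how a document
// path maps to a fully qualified resource name and back. A plain object with
// `projectId` and `database` stands in for the DatabaseId.
function exampleResourceNameRoundTrip() {
var databaseId = { projectId: 'my-project', database: '(default)' };
var name = toResourceName(databaseId, ResourcePath.fromString('rooms/foo/messages/bar'));
// => 'projects/my-project/databases/(default)/documents/rooms/foo/messages/bar'
var localPath = extractLocalPathFromResourceName(fromResourceName(name));
return localPath.canonicalString(); // => 'rooms/foo/messages/bar'
}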
/** Creates a Document proto from key and fields (but no create/update time) */
function toMutationDocument(serializer, key, fields) {
return {
name: toName(serializer, key),
fields: fields.value.mapValue.fields
};
}
function toDocument(serializer, document) {
return {
name: toName(serializer, document.key),
fields: document.data.value.mapValue.fields,
updateTime: toTimestamp(serializer, document.version.toTimestamp())
};
}
function fromDocument(serializer, document, hasCommittedMutations) {
var key = fromName(serializer, document.name);
var version = fromVersion(document.updateTime);
var data = new ObjectValue({ mapValue: { fields: document.fields } });
var result = MutableDocument.newFoundDocument(key, version, data);
if (hasCommittedMutations) {
result.setHasCommittedMutations();
}
return result;
}
function fromFound(serializer, doc) {
hardAssert(!!doc.found);
assertPresent(doc.found.name);
assertPresent(doc.found.updateTime);
var key = fromName(serializer, doc.found.name);
var version = fromVersion(doc.found.updateTime);
var data = new ObjectValue({ mapValue: { fields: doc.found.fields } });
return MutableDocument.newFoundDocument(key, version, data);
}
function fromMissing(serializer, result) {
hardAssert(!!result.missing);
hardAssert(!!result.readTime);
var key = fromName(serializer, result.missing);
var version = fromVersion(result.readTime);
return MutableDocument.newNoDocument(key, version);
}
function fromBatchGetDocumentsResponse(serializer, result) {
if ('found' in result) {
return fromFound(serializer, result);
}
else if ('missing' in result) {
return fromMissing(serializer, result);
}
return fail();
}
function fromWatchChange(serializer, change) {
var watchChange;
if ('targetChange' in change) {
assertPresent(change.targetChange);
// proto3 default value is unset in JSON (undefined), so use 'NO_CHANGE'
// if unset
var state = fromWatchTargetChangeState(change.targetChange.targetChangeType || 'NO_CHANGE');
var targetIds = change.targetChange.targetIds || [];
var resumeToken = fromBytes(serializer, change.targetChange.resumeToken);
var causeProto = change.targetChange.cause;
var cause = causeProto && fromRpcStatus(causeProto);
watchChange = new WatchTargetChange(state, targetIds, resumeToken, cause || null);
}
else if ('documentChange' in change) {
assertPresent(change.documentChange);
var entityChange = change.documentChange;
assertPresent(entityChange.document);
assertPresent(entityChange.document.name);
assertPresent(entityChange.document.updateTime);
var key = fromName(serializer, entityChange.document.name);
var version_1 = fromVersion(entityChange.document.updateTime);
var data = new ObjectValue({
mapValue: { fields: entityChange.document.fields }
});
var doc_1 = MutableDocument.newFoundDocument(key, version_1, data);
var updatedTargetIds = entityChange.targetIds || [];
var removedTargetIds = entityChange.removedTargetIds || [];
watchChange = new DocumentWatchChange(updatedTargetIds, removedTargetIds, doc_1.key, doc_1);
}
else if ('documentDelete' in change) {
assertPresent(change.documentDelete);
var docDelete = change.documentDelete;
assertPresent(docDelete.document);
var key = fromName(serializer, docDelete.document);
var version_2 = docDelete.readTime
? fromVersion(docDelete.readTime)
: SnapshotVersion.min();
var doc_2 = MutableDocument.newNoDocument(key, version_2);
var removedTargetIds = docDelete.removedTargetIds || [];
watchChange = new DocumentWatchChange([], removedTargetIds, doc_2.key, doc_2);
}
else if ('documentRemove' in change) {
assertPresent(change.documentRemove);
var docRemove = change.documentRemove;
assertPresent(docRemove.document);
var key = fromName(serializer, docRemove.document);
var removedTargetIds = docRemove.removedTargetIds || [];
watchChange = new DocumentWatchChange([], removedTargetIds, key, null);
}
else if ('filter' in change) {
// TODO(dimond): implement existence filter parsing with strategy.
assertPresent(change.filter);
var filter = change.filter;
assertPresent(filter.targetId);
var count = filter.count || 0;
var existenceFilter = new ExistenceFilter(count);
var targetId = filter.targetId;
watchChange = new ExistenceFilterChange(targetId, existenceFilter);
}
else {
return fail();
}
return watchChange;
}
function fromWatchTargetChangeState(state) {
if (state === 'NO_CHANGE') {
return 0 /* NoChange */;
}
else if (state === 'ADD') {
return 1 /* Added */;
}
else if (state === 'REMOVE') {
return 2 /* Removed */;
}
else if (state === 'CURRENT') {
return 3 /* Current */;
}
else if (state === 'RESET') {
return 4 /* Reset */;
}
else {
return fail();
}
}
function versionFromListenResponse(change) {
// We have only reached a consistent snapshot for the entire stream if there
// is a read_time set and it applies to all targets (i.e. the list of
// targets is empty). The backend is guaranteed to send such responses.
if (!('targetChange' in change)) {
return SnapshotVersion.min();
}
var targetChange = change.targetChange;
if (targetChange.targetIds && targetChange.targetIds.length) {
return SnapshotVersion.min();
}
if (!targetChange.readTime) {
return SnapshotVersion.min();
}
return fromVersion(targetChange.readTime);
}
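// Illustrative sketch, not part of the SDK and never invoked: only a target
// change that carries a readTime and applies to all targets (empty targetIds)
// yields a usable snapshot version; everything else maps to
// SnapshotVersion.min(). Plain objects stand in for ListenResponse protos.
function exampleVersionFromListenResponse() {
var globalSnapshot = versionFromListenResponse({
targetChange: { targetIds: [], readTime: { seconds: '1609459200', nanos: 0 } }
}); // => SnapshotVersion for 2021-01-01T00:00:00Z
var perTargetChange = versionFromListenResponse({
targetChange: { targetIds: [1], readTime: { seconds: '1609459200', nanos: 0 } }
}); // => SnapshotVersion.min()
return { globalSnapshot: globalSnapshot, perTargetChange: perTargetChange };
}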
function toMutation(serializer, mutation) {
var result;
if (mutation instanceof SetMutation) {
result = {
update: toMutationDocument(serializer, mutation.key, mutation.value)
};
}
else if (mutation instanceof DeleteMutation) {
result = { delete: toName(serializer, mutation.key) };
}
else if (mutation instanceof PatchMutation) {
result = {
update: toMutationDocument(serializer, mutation.key, mutation.data),
updateMask: toDocumentMask(mutation.fieldMask)
};
}
else if (mutation instanceof VerifyMutation) {
result = {
verify: toName(serializer, mutation.key)
};
}
else {
return fail();
}
if (mutation.fieldTransforms.length > 0) {
result.updateTransforms = mutation.fieldTransforms.map(function (transform) { return toFieldTransform(serializer, transform); });
}
if (!mutation.precondition.isNone) {
result.currentDocument = toPrecondition(serializer, mutation.precondition);
}
return result;
}
function fromMutation(serializer, proto) {
var precondition = proto.currentDocument
? fromPrecondition(proto.currentDocument)
: Precondition.none();
var fieldTransforms = proto.updateTransforms
? proto.updateTransforms.map(function (transform) { return fromFieldTransform(serializer, transform); })
: [];
if (proto.update) {
assertPresent(proto.update.name);
var key = fromName(serializer, proto.update.name);
var value = new ObjectValue({
mapValue: { fields: proto.update.fields }
});
if (proto.updateMask) {
var fieldMask = fromDocumentMask(proto.updateMask);
return new PatchMutation(key, value, fieldMask, precondition, fieldTransforms);
}
else {
return new SetMutation(key, value, precondition, fieldTransforms);
}
}
else if (proto.delete) {
var key = fromName(serializer, proto.delete);
return new DeleteMutation(key, precondition);
}
else if (proto.verify) {
var key = fromName(serializer, proto.verify);
return new VerifyMutation(key, precondition);
}
else {
return fail();
}
}
function toPrecondition(serializer, precondition) {
if (precondition.updateTime !== undefined) {
return {
updateTime: toVersion(serializer, precondition.updateTime)
};
}
else if (precondition.exists !== undefined) {
return { exists: precondition.exists };
}
else {
return fail();
}
}
function fromPrecondition(precondition) {
if (precondition.updateTime !== undefined) {
return Precondition.updateTime(fromVersion(precondition.updateTime));
}
else if (precondition.exists !== undefined) {
return Precondition.exists(precondition.exists);
}
else {
return Precondition.none();
}
}
function fromWriteResult(proto, commitTime) {
// NOTE: Deletes don't have an updateTime.
var version = proto.updateTime
? fromVersion(proto.updateTime)
: fromVersion(commitTime);
if (version.isEqual(SnapshotVersion.min())) {
// The Firestore Emulator currently returns an update time of 0 for
// deletes of non-existing documents (rather than null). This breaks the
// test "get deleted doc while offline with source=cache" as NoDocuments
// with version 0 are filtered by IndexedDb's RemoteDocumentCache.
// TODO(#2149): Remove this when Emulator is fixed
version = fromVersion(commitTime);
}
return new MutationResult(version, proto.transformResults || []);
}
function fromWriteResults(protos, commitTime) {
if (protos && protos.length > 0) {
hardAssert(commitTime !== undefined);
return protos.map(function (proto) { return fromWriteResult(proto, commitTime); });
}
else {
return [];
}
}
function toFieldTransform(serializer, fieldTransform) {
var transform = fieldTransform.transform;
if (transform instanceof ServerTimestampTransform) {
return {
fieldPath: fieldTransform.field.canonicalString(),
setToServerValue: 'REQUEST_TIME'
};
}
else if (transform instanceof ArrayUnionTransformOperation) {
return {
fieldPath: fieldTransform.field.canonicalString(),
appendMissingElements: {
values: transform.elements
}
};
}
else if (transform instanceof ArrayRemoveTransformOperation) {
return {
fieldPath: fieldTransform.field.canonicalString(),
removeAllFromArray: {
values: transform.elements
}
};
}
else if (transform instanceof NumericIncrementTransformOperation) {
return {
fieldPath: fieldTransform.field.canonicalString(),
increment: transform.operand
};
}
else {
throw fail();
}
}
function fromFieldTransform(serializer, proto) {
var transform = null;
if ('setToServerValue' in proto) {
hardAssert(proto.setToServerValue === 'REQUEST_TIME');
transform = new ServerTimestampTransform();
}
else if ('appendMissingElements' in proto) {
var values = proto.appendMissingElements.values || [];
transform = new ArrayUnionTransformOperation(values);
}
else if ('removeAllFromArray' in proto) {
var values = proto.removeAllFromArray.values || [];
transform = new ArrayRemoveTransformOperation(values);
}
else if ('increment' in proto) {
transform = new NumericIncrementTransformOperation(serializer, proto.increment);
}
else {
fail();
}
var fieldPath = FieldPath$1.fromServerFormat(proto.fieldPath);
return new FieldTransform(fieldPath, transform);
}
function toDocumentsTarget(serializer, target) {
return { documents: [toQueryPath(serializer, target.path)] };
}
function fromDocumentsTarget(documentsTarget) {
var count = documentsTarget.documents.length;
hardAssert(count === 1);
var name = documentsTarget.documents[0];
return queryToTarget(newQueryForPath(fromQueryPath(name)));
}
function toQueryTarget(serializer, target) {
// Dissect the path into parent, collectionId, and optional key filter.
var result = { structuredQuery: {} };
var path = target.path;
if (target.collectionGroup !== null) {
result.parent = toQueryPath(serializer, path);
result.structuredQuery.from = [
{
collectionId: target.collectionGroup,
allDescendants: true
}
];
}
else {
result.parent = toQueryPath(serializer, path.popLast());
result.structuredQuery.from = [{ collectionId: path.lastSegment() }];
}
var where = toFilter(target.filters);
if (where) {
result.structuredQuery.where = where;
}
var orderBy = toOrder(target.orderBy);
if (orderBy) {
result.structuredQuery.orderBy = orderBy;
}
var limit = toInt32Proto(serializer, target.limit);
if (limit !== null) {
result.structuredQuery.limit = limit;
}
if (target.startAt) {
result.structuredQuery.startAt = toCursor(target.startAt);
}
if (target.endAt) {
result.structuredQuery.endAt = toCursor(target.endAt);
}
return result;
}
function convertQueryTargetToQuery(target) {
var path = fromQueryPath(target.parent);
var query = target.structuredQuery;
var fromCount = query.from ? query.from.length : 0;
var collectionGroup = null;
if (fromCount > 0) {
hardAssert(fromCount === 1);
var from = query.from[0];
if (from.allDescendants) {
collectionGroup = from.collectionId;
}
else {
path = path.child(from.collectionId);
}
}
var filterBy = [];
if (query.where) {
filterBy = fromFilter(query.where);
}
var orderBy = [];
if (query.orderBy) {
orderBy = fromOrder(query.orderBy);
}
var limit = null;
if (query.limit) {
limit = fromInt32Proto(query.limit);
}
var startAt = null;
if (query.startAt) {
startAt = fromCursor(query.startAt);
}
var endAt = null;
if (query.endAt) {
endAt = fromCursor(query.endAt);
}
return newQuery(path, collectionGroup, orderBy, filterBy, limit, "F" /* First */, startAt, endAt);
}
function fromQueryTarget(target) {
return queryToTarget(convertQueryTargetToQuery(target));
}
function toListenRequestLabels(serializer, targetData) {
var value = toLabel(serializer, targetData.purpose);
if (value == null) {
return null;
}
else {
return {
'goog-listen-tags': value
};
}
}
function toLabel(serializer, purpose) {
switch (purpose) {
case 0 /* Listen */:
return null;
case 1 /* ExistenceFilterMismatch */:
return 'existence-filter-mismatch';
case 2 /* LimboResolution */:
return 'limbo-document';
default:
return fail();
}
}
function toTarget(serializer, targetData) {
var result;
var target = targetData.target;
if (isDocumentTarget(target)) {
result = { documents: toDocumentsTarget(serializer, target) };
}
else {
result = { query: toQueryTarget(serializer, target) };
}
result.targetId = targetData.targetId;
if (targetData.resumeToken.approximateByteSize() > 0) {
result.resumeToken = toBytes(serializer, targetData.resumeToken);
}
else if (targetData.snapshotVersion.compareTo(SnapshotVersion.min()) > 0) {
// TODO(wuandy): Consider removing above check because it is most likely true.
// Right now, many tests depend on this behaviour though (leaving min() out
// of serialization).
result.readTime = toTimestamp(serializer, targetData.snapshotVersion.toTimestamp());
}
return result;
}
function toFilter(filters) {
if (filters.length === 0) {
return;
}
var protos = filters.map(function (filter) {
return toUnaryOrFieldFilter(filter);
});
if (protos.length === 1) {
return protos[0];
}
return { compositeFilter: { op: 'AND', filters: protos } };
}
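// Illustrative sketch, not part of the SDK and never invoked: a single filter
// is sent as-is, while multiple filters are combined into an AND composite
// filter. `FieldFilter.create` and `FieldPath$1` are the classes already used
// by the surrounding code.
function exampleToFilter() {
var byCity = FieldFilter.create(FieldPath$1.fromServerFormat('city'), "==" /* EQUAL */, { stringValue: 'SF' });
var byPopulation = FieldFilter.create(FieldPath$1.fromServerFormat('population'), ">" /* GREATER_THAN */, { integerValue: '100' });
var single = toFilter([byCity]);
// => { fieldFilter: { field: { fieldPath: 'city' }, op: 'EQUAL', value: { stringValue: 'SF' } } }
var combined = toFilter([byCity, byPopulation]);
// => { compositeFilter: { op: 'AND', filters: [/* both field filters */] } }
return { single: single, combined: combined };
}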
function fromFilter(filter) {
if (!filter) {
return [];
}
else if (filter.unaryFilter !== undefined) {
return [fromUnaryFilter(filter)];
}
else if (filter.fieldFilter !== undefined) {
return [fromFieldFilter(filter)];
}
else if (filter.compositeFilter !== undefined) {
return filter.compositeFilter
.filters.map(function (f) { return fromFilter(f); })
.reduce(function (accum, current) { return accum.concat(current); });
}
else {
return fail();
}
}
function toOrder(orderBys) {
if (orderBys.length === 0) {
return;
}
return orderBys.map(function (order) { return toPropertyOrder(order); });
}
function fromOrder(orderBys) {
return orderBys.map(function (order) { return fromPropertyOrder(order); });
}
function toCursor(cursor) {
return {
before: cursor.before,
values: cursor.position
};
}
function fromCursor(cursor) {
var before = !!cursor.before;
var position = cursor.values || [];
return new Bound(position, before);
}
// visible for testing
function toDirection(dir) {
return DIRECTIONS[dir];
}
// visible for testing
function fromDirection(dir) {
switch (dir) {
case 'ASCENDING':
return "asc" /* ASCENDING */;
case 'DESCENDING':
return "desc" /* DESCENDING */;
default:
return undefined;
}
}
// visible for testing
function toOperatorName(op) {
return OPERATORS[op];
}
function fromOperatorName(op) {
switch (op) {
case 'EQUAL':
return "==" /* EQUAL */;
case 'NOT_EQUAL':
return "!=" /* NOT_EQUAL */;
case 'GREATER_THAN':
return ">" /* GREATER_THAN */;
case 'GREATER_THAN_OR_EQUAL':
return ">=" /* GREATER_THAN_OR_EQUAL */;
case 'LESS_THAN':
return "<" /* LESS_THAN */;
case 'LESS_THAN_OR_EQUAL':
return "<=" /* LESS_THAN_OR_EQUAL */;
case 'ARRAY_CONTAINS':
return "array-contains" /* ARRAY_CONTAINS */;
case 'IN':
return "in" /* IN */;
case 'NOT_IN':
return "not-in" /* NOT_IN */;
case 'ARRAY_CONTAINS_ANY':
return "array-contains-any" /* ARRAY_CONTAINS_ANY */;
case 'OPERATOR_UNSPECIFIED':
return fail();
default:
return fail();
}
}
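// Illustrative sketch, not part of the SDK and never invoked: operator names
// round-trip between the SDK's canonical operator strings and the RPC enum
// names used on the wire.
function exampleOperatorNameRoundTrip() {
var rpcName = toOperatorName("array-contains" /* ARRAY_CONTAINS */); // => 'ARRAY_CONTAINS'
return fromOperatorName(rpcName); // => "array-contains"
}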
function toFieldPathReference(path) {
return { fieldPath: path.canonicalString() };
}
function fromFieldPathReference(fieldReference) {
return FieldPath$1.fromServerFormat(fieldReference.fieldPath);
}
// visible for testing
function toPropertyOrder(orderBy) {
return {
field: toFieldPathReference(orderBy.field),
direction: toDirection(orderBy.dir)
};
}
function fromPropertyOrder(orderBy) {
return new OrderBy(fromFieldPathReference(orderBy.field), fromDirection(orderBy.direction));
}
function fromFieldFilter(filter) {
return FieldFilter.create(fromFieldPathReference(filter.fieldFilter.field), fromOperatorName(filter.fieldFilter.op), filter.fieldFilter.value);
}
// visible for testing
function toUnaryOrFieldFilter(filter) {
if (filter.op === "==" /* EQUAL */) {
if (isNanValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NAN'
}
};
}
else if (isNullValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NULL'
}
};
}
}
else if (filter.op === "!=" /* NOT_EQUAL */) {
if (isNanValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NOT_NAN'
}
};
}
else if (isNullValue(filter.value)) {
return {
unaryFilter: {
field: toFieldPathReference(filter.field),
op: 'IS_NOT_NULL'
}
};
}
}
return {
fieldFilter: {
field: toFieldPathReference(filter.field),
op: toOperatorName(filter.op),
value: filter.value
}
};
}
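// Illustrative sketch, not part of the SDK and never invoked: equality (and
// inequality) checks against null or NaN are encoded as unary filters rather
// than field filters.
function exampleUnaryFilterEncoding() {
var isNull = toUnaryOrFieldFilter(FieldFilter.create(FieldPath$1.fromServerFormat('owner'), "==" /* EQUAL */, { nullValue: 'NULL_VALUE' }));
// => { unaryFilter: { field: { fieldPath: 'owner' }, op: 'IS_NULL' } }
var isNotNan = toUnaryOrFieldFilter(FieldFilter.create(FieldPath$1.fromServerFormat('score'), "!=" /* NOT_EQUAL */, { doubleValue: NaN }));
// => { unaryFilter: { field: { fieldPath: 'score' }, op: 'IS_NOT_NAN' } }
return { isNull: isNull, isNotNan: isNotNan };
}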
function fromUnaryFilter(filter) {
switch (filter.unaryFilter.op) {
case 'IS_NAN':
var nanField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(nanField, "==" /* EQUAL */, {
doubleValue: NaN
});
case 'IS_NULL':
var nullField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(nullField, "==" /* EQUAL */, {
nullValue: 'NULL_VALUE'
});
case 'IS_NOT_NAN':
var notNanField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(notNanField, "!=" /* NOT_EQUAL */, {
doubleValue: NaN
});
case 'IS_NOT_NULL':
var notNullField = fromFieldPathReference(filter.unaryFilter.field);
return FieldFilter.create(notNullField, "!=" /* NOT_EQUAL */, {
nullValue: 'NULL_VALUE'
});
case 'OPERATOR_UNSPECIFIED':
return fail();
default:
return fail();
}
}
function toDocumentMask(fieldMask) {
var canonicalFields = [];
fieldMask.fields.forEach(function (field) { return canonicalFields.push(field.canonicalString()); });
return {
fieldPaths: canonicalFields
};
}
function fromDocumentMask(proto) {
var paths = proto.fieldPaths || [];
return new FieldMask(paths.map(function (path) { return FieldPath$1.fromServerFormat(path); }));
}
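// Illustrative sketch, not part of the SDK and never invoked: a field mask is
// encoded as the list of canonical (dot-separated) field paths it covers, and
// decodes back to an equivalent mask.
function exampleDocumentMaskRoundTrip() {
var mask = new FieldMask([
FieldPath$1.fromServerFormat('address.city'),
FieldPath$1.fromServerFormat('name')
]);
var proto = toDocumentMask(mask); // => { fieldPaths: ['address.city', 'name'] }
return fromDocumentMask(proto);
}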
function isValidResourceName(path) {
// Resource names have at least 4 components: 'projects', the project ID,
// 'databases' and the database ID.
return (path.length >= 4 &&
path.get(0) === 'projects' &&
path.get(2) === 'databases');
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An immutable set of metadata that the local store tracks for each target.
*/
var TargetData = /** @class */ (function () {
function TargetData(
/** The target being listened to. */
target,
/**
* The target ID to which the target corresponds. Assigned by the
* LocalStore for user listens and by the SyncEngine for limbo watches.
*/
targetId,
/** The purpose of the target. */
purpose,
/**
* The sequence number of the last transaction during which this target data
* was modified.
*/
sequenceNumber,
/** The latest snapshot version seen for this target. */
snapshotVersion,
/**
* The maximum snapshot version at which the associated view
* contained no limbo documents.
*/
lastLimboFreeSnapshotVersion,
/**
* An opaque, server-assigned token that allows watching a target to be
* resumed after disconnecting without retransmitting all the data that
* matches the target. The resume token essentially identifies a point in
* time from which the server should resume sending results.
*/
resumeToken) {
if (snapshotVersion === void 0) { snapshotVersion = SnapshotVersion.min(); }
if (lastLimboFreeSnapshotVersion === void 0) { lastLimboFreeSnapshotVersion = SnapshotVersion.min(); }
if (resumeToken === void 0) { resumeToken = ByteString.EMPTY_BYTE_STRING; }
this.target = target;
this.targetId = targetId;
this.purpose = purpose;
this.sequenceNumber = sequenceNumber;
this.snapshotVersion = snapshotVersion;
this.lastLimboFreeSnapshotVersion = lastLimboFreeSnapshotVersion;
this.resumeToken = resumeToken;
}
/** Creates a new target data instance with an updated sequence number. */
TargetData.prototype.withSequenceNumber = function (sequenceNumber) {
return new TargetData(this.target, this.targetId, this.purpose, sequenceNumber, this.snapshotVersion, this.lastLimboFreeSnapshotVersion, this.resumeToken);
};
/**
* Creates a new target data instance with an updated resume token and
* snapshot version.
*/
TargetData.prototype.withResumeToken = function (resumeToken, snapshotVersion) {
return new TargetData(this.target, this.targetId, this.purpose, this.sequenceNumber, snapshotVersion, this.lastLimboFreeSnapshotVersion, resumeToken);
};
/**
* Creates a new target data instance with an updated last limbo free
* snapshot version number.
*/
TargetData.prototype.withLastLimboFreeSnapshotVersion = function (lastLimboFreeSnapshotVersion) {
return new TargetData(this.target, this.targetId, this.purpose, this.sequenceNumber, this.snapshotVersion, lastLimboFreeSnapshotVersion, this.resumeToken);
};
return TargetData;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Serializer for values stored in the LocalStore. */
var LocalSerializer = /** @class */ (function () {
function LocalSerializer(remoteSerializer) {
this.remoteSerializer = remoteSerializer;
}
return LocalSerializer;
}());
/** Decodes a remote document from local storage into a Document. */
function fromDbRemoteDocument(localSerializer, remoteDoc) {
if (remoteDoc.document) {
return fromDocument(localSerializer.remoteSerializer, remoteDoc.document, !!remoteDoc.hasCommittedMutations);
}
else if (remoteDoc.noDocument) {
var key = DocumentKey.fromSegments(remoteDoc.noDocument.path);
var version_3 = fromDbTimestamp(remoteDoc.noDocument.readTime);
var document_1 = MutableDocument.newNoDocument(key, version_3);
return remoteDoc.hasCommittedMutations
? document_1.setHasCommittedMutations()
: document_1;
}
else if (remoteDoc.unknownDocument) {
var key = DocumentKey.fromSegments(remoteDoc.unknownDocument.path);
var version_4 = fromDbTimestamp(remoteDoc.unknownDocument.version);
return MutableDocument.newUnknownDocument(key, version_4);
}
else {
return fail();
}
}
/** Encodes a document for storage locally. */
function toDbRemoteDocument(localSerializer, document, readTime) {
var dbReadTime = toDbTimestampKey(readTime);
var parentPath = document.key.path.popLast().toArray();
if (document.isFoundDocument()) {
var doc_3 = toDocument(localSerializer.remoteSerializer, document);
var hasCommittedMutations = document.hasCommittedMutations;
return new DbRemoteDocument(
/* unknownDocument= */ null,
/* noDocument= */ null, doc_3, hasCommittedMutations, dbReadTime, parentPath);
}
else if (document.isNoDocument()) {
var path = document.key.path.toArray();
var readTime_1 = toDbTimestamp(document.version);
var hasCommittedMutations = document.hasCommittedMutations;
return new DbRemoteDocument(
/* unknownDocument= */ null, new DbNoDocument(path, readTime_1),
/* document= */ null, hasCommittedMutations, dbReadTime, parentPath);
}
else if (document.isUnknownDocument()) {
var path = document.key.path.toArray();
var readTime_2 = toDbTimestamp(document.version);
return new DbRemoteDocument(new DbUnknownDocument(path, readTime_2),
/* noDocument= */ null,
/* document= */ null,
/* hasCommittedMutations= */ true, dbReadTime, parentPath);
}
else {
return fail();
}
}
function toDbTimestampKey(snapshotVersion) {
var timestamp = snapshotVersion.toTimestamp();
return [timestamp.seconds, timestamp.nanoseconds];
}
function fromDbTimestampKey(dbTimestampKey) {
var timestamp = new Timestamp(dbTimestampKey[0], dbTimestampKey[1]);
return SnapshotVersion.fromTimestamp(timestamp);
}
function toDbTimestamp(snapshotVersion) {
var timestamp = snapshotVersion.toTimestamp();
return new DbTimestamp(timestamp.seconds, timestamp.nanoseconds);
}
function fromDbTimestamp(dbTimestamp) {
var timestamp = new Timestamp(dbTimestamp.seconds, dbTimestamp.nanoseconds);
return SnapshotVersion.fromTimestamp(timestamp);
}
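// Illustrative sketch, not part of the SDK and never invoked: snapshot
// versions round-trip through both storage encodings, the [seconds, nanos]
// array used as an IndexedDb key and the DbTimestamp object.
function exampleDbTimestampRoundTrip() {
var version = SnapshotVersion.fromTimestamp(new Timestamp(1609459200, 500));
var key = toDbTimestampKey(version); // => [1609459200, 500]
return fromDbTimestampKey(key).isEqual(version); // => true
}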
/** Encodes a batch of mutations into a DbMutationBatch for local storage. */
function toDbMutationBatch(localSerializer, userId, batch) {
var serializedBaseMutations = batch.baseMutations.map(function (m) { return toMutation(localSerializer.remoteSerializer, m); });
var serializedMutations = batch.mutations.map(function (m) { return toMutation(localSerializer.remoteSerializer, m); });
return new DbMutationBatch(userId, batch.batchId, batch.localWriteTime.toMillis(), serializedBaseMutations, serializedMutations);
}
/** Decodes a DbMutationBatch into a MutationBatch */
function fromDbMutationBatch(localSerializer, dbBatch) {
var baseMutations = (dbBatch.baseMutations || []).map(function (m) { return fromMutation(localSerializer.remoteSerializer, m); });
// Squash old transform mutations into existing patch or set mutations.
// The SDK now represents transforms with `update_transforms` instead of
// standalone `transform` mutations, so old `transform` mutations stored in
// IndexedDB need to be rewritten as `update_transforms`.
// TODO(b/174608374): Remove this code once we perform a schema migration.
for (var i = 0; i < dbBatch.mutations.length - 1; ++i) {
var currentMutation = dbBatch.mutations[i];
var hasTransform = i + 1 < dbBatch.mutations.length &&
dbBatch.mutations[i + 1].transform !== undefined;
if (hasTransform) {
var transformMutation = dbBatch.mutations[i + 1];
currentMutation.updateTransforms = transformMutation.transform.fieldTransforms;
dbBatch.mutations.splice(i + 1, 1);
++i;
}
}
var mutations = dbBatch.mutations.map(function (m) { return fromMutation(localSerializer.remoteSerializer, m); });
var timestamp = Timestamp.fromMillis(dbBatch.localWriteTimeMs);
return new MutationBatch(dbBatch.batchId, timestamp, baseMutations, mutations);
}
/** Decodes a DbTarget into TargetData */
function fromDbTarget(dbTarget) {
var version = fromDbTimestamp(dbTarget.readTime);
var lastLimboFreeSnapshotVersion = dbTarget.lastLimboFreeSnapshotVersion !== undefined
? fromDbTimestamp(dbTarget.lastLimboFreeSnapshotVersion)
: SnapshotVersion.min();
var target;
if (isDocumentQuery(dbTarget.query)) {
target = fromDocumentsTarget(dbTarget.query);
}
else {
target = fromQueryTarget(dbTarget.query);
}
return new TargetData(target, dbTarget.targetId, 0 /* Listen */, dbTarget.lastListenSequenceNumber, version, lastLimboFreeSnapshotVersion, ByteString.fromBase64String(dbTarget.resumeToken));
}
/** Encodes TargetData into a DbTarget for storage locally. */
function toDbTarget(localSerializer, targetData) {
var dbTimestamp = toDbTimestamp(targetData.snapshotVersion);
var dbLastLimboFreeTimestamp = toDbTimestamp(targetData.lastLimboFreeSnapshotVersion);
var queryProto;
if (isDocumentTarget(targetData.target)) {
queryProto = toDocumentsTarget(localSerializer.remoteSerializer, targetData.target);
}
else {
queryProto = toQueryTarget(localSerializer.remoteSerializer, targetData.target);
}
// We can't store the resumeToken as a ByteString in IndexedDb, so we
// convert it to a base64 string for storage.
var resumeToken = targetData.resumeToken.toBase64();
// lastListenSequenceNumber is always 0 until we do real GC.
return new DbTarget(targetData.targetId, canonifyTarget(targetData.target), dbTimestamp, resumeToken, targetData.sequenceNumber, dbLastLimboFreeTimestamp, queryProto);
}
/**
* A helper function for figuring out what kind of query has been stored.
*/
function isDocumentQuery(dbQuery) {
return dbQuery.documents !== undefined;
}
/** Decodes a DbBundle into a BundleMetadata object. */
function fromDbBundle(dbBundle) {
return {
id: dbBundle.bundleId,
createTime: fromDbTimestamp(dbBundle.createTime),
version: dbBundle.version
};
}
/** Encodes a BundleMetadata to a DbBundle. */
function toDbBundle(metadata) {
return {
bundleId: metadata.id,
createTime: toDbTimestamp(fromVersion(metadata.createTime)),
version: metadata.version
};
}
/** Decodes a DbNamedQuery into a NamedQuery. */
function fromDbNamedQuery(dbNamedQuery) {
return {
name: dbNamedQuery.name,
query: fromBundledQuery(dbNamedQuery.bundledQuery),
readTime: fromDbTimestamp(dbNamedQuery.readTime)
};
}
/** Encodes a NamedQuery from a bundle proto to a DbNamedQuery. */
function toDbNamedQuery(query) {
return {
name: query.name,
readTime: toDbTimestamp(fromVersion(query.readTime)),
bundledQuery: query.bundledQuery
};
}
/**
* Decodes a `BundledQuery` from a bundle proto into a Query object.
*
* This reconstructs the original query used to build the bundle being loaded,
* including features that exist only in the SDK (for example: limit-to-last).
*/
function fromBundledQuery(bundledQuery) {
var query = convertQueryTargetToQuery({
parent: bundledQuery.parent,
structuredQuery: bundledQuery.structuredQuery
});
if (bundledQuery.limitType === 'LAST') {
return queryWithLimit(query, query.limit, "L" /* Last */);
}
return query;
}
/** Decodes a NamedQuery proto object into a NamedQuery model object. */
function fromProtoNamedQuery(namedQuery) {
return {
name: namedQuery.name,
query: fromBundledQuery(namedQuery.bundledQuery),
readTime: fromVersion(namedQuery.readTime)
};
}
/** Decodes a BundleMetadata proto into a BundleMetadata object. */
function fromBundleMetadata(metadata) {
return {
id: metadata.id,
version: metadata.version,
createTime: fromVersion(metadata.createTime)
};
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var IndexedDbBundleCache = /** @class */ (function () {
function IndexedDbBundleCache() {
}
IndexedDbBundleCache.prototype.getBundleMetadata = function (transaction, bundleId) {
return bundlesStore(transaction)
.get(bundleId)
.next(function (bundle) {
if (bundle) {
return fromDbBundle(bundle);
}
return undefined;
});
};
IndexedDbBundleCache.prototype.saveBundleMetadata = function (transaction, bundleMetadata) {
return bundlesStore(transaction).put(toDbBundle(bundleMetadata));
};
IndexedDbBundleCache.prototype.getNamedQuery = function (transaction, queryName) {
return namedQueriesStore(transaction)
.get(queryName)
.next(function (query) {
if (query) {
return fromDbNamedQuery(query);
}
return undefined;
});
};
IndexedDbBundleCache.prototype.saveNamedQuery = function (transaction, query) {
return namedQueriesStore(transaction).put(toDbNamedQuery(query));
};
return IndexedDbBundleCache;
}());
/**
* Helper to get a typed SimpleDbStore for the bundles object store.
*/
function bundlesStore(txn) {
return getStore(txn, DbBundle.store);
}
/**
* Helper to get a typed SimpleDbStore for the namedQueries object store.
*/
function namedQueriesStore(txn) {
return getStore(txn, DbNamedQuery.store);
}
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory implementation of IndexManager.
*/
var MemoryIndexManager = /** @class */ (function () {
function MemoryIndexManager() {
this.collectionParentIndex = new MemoryCollectionParentIndex();
}
MemoryIndexManager.prototype.addToCollectionParentIndex = function (transaction, collectionPath) {
this.collectionParentIndex.add(collectionPath);
return PersistencePromise.resolve();
};
MemoryIndexManager.prototype.getCollectionParents = function (transaction, collectionId) {
return PersistencePromise.resolve(this.collectionParentIndex.getEntries(collectionId));
};
return MemoryIndexManager;
}());
/**
* Internal implementation of the collection-parent index exposed by MemoryIndexManager.
* Also used for in-memory caching by IndexedDbIndexManager and initial index population
* in indexeddb_schema.ts
*/
var MemoryCollectionParentIndex = /** @class */ (function () {
function MemoryCollectionParentIndex() {
this.index = {};
}
// Returns false if the entry already existed.
MemoryCollectionParentIndex.prototype.add = function (collectionPath) {
var collectionId = collectionPath.lastSegment();
var parentPath = collectionPath.popLast();
var existingParents = this.index[collectionId] ||
new SortedSet(ResourcePath.comparator);
var added = !existingParents.has(parentPath);
this.index[collectionId] = existingParents.add(parentPath);
return added;
};
MemoryCollectionParentIndex.prototype.has = function (collectionPath) {
var collectionId = collectionPath.lastSegment();
var parentPath = collectionPath.popLast();
var existingParents = this.index[collectionId];
return existingParents && existingParents.has(parentPath);
};
MemoryCollectionParentIndex.prototype.getEntries = function (collectionId) {
var parentPaths = this.index[collectionId] ||
new SortedSet(ResourcePath.comparator);
return parentPaths.toArray();
};
return MemoryCollectionParentIndex;
}());
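// Illustrative sketch, not part of the SDK and never invoked: the index maps
// each collection id to the sorted set of parent paths under which that
// collection appears.
function exampleCollectionParentIndex() {
var index = new MemoryCollectionParentIndex();
index.add(ResourcePath.fromString('rooms/foo/messages')); // => true (new entry)
index.add(ResourcePath.fromString('rooms/bar/messages')); // => true (new entry)
index.add(ResourcePath.fromString('rooms/foo/messages')); // => false (already indexed)
return index.getEntries('messages').map(function (p) { return p.canonicalString(); });
// => ['rooms/bar', 'rooms/foo']
}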
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A persisted implementation of IndexManager.
*/
var IndexedDbIndexManager = /** @class */ (function () {
function IndexedDbIndexManager() {
/**
* An in-memory copy of the index entries we've already written since the SDK
* launched. Used to avoid re-writing the same entry repeatedly.
*
* This is *NOT* a complete cache of what's in persistence and so can never be used to
* satisfy reads.
*/
this.collectionParentsCache = new MemoryCollectionParentIndex();
}
/**
* Adds a new entry to the collection parent index.
*
* Repeated calls for the same collectionPath should be avoided within a
* transaction as IndexedDbIndexManager only caches writes once a transaction
* has been committed.
*/
IndexedDbIndexManager.prototype.addToCollectionParentIndex = function (transaction, collectionPath) {
var _this = this;
if (!this.collectionParentsCache.has(collectionPath)) {
var collectionId = collectionPath.lastSegment();
var parentPath = collectionPath.popLast();
transaction.addOnCommittedListener(function () {
// Add the collection to the in memory cache only if the transaction was
// successfully committed.
_this.collectionParentsCache.add(collectionPath);
});
var collectionParent = {
collectionId: collectionId,
parent: encodeResourcePath(parentPath)
};
return collectionParentsStore(transaction).put(collectionParent);
}
return PersistencePromise.resolve();
};
IndexedDbIndexManager.prototype.getCollectionParents = function (transaction, collectionId) {
var parentPaths = [];
var range = IDBKeyRange.bound([collectionId, ''], [immediateSuccessor(collectionId), ''],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
return collectionParentsStore(transaction)
.loadAll(range)
.next(function (entries) {
for (var _i = 0, entries_1 = entries; _i < entries_1.length; _i++) {
var entry = entries_1[_i];
// This collectionId guard shouldn't be necessary (and isn't as long
// as we're running in a real browser), but there's a bug in
// indexeddbshim that breaks our range in our tests running in node:
// https://github.com/axemclion/IndexedDBShim/issues/334
if (entry.collectionId !== collectionId) {
break;
}
parentPaths.push(decodeResourcePath(entry.parent));
}
return parentPaths;
});
};
return IndexedDbIndexManager;
}());
/**
* Helper to get a typed SimpleDbStore for the collectionParents
* document store.
*/
function collectionParentsStore(txn) {
return getStore(txn, DbCollectionParent.store);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Delete a mutation batch and the associated document mutations.
* @returns A PersistencePromise of the document mutations that were removed.
*/
function removeMutationBatch(txn, userId, batch) {
var mutationStore = txn.store(DbMutationBatch.store);
var indexTxn = txn.store(DbDocumentMutation.store);
var promises = [];
var range = IDBKeyRange.only(batch.batchId);
var numDeleted = 0;
var removePromise = mutationStore.iterate({ range: range }, function (key, value, control) {
numDeleted++;
return control.delete();
});
promises.push(removePromise.next(function () {
hardAssert(numDeleted === 1);
}));
var removedDocuments = [];
for (var _i = 0, _d = batch.mutations; _i < _d.length; _i++) {
var mutation = _d[_i];
var indexKey = DbDocumentMutation.key(userId, mutation.key.path, batch.batchId);
promises.push(indexTxn.delete(indexKey));
removedDocuments.push(mutation.key);
}
return PersistencePromise.waitFor(promises).next(function () { return removedDocuments; });
}
/**
* Returns an approximate size for the given document.
*/
function dbDocumentSize(doc) {
if (!doc) {
return 0;
}
var value;
if (doc.document) {
value = doc.document;
}
else if (doc.unknownDocument) {
value = doc.unknownDocument;
}
else if (doc.noDocument) {
value = doc.noDocument;
}
else {
throw fail();
}
return JSON.stringify(value).length;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A mutation queue for a specific user, backed by IndexedDB. */
var IndexedDbMutationQueue = /** @class */ (function () {
function IndexedDbMutationQueue(
/**
* The normalized userId (e.g. null UID => "" userId) used to store /
* retrieve mutations.
*/
userId, serializer, indexManager, referenceDelegate) {
this.userId = userId;
this.serializer = serializer;
this.indexManager = indexManager;
this.referenceDelegate = referenceDelegate;
/**
* Caches the document keys for pending mutation batches. If the mutation
* has been removed from IndexedDb, the cached value may continue to
* be used to retrieve the batch's document keys. To remove a cached value
* locally, `removeCachedMutationKeys()` should be invoked either directly
* or through `removeMutationBatches()`.
*
* With multi-tab, when the primary client acknowledges or rejects a mutation,
* this cache is used by secondary clients to invalidate the local
* view of the documents that were previously affected by the mutation.
*/
// PORTING NOTE: Multi-tab only.
this.documentKeysByBatchId = {};
}
/**
* Creates a new mutation queue for the given user.
* @param user - The user for which to create a mutation queue.
* @param serializer - The serializer to use when persisting to IndexedDb.
*/
IndexedDbMutationQueue.forUser = function (user, serializer, indexManager, referenceDelegate) {
// TODO(mcg): Figure out what constraints there are on userIDs.
// In particular, are there any reserved characters? Are empty IDs allowed?
// For the moment, store these together in the same mutations table, assuming
// that empty userIDs aren't allowed.
hardAssert(user.uid !== '');
var userId = user.isAuthenticated() ? user.uid : '';
return new IndexedDbMutationQueue(userId, serializer, indexManager, referenceDelegate);
};
IndexedDbMutationQueue.prototype.checkEmpty = function (transaction) {
var empty = true;
var range = IDBKeyRange.bound([this.userId, Number.NEGATIVE_INFINITY], [this.userId, Number.POSITIVE_INFINITY]);
return mutationsStore(transaction)
.iterate({ index: DbMutationBatch.userMutationsIndex, range: range }, function (key, value, control) {
empty = false;
control.done();
})
.next(function () { return empty; });
};
IndexedDbMutationQueue.prototype.addMutationBatch = function (transaction, localWriteTime, baseMutations, mutations) {
var _this = this;
var documentStore = documentMutationsStore(transaction);
var mutationStore = mutationsStore(transaction);
// The IndexedDb implementation in Chrome (and Firefox) does not handle
// compound indices that include auto-generated keys correctly. To ensure
// that the index entry is added correctly in all browsers, we perform two
// writes: The first write is used to retrieve the next auto-generated Batch
// ID, and the second write populates the index and stores the actual
// mutation batch.
// See: https://bugs.chromium.org/p/chromium/issues/detail?id=701972
// We write an empty object first to obtain the auto-generated batch ID (the key).
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return mutationStore.add({}).next(function (batchId) {
hardAssert(typeof batchId === 'number');
var batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
var dbBatch = toDbMutationBatch(_this.serializer, _this.userId, batch);
var promises = [];
var collectionParents = new SortedSet(function (l, r) { return primitiveComparator(l.canonicalString(), r.canonicalString()); });
for (var _i = 0, mutations_1 = mutations; _i < mutations_1.length; _i++) {
var mutation = mutations_1[_i];
var indexKey = DbDocumentMutation.key(_this.userId, mutation.key.path, batchId);
collectionParents = collectionParents.add(mutation.key.path.popLast());
promises.push(mutationStore.put(dbBatch));
promises.push(documentStore.put(indexKey, DbDocumentMutation.PLACEHOLDER));
}
collectionParents.forEach(function (parent) {
promises.push(_this.indexManager.addToCollectionParentIndex(transaction, parent));
});
transaction.addOnCommittedListener(function () {
_this.documentKeysByBatchId[batchId] = batch.keys();
});
return PersistencePromise.waitFor(promises).next(function () { return batch; });
});
};
IndexedDbMutationQueue.prototype.lookupMutationBatch = function (transaction, batchId) {
var _this = this;
return mutationsStore(transaction)
.get(batchId)
.next(function (dbBatch) {
if (dbBatch) {
hardAssert(dbBatch.userId === _this.userId);
return fromDbMutationBatch(_this.serializer, dbBatch);
}
return null;
});
};
/**
* Returns the document keys for the mutation batch with the given batchId.
* For primary clients, this method returns `null` after
* `removeMutationBatches()` has been called. Secondary clients return a
* cached result until `removeCachedMutationKeys()` is invoked.
*/
// PORTING NOTE: Multi-tab only.
IndexedDbMutationQueue.prototype.lookupMutationKeys = function (transaction, batchId) {
var _this = this;
if (this.documentKeysByBatchId[batchId]) {
return PersistencePromise.resolve(this.documentKeysByBatchId[batchId]);
}
else {
return this.lookupMutationBatch(transaction, batchId).next(function (batch) {
if (batch) {
var keys = batch.keys();
_this.documentKeysByBatchId[batchId] = keys;
return keys;
}
else {
return null;
}
});
}
};
IndexedDbMutationQueue.prototype.getNextMutationBatchAfterBatchId = function (transaction, batchId) {
var _this = this;
var nextBatchId = batchId + 1;
var range = IDBKeyRange.lowerBound([this.userId, nextBatchId]);
var foundBatch = null;
return mutationsStore(transaction)
.iterate({ index: DbMutationBatch.userMutationsIndex, range: range }, function (key, dbBatch, control) {
if (dbBatch.userId === _this.userId) {
hardAssert(dbBatch.batchId >= nextBatchId);
foundBatch = fromDbMutationBatch(_this.serializer, dbBatch);
}
control.done();
})
.next(function () { return foundBatch; });
};
IndexedDbMutationQueue.prototype.getHighestUnacknowledgedBatchId = function (transaction) {
var range = IDBKeyRange.upperBound([
this.userId,
Number.POSITIVE_INFINITY
]);
var batchId = BATCHID_UNKNOWN;
return mutationsStore(transaction)
.iterate({ index: DbMutationBatch.userMutationsIndex, range: range, reverse: true }, function (key, dbBatch, control) {
batchId = dbBatch.batchId;
control.done();
})
.next(function () { return batchId; });
};
IndexedDbMutationQueue.prototype.getAllMutationBatches = function (transaction) {
var _this = this;
var range = IDBKeyRange.bound([this.userId, BATCHID_UNKNOWN], [this.userId, Number.POSITIVE_INFINITY]);
return mutationsStore(transaction)
.loadAll(DbMutationBatch.userMutationsIndex, range)
.next(function (dbBatches) { return dbBatches.map(function (dbBatch) { return fromDbMutationBatch(_this.serializer, dbBatch); }); });
};
IndexedDbMutationQueue.prototype.getAllMutationBatchesAffectingDocumentKey = function (transaction, documentKey) {
var _this = this;
// Scan the document-mutation index starting with a prefix starting with
// the given documentKey.
var indexPrefix = DbDocumentMutation.prefixForPath(this.userId, documentKey.path);
var indexStart = IDBKeyRange.lowerBound(indexPrefix);
var results = [];
return documentMutationsStore(transaction)
.iterate({ range: indexStart }, function (indexKey, _, control) {
var userID = indexKey[0], encodedPath = indexKey[1], batchId = indexKey[2];
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
var path = decodeResourcePath(encodedPath);
if (userID !== _this.userId || !documentKey.path.isEqual(path)) {
control.done();
return;
}
// Look up the mutation batch in the store.
return mutationsStore(transaction)
.get(batchId)
.next(function (mutation) {
if (!mutation) {
throw fail();
}
hardAssert(mutation.userId === _this.userId);
results.push(fromDbMutationBatch(_this.serializer, mutation));
});
})
.next(function () { return results; });
};
IndexedDbMutationQueue.prototype.getAllMutationBatchesAffectingDocumentKeys = function (transaction, documentKeys) {
var _this = this;
var uniqueBatchIDs = new SortedSet(primitiveComparator);
var promises = [];
documentKeys.forEach(function (documentKey) {
var indexStart = DbDocumentMutation.prefixForPath(_this.userId, documentKey.path);
var range = IDBKeyRange.lowerBound(indexStart);
var promise = documentMutationsStore(transaction).iterate({ range: range }, function (indexKey, _, control) {
var userID = indexKey[0], encodedPath = indexKey[1], batchID = indexKey[2];
// Only consider rows matching exactly the specific key of
// interest. Note that because we order by path first, and we
// order terminators before path separators, we'll encounter all
// the index rows for documentKey contiguously. In particular, all
// the rows for documentKey will occur before any rows for
// documents nested in a subcollection beneath documentKey so we
// can stop as soon as we hit any such row.
var path = decodeResourcePath(encodedPath);
if (userID !== _this.userId || !documentKey.path.isEqual(path)) {
control.done();
return;
}
uniqueBatchIDs = uniqueBatchIDs.add(batchID);
});
promises.push(promise);
});
return PersistencePromise.waitFor(promises).next(function () { return _this.lookupMutationBatches(transaction, uniqueBatchIDs); });
};
IndexedDbMutationQueue.prototype.getAllMutationBatchesAffectingQuery = function (transaction, query) {
var _this = this;
var queryPath = query.path;
var immediateChildrenLength = queryPath.length + 1;
// TODO(mcg): Actually implement a single-collection query
//
// This is actually executing an ancestor query, traversing the whole
// subtree below the collection which can be horrifically inefficient for
// some structures. The right way to solve this is to implement the full
// value index, but that's not in the cards in the near future so this is
// the best we can do for the moment.
//
// Since we don't yet index the actual properties in the mutations, our
// current approach is to just return all mutation batches that affect
// documents in the collection being queried.
var indexPrefix = DbDocumentMutation.prefixForPath(this.userId, queryPath);
var indexStart = IDBKeyRange.lowerBound(indexPrefix);
// Collect up unique batchIDs encountered during a scan of the index. Use a
// SortedSet to accumulate batch IDs so they can be traversed in order in a
// scan of the main table.
var uniqueBatchIDs = new SortedSet(primitiveComparator);
return documentMutationsStore(transaction)
.iterate({ range: indexStart }, function (indexKey, _, control) {
var userID = indexKey[0], encodedPath = indexKey[1], batchID = indexKey[2];
var path = decodeResourcePath(encodedPath);
if (userID !== _this.userId || !queryPath.isPrefixOf(path)) {
control.done();
return;
}
// Rows with document keys more than one segment longer than the
// query path can't be matches. For example, a query on 'rooms'
// can't match the document /rooms/abc/messages/xyx.
// TODO(mcg): we'll need a different scanner when we implement
// ancestor queries.
if (path.length !== immediateChildrenLength) {
return;
}
uniqueBatchIDs = uniqueBatchIDs.add(batchID);
})
.next(function () { return _this.lookupMutationBatches(transaction, uniqueBatchIDs); });
};
IndexedDbMutationQueue.prototype.lookupMutationBatches = function (transaction, batchIDs) {
var _this = this;
var results = [];
var promises = [];
// TODO(rockwood): Implement this using iterate.
batchIDs.forEach(function (batchId) {
promises.push(mutationsStore(transaction)
.get(batchId)
.next(function (mutation) {
if (mutation === null) {
throw fail();
}
hardAssert(mutation.userId === _this.userId);
results.push(fromDbMutationBatch(_this.serializer, mutation));
}));
});
return PersistencePromise.waitFor(promises).next(function () { return results; });
};
IndexedDbMutationQueue.prototype.removeMutationBatch = function (transaction, batch) {
var _this = this;
return removeMutationBatch(transaction.simpleDbTransaction, this.userId, batch).next(function (removedDocuments) {
transaction.addOnCommittedListener(function () {
_this.removeCachedMutationKeys(batch.batchId);
});
return PersistencePromise.forEach(removedDocuments, function (key) {
return _this.referenceDelegate.markPotentiallyOrphaned(transaction, key);
});
});
};
/**
* Clears the cached keys for a mutation batch. This method should be
* called by secondary clients after they process mutation updates.
*
* Note that this method does not have to be called from primary clients as
* the corresponding cache entries are cleared when an acknowledged or
* rejected batch is removed from the mutation queue.
*/
// PORTING NOTE: Multi-tab only
IndexedDbMutationQueue.prototype.removeCachedMutationKeys = function (batchId) {
delete this.documentKeysByBatchId[batchId];
};
IndexedDbMutationQueue.prototype.performConsistencyCheck = function (txn) {
var _this = this;
return this.checkEmpty(txn).next(function (empty) {
if (!empty) {
return PersistencePromise.resolve();
}
// Verify that there are no entries in the documentMutations index if
// the queue is empty.
var startRange = IDBKeyRange.lowerBound(DbDocumentMutation.prefixForUser(_this.userId));
var danglingMutationReferences = [];
return documentMutationsStore(txn)
.iterate({ range: startRange }, function (key, _, control) {
var userID = key[0];
if (userID !== _this.userId) {
control.done();
return;
}
else {
var path = decodeResourcePath(key[1]);
danglingMutationReferences.push(path);
}
})
.next(function () {
hardAssert(danglingMutationReferences.length === 0);
});
});
};
IndexedDbMutationQueue.prototype.containsKey = function (txn, key) {
return mutationQueueContainsKey(txn, this.userId, key);
};
// PORTING NOTE: Multi-tab only (state is held in memory in other clients).
/** Returns the mutation queue's metadata from IndexedDb. */
IndexedDbMutationQueue.prototype.getMutationQueueMetadata = function (transaction) {
var _this = this;
return mutationQueuesStore(transaction)
.get(this.userId)
.next(function (metadata) {
return (metadata ||
new DbMutationQueue(_this.userId, BATCHID_UNKNOWN,
/*lastStreamToken=*/ ''));
});
};
return IndexedDbMutationQueue;
}());
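// Illustrative sketch (not part of the SDK): rows in the documentMutations index
// are keyed by [userId, encodedResourcePath, batchId], which is why the methods
// above can prefix-scan and stop at the first mismatching row, e.g.:
//
//   var prefix = DbDocumentMutation.prefixForPath(userId, documentKey.path);
//   var range = IDBKeyRange.lowerBound(prefix);
//   // iterate({ range: range }, function (indexKey, _, control) {
//   //   if (indexKey[0] !== userId ||
//   //       !documentKey.path.isEqual(decodeResourcePath(indexKey[1]))) {
//   //     control.done(); // left the region of interest
//   //   }
//   // });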
/**
* @returns true if the mutation queue for the given user contains a pending
* mutation for the given key.
*/
function mutationQueueContainsKey(txn, userId, key) {
var indexKey = DbDocumentMutation.prefixForPath(userId, key.path);
var encodedPath = indexKey[1];
var startRange = IDBKeyRange.lowerBound(indexKey);
var containsKey = false;
return documentMutationsStore(txn)
.iterate({ range: startRange, keysOnly: true }, function (key, value, control) {
var userID = key[0], keyPath = key[1]; // key[2] is the batchID, unused here
if (userID === userId && keyPath === encodedPath) {
containsKey = true;
}
control.done();
})
.next(function () { return containsKey; });
}
/** Returns true if any mutation queue contains the given document. */
function mutationQueuesContainKey(txn, docKey) {
var found = false;
return mutationQueuesStore(txn)
.iterateSerial(function (userId) {
return mutationQueueContainsKey(txn, userId, docKey).next(function (containsKey) {
if (containsKey) {
found = true;
}
return PersistencePromise.resolve(!containsKey);
});
})
.next(function () { return found; });
}
/**
* Helper to get a typed SimpleDbStore for the mutations object store.
*/
function mutationsStore(txn) {
return getStore(txn, DbMutationBatch.store);
}
/**
* Helper to get a typed SimpleDbStore for the documentMutations object store.
*/
function documentMutationsStore(txn) {
return getStore(txn, DbDocumentMutation.store);
}
/**
* Helper to get a typed SimpleDbStore for the mutationQueues object store.
*/
function mutationQueuesStore(txn) {
return getStore(txn, DbMutationQueue.store);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Offset to ensure non-overlapping target ids. */
var OFFSET = 2;
/**
* Generates monotonically increasing target IDs for sending targets to the
* watch stream.
*
* The client constructs two generators, one for the target cache, and one
* for the sync engine (to generate limbo document targets). These
* generators produce non-overlapping IDs (by using even and odd IDs
* respectively).
*
* By separating the target ID space, the query cache can generate target IDs
* that persist across client restarts, while sync engine can independently
* generate in-memory target IDs that are transient and can be reused after a
* restart.
*/
var TargetIdGenerator = /** @class */ (function () {
function TargetIdGenerator(lastId) {
this.lastId = lastId;
}
TargetIdGenerator.prototype.next = function () {
this.lastId += OFFSET;
return this.lastId;
};
TargetIdGenerator.forTargetCache = function () {
// The target cache generator must return '2' in its first call to `next()`
// as there is no differentiation in the protocol layer between an unset
* number and the number '0'. If we were to send a target with target ID
// '0', the backend would consider it unset and replace it with its own ID.
return new TargetIdGenerator(2 - OFFSET);
};
TargetIdGenerator.forSyncEngine = function () {
// Sync engine assigns target IDs for limbo document detection.
return new TargetIdGenerator(1 - OFFSET);
};
return TargetIdGenerator;
}());
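// Illustrative sketch (not part of the SDK): the two generators interleave even and
// odd IDs, so their ID spaces never collide:
//
//   var cacheIds = TargetIdGenerator.forTargetCache();
//   cacheIds.next(); // 2
//   cacheIds.next(); // 4
//   var syncIds = TargetIdGenerator.forSyncEngine();
//   syncIds.next();  // 1
//   syncIds.next();  // 3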
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var IndexedDbTargetCache = /** @class */ (function () {
function IndexedDbTargetCache(referenceDelegate, serializer) {
this.referenceDelegate = referenceDelegate;
this.serializer = serializer;
}
// PORTING NOTE: We don't cache global metadata for the target cache, since
// some of it (in particular `highestTargetId`) can be modified by secondary
// tabs. We could perhaps be more granular (and e.g. still cache
// `lastRemoteSnapshotVersion` in memory) but for simplicity we currently go
// to IndexedDb whenever we need to read metadata. We can revisit if it turns
// out to have a meaningful performance impact.
IndexedDbTargetCache.prototype.allocateTargetId = function (transaction) {
var _this = this;
return this.retrieveMetadata(transaction).next(function (metadata) {
var targetIdGenerator = new TargetIdGenerator(metadata.highestTargetId);
metadata.highestTargetId = targetIdGenerator.next();
return _this.saveMetadata(transaction, metadata).next(function () { return metadata.highestTargetId; });
});
};
IndexedDbTargetCache.prototype.getLastRemoteSnapshotVersion = function (transaction) {
return this.retrieveMetadata(transaction).next(function (metadata) {
return SnapshotVersion.fromTimestamp(new Timestamp(metadata.lastRemoteSnapshotVersion.seconds, metadata.lastRemoteSnapshotVersion.nanoseconds));
});
};
IndexedDbTargetCache.prototype.getHighestSequenceNumber = function (transaction) {
return this.retrieveMetadata(transaction).next(function (targetGlobal) { return targetGlobal.highestListenSequenceNumber; });
};
IndexedDbTargetCache.prototype.setTargetsMetadata = function (transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
var _this = this;
return this.retrieveMetadata(transaction).next(function (metadata) {
metadata.highestListenSequenceNumber = highestListenSequenceNumber;
if (lastRemoteSnapshotVersion) {
metadata.lastRemoteSnapshotVersion = lastRemoteSnapshotVersion.toTimestamp();
}
if (highestListenSequenceNumber > metadata.highestListenSequenceNumber) {
metadata.highestListenSequenceNumber = highestListenSequenceNumber;
}
return _this.saveMetadata(transaction, metadata);
});
};
IndexedDbTargetCache.prototype.addTargetData = function (transaction, targetData) {
var _this = this;
return this.saveTargetData(transaction, targetData).next(function () {
return _this.retrieveMetadata(transaction).next(function (metadata) {
metadata.targetCount += 1;
_this.updateMetadataFromTargetData(targetData, metadata);
return _this.saveMetadata(transaction, metadata);
});
});
};
IndexedDbTargetCache.prototype.updateTargetData = function (transaction, targetData) {
return this.saveTargetData(transaction, targetData);
};
IndexedDbTargetCache.prototype.removeTargetData = function (transaction, targetData) {
var _this = this;
return this.removeMatchingKeysForTargetId(transaction, targetData.targetId)
.next(function () { return targetsStore(transaction).delete(targetData.targetId); })
.next(function () { return _this.retrieveMetadata(transaction); })
.next(function (metadata) {
hardAssert(metadata.targetCount > 0);
metadata.targetCount -= 1;
return _this.saveMetadata(transaction, metadata);
});
};
/**
* Drops any targets with sequence number less than or equal to the upper bound, excepting those
* present in `activeTargetIds`. Document associations for the removed targets are also removed.
* Returns the number of targets removed.
*/
IndexedDbTargetCache.prototype.removeTargets = function (txn, upperBound, activeTargetIds) {
var _this = this;
var count = 0;
var promises = [];
return targetsStore(txn)
.iterate(function (key, value) {
var targetData = fromDbTarget(value);
if (targetData.sequenceNumber <= upperBound &&
activeTargetIds.get(targetData.targetId) === null) {
count++;
promises.push(_this.removeTargetData(txn, targetData));
}
})
.next(function () { return PersistencePromise.waitFor(promises); })
.next(function () { return count; });
};
/**
* Call provided function with each `TargetData` that we have cached.
*/
IndexedDbTargetCache.prototype.forEachTarget = function (txn, f) {
return targetsStore(txn).iterate(function (key, value) {
var targetData = fromDbTarget(value);
f(targetData);
});
};
IndexedDbTargetCache.prototype.retrieveMetadata = function (transaction) {
return globalTargetStore(transaction)
.get(DbTargetGlobal.key)
.next(function (metadata) {
hardAssert(metadata !== null);
return metadata;
});
};
IndexedDbTargetCache.prototype.saveMetadata = function (transaction, metadata) {
return globalTargetStore(transaction).put(DbTargetGlobal.key, metadata);
};
IndexedDbTargetCache.prototype.saveTargetData = function (transaction, targetData) {
return targetsStore(transaction).put(toDbTarget(this.serializer, targetData));
};
/**
* In-place updates the provided metadata to account for values in the given
* TargetData. Saving is done separately. Returns true if there were any
* changes to the metadata.
*/
IndexedDbTargetCache.prototype.updateMetadataFromTargetData = function (targetData, metadata) {
var updated = false;
if (targetData.targetId > metadata.highestTargetId) {
metadata.highestTargetId = targetData.targetId;
updated = true;
}
if (targetData.sequenceNumber > metadata.highestListenSequenceNumber) {
metadata.highestListenSequenceNumber = targetData.sequenceNumber;
updated = true;
}
return updated;
};
IndexedDbTargetCache.prototype.getTargetCount = function (transaction) {
return this.retrieveMetadata(transaction).next(function (metadata) { return metadata.targetCount; });
};
IndexedDbTargetCache.prototype.getTargetData = function (transaction, target) {
// Iterating by the canonicalId may yield more than one result because
// canonicalId values are not required to be unique per target. This query
// depends on the queryTargets index to be efficient.
var canonicalId = canonifyTarget(target);
var range = IDBKeyRange.bound([canonicalId, Number.NEGATIVE_INFINITY], [canonicalId, Number.POSITIVE_INFINITY]);
var result = null;
return targetsStore(transaction)
.iterate({ range: range, index: DbTarget.queryTargetsIndexName }, function (key, value, control) {
var found = fromDbTarget(value);
// After finding a potential match, check that the target is
// actually equal to the requested target.
if (targetEquals(target, found.target)) {
result = found;
control.done();
}
})
.next(function () { return result; });
};
IndexedDbTargetCache.prototype.addMatchingKeys = function (txn, keys, targetId) {
var _this = this;
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
var promises = [];
var store = documentTargetStore(txn);
keys.forEach(function (key) {
var path = encodeResourcePath(key.path);
promises.push(store.put(new DbTargetDocument(targetId, path)));
promises.push(_this.referenceDelegate.addReference(txn, targetId, key));
});
return PersistencePromise.waitFor(promises);
};
IndexedDbTargetCache.prototype.removeMatchingKeys = function (txn, keys, targetId) {
var _this = this;
// PORTING NOTE: The reverse index (documentsTargets) is maintained by
// IndexedDb.
var store = documentTargetStore(txn);
return PersistencePromise.forEach(keys, function (key) {
var path = encodeResourcePath(key.path);
return PersistencePromise.waitFor([
store.delete([targetId, path]),
_this.referenceDelegate.removeReference(txn, targetId, key)
]);
});
};
IndexedDbTargetCache.prototype.removeMatchingKeysForTargetId = function (txn, targetId) {
var store = documentTargetStore(txn);
var range = IDBKeyRange.bound([targetId], [targetId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
return store.delete(range);
};
IndexedDbTargetCache.prototype.getMatchingKeysForTargetId = function (txn, targetId) {
var range = IDBKeyRange.bound([targetId], [targetId + 1],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
var store = documentTargetStore(txn);
var result = documentKeySet();
return store
.iterate({ range: range, keysOnly: true }, function (key, _, control) {
var path = decodeResourcePath(key[1]);
var docKey = new DocumentKey(path);
result = result.add(docKey);
})
.next(function () { return result; });
};
IndexedDbTargetCache.prototype.containsKey = function (txn, key) {
var path = encodeResourcePath(key.path);
var range = IDBKeyRange.bound([path], [immediateSuccessor(path)],
/*lowerOpen=*/ false,
/*upperOpen=*/ true);
var count = 0;
return documentTargetStore(txn)
.iterate({
index: DbTargetDocument.documentTargetsIndex,
keysOnly: true,
range: range
}, function (_d, _, control) {
var targetId = _d[0]; // _d[1] is the encoded path, unused here
// Having a sentinel row for a document does not count as containing that document;
// For the target cache, containing the document means the document is part of some
// target.
if (targetId !== 0) {
count++;
control.done();
}
})
.next(function () { return count > 0; });
};
/**
* Looks up a TargetData entry by target ID.
*
* @param targetId - The target ID of the TargetData entry to look up.
* @returns The cached TargetData entry, or null if the cache has no entry for
* the target.
*/
// PORTING NOTE: Multi-tab only.
IndexedDbTargetCache.prototype.getTargetDataForTarget = function (transaction, targetId) {
return targetsStore(transaction)
.get(targetId)
.next(function (found) {
if (found) {
return fromDbTarget(found);
}
else {
return null;
}
});
};
return IndexedDbTargetCache;
}());
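// Illustrative sketch (not part of the SDK): document-target associations are keyed
// by [targetId, encodedPath], so all rows for one target can be selected with a
// half-open range over the first key component, as removeMatchingKeysForTargetId
// and getMatchingKeysForTargetId do above:
//
//   // every key >= [42] and < [43], i.e. every row whose targetId is 42
//   var range = IDBKeyRange.bound([42], [43], /*lowerOpen=*/ false, /*upperOpen=*/ true);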
/**
* Helper to get a typed SimpleDbStore for the targets object store.
*/
function targetsStore(txn) {
return getStore(txn, DbTarget.store);
}
/**
* Helper to get a typed SimpleDbStore for the target globals object store.
*/
function globalTargetStore(txn) {
return getStore(txn, DbTargetGlobal.store);
}
/**
* Helper to get a typed SimpleDbStore for the document target object store.
*/
function documentTargetStore(txn) {
return getStore(txn, DbTargetDocument.store);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Verifies the error thrown by a LocalStore operation. If a LocalStore
* operation fails because the primary lease has been taken by another client,
* we ignore the error (the persistence layer will immediately call
* `applyPrimaryLease` to propagate the primary state change). All other errors
* are re-thrown.
*
* @param err - An error returned by a LocalStore operation.
* @returns A Promise that resolves after we recovered, or the original error.
*/
function ignoreIfPrimaryLeaseLoss(err) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
if (err.code === Code.FAILED_PRECONDITION &&
err.message === PRIMARY_LEASE_LOST_ERROR_MSG) {
logDebug('LocalStore', 'Unexpectedly lost primary lease');
}
else {
throw err;
}
return [2 /*return*/];
});
});
}
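// Illustrative usage sketch (an assumption, not taken from this file): callers are
// expected to attach this as a catch handler on LocalStore operations so that a
// lost primary lease degrades silently while other errors still propagate:
//
//   localStore.collectGarbage(garbageCollector).catch(ignoreIfPrimaryLeaseLoss);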
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var GC_DID_NOT_RUN = {
didRun: false,
sequenceNumbersCollected: 0,
targetsRemoved: 0,
documentsRemoved: 0
};
var LRU_COLLECTION_DISABLED = -1;
var LRU_DEFAULT_CACHE_SIZE_BYTES = 40 * 1024 * 1024;
var LruParams = /** @class */ (function () {
function LruParams(
// When we attempt to collect, we will only do so if the cache size is greater than this
// threshold. Passing `COLLECTION_DISABLED` here will cause collection to always be skipped.
cacheSizeCollectionThreshold,
// The percentage of sequence numbers that we will attempt to collect
percentileToCollect,
// A cap on the total number of sequence numbers that will be collected. This prevents
// us from collecting a huge number of sequence numbers if the cache has grown very large.
maximumSequenceNumbersToCollect) {
this.cacheSizeCollectionThreshold = cacheSizeCollectionThreshold;
this.percentileToCollect = percentileToCollect;
this.maximumSequenceNumbersToCollect = maximumSequenceNumbersToCollect;
}
LruParams.withCacheSize = function (cacheSize) {
return new LruParams(cacheSize, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
};
return LruParams;
}());
LruParams.DEFAULT_COLLECTION_PERCENTILE = 10;
LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT = 1000;
LruParams.DEFAULT = new LruParams(LRU_DEFAULT_CACHE_SIZE_BYTES, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
LruParams.DISABLED = new LruParams(LRU_COLLECTION_DISABLED, 0, 0);
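// Illustrative sketch (not part of the SDK): the preset parameter sets above differ
// only in their thresholds:
//
//   LruParams.DEFAULT;                          // 40 MiB threshold, 10th percentile, cap 1000
//   LruParams.withCacheSize(100 * 1024 * 1024); // 100 MiB threshold, same percentile and cap
//   LruParams.DISABLED;                         // threshold -1, so collection is always skipped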
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$e = 'LruGarbageCollector';
var LRU_MINIMUM_CACHE_SIZE_BYTES = 1 * 1024 * 1024;
/** How long we wait to try running LRU GC after SDK initialization. */
var INITIAL_GC_DELAY_MS = 1 * 60 * 1000;
/** Minimum amount of time between GC checks, after the first one. */
var REGULAR_GC_DELAY_MS = 5 * 60 * 1000;
function bufferEntryComparator(_d, _e) {
var aSequence = _d[0], aIndex = _d[1];
var bSequence = _e[0], bIndex = _e[1];
var seqCmp = primitiveComparator(aSequence, bSequence);
if (seqCmp === 0) {
// This order doesn't matter, but we can bias against churn by sorting
// entries created earlier as less than newer entries.
return primitiveComparator(aIndex, bIndex);
}
else {
return seqCmp;
}
}
/**
* Used to calculate the nth sequence number. Keeps a rolling buffer of the
* lowest n values passed to `addElement`, and finally reports the largest of
* them in `maxValue`.
*/
var RollingSequenceNumberBuffer = /** @class */ (function () {
function RollingSequenceNumberBuffer(maxElements) {
this.maxElements = maxElements;
this.buffer = new SortedSet(bufferEntryComparator);
this.previousIndex = 0;
}
RollingSequenceNumberBuffer.prototype.nextIndex = function () {
return ++this.previousIndex;
};
RollingSequenceNumberBuffer.prototype.addElement = function (sequenceNumber) {
var entry = [sequenceNumber, this.nextIndex()];
if (this.buffer.size < this.maxElements) {
this.buffer = this.buffer.add(entry);
}
else {
var highestValue = this.buffer.last();
if (bufferEntryComparator(entry, highestValue) < 0) {
this.buffer = this.buffer.delete(highestValue).add(entry);
}
}
};
Object.defineProperty(RollingSequenceNumberBuffer.prototype, "maxValue", {
get: function () {
// Guaranteed to be non-empty. If we decide we are not collecting any
// sequence numbers, nthSequenceNumber below short-circuits. If we have
// decided that we are collecting n sequence numbers, it's because n is some
// percentage of the existing sequence numbers. That means we should never
// be in a situation where we are collecting sequence numbers but don't
// actually have any.
return this.buffer.last()[0];
},
enumerable: false,
configurable: true
});
return RollingSequenceNumberBuffer;
}());
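// Illustrative sketch (not part of the SDK): with maxElements = 3 the buffer keeps
// the three lowest sequence numbers seen and `maxValue` reports the largest of
// them, i.e. the 3rd-lowest overall:
//
//   var buffer = new RollingSequenceNumberBuffer(3);
//   [10, 4, 7, 12, 5].forEach(function (n) { buffer.addElement(n); });
//   buffer.maxValue; // 7 (the retained entries are 4, 5 and 7)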
/**
* This class is responsible for the scheduling of LRU garbage collection. It handles checking
* whether or not GC is enabled, as well as which delay to use before the next run.
*/
var LruScheduler = /** @class */ (function () {
function LruScheduler(garbageCollector, asyncQueue) {
this.garbageCollector = garbageCollector;
this.asyncQueue = asyncQueue;
this.hasRun = false;
this.gcTask = null;
}
LruScheduler.prototype.start = function (localStore) {
if (this.garbageCollector.params.cacheSizeCollectionThreshold !==
LRU_COLLECTION_DISABLED) {
this.scheduleGC(localStore);
}
};
LruScheduler.prototype.stop = function () {
if (this.gcTask) {
this.gcTask.cancel();
this.gcTask = null;
}
};
Object.defineProperty(LruScheduler.prototype, "started", {
get: function () {
return this.gcTask !== null;
},
enumerable: false,
configurable: true
});
LruScheduler.prototype.scheduleGC = function (localStore) {
var _this = this;
var delay = this.hasRun ? REGULAR_GC_DELAY_MS : INITIAL_GC_DELAY_MS;
logDebug('LruGarbageCollector', "Garbage collection scheduled in " + delay + "ms");
this.gcTask = this.asyncQueue.enqueueAfterDelay("lru_garbage_collection" /* LruGarbageCollection */, delay, function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var e_1;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
this.gcTask = null;
this.hasRun = true;
_d.label = 1;
case 1:
_d.trys.push([1, 3, , 7]);
return [4 /*yield*/, localStore.collectGarbage(this.garbageCollector)];
case 2:
_d.sent();
return [3 /*break*/, 7];
case 3:
e_1 = _d.sent();
if (!isIndexedDbTransactionError(e_1)) return [3 /*break*/, 4];
logDebug(LOG_TAG$e, 'Ignoring IndexedDB error during garbage collection: ', e_1);
return [3 /*break*/, 6];
case 4: return [4 /*yield*/, ignoreIfPrimaryLeaseLoss(e_1)];
case 5:
_d.sent();
_d.label = 6;
case 6: return [3 /*break*/, 7];
case 7: return [4 /*yield*/, this.scheduleGC(localStore)];
case 8:
_d.sent();
return [2 /*return*/];
}
});
}); });
};
return LruScheduler;
}());
/** Implements the steps for LRU garbage collection. */
var LruGarbageCollectorImpl = /** @class */ (function () {
function LruGarbageCollectorImpl(delegate, params) {
this.delegate = delegate;
this.params = params;
}
LruGarbageCollectorImpl.prototype.calculateTargetCount = function (txn, percentile) {
return this.delegate.getSequenceNumberCount(txn).next(function (targetCount) {
return Math.floor((percentile / 100.0) * targetCount);
});
};
LruGarbageCollectorImpl.prototype.nthSequenceNumber = function (txn, n) {
var _this = this;
if (n === 0) {
return PersistencePromise.resolve(ListenSequence.INVALID);
}
var buffer = new RollingSequenceNumberBuffer(n);
return this.delegate
.forEachTarget(txn, function (target) { return buffer.addElement(target.sequenceNumber); })
.next(function () {
return _this.delegate.forEachOrphanedDocumentSequenceNumber(txn, function (sequenceNumber) { return buffer.addElement(sequenceNumber); });
})
.next(function () { return buffer.maxValue; });
};
LruGarbageCollectorImpl.prototype.removeTargets = function (txn, upperBound, activeTargetIds) {
return this.delegate.removeTargets(txn, upperBound, activeTargetIds);
};
LruGarbageCollectorImpl.prototype.removeOrphanedDocuments = function (txn, upperBound) {
return this.delegate.removeOrphanedDocuments(txn, upperBound);
};
LruGarbageCollectorImpl.prototype.collect = function (txn, activeTargetIds) {
var _this = this;
if (this.params.cacheSizeCollectionThreshold === LRU_COLLECTION_DISABLED) {
logDebug('LruGarbageCollector', 'Garbage collection skipped; disabled');
return PersistencePromise.resolve(GC_DID_NOT_RUN);
}
return this.getCacheSize(txn).next(function (cacheSize) {
if (cacheSize < _this.params.cacheSizeCollectionThreshold) {
logDebug('LruGarbageCollector', "Garbage collection skipped; Cache size " + cacheSize + " " +
("is lower than threshold " + _this.params.cacheSizeCollectionThreshold));
return GC_DID_NOT_RUN;
}
else {
return _this.runGarbageCollection(txn, activeTargetIds);
}
});
};
LruGarbageCollectorImpl.prototype.getCacheSize = function (txn) {
return this.delegate.getCacheSize(txn);
};
LruGarbageCollectorImpl.prototype.runGarbageCollection = function (txn, activeTargetIds) {
var _this = this;
var upperBoundSequenceNumber;
var sequenceNumbersToCollect, targetsRemoved;
// Timestamps for various pieces of the process
var countedTargetsTs, foundUpperBoundTs, removedTargetsTs, removedDocumentsTs;
var startTs = Date.now();
return this.calculateTargetCount(txn, this.params.percentileToCollect)
.next(function (sequenceNumbers) {
// Cap at the configured max
if (sequenceNumbers > _this.params.maximumSequenceNumbersToCollect) {
logDebug('LruGarbageCollector', 'Capping sequence numbers to collect down ' +
("to the maximum of " + _this.params.maximumSequenceNumbersToCollect + " ") +
("from " + sequenceNumbers));
sequenceNumbersToCollect = _this.params
.maximumSequenceNumbersToCollect;
}
else {
sequenceNumbersToCollect = sequenceNumbers;
}
countedTargetsTs = Date.now();
return _this.nthSequenceNumber(txn, sequenceNumbersToCollect);
})
.next(function (upperBound) {
upperBoundSequenceNumber = upperBound;
foundUpperBoundTs = Date.now();
return _this.removeTargets(txn, upperBoundSequenceNumber, activeTargetIds);
})
.next(function (numTargetsRemoved) {
targetsRemoved = numTargetsRemoved;
removedTargetsTs = Date.now();
return _this.removeOrphanedDocuments(txn, upperBoundSequenceNumber);
})
.next(function (documentsRemoved) {
removedDocumentsTs = Date.now();
if (getLogLevel() <= logger.LogLevel.DEBUG) {
var desc = 'LRU Garbage Collection\n' +
("\tCounted targets in " + (countedTargetsTs - startTs) + "ms\n") +
("\tDetermined least recently used " + sequenceNumbersToCollect + " in ") +
(foundUpperBoundTs - countedTargetsTs + "ms\n") +
("\tRemoved " + targetsRemoved + " targets in ") +
(removedTargetsTs - foundUpperBoundTs + "ms\n") +
("\tRemoved " + documentsRemoved + " documents in ") +
(removedDocumentsTs - removedTargetsTs + "ms\n") +
("Total Duration: " + (removedDocumentsTs - startTs) + "ms");
logDebug('LruGarbageCollector', desc);
}
return PersistencePromise.resolve({
didRun: true,
sequenceNumbersCollected: sequenceNumbersToCollect,
targetsRemoved: targetsRemoved,
documentsRemoved: documentsRemoved
});
});
};
return LruGarbageCollectorImpl;
}());
function newLruGarbageCollector(delegate, params) {
return new LruGarbageCollectorImpl(delegate, params);
}
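// Illustrative sketch (not part of the SDK): calculateTargetCount() turns the
// configured percentile into an absolute number of sequence numbers, which
// runGarbageCollection() then caps at maximumSequenceNumbersToCollect:
//
//   Math.floor((10 / 100.0) * 10000); // 1000 candidates for 10,000 sequence numbers
//   // 50,000 sequence numbers would yield 5000 and be capped down to the
//   // default maximum of 1000.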
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Provides LRU functionality for IndexedDB persistence. */
var IndexedDbLruDelegateImpl = /** @class */ (function () {
function IndexedDbLruDelegateImpl(db, params) {
this.db = db;
this.garbageCollector = newLruGarbageCollector(this, params);
}
IndexedDbLruDelegateImpl.prototype.getSequenceNumberCount = function (txn) {
var docCountPromise = this.orphanedDocumentCount(txn);
var targetCountPromise = this.db.getTargetCache().getTargetCount(txn);
return targetCountPromise.next(function (targetCount) { return docCountPromise.next(function (docCount) { return targetCount + docCount; }); });
};
IndexedDbLruDelegateImpl.prototype.orphanedDocumentCount = function (txn) {
var orphanedCount = 0;
return this.forEachOrphanedDocumentSequenceNumber(txn, function (_) {
orphanedCount++;
}).next(function () { return orphanedCount; });
};
IndexedDbLruDelegateImpl.prototype.forEachTarget = function (txn, f) {
return this.db.getTargetCache().forEachTarget(txn, f);
};
IndexedDbLruDelegateImpl.prototype.forEachOrphanedDocumentSequenceNumber = function (txn, f) {
return this.forEachOrphanedDocument(txn, function (docKey, sequenceNumber) { return f(sequenceNumber); });
};
IndexedDbLruDelegateImpl.prototype.addReference = function (txn, targetId, key) {
return writeSentinelKey(txn, key);
};
IndexedDbLruDelegateImpl.prototype.removeReference = function (txn, targetId, key) {
return writeSentinelKey(txn, key);
};
IndexedDbLruDelegateImpl.prototype.removeTargets = function (txn, upperBound, activeTargetIds) {
return this.db.getTargetCache().removeTargets(txn, upperBound, activeTargetIds);
};
IndexedDbLruDelegateImpl.prototype.markPotentiallyOrphaned = function (txn, key) {
return writeSentinelKey(txn, key);
};
/**
* Returns true if anything would prevent this document from being garbage
* collected, given that the document in question is not present in any
* targets and has a sequence number less than or equal to the upper bound for
* the collection run.
*/
IndexedDbLruDelegateImpl.prototype.isPinned = function (txn, docKey) {
return mutationQueuesContainKey(txn, docKey);
};
IndexedDbLruDelegateImpl.prototype.removeOrphanedDocuments = function (txn, upperBound) {
var _this = this;
var documentCache = this.db.getRemoteDocumentCache();
var changeBuffer = documentCache.newChangeBuffer();
var promises = [];
var documentCount = 0;
var iteration = this.forEachOrphanedDocument(txn, function (docKey, sequenceNumber) {
if (sequenceNumber <= upperBound) {
var p = _this.isPinned(txn, docKey).next(function (isPinned) {
if (!isPinned) {
documentCount++;
// Our size accounting requires us to read all documents before
// removing them.
return changeBuffer.getEntry(txn, docKey).next(function () {
changeBuffer.removeEntry(docKey);
return documentTargetStore(txn).delete(sentinelKey$1(docKey));
});
}
});
promises.push(p);
}
});
return iteration
.next(function () { return PersistencePromise.waitFor(promises); })
.next(function () { return changeBuffer.apply(txn); })
.next(function () { return documentCount; });
};
IndexedDbLruDelegateImpl.prototype.removeTarget = function (txn, targetData) {
var updated = targetData.withSequenceNumber(txn.currentSequenceNumber);
return this.db.getTargetCache().updateTargetData(txn, updated);
};
IndexedDbLruDelegateImpl.prototype.updateLimboDocument = function (txn, key) {
return writeSentinelKey(txn, key);
};
/**
* Call provided function for each document in the cache that is 'orphaned'. Orphaned
* means not a part of any target, so the only entry in the target-document index for
* that document will be the sentinel row (targetId 0), which will also have the sequence
* number for the last time the document was accessed.
*/
IndexedDbLruDelegateImpl.prototype.forEachOrphanedDocument = function (txn, f) {
var store = documentTargetStore(txn);
var nextToReport = ListenSequence.INVALID;
var nextPath;
return store
.iterate({
index: DbTargetDocument.documentTargetsIndex
}, function (_d, _e) {
var targetId = _d[0]; // _d[1] is the encoded path, unused here
var path = _e.path, sequenceNumber = _e.sequenceNumber;
if (targetId === 0) {
// if nextToReport is valid, report it, this is a new key so the
// last one must not be a member of any targets.
if (nextToReport !== ListenSequence.INVALID) {
f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
}
// set nextToReport to be this sequence number. It's the next one we
// might report, if we don't find any targets for this document.
// Note that the sequence number must be defined when the targetId
// is 0.
nextToReport = sequenceNumber;
nextPath = path;
}
else {
// set nextToReport to be invalid, we know we don't need to report
// this one since we found a target for it.
nextToReport = ListenSequence.INVALID;
}
})
.next(function () {
// Since we report sequence numbers after getting to the next key, we
// need to check if the last key we iterated over was an orphaned
// document and report it.
if (nextToReport !== ListenSequence.INVALID) {
f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
}
});
};
IndexedDbLruDelegateImpl.prototype.getCacheSize = function (txn) {
return this.db.getRemoteDocumentCache().getSize(txn);
};
return IndexedDbLruDelegateImpl;
}());
function sentinelKey$1(key) {
return [0, encodeResourcePath(key.path)];
}
/**
* @returns A value suitable for writing a sentinel row in the target-document
* store.
*/
function sentinelRow(key, sequenceNumber) {
return new DbTargetDocument(0, encodeResourcePath(key.path), sequenceNumber);
}
function writeSentinelKey(txn, key) {
return documentTargetStore(txn).put(sentinelRow(key, txn.currentSequenceNumber));
}
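// Illustrative sketch (not part of the SDK): sentinel rows share the target-document
// store with real target associations but always use targetId 0 and record the
// sequence number of the last access:
//
//   sentinelRow(documentKey, 17);
//   // => new DbTargetDocument(0, encodeResourcePath(documentKey.path), 17)
//   sentinelKey$1(documentKey);
//   // => [0, encodeResourcePath(documentKey.path)] (the key used to delete the row)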
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A map implementation that uses objects as keys. Objects must have an
* associated equals function and must be immutable. Entries in the map are
* stored together with the key being produced from the mapKeyFn. This map
* automatically handles collisions of keys.
*/
var ObjectMap = /** @class */ (function () {
function ObjectMap(mapKeyFn, equalsFn) {
this.mapKeyFn = mapKeyFn;
this.equalsFn = equalsFn;
/**
* The inner map for a key/value pair. Due to the possibility of collisions we
* keep a list of entries that we do a linear search through to find an actual
* match. Note that collisions should be rare, so we still expect near
* constant time lookups in practice.
*/
this.inner = {};
}
/** Get a value for this key, or undefined if it does not exist. */
ObjectMap.prototype.get = function (key) {
var id = this.mapKeyFn(key);
var matches = this.inner[id];
if (matches === undefined) {
return undefined;
}
for (var _i = 0, matches_1 = matches; _i < matches_1.length; _i++) {
var _d = matches_1[_i], otherKey = _d[0], value = _d[1];
if (this.equalsFn(otherKey, key)) {
return value;
}
}
return undefined;
};
ObjectMap.prototype.has = function (key) {
return this.get(key) !== undefined;
};
/** Put this key and value in the map. */
ObjectMap.prototype.set = function (key, value) {
var id = this.mapKeyFn(key);
var matches = this.inner[id];
if (matches === undefined) {
this.inner[id] = [[key, value]];
return;
}
for (var i = 0; i < matches.length; i++) {
if (this.equalsFn(matches[i][0], key)) {
matches[i] = [key, value];
return;
}
}
matches.push([key, value]);
};
/**
* Remove this key from the map. Returns a boolean if anything was deleted.
*/
ObjectMap.prototype.delete = function (key) {
var id = this.mapKeyFn(key);
var matches = this.inner[id];
if (matches === undefined) {
return false;
}
for (var i = 0; i < matches.length; i++) {
if (this.equalsFn(matches[i][0], key)) {
if (matches.length === 1) {
delete this.inner[id];
}
else {
matches.splice(i, 1);
}
return true;
}
}
return false;
};
ObjectMap.prototype.forEach = function (fn) {
forEach(this.inner, function (_, entries) {
for (var _i = 0, entries_2 = entries; _i < entries_2.length; _i++) {
var _d = entries_2[_i], k = _d[0], v = _d[1];
fn(k, v);
}
});
};
ObjectMap.prototype.isEmpty = function () {
return isEmpty(this.inner);
};
return ObjectMap;
}());
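// Illustrative sketch (not part of the SDK; the key shape here is hypothetical):
// two distinct keys that produce the same mapped id share a bucket and are told
// apart by equalsFn:
//
//   var map = new ObjectMap(
//     function (k) { return k.id; },                                  // mapKeyFn
//     function (l, r) { return l.id === r.id && l.gen === r.gen; }    // equalsFn
//   );
//   map.set({ id: 'a', gen: 1 }, 'first');
//   map.set({ id: 'a', gen: 2 }, 'second'); // collides on 'a', stored alongside
//   map.get({ id: 'a', gen: 2 });           // 'second'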
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An in-memory buffer of entries to be written to a RemoteDocumentCache.
* It can be used to batch up a set of changes to be written to the cache, but
* additionally supports reading entries back with the `getEntry()` method,
* falling back to the underlying RemoteDocumentCache if no entry is
* buffered.
*
* Entries added to the cache *must* be read first. This is to facilitate
* calculating the size delta of the pending changes.
*
* PORTING NOTE: This class was implemented then removed from other platforms.
* If byte-counting ends up being needed on the other platforms, consider
* porting this class as part of that implementation work.
*/
var RemoteDocumentChangeBuffer = /** @class */ (function () {
function RemoteDocumentChangeBuffer() {
// A mapping of document key to the new cache entry that should be written
// (an invalid document marks an existing cache entry that should be removed).
this.changes = new ObjectMap(function (key) { return key.toString(); }, function (l, r) { return l.isEqual(r); });
this.changesApplied = false;
}
RemoteDocumentChangeBuffer.prototype.getReadTime = function (key) {
var change = this.changes.get(key);
if (change) {
return change.readTime;
}
return SnapshotVersion.min();
};
/**
* Buffers a `RemoteDocumentCache.addEntry()` call.
*
* You can only modify documents that have already been retrieved via
* `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
*/
RemoteDocumentChangeBuffer.prototype.addEntry = function (document, readTime) {
this.assertNotApplied();
this.changes.set(document.key, { document: document, readTime: readTime });
};
/**
* Buffers a `RemoteDocumentCache.removeEntry()` call.
*
* You can only remove documents that have already been retrieved via
* `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
*/
RemoteDocumentChangeBuffer.prototype.removeEntry = function (key, readTime) {
if (readTime === void 0) { readTime = null; }
this.assertNotApplied();
this.changes.set(key, {
document: MutableDocument.newInvalidDocument(key),
readTime: readTime
});
};
/**
* Looks up an entry in the cache. The buffered changes will first be checked,
* and if no buffered change applies, this will forward to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction - The transaction in which to perform any persistence
* operations.
* @param documentKey - The key of the entry to look up.
* @returns The cached document or an invalid document if we have nothing
* cached.
*/
RemoteDocumentChangeBuffer.prototype.getEntry = function (transaction, documentKey) {
this.assertNotApplied();
var bufferedEntry = this.changes.get(documentKey);
if (bufferedEntry !== undefined) {
return PersistencePromise.resolve(bufferedEntry.document);
}
else {
return this.getFromCache(transaction, documentKey);
}
};
/**
* Looks up several entries in the cache, forwarding to
* `RemoteDocumentCache.getEntry()`.
*
* @param transaction - The transaction in which to perform any persistence
* operations.
* @param documentKeys - The keys of the entries to look up.
* @returns A map of cached documents, indexed by key. If an entry cannot be
* found, the corresponding key will be mapped to an invalid document.
*/
RemoteDocumentChangeBuffer.prototype.getEntries = function (transaction, documentKeys) {
return this.getAllFromCache(transaction, documentKeys);
};
/**
* Applies buffered changes to the underlying RemoteDocumentCache, using
* the provided transaction.
*/
RemoteDocumentChangeBuffer.prototype.apply = function (transaction) {
this.assertNotApplied();
this.changesApplied = true;
return this.applyChanges(transaction);
};
/**
 * Helper to assert that buffered changes have not already been applied.
 * The debug assertion is compiled out of this production build, so the
 * body is intentionally empty.
 */
RemoteDocumentChangeBuffer.prototype.assertNotApplied = function () {
};
return RemoteDocumentChangeBuffer;
}());
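// Illustrative sketch (not part of the SDK): the read-before-write contract means a
// buffered removal inside a transaction follows the same shape as the LRU delegate
// above:
//
//   changeBuffer
//     .getEntry(txn, docKey)                                   // read first (size accounting)
//     .next(function () { changeBuffer.removeEntry(docKey); }) // buffered only
//     .next(function () { return changeBuffer.apply(txn); });  // written to the cache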
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The RemoteDocumentCache for IndexedDb. To construct, invoke
* `newIndexedDbRemoteDocumentCache()`.
*/
var IndexedDbRemoteDocumentCacheImpl = /** @class */ (function () {
/**
* @param serializer - The document serializer.
* @param indexManager - The query indexes that need to be maintained.
*/
function IndexedDbRemoteDocumentCacheImpl(serializer, indexManager) {
this.serializer = serializer;
this.indexManager = indexManager;
}
/**
* Adds the supplied entries to the cache.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/
IndexedDbRemoteDocumentCacheImpl.prototype.addEntry = function (transaction, key, doc) {
var documentStore = remoteDocumentsStore(transaction);
return documentStore.put(dbKey(key), doc);
};
/**
* Removes a document from the cache.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()` to ensure proper accounting of metadata.
*/
IndexedDbRemoteDocumentCacheImpl.prototype.removeEntry = function (transaction, documentKey) {
var store = remoteDocumentsStore(transaction);
var key = dbKey(documentKey);
return store.delete(key);
};
/**
* Updates the current cache size.
*
* Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the
* cache's metadata.
*/
IndexedDbRemoteDocumentCacheImpl.prototype.updateMetadata = function (transaction, sizeDelta) {
var _this = this;
return this.getMetadata(transaction).next(function (metadata) {
metadata.byteSize += sizeDelta;
return _this.setMetadata(transaction, metadata);
});
};
IndexedDbRemoteDocumentCacheImpl.prototype.getEntry = function (transaction, documentKey) {
var _this = this;
return remoteDocumentsStore(transaction)
.get(dbKey(documentKey))
.next(function (dbRemoteDoc) {
return _this.maybeDecodeDocument(documentKey, dbRemoteDoc);
});
};
/**
* Looks up an entry in the cache.
*
* @param documentKey - The key of the entry to look up.
* @returns The cached document entry and its size.
*/
IndexedDbRemoteDocumentCacheImpl.prototype.getSizedEntry = function (transaction, documentKey) {
var _this = this;
return remoteDocumentsStore(transaction)
.get(dbKey(documentKey))
.next(function (dbRemoteDoc) {
var doc = _this.maybeDecodeDocument(documentKey, dbRemoteDoc);
return {
document: doc,
size: dbDocumentSize(dbRemoteDoc)
};
});
};
IndexedDbRemoteDocumentCacheImpl.prototype.getEntries = function (transaction, documentKeys) {
var _this = this;
var results = mutableDocumentMap();
return this.forEachDbEntry(transaction, documentKeys, function (key, dbRemoteDoc) {
var doc = _this.maybeDecodeDocument(key, dbRemoteDoc);
results = results.insert(key, doc);
}).next(function () { return results; });
};
/**
* Looks up several entries in the cache.
*
* @param documentKeys - The set of keys of the entries to look up.
* @returns A map of documents indexed by key and a map of sizes indexed by
* key (zero if the document does not exist).
*/
IndexedDbRemoteDocumentCacheImpl.prototype.getSizedEntries = function (transaction, documentKeys) {
var _this = this;
var results = mutableDocumentMap();
var sizeMap = new SortedMap(DocumentKey.comparator);
return this.forEachDbEntry(transaction, documentKeys, function (key, dbRemoteDoc) {
var doc = _this.maybeDecodeDocument(key, dbRemoteDoc);
results = results.insert(key, doc);
sizeMap = sizeMap.insert(key, dbDocumentSize(dbRemoteDoc));
}).next(function () {
return { documents: results, sizeMap: sizeMap };
});
};
IndexedDbRemoteDocumentCacheImpl.prototype.forEachDbEntry = function (transaction, documentKeys, callback) {
if (documentKeys.isEmpty()) {
return PersistencePromise.resolve();
}
var range = IDBKeyRange.bound(documentKeys.first().path.toArray(), documentKeys.last().path.toArray());
var keyIter = documentKeys.getIterator();
var nextKey = keyIter.getNext();
return remoteDocumentsStore(transaction)
.iterate({ range: range }, function (potentialKeyRaw, dbRemoteDoc, control) {
var potentialKey = DocumentKey.fromSegments(potentialKeyRaw);
// Go through keys not found in cache.
while (nextKey && DocumentKey.comparator(nextKey, potentialKey) < 0) {
callback(nextKey, null);
nextKey = keyIter.getNext();
}
if (nextKey && nextKey.isEqual(potentialKey)) {
// Key found in cache.
callback(nextKey, dbRemoteDoc);
nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
}
// Skip to the next key (if there is one).
if (nextKey) {
control.skip(nextKey.path.toArray());
}
else {
control.done();
}
})
.next(function () {
// The rest of the keys are not in the cache. One case where `iterate`
// above won't go through them is when the cache is empty.
while (nextKey) {
callback(nextKey, null);
nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
}
});
};
IndexedDbRemoteDocumentCacheImpl.prototype.getDocumentsMatchingQuery = function (transaction, query, sinceReadTime) {
var _this = this;
var results = mutableDocumentMap();
var immediateChildrenPathLength = query.path.length + 1;
var iterationOptions = {};
if (sinceReadTime.isEqual(SnapshotVersion.min())) {
// Documents are ordered by key, so we can use a prefix scan to narrow
// down the documents we need to match the query against.
var startKey = query.path.toArray();
iterationOptions.range = IDBKeyRange.lowerBound(startKey);
}
else {
// Execute an index-free query and filter by read time. This is safe
// since all document changes to queries that have a
// lastLimboFreeSnapshotVersion (`sinceReadTime`) have a read time set.
var collectionKey = query.path.toArray();
var readTimeKey = toDbTimestampKey(sinceReadTime);
iterationOptions.range = IDBKeyRange.lowerBound([collectionKey, readTimeKey],
/* open= */ true);
iterationOptions.index = DbRemoteDocument.collectionReadTimeIndex;
}
return remoteDocumentsStore(transaction)
.iterate(iterationOptions, function (key, dbRemoteDoc, control) {
// The query is actually returning any path that starts with the query
// path prefix which may include documents in subcollections. For
// example, a query on 'rooms' will return rooms/abc/messages/xyz but we
// shouldn't match it. Fix this by discarding rows with document keys
// more than one segment longer than the query path.
if (key.length !== immediateChildrenPathLength) {
return;
}
var document = fromDbRemoteDocument(_this.serializer, dbRemoteDoc);
if (!query.path.isPrefixOf(document.key.path)) {
control.done();
}
else if (queryMatches(query, document)) {
results = results.insert(document.key, document);
}
})
.next(function () { return results; });
};
IndexedDbRemoteDocumentCacheImpl.prototype.newChangeBuffer = function (options) {
return new IndexedDbRemoteDocumentChangeBuffer(this, !!options && options.trackRemovals);
};
IndexedDbRemoteDocumentCacheImpl.prototype.getSize = function (txn) {
return this.getMetadata(txn).next(function (metadata) { return metadata.byteSize; });
};
IndexedDbRemoteDocumentCacheImpl.prototype.getMetadata = function (txn) {
return documentGlobalStore(txn)
.get(DbRemoteDocumentGlobal.key)
.next(function (metadata) {
hardAssert(!!metadata);
return metadata;
});
};
IndexedDbRemoteDocumentCacheImpl.prototype.setMetadata = function (txn, metadata) {
return documentGlobalStore(txn).put(DbRemoteDocumentGlobal.key, metadata);
};
/**
* Decodes `dbRemoteDoc` and returns the document (or an invalid document if
* the entry is missing or corresponds to the sentinel-delete format).
*/
IndexedDbRemoteDocumentCacheImpl.prototype.maybeDecodeDocument = function (documentKey, dbRemoteDoc) {
if (dbRemoteDoc) {
var doc_4 = fromDbRemoteDocument(this.serializer, dbRemoteDoc);
// Sentinel removals are only surfaced by `getNewDocumentChanges()` and are
// treated as missing documents everywhere else.
var isSentinelRemoval = doc_4.isNoDocument() && doc_4.version.isEqual(SnapshotVersion.min());
if (!isSentinelRemoval) {
return doc_4;
}
}
return MutableDocument.newInvalidDocument(documentKey);
};
return IndexedDbRemoteDocumentCacheImpl;
}());
/**
* Creates a new IndexedDbRemoteDocumentCache.
*
* @param serializer - The document serializer.
* @param indexManager - The query indexes that need to be maintained.
*/
function newIndexedDbRemoteDocumentCache(serializer, indexManager) {
return new IndexedDbRemoteDocumentCacheImpl(serializer, indexManager);
}
/**
* Returns the set of documents that have changed since the specified read
* time.
*/
// PORTING NOTE: This is only used for multi-tab synchronization.
function remoteDocumentCacheGetNewDocumentChanges(remoteDocumentCache, transaction, sinceReadTime) {
var remoteDocumentCacheImpl = debugCast(remoteDocumentCache);
var changedDocs = mutableDocumentMap();
var lastReadTime = toDbTimestampKey(sinceReadTime);
var documentsStore = remoteDocumentsStore(transaction);
var range = IDBKeyRange.lowerBound(lastReadTime, true);
return documentsStore
.iterate({ index: DbRemoteDocument.readTimeIndex, range: range }, function (_, dbRemoteDoc) {
// Unlike `getEntry()` and others, `getNewDocumentChanges()` parses
// the documents directly since we want to keep sentinel deletes.
var doc = fromDbRemoteDocument(remoteDocumentCacheImpl.serializer, dbRemoteDoc);
changedDocs = changedDocs.insert(doc.key, doc);
lastReadTime = dbRemoteDoc.readTime;
})
.next(function () {
return {
changedDocs: changedDocs,
readTime: fromDbTimestampKey(lastReadTime)
};
});
}
/**
* Returns the read time of the most recently read document in the cache, or
* SnapshotVersion.min() if not available.
*/
// PORTING NOTE: This is only used for multi-tab synchronization.
function remoteDocumentCacheGetLastReadTime(transaction) {
var documentsStore = remoteDocumentsStore(transaction);
// If there are no existing entries, we return SnapshotVersion.min().
var readTime = SnapshotVersion.min();
return documentsStore
.iterate({ index: DbRemoteDocument.readTimeIndex, reverse: true }, function (key, dbRemoteDoc, control) {
if (dbRemoteDoc.readTime) {
readTime = fromDbTimestampKey(dbRemoteDoc.readTime);
}
control.done();
})
.next(function () { return readTime; });
}
/**
* Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.
*
* Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size
* delta for all submitted changes. This avoids having to re-read all documents from IndexedDb
* when we apply the changes.
*/
var IndexedDbRemoteDocumentChangeBuffer = /** @class */ (function (_super) {
tslib.__extends(IndexedDbRemoteDocumentChangeBuffer, _super);
/**
* @param documentCache - The IndexedDbRemoteDocumentCache to apply the changes to.
* @param trackRemovals - Whether to create sentinel deletes that can be tracked by
* `getNewDocumentChanges()`.
*/
function IndexedDbRemoteDocumentChangeBuffer(documentCache, trackRemovals) {
var _this = _super.call(this) || this;
_this.documentCache = documentCache;
_this.trackRemovals = trackRemovals;
// A map of document sizes prior to applying the changes in this buffer.
_this.documentSizes = new ObjectMap(function (key) { return key.toString(); }, function (l, r) { return l.isEqual(r); });
return _this;
}
IndexedDbRemoteDocumentChangeBuffer.prototype.applyChanges = function (transaction) {
var _this = this;
var promises = [];
var sizeDelta = 0;
var collectionParents = new SortedSet(function (l, r) { return primitiveComparator(l.canonicalString(), r.canonicalString()); });
this.changes.forEach(function (key, documentChange) {
var previousSize = _this.documentSizes.get(key);
if (documentChange.document.isValidDocument()) {
var doc_5 = toDbRemoteDocument(_this.documentCache.serializer, documentChange.document, _this.getReadTime(key));
collectionParents = collectionParents.add(key.path.popLast());
var size = dbDocumentSize(doc_5);
sizeDelta += size - previousSize;
promises.push(_this.documentCache.addEntry(transaction, key, doc_5));
}
else {
sizeDelta -= previousSize;
if (_this.trackRemovals) {
// In order to track removals, we store a "sentinel delete" in the
// RemoteDocumentCache. This entry is represented by a NoDocument
// with a version of 0 and ignored by `maybeDecodeDocument()` but
// preserved in `getNewDocumentChanges()`.
var deletedDoc = toDbRemoteDocument(_this.documentCache.serializer, MutableDocument.newNoDocument(key, SnapshotVersion.min()), _this.getReadTime(key));
promises.push(_this.documentCache.addEntry(transaction, key, deletedDoc));
}
else {
promises.push(_this.documentCache.removeEntry(transaction, key));
}
}
});
collectionParents.forEach(function (parent) {
promises.push(_this.documentCache.indexManager.addToCollectionParentIndex(transaction, parent));
});
promises.push(this.documentCache.updateMetadata(transaction, sizeDelta));
return PersistencePromise.waitFor(promises);
};
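// Worked example of the size accounting in `applyChanges()` (illustrative
// numbers): if the buffer replaces an 800-byte cached document with a
// 1,200-byte version and, without `trackRemovals`, deletes a 500-byte
// document, the accumulated sizeDelta is (1200 - 800) + (-500) = -100 bytes,
// which `updateMetadata()` then applies to the cached byteSize in the same
// transaction.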
IndexedDbRemoteDocumentChangeBuffer.prototype.getFromCache = function (transaction, documentKey) {
var _this = this;
// Record the size of everything we load from the cache so we can compute a delta later.
return this.documentCache
.getSizedEntry(transaction, documentKey)
.next(function (getResult) {
_this.documentSizes.set(documentKey, getResult.size);
return getResult.document;
});
};
IndexedDbRemoteDocumentChangeBuffer.prototype.getAllFromCache = function (transaction, documentKeys) {
var _this = this;
// Record the size of everything we load from the cache so we can compute
// a delta later.
return this.documentCache
.getSizedEntries(transaction, documentKeys)
.next(function (_d) {
var documents = _d.documents, sizeMap = _d.sizeMap;
// Note: `getAllFromCache` returns two maps instead of a single map from
// keys to `DocumentSizeEntry`s. This is to allow returning the
// `MutableDocumentMap` directly, without a conversion.
sizeMap.forEach(function (documentKey, size) {
_this.documentSizes.set(documentKey, size);
});
return documents;
});
};
return IndexedDbRemoteDocumentChangeBuffer;
}(RemoteDocumentChangeBuffer));
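/**
* Helper to get a typed SimpleDbStore for the DbRemoteDocumentGlobal metadata
* object store (which tracks the total byte size of the remote document cache).
*/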
function documentGlobalStore(txn) {
return getStore(txn, DbRemoteDocumentGlobal.store);
}
/**
* Helper to get a typed SimpleDbStore for the remoteDocuments object store.
*/
function remoteDocumentsStore(txn) {
return getStore(txn, DbRemoteDocument.store);
}
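/**
* Converts a DocumentKey into the array-of-path-segments form used as the
* primary key of the remoteDocuments object store.
*/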
function dbKey(docKey) {
return docKey.path.toArray();
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Performs database creation and schema upgrades. */
var SchemaConverter = /** @class */ (function () {
function SchemaConverter(serializer) {
this.serializer = serializer;
}
/**
* Performs database creation and schema upgrades.
*
* Note that in production, this method is only ever used to upgrade the schema
* to SCHEMA_VERSION. Different values of toVersion are only used for testing
* and local feature development.
*/
SchemaConverter.prototype.createOrUpgrade = function (db, txn, fromVersion, toVersion) {
var _this = this;
hardAssert(fromVersion < toVersion &&
fromVersion >= 0 &&
toVersion <= SCHEMA_VERSION);
var simpleDbTransaction = new SimpleDbTransaction('createOrUpgrade', txn);
if (fromVersion < 1 && toVersion >= 1) {
createPrimaryClientStore(db);
createMutationQueue(db);
createQueryCache(db);
createRemoteDocumentCache(db);
}
// Migration 2 (populating the targetGlobal object) is no longer needed since
// migration 3 unconditionally clears it.
var p = PersistencePromise.resolve();
if (fromVersion < 3 && toVersion >= 3) {
// Brand new clients don't need to drop and recreate the query cache; only
// clients that potentially have corrupt data do.
if (fromVersion !== 0) {
dropQueryCache(db);
createQueryCache(db);
}
p = p.next(function () { return writeEmptyTargetGlobalEntry(simpleDbTransaction); });
}
if (fromVersion < 4 && toVersion >= 4) {
if (fromVersion !== 0) {
// Schema version 3 uses auto-generated keys to generate globally unique
// mutation batch IDs (this was previously ensured internally by the
// client). To migrate to the new schema, we have to read all mutations
// and write them back out. We preserve the existing batch IDs to guarantee
// consistency with other object stores. Any further mutation batch IDs will
// be auto-generated.
p = p.next(function () { return upgradeMutationBatchSchemaAndMigrateData(db, simpleDbTransaction); });
}
p = p.next(function () {
createClientMetadataStore(db);
});
}
if (fromVersion < 5 && toVersion >= 5) {
p = p.next(function () { return _this.removeAcknowledgedMutations(simpleDbTransaction); });
}
if (fromVersion < 6 && toVersion >= 6) {
p = p.next(function () {
createDocumentGlobalStore(db);
return _this.addDocumentGlobal(simpleDbTransaction);
});
}
if (fromVersion < 7 && toVersion >= 7) {
p = p.next(function () { return _this.ensureSequenceNumbers(simpleDbTransaction); });
}
if (fromVersion < 8 && toVersion >= 8) {
p = p.next(function () { return _this.createCollectionParentIndex(db, simpleDbTransaction); });
}
if (fromVersion < 9 && toVersion >= 9) {
p = p.next(function () {
// Multi-Tab used to manage its own changelog, but this has been moved
// to the DbRemoteDocument object store itself. Since the previous change
// log only contained transient data, we can drop its object store.
dropRemoteDocumentChangesStore(db);
createRemoteDocumentReadTimeIndex(txn);
});
}
if (fromVersion < 10 && toVersion >= 10) {
p = p.next(function () { return _this.rewriteCanonicalIds(simpleDbTransaction); });
}
if (fromVersion < 11 && toVersion >= 11) {
p = p.next(function () {
createBundlesStore(db);
createNamedQueriesStore(db);
});
}
return p;
};
SchemaConverter.prototype.addDocumentGlobal = function (txn) {
var byteCount = 0;
return txn
.store(DbRemoteDocument.store)
.iterate(function (_, doc) {
byteCount += dbDocumentSize(doc);
})
.next(function () {
var metadata = new DbRemoteDocumentGlobal(byteCount);
return txn
.store(DbRemoteDocumentGlobal.store)
.put(DbRemoteDocumentGlobal.key, metadata);
});
};
SchemaConverter.prototype.removeAcknowledgedMutations = function (txn) {
var _this = this;
var queuesStore = txn.store(DbMutationQueue.store);
var mutationsStore = txn.store(DbMutationBatch.store);
return queuesStore.loadAll().next(function (queues) {
return PersistencePromise.forEach(queues, function (queue) {
var range = IDBKeyRange.bound([queue.userId, BATCHID_UNKNOWN], [queue.userId, queue.lastAcknowledgedBatchId]);
return mutationsStore
.loadAll(DbMutationBatch.userMutationsIndex, range)
.next(function (dbBatches) {
return PersistencePromise.forEach(dbBatches, function (dbBatch) {
hardAssert(dbBatch.userId === queue.userId);
var batch = fromDbMutationBatch(_this.serializer, dbBatch);
return removeMutationBatch(txn, queue.userId, batch).next(function () { });
});
});
});
});
};
/**
* Ensures that every document in the remote document cache has a corresponding sentinel row
* with a sequence number. Missing rows are given the most recently used sequence number.
*/
SchemaConverter.prototype.ensureSequenceNumbers = function (txn) {
var documentTargetStore = txn.store(DbTargetDocument.store);
var documentsStore = txn.store(DbRemoteDocument.store);
var globalTargetStore = txn.store(DbTargetGlobal.store);
return globalTargetStore.get(DbTargetGlobal.key).next(function (metadata) {
var writeSentinelKey = function (path) {
return documentTargetStore.put(new DbTargetDocument(0, encodeResourcePath(path), metadata.highestListenSequenceNumber));
};
var promises = [];
return documentsStore
.iterate(function (key, doc) {
var path = new ResourcePath(key);
var docSentinelKey = sentinelKey(path);
promises.push(documentTargetStore.get(docSentinelKey).next(function (maybeSentinel) {
if (!maybeSentinel) {
return writeSentinelKey(path);
}
else {
return PersistencePromise.resolve();
}
}));
})
.next(function () { return PersistencePromise.waitFor(promises); });
});
};
SchemaConverter.prototype.createCollectionParentIndex = function (db, txn) {
// Create the index.
db.createObjectStore(DbCollectionParent.store, {
keyPath: DbCollectionParent.keyPath
});
var collectionParentsStore = txn.store(DbCollectionParent.store);
// Helper to add an index entry iff we haven't already written it.
var cache = new MemoryCollectionParentIndex();
var addEntry = function (collectionPath) {
if (cache.add(collectionPath)) {
var collectionId = collectionPath.lastSegment();
var parentPath = collectionPath.popLast();
return collectionParentsStore.put({
collectionId: collectionId,
parent: encodeResourcePath(parentPath)
});
}
};
// Index existing remote documents.
return txn
.store(DbRemoteDocument.store)
.iterate({ keysOnly: true }, function (pathSegments, _) {
var path = new ResourcePath(pathSegments);
return addEntry(path.popLast());
})
.next(function () {
// Index existing mutations.
return txn
.store(DbDocumentMutation.store)
.iterate({ keysOnly: true }, function (_d, _) {
var encodedPath = _d[1]; // key is [userId, encodedPath, batchId]
var path = decodeResourcePath(encodedPath);
return addEntry(path.popLast());
});
});
};
SchemaConverter.prototype.rewriteCanonicalIds = function (txn) {
var _this = this;
var targetStore = txn.store(DbTarget.store);
return targetStore.iterate(function (key, originalDbTarget) {
var originalTargetData = fromDbTarget(originalDbTarget);
var updatedDbTarget = toDbTarget(_this.serializer, originalTargetData);
return targetStore.put(updatedDbTarget);
});
};
return SchemaConverter;
}());
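// Usage sketch (mirrors the wiring in IndexedDbPersistence below, where
// `localSerializer` stands for the LocalSerializer instance): the converter is
// created once per database and handed to SimpleDb, which invokes
// `createOrUpgrade()` from IndexedDB's `onupgradeneeded` event.
//
//   var schemaConverter = new SchemaConverter(localSerializer);
//   var simpleDb = new SimpleDb(dbName, SCHEMA_VERSION, schemaConverter);
//
/**
* Returns the DbTargetDocument key for a document's sentinel row (targetId 0),
* which stores the document's last-used listen sequence number.
*/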
function sentinelKey(path) {
return [0, encodeResourcePath(path)];
}
function createPrimaryClientStore(db) {
db.createObjectStore(DbPrimaryClient.store);
}
function createMutationQueue(db) {
db.createObjectStore(DbMutationQueue.store, {
keyPath: DbMutationQueue.keyPath
});
var mutationBatchesStore = db.createObjectStore(DbMutationBatch.store, {
keyPath: DbMutationBatch.keyPath,
autoIncrement: true
});
mutationBatchesStore.createIndex(DbMutationBatch.userMutationsIndex, DbMutationBatch.userMutationsKeyPath, { unique: true });
db.createObjectStore(DbDocumentMutation.store);
}
/**
* Upgrade function to migrate the 'mutations' store from V1 to V3. Loads
* and rewrites all data.
*/
function upgradeMutationBatchSchemaAndMigrateData(db, txn) {
var v1MutationsStore = txn.store(DbMutationBatch.store);
return v1MutationsStore.loadAll().next(function (existingMutations) {
db.deleteObjectStore(DbMutationBatch.store);
var mutationsStore = db.createObjectStore(DbMutationBatch.store, {
keyPath: DbMutationBatch.keyPath,
autoIncrement: true
});
mutationsStore.createIndex(DbMutationBatch.userMutationsIndex, DbMutationBatch.userMutationsKeyPath, { unique: true });
var v3MutationsStore = txn.store(DbMutationBatch.store);
var writeAll = existingMutations.map(function (mutation) { return v3MutationsStore.put(mutation); });
return PersistencePromise.waitFor(writeAll);
});
}
function createRemoteDocumentCache(db) {
db.createObjectStore(DbRemoteDocument.store);
}
function createDocumentGlobalStore(db) {
db.createObjectStore(DbRemoteDocumentGlobal.store);
}
function createQueryCache(db) {
var targetDocumentsStore = db.createObjectStore(DbTargetDocument.store, {
keyPath: DbTargetDocument.keyPath
});
targetDocumentsStore.createIndex(DbTargetDocument.documentTargetsIndex, DbTargetDocument.documentTargetsKeyPath, { unique: true });
var targetStore = db.createObjectStore(DbTarget.store, {
keyPath: DbTarget.keyPath
});
// NOTE: This is unique only because the TargetId is the suffix.
targetStore.createIndex(DbTarget.queryTargetsIndexName, DbTarget.queryTargetsKeyPath, { unique: true });
db.createObjectStore(DbTargetGlobal.store);
}
function dropQueryCache(db) {
db.deleteObjectStore(DbTargetDocument.store);
db.deleteObjectStore(DbTarget.store);
db.deleteObjectStore(DbTargetGlobal.store);
}
function dropRemoteDocumentChangesStore(db) {
if (db.objectStoreNames.contains('remoteDocumentChanges')) {
db.deleteObjectStore('remoteDocumentChanges');
}
}
/**
* Creates the target global singleton row.
*
* @param txn - The version upgrade transaction for IndexedDb.
*/
function writeEmptyTargetGlobalEntry(txn) {
var globalStore = txn.store(DbTargetGlobal.store);
var metadata = new DbTargetGlobal(
/*highestTargetId=*/ 0,
/*lastListenSequenceNumber=*/ 0, SnapshotVersion.min().toTimestamp(),
/*targetCount=*/ 0);
return globalStore.put(DbTargetGlobal.key, metadata);
}
/**
* Creates indices on the RemoteDocuments store used for both multi-tab
* and Index-Free queries.
*/
function createRemoteDocumentReadTimeIndex(txn) {
var remoteDocumentStore = txn.objectStore(DbRemoteDocument.store);
remoteDocumentStore.createIndex(DbRemoteDocument.readTimeIndex, DbRemoteDocument.readTimeIndexPath, { unique: false });
remoteDocumentStore.createIndex(DbRemoteDocument.collectionReadTimeIndex, DbRemoteDocument.collectionReadTimeIndexPath, { unique: false });
}
function createClientMetadataStore(db) {
db.createObjectStore(DbClientMetadata.store, {
keyPath: DbClientMetadata.keyPath
});
}
function createBundlesStore(db) {
db.createObjectStore(DbBundle.store, {
keyPath: DbBundle.keyPath
});
}
function createNamedQueriesStore(db) {
db.createObjectStore(DbNamedQuery.store, {
keyPath: DbNamedQuery.keyPath
});
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$d = 'IndexedDbPersistence';
/**
* Oldest acceptable age in milliseconds for client metadata before the client
* is considered inactive and its associated data is garbage collected.
*/
var MAX_CLIENT_AGE_MS = 30 * 60 * 1000; // 30 minutes
/**
* Oldest acceptable metadata age for clients that may participate in the
* primary lease election. Clients that have not updated their client metadata
* within 5 seconds are not eligible to receive a primary lease.
*/
var MAX_PRIMARY_ELIGIBLE_AGE_MS = 5000;
/**
* The interval at which clients will update their metadata, including
* refreshing their primary lease if held or potentially trying to acquire it if
* not held.
*
* Primary clients may opportunistically refresh their metadata earlier
* if they're already performing an IndexedDB operation.
*/
var CLIENT_METADATA_REFRESH_INTERVAL_MS = 4000;
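// Note that CLIENT_METADATA_REFRESH_INTERVAL_MS (4s) is shorter than
// MAX_PRIMARY_ELIGIBLE_AGE_MS (5s), so a healthy primary client refreshes its
// lease before other clients consider it stale and attempt a takeover.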
/** User-facing error when the primary lease is required but not available. */
var PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG = 'Failed to obtain exclusive access to the persistence layer. To allow ' +
'shared access, multi-tab synchronization has to be enabled in all tabs. ' +
'If you are using `experimentalForceOwningTab:true`, make sure that only ' +
'one tab has persistence enabled at any given time.';
var UNSUPPORTED_PLATFORM_ERROR_MSG = 'This platform is either missing IndexedDB or is known to have ' +
'an incomplete implementation. Offline persistence has been disabled.';
// The format of the LocalStorage key that marks a client as zombied is:
// firestore_zombie_<persistence_key>_<client_id>
// (see `zombiedClientLocalStorageKey()` below).
var ZOMBIED_CLIENTS_KEY_PREFIX = 'firestore_zombie';
/**
* The name of the main (and currently only) IndexedDB database. This name is
* appended to the prefix provided to the IndexedDbPersistence constructor.
*/
var MAIN_DATABASE = 'main';
/**
* An IndexedDB-backed instance of Persistence. Data is stored persistently
* across sessions.
*
* On Web only, the Firestore SDK supports shared access to its persistence
* layer. This allows multiple browser tabs to read and write to IndexedDb and
* to synchronize state even without network connectivity. Shared access is
* currently optional and not enabled unless all clients invoke
* `enablePersistence()` with `{synchronizeTabs:true}` (see the usage sketch
* after this class).
*
* In multi-tab mode, if multiple clients are active at the same time, the SDK
* will designate one client as the “primary client”. An effort is made to pick
* a visible, network-connected and active client, and this client is
* responsible for letting other clients know about its presence. The primary
* client writes a unique client-generated identifier (the client ID) to
* IndexedDb’s “owner” store every 4 seconds. If the primary client fails to
* update this entry, another client can acquire the lease and take over as
* primary.
*
* Some persistence operations in the SDK are designated as primary-client only
* operations. This includes the acknowledgment of mutations and all updates of
* remote documents. The effects of these operations are written to persistence
* and then broadcast to other tabs via LocalStorage (see
* `WebStorageSharedClientState`), which then refresh their state from
* persistence.
*
* Similarly, the primary client listens to notifications sent by secondary
* clients to discover persistence changes written by secondary clients, such as
* the addition of new mutations and query targets.
*
* If multi-tab is not enabled and another tab already obtained the primary
* lease, IndexedDbPersistence enters a failed state and all subsequent
* operations will automatically fail.
*
* Additionally, there is an optimization so that when a tab is closed, the
* primary lease is released immediately (this is especially important to make
* sure that a refreshed tab is able to immediately re-acquire the primary
* lease). Unfortunately, IndexedDB cannot be reliably used in window.unload
* since it is an asynchronous API. So in addition to attempting to give up the
* lease, the leaseholder writes its client ID to a "zombiedClient" entry in
* LocalStorage which acts as an indicator that another tab should go ahead and
* take the primary lease immediately regardless of the current lease timestamp.
*
* TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no
* longer optional.
*/
var IndexedDbPersistence = /** @class */ (function () {
function IndexedDbPersistence(
/**
* Whether to synchronize the in-memory state of multiple tabs and share
* access to local persistence.
*/
allowTabSynchronization, persistenceKey, clientId, lruParams, queue, window, document, serializer, sequenceNumberSyncer,
/**
* If set to true, forcefully obtains database access. Existing tabs will
* no longer be able to access IndexedDB.
*/
forceOwningTab) {
this.allowTabSynchronization = allowTabSynchronization;
this.persistenceKey = persistenceKey;
this.clientId = clientId;
this.queue = queue;
this.window = window;
this.document = document;
this.sequenceNumberSyncer = sequenceNumberSyncer;
this.forceOwningTab = forceOwningTab;
this.listenSequence = null;
this._started = false;
this.isPrimary = false;
this.networkEnabled = true;
/** Our window.unload handler, if registered. */
this.windowUnloadHandler = null;
this.inForeground = false;
/** Our 'visibilitychange' listener if registered. */
this.documentVisibilityHandler = null;
/** The client metadata refresh task. */
this.clientMetadataRefresher = null;
/** The last time we garbage collected the client metadata object store. */
this.lastGarbageCollectionTime = Number.NEGATIVE_INFINITY;
/** A listener to notify on primary state changes. */
this.primaryStateListener = function (_) { return Promise.resolve(); };
if (!IndexedDbPersistence.isAvailable()) {
throw new FirestoreError(Code.UNIMPLEMENTED, UNSUPPORTED_PLATFORM_ERROR_MSG);
}
this.referenceDelegate = new IndexedDbLruDelegateImpl(this, lruParams);
this.dbName = persistenceKey + MAIN_DATABASE;
this.serializer = new LocalSerializer(serializer);
this.simpleDb = new SimpleDb(this.dbName, SCHEMA_VERSION, new SchemaConverter(this.serializer));
this.targetCache = new IndexedDbTargetCache(this.referenceDelegate, this.serializer);
this.indexManager = new IndexedDbIndexManager();
this.remoteDocumentCache = newIndexedDbRemoteDocumentCache(this.serializer, this.indexManager);
this.bundleCache = new IndexedDbBundleCache();
if (this.window && this.window.localStorage) {
this.webStorage = this.window.localStorage;
}
else {
this.webStorage = null;
if (forceOwningTab === false) {
logError(LOG_TAG$d, 'LocalStorage is unavailable. As a result, persistence may not work ' +
'reliably. In particular enablePersistence() could fail immediately ' +
'after refreshing the page.');
}
}
}
/**
* Attempt to start IndexedDb persistence.
*
* @returns Whether persistence was enabled.
*/
IndexedDbPersistence.prototype.start = function () {
var _this = this;
// NOTE: This is expected to fail sometimes (in the case of another tab
// already having the persistence lock), so it's the first thing we should
// do.
return this.updateClientMetadataAndTryBecomePrimary()
.then(function () {
if (!_this.isPrimary && !_this.allowTabSynchronization) {
// Fail `start()` if `synchronizeTabs` is disabled and we cannot
// obtain the primary lease.
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
}
_this.attachVisibilityHandler();
_this.attachWindowUnloadHook();
_this.scheduleClientMetadataAndPrimaryLeaseRefreshes();
return _this.runTransaction('getHighestListenSequenceNumber', 'readonly', function (txn) { return _this.targetCache.getHighestSequenceNumber(txn); });
})
.then(function (highestListenSequenceNumber) {
_this.listenSequence = new ListenSequence(highestListenSequenceNumber, _this.sequenceNumberSyncer);
})
.then(function () {
_this._started = true;
})
.catch(function (reason) {
_this.simpleDb && _this.simpleDb.close();
return Promise.reject(reason);
});
};
/**
* Registers a listener that gets called when the primary state of the
* instance changes. Upon registering, this listener is invoked immediately
* with the current primary state.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
IndexedDbPersistence.prototype.setPrimaryStateListener = function (primaryStateListener) {
var _this = this;
this.primaryStateListener = function (primaryState) { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
if (this.started) {
return [2 /*return*/, primaryStateListener(primaryState)];
}
return [2 /*return*/];
});
}); };
return primaryStateListener(this.isPrimary);
};
/**
* Registers a listener that gets called when the database receives a
* version change event indicating that it has been deleted.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
IndexedDbPersistence.prototype.setDatabaseDeletedListener = function (databaseDeletedListener) {
var _this = this;
this.simpleDb.setVersionChangeListener(function (event) { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!(event.newVersion === null)) return [3 /*break*/, 2];
return [4 /*yield*/, databaseDeletedListener()];
case 1:
_d.sent();
_d.label = 2;
case 2: return [2 /*return*/];
}
});
}); });
};
/**
* Adjusts the current network state in the client's metadata, potentially
* affecting the primary lease.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
IndexedDbPersistence.prototype.setNetworkEnabled = function (networkEnabled) {
var _this = this;
if (this.networkEnabled !== networkEnabled) {
this.networkEnabled = networkEnabled;
// Schedule a primary lease refresh for immediate execution. The eventual
// lease update will be propagated via `primaryStateListener`.
this.queue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!this.started) return [3 /*break*/, 2];
return [4 /*yield*/, this.updateClientMetadataAndTryBecomePrimary()];
case 1:
_d.sent();
_d.label = 2;
case 2: return [2 /*return*/];
}
});
}); });
}
};
/**
* Updates the client metadata in IndexedDb and attempts to either obtain or
* extend the primary lease for the local client. Asynchronously notifies the
* primary state listener if the client either newly obtained or released its
* primary lease.
*/
IndexedDbPersistence.prototype.updateClientMetadataAndTryBecomePrimary = function () {
var _this = this;
return this.runTransaction('updateClientMetadataAndTryBecomePrimary', 'readwrite', function (txn) {
var metadataStore = clientMetadataStore(txn);
return metadataStore
.put(new DbClientMetadata(_this.clientId, Date.now(), _this.networkEnabled, _this.inForeground))
.next(function () {
if (_this.isPrimary) {
return _this.verifyPrimaryLease(txn).next(function (success) {
if (!success) {
_this.isPrimary = false;
_this.queue.enqueueRetryable(function () { return _this.primaryStateListener(false); });
}
});
}
})
.next(function () { return _this.canActAsPrimary(txn); })
.next(function (canActAsPrimary) {
if (_this.isPrimary && !canActAsPrimary) {
return _this.releasePrimaryLeaseIfHeld(txn).next(function () { return false; });
}
else if (canActAsPrimary) {
return _this.acquireOrExtendPrimaryLease(txn).next(function () { return true; });
}
else {
return /* canActAsPrimary= */ false;
}
});
})
.catch(function (e) {
if (isIndexedDbTransactionError(e)) {
logDebug(LOG_TAG$d, 'Failed to extend owner lease: ', e);
// Proceed with the existing state. Any subsequent access to
// IndexedDB will verify the lease.
return _this.isPrimary;
}
if (!_this.allowTabSynchronization) {
throw e;
}
logDebug(LOG_TAG$d, 'Releasing owner lease after error during lease refresh', e);
return /* isPrimary= */ false;
})
.then(function (isPrimary) {
if (_this.isPrimary !== isPrimary) {
_this.queue.enqueueRetryable(function () { return _this.primaryStateListener(isPrimary); });
}
_this.isPrimary = isPrimary;
});
};
IndexedDbPersistence.prototype.verifyPrimaryLease = function (txn) {
var _this = this;
var store = primaryClientStore(txn);
return store.get(DbPrimaryClient.key).next(function (primaryClient) {
return PersistencePromise.resolve(_this.isLocalClient(primaryClient));
});
};
IndexedDbPersistence.prototype.removeClientMetadata = function (txn) {
var metadataStore = clientMetadataStore(txn);
return metadataStore.delete(this.clientId);
};
/**
* If the garbage collection threshold has passed, prunes the
* RemoteDocumentChanges and the ClientMetadata store based on the last update
* time of all clients.
*/
IndexedDbPersistence.prototype.maybeGarbageCollectMultiClientState = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var inactiveClients, _i, inactiveClients_1, inactiveClient;
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!(this.isPrimary &&
!this.isWithinAge(this.lastGarbageCollectionTime, MAX_CLIENT_AGE_MS))) return [3 /*break*/, 2];
this.lastGarbageCollectionTime = Date.now();
return [4 /*yield*/, this.runTransaction('maybeGarbageCollectMultiClientState', 'readwrite-primary', function (txn) {
var metadataStore = getStore(txn, DbClientMetadata.store);
return metadataStore.loadAll().next(function (existingClients) {
var active = _this.filterActiveClients(existingClients, MAX_CLIENT_AGE_MS);
var inactive = existingClients.filter(function (client) { return active.indexOf(client) === -1; });
// Delete metadata for clients that are no longer considered active.
return PersistencePromise.forEach(inactive, function (inactiveClient) { return metadataStore.delete(inactiveClient.clientId); }).next(function () { return inactive; });
});
}).catch(function () {
// Ignore primary lease violations or any other type of error. The next
// primary will run `maybeGarbageCollectMultiClientState()` again.
// We don't use `ignoreIfPrimaryLeaseLoss()` since we don't want to depend
// on LocalStore.
return [];
})];
case 1:
inactiveClients = _d.sent();
// Delete potential leftover entries that may continue to mark the
// inactive clients as zombied in LocalStorage.
// Ideally we'd delete the IndexedDb and LocalStorage zombie entries for
// the client atomically, but we can't. So we opt to delete the IndexedDb
// entries first to avoid potentially reviving a zombied client.
if (this.webStorage) {
for (_i = 0, inactiveClients_1 = inactiveClients; _i < inactiveClients_1.length; _i++) {
inactiveClient = inactiveClients_1[_i];
this.webStorage.removeItem(this.zombiedClientLocalStorageKey(inactiveClient.clientId));
}
}
_d.label = 2;
case 2: return [2 /*return*/];
}
});
});
};
/**
* Schedules a recurring timer to update the client metadata and to either
* extend or acquire the primary lease if the client is eligible.
*/
IndexedDbPersistence.prototype.scheduleClientMetadataAndPrimaryLeaseRefreshes = function () {
var _this = this;
this.clientMetadataRefresher = this.queue.enqueueAfterDelay("client_metadata_refresh" /* ClientMetadataRefresh */, CLIENT_METADATA_REFRESH_INTERVAL_MS, function () {
return _this.updateClientMetadataAndTryBecomePrimary()
.then(function () { return _this.maybeGarbageCollectMultiClientState(); })
.then(function () { return _this.scheduleClientMetadataAndPrimaryLeaseRefreshes(); });
});
};
/** Checks whether `client` is the local client. */
IndexedDbPersistence.prototype.isLocalClient = function (client) {
return client ? client.ownerId === this.clientId : false;
};
/**
* Evaluate the state of all active clients and determine whether the local
* client is or can act as the holder of the primary lease. Returns whether
* the client is eligible for the lease, but does not actually acquire it.
* May return 'false' even if there is no active leaseholder and another
* (foreground) client should become leaseholder instead.
*/
IndexedDbPersistence.prototype.canActAsPrimary = function (txn) {
var _this = this;
if (this.forceOwningTab) {
return PersistencePromise.resolve(true);
}
var store = primaryClientStore(txn);
return store
.get(DbPrimaryClient.key)
.next(function (currentPrimary) {
var currentLeaseIsValid = currentPrimary !== null &&
_this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
!_this.isClientZombied(currentPrimary.ownerId);
// A client is eligible for the primary lease if:
// - its network is enabled and the client's tab is in the foreground.
// - its network is enabled and no other client's tab is in the
// foreground.
// - every client's network is disabled and the client's tab is in the
// foreground.
// - every client's network is disabled and no other client's tab is in
// the foreground.
// - the `forceOwningTab` setting was passed in.
if (currentLeaseIsValid) {
if (_this.isLocalClient(currentPrimary) && _this.networkEnabled) {
return true;
}
if (!_this.isLocalClient(currentPrimary)) {
if (!currentPrimary.allowTabSynchronization) {
// Fail the `canActAsPrimary` check if the current leaseholder has
// not opted into multi-tab synchronization. If this happens at
// client startup, we reject the Promise returned by
// `enablePersistence()` and the user can continue to use Firestore
// with in-memory persistence.
// If this fails during a lease refresh, we will instead block the
// AsyncQueue from executing further operations. Note that this is
// acceptable since mixing & matching different `synchronizeTabs`
// settings is not supported.
//
// TODO(b/114226234): Remove this check when `synchronizeTabs` can
// no longer be turned off.
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
}
return false;
}
}
if (_this.networkEnabled && _this.inForeground) {
return true;
}
return clientMetadataStore(txn)
.loadAll()
.next(function (existingClients) {
// Process all existing clients and determine whether at least one of
// them is better suited to obtain the primary lease.
var preferredCandidate = _this.filterActiveClients(existingClients, MAX_PRIMARY_ELIGIBLE_AGE_MS).find(function (otherClient) {
if (_this.clientId !== otherClient.clientId) {
var otherClientHasBetterNetworkState = !_this.networkEnabled && otherClient.networkEnabled;
var otherClientHasBetterVisibility = !_this.inForeground && otherClient.inForeground;
var otherClientHasSameNetworkState = _this.networkEnabled === otherClient.networkEnabled;
if (otherClientHasBetterNetworkState ||
(otherClientHasBetterVisibility &&
otherClientHasSameNetworkState)) {
return true;
}
}
return false;
});
return preferredCandidate === undefined;
});
})
.next(function (canActAsPrimary) {
if (_this.isPrimary !== canActAsPrimary) {
logDebug(LOG_TAG$d, "Client " + (canActAsPrimary ? 'is' : 'is not') + " eligible for a primary lease.");
}
return canActAsPrimary;
});
};
IndexedDbPersistence.prototype.shutdown = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
// The shutdown() operations are idempotent and can be called even when
// start() aborted (e.g. because it couldn't acquire the persistence lease).
this._started = false;
this.markClientZombied();
if (this.clientMetadataRefresher) {
this.clientMetadataRefresher.cancel();
this.clientMetadataRefresher = null;
}
this.detachVisibilityHandler();
this.detachWindowUnloadHook();
// Use `SimpleDb.runTransaction` directly to avoid failing if another tab
// has obtained the primary lease.
return [4 /*yield*/, this.simpleDb.runTransaction('shutdown', 'readwrite', [DbPrimaryClient.store, DbClientMetadata.store], function (simpleDbTxn) {
var persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, ListenSequence.INVALID);
return _this.releasePrimaryLeaseIfHeld(persistenceTransaction).next(function () { return _this.removeClientMetadata(persistenceTransaction); });
})];
case 1:
_d.sent();
this.simpleDb.close();
// Remove the entry marking the client as zombied from LocalStorage since
// we successfully deleted its metadata from IndexedDb.
this.removeClientZombiedEntry();
return [2 /*return*/];
}
});
});
};
/**
* Returns clients that are not zombied and have an updateTime within the
* provided threshold.
*/
IndexedDbPersistence.prototype.filterActiveClients = function (clients, activityThresholdMs) {
var _this = this;
return clients.filter(function (client) { return _this.isWithinAge(client.updateTimeMs, activityThresholdMs) &&
!_this.isClientZombied(client.clientId); });
};
/**
* Returns the IDs of the clients that are currently active. If multi-tab
* is not supported, returns an array that only contains the local client's
* ID.
*
* PORTING NOTE: This is only used for Web multi-tab.
*/
IndexedDbPersistence.prototype.getActiveClients = function () {
var _this = this;
return this.runTransaction('getActiveClients', 'readonly', function (txn) {
return clientMetadataStore(txn)
.loadAll()
.next(function (clients) { return _this.filterActiveClients(clients, MAX_CLIENT_AGE_MS).map(function (clientMetadata) { return clientMetadata.clientId; }); });
});
};
Object.defineProperty(IndexedDbPersistence.prototype, "started", {
get: function () {
return this._started;
},
enumerable: false,
configurable: true
});
IndexedDbPersistence.prototype.getMutationQueue = function (user) {
return IndexedDbMutationQueue.forUser(user, this.serializer, this.indexManager, this.referenceDelegate);
};
IndexedDbPersistence.prototype.getTargetCache = function () {
return this.targetCache;
};
IndexedDbPersistence.prototype.getRemoteDocumentCache = function () {
return this.remoteDocumentCache;
};
IndexedDbPersistence.prototype.getIndexManager = function () {
return this.indexManager;
};
IndexedDbPersistence.prototype.getBundleCache = function () {
return this.bundleCache;
};
IndexedDbPersistence.prototype.runTransaction = function (action, mode, transactionOperation) {
var _this = this;
logDebug(LOG_TAG$d, 'Starting transaction:', action);
var simpleDbMode = mode === 'readonly' ? 'readonly' : 'readwrite';
var persistenceTransaction;
// Every transaction is run against all object stores; the IndexedDB mode is
// 'readonly' only for pure reads, and 'readwrite' for both 'readwrite' and
// 'readwrite-primary' persistence transactions.
return this.simpleDb
.runTransaction(action, simpleDbMode, ALL_STORES, function (simpleDbTxn) {
persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, _this.listenSequence
? _this.listenSequence.next()
: ListenSequence.INVALID);
if (mode === 'readwrite-primary') {
// While we merely verify that we have (or can acquire) the lease
// immediately, we wait to extend the primary lease until after
// executing transactionOperation(). This ensures that even if the
// transactionOperation takes a long time, we'll use a recent
// leaseTimestampMs in the extended (or newly acquired) lease.
return _this.verifyPrimaryLease(persistenceTransaction)
.next(function (holdsPrimaryLease) {
if (holdsPrimaryLease) {
return /* holdsPrimaryLease= */ true;
}
return _this.canActAsPrimary(persistenceTransaction);
})
.next(function (holdsPrimaryLease) {
if (!holdsPrimaryLease) {
logError("Failed to obtain primary lease for action '" + action + "'.");
_this.isPrimary = false;
_this.queue.enqueueRetryable(function () { return _this.primaryStateListener(false); });
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_LOST_ERROR_MSG);
}
return transactionOperation(persistenceTransaction);
})
.next(function (result) {
return _this.acquireOrExtendPrimaryLease(persistenceTransaction).next(function () { return result; });
});
}
else {
return _this.verifyAllowTabSynchronization(persistenceTransaction).next(function () { return transactionOperation(persistenceTransaction); });
}
})
.then(function (result) {
persistenceTransaction.raiseOnCommittedEvent();
return result;
});
};
/**
* Verifies that the current tab is the primary leaseholder or alternatively
* that the leaseholder has opted into multi-tab synchronization.
*/
// TODO(b/114226234): Remove this check when `synchronizeTabs` can no longer
// be turned off.
IndexedDbPersistence.prototype.verifyAllowTabSynchronization = function (txn) {
var _this = this;
var store = primaryClientStore(txn);
return store.get(DbPrimaryClient.key).next(function (currentPrimary) {
var currentLeaseIsValid = currentPrimary !== null &&
_this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
!_this.isClientZombied(currentPrimary.ownerId);
if (currentLeaseIsValid && !_this.isLocalClient(currentPrimary)) {
if (!_this.forceOwningTab &&
(!_this.allowTabSynchronization ||
!currentPrimary.allowTabSynchronization)) {
throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
}
}
});
};
/**
* Obtains or extends the primary lease for the local client. This
* method does not verify that the client is eligible for this lease.
*/
IndexedDbPersistence.prototype.acquireOrExtendPrimaryLease = function (txn) {
var newPrimary = new DbPrimaryClient(this.clientId, this.allowTabSynchronization, Date.now());
return primaryClientStore(txn).put(DbPrimaryClient.key, newPrimary);
};
IndexedDbPersistence.isAvailable = function () {
return SimpleDb.isAvailable();
};
/** Checks the primary lease and removes it if we are the current primary. */
IndexedDbPersistence.prototype.releasePrimaryLeaseIfHeld = function (txn) {
var _this = this;
var store = primaryClientStore(txn);
return store.get(DbPrimaryClient.key).next(function (primaryClient) {
if (_this.isLocalClient(primaryClient)) {
logDebug(LOG_TAG$d, 'Releasing primary lease.');
return store.delete(DbPrimaryClient.key);
}
else {
return PersistencePromise.resolve();
}
});
};
/** Verifies that `updateTimeMs` is within `maxAgeMs`. */
IndexedDbPersistence.prototype.isWithinAge = function (updateTimeMs, maxAgeMs) {
var now = Date.now();
var minAcceptable = now - maxAgeMs;
var maxAcceptable = now;
if (updateTimeMs < minAcceptable) {
return false;
}
else if (updateTimeMs > maxAcceptable) {
logError("Detected an update time that is in the future: " + updateTimeMs + " > " + maxAcceptable);
return false;
}
return true;
};
IndexedDbPersistence.prototype.attachVisibilityHandler = function () {
var _this = this;
if (this.document !== null &&
typeof this.document.addEventListener === 'function') {
this.documentVisibilityHandler = function () {
_this.queue.enqueueAndForget(function () {
_this.inForeground = _this.document.visibilityState === 'visible';
return _this.updateClientMetadataAndTryBecomePrimary();
});
};
this.document.addEventListener('visibilitychange', this.documentVisibilityHandler);
this.inForeground = this.document.visibilityState === 'visible';
}
};
IndexedDbPersistence.prototype.detachVisibilityHandler = function () {
if (this.documentVisibilityHandler) {
this.document.removeEventListener('visibilitychange', this.documentVisibilityHandler);
this.documentVisibilityHandler = null;
}
};
/**
* Attaches a 'pagehide' handler that synchronously writes our clientId to a
* "zombie client id" location in LocalStorage. This can be used by tabs
* trying to acquire the primary lease to determine that the lease is no
* longer valid even if the timestamp is recent. This is particularly
* important for the refresh case (so the tab correctly re-acquires the
* primary lease). LocalStorage is used for this rather than IndexedDb because
* it is a synchronous API and so can be used reliably from an unload-style
* handler.
*/
IndexedDbPersistence.prototype.attachWindowUnloadHook = function () {
var _this = this;
var _a;
if (typeof ((_a = this.window) === null || _a === void 0 ? void 0 : _a.addEventListener) === 'function') {
this.windowUnloadHandler = function () {
// Note: In theory, this should be scheduled on the AsyncQueue since it
// accesses internal state. We execute this code directly during shutdown
// to make sure it gets a chance to run.
_this.markClientZombied();
if (util.isSafari() && navigator.appVersion.match("Version/14")) {
// On Safari 14, we do not run any cleanup actions as it might trigger
// a bug that prevents Safari from re-opening IndexedDB during the
// next page load.
// See https://bugs.webkit.org/show_bug.cgi?id=226547
_this.queue.enterRestrictedMode(/* purgeExistingTasks= */ true);
}
_this.queue.enqueueAndForget(function () {
// Attempt graceful shutdown (including releasing our primary lease),
// but there's no guarantee it will complete.
return _this.shutdown();
});
};
this.window.addEventListener('pagehide', this.windowUnloadHandler);
}
};
IndexedDbPersistence.prototype.detachWindowUnloadHook = function () {
if (this.windowUnloadHandler) {
this.window.removeEventListener('pagehide', this.windowUnloadHandler);
this.windowUnloadHandler = null;
}
};
/**
* Returns whether a client is "zombied" based on its LocalStorage entry.
* Clients become zombied when their tab closes without running all of the
* cleanup logic in `shutdown()`.
*/
IndexedDbPersistence.prototype.isClientZombied = function (clientId) {
var _a;
try {
var isZombied = ((_a = this.webStorage) === null || _a === void 0 ? void 0 : _a.getItem(this.zombiedClientLocalStorageKey(clientId))) !== null;
logDebug(LOG_TAG$d, "Client '" + clientId + "' " + (isZombied ? 'is' : 'is not') + " zombied in LocalStorage");
return isZombied;
}
catch (e) {
// Gracefully handle if LocalStorage isn't working.
logError(LOG_TAG$d, 'Failed to get zombied client id.', e);
return false;
}
};
/**
* Records this client as zombied (a client whose tab was closed). Zombied
* clients are ignored during primary tab selection.
*/
IndexedDbPersistence.prototype.markClientZombied = function () {
if (!this.webStorage) {
return;
}
try {
this.webStorage.setItem(this.zombiedClientLocalStorageKey(this.clientId), String(Date.now()));
}
catch (e) {
// Gracefully handle if LocalStorage isn't available / working.
logError('Failed to set zombie client id.', e);
}
};
/** Removes the zombied client entry if it exists. */
IndexedDbPersistence.prototype.removeClientZombiedEntry = function () {
if (!this.webStorage) {
return;
}
try {
this.webStorage.removeItem(this.zombiedClientLocalStorageKey(this.clientId));
}
catch (e) {
// Ignore
}
};
IndexedDbPersistence.prototype.zombiedClientLocalStorageKey = function (clientId) {
return ZOMBIED_CLIENTS_KEY_PREFIX + "_" + this.persistenceKey + "_" + clientId;
};
return IndexedDbPersistence;
}());
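// Usage sketch for the multi-tab behavior described in the class comment
// above, written against the public v8-style API (assumed entry point:
// `firebase.firestore()`); the error codes shown are the documented
// `enablePersistence()` failure modes.
//
//   var db = firebase.firestore();
//   db.enablePersistence({ synchronizeTabs: true }).catch(function (err) {
//     if (err.code === 'failed-precondition') {
//       // Another tab holds an exclusive primary lease (see
//       // PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG above).
//     } else if (err.code === 'unimplemented') {
//       // IndexedDB is not available on this platform (see
//       // UNSUPPORTED_PLATFORM_ERROR_MSG above).
//     }
//   });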
/**
* Helper to get a typed SimpleDbStore for the primary client object store.
*/
function primaryClientStore(txn) {
return getStore(txn, DbPrimaryClient.store);
}
/**
* Helper to get a typed SimpleDbStore for the client metadata object store.
*/
function clientMetadataStore(txn) {
return getStore(txn, DbClientMetadata.store);
}
/**
* Generates a string used as a prefix when storing data in IndexedDB and
* LocalStorage.
*/
function indexedDbStoragePrefix(databaseId, persistenceKey) {
// Use two different prefix formats:
//
// * firestore / persistenceKey / projectID . databaseID / ...
// * firestore / persistenceKey / projectID / ...
//
// projectIDs are DNS-compatible names and cannot contain dots
// so there's no danger of collisions.
var database = databaseId.projectId;
if (!databaseId.isDefaultDatabase) {
database += '.' + databaseId.database;
}
return 'firestore/' + persistenceKey + '/' + database + '/';
}
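// For example, with illustrative inputs, a default database yields
//   indexedDbStoragePrefix({ projectId: 'my-project', isDefaultDatabase: true }, 'key')
//     === 'firestore/key/my-project/'
// while a non-default database 'other' yields 'firestore/key/my-project.other/'.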
function indexedDbClearPersistence(persistenceKey) {
return tslib.__awaiter(this, void 0, void 0, function () {
var dbName;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!SimpleDb.isAvailable()) {
return [2 /*return*/, Promise.resolve()];
}
dbName = persistenceKey + MAIN_DATABASE;
return [4 /*yield*/, SimpleDb.delete(dbName)];
case 1:
_d.sent();
return [2 /*return*/];
}
});
});
}
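// Usage sketch: this helper backs the public `clearPersistence()` API
// (assuming the v8-style entry point `firebase.firestore()`), which is
// expected to run while the Firestore instance is not in use:
//
//   var db = firebase.firestore();
//   db.clearPersistence().then(function () {
//     // All IndexedDB data for this instance has been deleted.
//   });
//
// If IndexedDB is unavailable, the function above resolves without doing
// anything (see the `SimpleDb.isAvailable()` check).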
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A readonly view of the local state of all documents we're tracking (i.e. we
* have a cached version in remoteDocumentCache or local mutations for the
* document). The view is computed by applying the mutations in the
* MutationQueue to the RemoteDocumentCache.
*/
var LocalDocumentsView = /** @class */ (function () {
function LocalDocumentsView(remoteDocumentCache, mutationQueue, indexManager) {
this.remoteDocumentCache = remoteDocumentCache;
this.mutationQueue = mutationQueue;
this.indexManager = indexManager;
}
/**
* Get the local view of the document identified by `key`.
*
* @returns Local view of the document, or an invalid document if we don't
* have any cached state for it.
*/
LocalDocumentsView.prototype.getDocument = function (transaction, key) {
var _this = this;
return this.mutationQueue
.getAllMutationBatchesAffectingDocumentKey(transaction, key)
.next(function (batches) { return _this.getDocumentInternal(transaction, key, batches); });
};
/** Internal version of `getDocument` that allows reusing batches. */
LocalDocumentsView.prototype.getDocumentInternal = function (transaction, key, inBatches) {
return this.remoteDocumentCache.getEntry(transaction, key).next(function (doc) {
for (var _i = 0, inBatches_1 = inBatches; _i < inBatches_1.length; _i++) {
var batch = inBatches_1[_i];
batch.applyToLocalView(doc);
}
return doc;
});
};
// Returns the view of the given `docs` as they would appear after applying
// all mutations in the given `batches`.
LocalDocumentsView.prototype.applyLocalMutationsToDocuments = function (docs, batches) {
docs.forEach(function (key, localView) {
for (var _i = 0, batches_1 = batches; _i < batches_1.length; _i++) {
var batch = batches_1[_i];
batch.applyToLocalView(localView);
}
});
};
/**
* Gets the local view of the documents identified by `keys`.
*
* If we don't have cached state for a document in `keys`, an invalid document
* will be stored for that key in the resulting set.
*/
LocalDocumentsView.prototype.getDocuments = function (transaction, keys) {
var _this = this;
return this.remoteDocumentCache
.getEntries(transaction, keys)
.next(function (docs) { return _this.applyLocalViewToDocuments(transaction, docs).next(function () { return docs; }); });
};
/**
* Applies the local view to the given `baseDocs` without retrieving documents
* from the local store.
*/
LocalDocumentsView.prototype.applyLocalViewToDocuments = function (transaction, baseDocs) {
var _this = this;
return this.mutationQueue
.getAllMutationBatchesAffectingDocumentKeys(transaction, baseDocs)
.next(function (batches) { return _this.applyLocalMutationsToDocuments(baseDocs, batches); });
};
/**
* Performs a query against the local view of all documents.
*
* @param transaction - The persistence transaction.
* @param query - The query to match documents against.
* @param sinceReadTime - If not set to SnapshotVersion.min(), return only
* documents that have been read since this snapshot version (exclusive).
*/
LocalDocumentsView.prototype.getDocumentsMatchingQuery = function (transaction, query, sinceReadTime) {
if (isDocumentQuery$1(query)) {
return this.getDocumentsMatchingDocumentQuery(transaction, query.path);
}
else if (isCollectionGroupQuery(query)) {
return this.getDocumentsMatchingCollectionGroupQuery(transaction, query, sinceReadTime);
}
else {
return this.getDocumentsMatchingCollectionQuery(transaction, query, sinceReadTime);
}
};
LocalDocumentsView.prototype.getDocumentsMatchingDocumentQuery = function (transaction, docPath) {
// Just do a simple document lookup.
return this.getDocument(transaction, new DocumentKey(docPath)).next(function (document) {
var result = documentMap();
if (document.isFoundDocument()) {
result = result.insert(document.key, document);
}
return result;
});
};
LocalDocumentsView.prototype.getDocumentsMatchingCollectionGroupQuery = function (transaction, query, sinceReadTime) {
var _this = this;
var collectionId = query.collectionGroup;
var results = documentMap();
return this.indexManager
.getCollectionParents(transaction, collectionId)
.next(function (parents) {
// Perform a collection query against each parent that contains the
// collectionId and aggregate the results.
return PersistencePromise.forEach(parents, function (parent) {
var collectionQuery = asCollectionQueryAtPath(query, parent.child(collectionId));
return _this.getDocumentsMatchingCollectionQuery(transaction, collectionQuery, sinceReadTime).next(function (r) {
r.forEach(function (key, doc) {
results = results.insert(key, doc);
});
});
}).next(function () { return results; });
});
};
LocalDocumentsView.prototype.getDocumentsMatchingCollectionQuery = function (transaction, query, sinceReadTime) {
var _this = this;
// Query the remote documents and overlay mutations.
var results;
var mutationBatches;
return this.remoteDocumentCache
.getDocumentsMatchingQuery(transaction, query, sinceReadTime)
.next(function (queryResults) {
results = queryResults;
return _this.mutationQueue.getAllMutationBatchesAffectingQuery(transaction, query);
})
.next(function (matchingMutationBatches) {
mutationBatches = matchingMutationBatches;
// It is possible that a PatchMutation can make a document match a query, even if
// the version in the RemoteDocumentCache is not a match yet (waiting for the server
// to ack). To handle this, we find all document keys affected by the PatchMutations
// that are not in `results` yet and backfill them via `remoteDocumentCache.getEntries`;
// otherwise those PatchMutations would be ignored because no base document can be
// found, leading to missing results for the query.
return _this.addMissingBaseDocuments(transaction, mutationBatches, results).next(function (mergedDocuments) {
results = mergedDocuments;
for (var _i = 0, mutationBatches_1 = mutationBatches; _i < mutationBatches_1.length; _i++) {
var batch = mutationBatches_1[_i];
for (var _d = 0, _e = batch.mutations; _d < _e.length; _d++) {
var mutation = _e[_d];
var key = mutation.key;
var document_2 = results.get(key);
if (document_2 == null) {
// Create invalid document to apply mutations on top of
document_2 = MutableDocument.newInvalidDocument(key);
results = results.insert(key, document_2);
}
applyMutationToLocalView(mutation, document_2, batch.localWriteTime);
if (!document_2.isFoundDocument()) {
results = results.remove(key);
}
}
}
});
})
.next(function () {
// Finally, filter out any documents that don't actually match
// the query.
results.forEach(function (key, doc) {
if (!queryMatches(query, doc)) {
results = results.remove(key);
}
});
return results;
});
};
LocalDocumentsView.prototype.addMissingBaseDocuments = function (transaction, matchingMutationBatches, existingDocuments) {
var missingBaseDocEntriesForPatching = documentKeySet();
for (var _i = 0, matchingMutationBatches_1 = matchingMutationBatches; _i < matchingMutationBatches_1.length; _i++) {
var batch = matchingMutationBatches_1[_i];
for (var _d = 0, _e = batch.mutations; _d < _e.length; _d++) {
var mutation = _e[_d];
if (mutation instanceof PatchMutation &&
existingDocuments.get(mutation.key) === null) {
missingBaseDocEntriesForPatching = missingBaseDocEntriesForPatching.add(mutation.key);
}
}
}
var mergedDocuments = existingDocuments;
return this.remoteDocumentCache
.getEntries(transaction, missingBaseDocEntriesForPatching)
.next(function (missingBaseDocs) {
missingBaseDocs.forEach(function (key, doc) {
if (doc.isFoundDocument()) {
mergedDocuments = mergedDocuments.insert(key, doc);
}
});
return mergedDocuments;
});
};
return LocalDocumentsView;
}());
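/*
 * A minimal usage sketch, assuming a hypothetical persistence transaction `txn`
 * and caches obtained from a Persistence implementation (as wired up in
 * LocalStoreImpl below). The local view of a document is the cached remote
 * version with every pending mutation batch affecting that key applied on top,
 * in batch order:
 *
 *   var localDocuments = new LocalDocumentsView(remoteDocumentCache, mutationQueue, indexManager);
 *   var key = new DocumentKey(ResourcePath.fromString('rooms/abc'));
 *   localDocuments.getDocument(txn, key).next(function (doc) {
 *     // `doc` is remoteDocumentCache.getEntry(txn, key) after
 *     // batch.applyToLocalView(doc) has run for each affecting batch.
 *   });
 */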
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$c = 'LocalStore';
/**
* The maximum time to leave a resume token buffered without writing it out.
* This value is arbitrary: it's long enough to avoid several writes
* (possibly indefinitely if updates come more frequently than this) but
* short enough that restarting after crashing will still have a pretty
* recent resume token.
*/
var RESUME_TOKEN_MAX_AGE_MICROS = 5 * 60 * 1e6;
/**
* Implements `LocalStore` interface.
*
* Note: some fields defined in this class might have a public access level, but
* the class is not exported, so they are only accessible from this module.
* This is useful for implementing optional features (like bundles) in free
* functions, so that they are tree-shakeable.
*/
var LocalStoreImpl = /** @class */ (function () {
function LocalStoreImpl(
/** Manages our in-memory or durable persistence. */
persistence, queryEngine, initialUser, serializer) {
this.persistence = persistence;
this.queryEngine = queryEngine;
this.serializer = serializer;
/**
* Maps a targetID to data about its target.
*
* PORTING NOTE: We are using an immutable data structure on Web to make re-runs
* of `applyRemoteEvent()` idempotent.
*/
this.targetDataByTarget = new SortedMap(primitiveComparator);
/** Maps a target to its targetID. */
// TODO(wuandy): Evaluate if TargetId can be part of Target.
this.targetIdByTarget = new ObjectMap(function (t) { return canonifyTarget(t); }, targetEquals);
/**
* The read time of the last entry processed by `getNewDocumentChanges()`.
*
* PORTING NOTE: This is only used for multi-tab synchronization.
*/
this.lastDocumentChangeReadTime = SnapshotVersion.min();
this.mutationQueue = persistence.getMutationQueue(initialUser);
this.remoteDocuments = persistence.getRemoteDocumentCache();
this.targetCache = persistence.getTargetCache();
this.localDocuments = new LocalDocumentsView(this.remoteDocuments, this.mutationQueue, this.persistence.getIndexManager());
this.bundleCache = persistence.getBundleCache();
this.queryEngine.setLocalDocumentsView(this.localDocuments);
}
LocalStoreImpl.prototype.collectGarbage = function (garbageCollector) {
var _this = this;
return this.persistence.runTransaction('Collect garbage', 'readwrite-primary', function (txn) { return garbageCollector.collect(txn, _this.targetDataByTarget); });
};
return LocalStoreImpl;
}());
function newLocalStore(
/** Manages our in-memory or durable persistence. */
persistence, queryEngine, initialUser, serializer) {
return new LocalStoreImpl(persistence, queryEngine, initialUser, serializer);
}
/**
* Tells the LocalStore that the currently authenticated user has changed.
*
* In response the local store switches the mutation queue to the new user and
* returns any resulting document changes.
*/
// PORTING NOTE: Android and iOS only return the documents affected by the
// change.
function localStoreHandleUserChange(localStore, user) {
return tslib.__awaiter(this, void 0, void 0, function () {
var localStoreImpl, newMutationQueue, newLocalDocuments, result;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
localStoreImpl = debugCast(localStore);
newMutationQueue = localStoreImpl.mutationQueue;
newLocalDocuments = localStoreImpl.localDocuments;
return [4 /*yield*/, localStoreImpl.persistence.runTransaction('Handle user change', 'readonly', function (txn) {
// Swap out the mutation queue, grabbing the pending mutation batches
// before and after.
var oldBatches;
return localStoreImpl.mutationQueue
.getAllMutationBatches(txn)
.next(function (promisedOldBatches) {
oldBatches = promisedOldBatches;
newMutationQueue = localStoreImpl.persistence.getMutationQueue(user);
// Recreate our LocalDocumentsView using the new
// MutationQueue.
newLocalDocuments = new LocalDocumentsView(localStoreImpl.remoteDocuments, newMutationQueue, localStoreImpl.persistence.getIndexManager());
return newMutationQueue.getAllMutationBatches(txn);
})
.next(function (newBatches) {
var removedBatchIds = [];
var addedBatchIds = [];
// Union the old/new changed keys.
var changedKeys = documentKeySet();
for (var _i = 0, oldBatches_1 = oldBatches; _i < oldBatches_1.length; _i++) {
var batch = oldBatches_1[_i];
removedBatchIds.push(batch.batchId);
for (var _d = 0, _e = batch.mutations; _d < _e.length; _d++) {
var mutation = _e[_d];
changedKeys = changedKeys.add(mutation.key);
}
}
for (var _f = 0, newBatches_1 = newBatches; _f < newBatches_1.length; _f++) {
var batch = newBatches_1[_f];
addedBatchIds.push(batch.batchId);
for (var _g = 0, _h = batch.mutations; _g < _h.length; _g++) {
var mutation = _h[_g];
changedKeys = changedKeys.add(mutation.key);
}
}
// Return the set of all (potentially) changed documents and the list
// of mutation batch IDs that were affected by the change.
return newLocalDocuments
.getDocuments(txn, changedKeys)
.next(function (affectedDocuments) {
return {
affectedDocuments: affectedDocuments,
removedBatchIds: removedBatchIds,
addedBatchIds: addedBatchIds
};
});
});
})];
case 1:
result = _d.sent();
localStoreImpl.mutationQueue = newMutationQueue;
localStoreImpl.localDocuments = newLocalDocuments;
localStoreImpl.queryEngine.setLocalDocumentsView(localStoreImpl.localDocuments);
return [2 /*return*/, result];
}
});
});
}
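/*
 * Sketch of handling a user change, assuming a hypothetical `localStore` and a
 * `User` value `newUser` from the auth layer. The mutation queue is swapped for
 * the new user's queue, and the union of keys touched by the old and new queues
 * is re-read through the new LocalDocumentsView:
 *
 *   localStoreHandleUserChange(localStore, newUser).then(function (result) {
 *     // result.removedBatchIds / result.addedBatchIds: batch IDs swapped out/in.
 *     // result.affectedDocuments: local view of every (potentially) changed document.
 *   });
 */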
/* Accepts locally generated Mutations and commits them to storage. */
function localStoreWriteLocally(localStore, mutations) {
var localStoreImpl = debugCast(localStore);
var localWriteTime = Timestamp.now();
var keys = mutations.reduce(function (keys, m) { return keys.add(m.key); }, documentKeySet());
var existingDocs;
return localStoreImpl.persistence
.runTransaction('Locally write mutations', 'readwrite', function (txn) {
// Load and apply all existing mutations. This lets us compute the
// current base state for all non-idempotent transforms before applying
// any additional user-provided writes.
return localStoreImpl.localDocuments
.getDocuments(txn, keys)
.next(function (docs) {
existingDocs = docs;
// For non-idempotent mutations (such as `FieldValue.increment()`),
// we record the base state in a separate patch mutation. This is
// later used to guarantee consistent values and prevents flicker
// even if the backend sends us an update that already includes our
// transform.
var baseMutations = [];
for (var _i = 0, mutations_2 = mutations; _i < mutations_2.length; _i++) {
var mutation = mutations_2[_i];
var baseValue = extractMutationBaseValue(mutation, existingDocs.get(mutation.key));
if (baseValue != null) {
// NOTE: The base state should only be applied if there's some
// existing document to override, so use a Precondition of
// exists=true
baseMutations.push(new PatchMutation(mutation.key, baseValue, extractFieldMask(baseValue.value.mapValue), Precondition.exists(true)));
}
}
return localStoreImpl.mutationQueue.addMutationBatch(txn, localWriteTime, baseMutations, mutations);
});
})
.then(function (batch) {
batch.applyToLocalDocumentSet(existingDocs);
return { batchId: batch.batchId, changes: existingDocs };
});
}
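/*
 * Sketch of a local write, assuming a hypothetical `localStore` and an array of
 * `mutations` built by a higher layer. For non-idempotent transforms (e.g. an
 * increment) a base PatchMutation with Precondition.exists(true) is queued
 * alongside the user mutations, so the latency-compensated value stays stable
 * even if the backend later echoes the transform back:
 *
 *   localStoreWriteLocally(localStore, mutations).then(function (result) {
 *     // result.batchId: ID of the queued MutationBatch.
 *     // result.changes: local view of every document affected by the batch.
 *   });
 */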
/**
* Acknowledges the given batch.
*
* On the happy path when a batch is acknowledged, the local store will
*
* + remove the batch from the mutation queue;
* + apply the changes to the remote document cache;
* + recalculate the latency compensated view implied by those changes (there
* may be mutations in the queue that affect the documents but haven't been
* acknowledged yet); and
* + give the changed documents back to the sync engine
*
* @returns The resulting (modified) documents.
*/
function localStoreAcknowledgeBatch(localStore, batchResult) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Acknowledge batch', 'readwrite-primary', function (txn) {
var affected = batchResult.batch.keys();
var documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
});
return applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer)
.next(function () { return documentBuffer.apply(txn); })
.next(function () { return localStoreImpl.mutationQueue.performConsistencyCheck(txn); })
.next(function () { return localStoreImpl.localDocuments.getDocuments(txn, affected); });
});
}
/**
* Removes mutations from the MutationQueue for the specified batch;
* LocalDocuments will be recalculated.
*
* @returns The resulting modified documents.
*/
function localStoreRejectBatch(localStore, batchId) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Reject batch', 'readwrite-primary', function (txn) {
var affectedKeys;
return localStoreImpl.mutationQueue
.lookupMutationBatch(txn, batchId)
.next(function (batch) {
hardAssert(batch !== null);
affectedKeys = batch.keys();
return localStoreImpl.mutationQueue.removeMutationBatch(txn, batch);
})
.next(function () { return localStoreImpl.mutationQueue.performConsistencyCheck(txn); })
.next(function () { return localStoreImpl.localDocuments.getDocuments(txn, affectedKeys); });
});
}
/**
* Returns the largest (latest) batch id in mutation queue that is pending
* server response.
*
* Returns `BATCHID_UNKNOWN` if the queue is empty.
*/
function localStoreGetHighestUnacknowledgedBatchId(localStore) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get highest unacknowledged batch id', 'readonly', function (txn) { return localStoreImpl.mutationQueue.getHighestUnacknowledgedBatchId(txn); });
}
/**
* Returns the last consistent snapshot processed (used by the RemoteStore to
* determine whether to buffer incoming snapshots from the backend).
*/
function localStoreGetLastRemoteSnapshotVersion(localStore) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get last remote snapshot version', 'readonly', function (txn) { return localStoreImpl.targetCache.getLastRemoteSnapshotVersion(txn); });
}
/**
* Updates the "ground-state" (remote) documents. We assume that the remote
* event reflects any write batches that have been acknowledged or rejected
* (i.e. we do not re-apply local mutations to updates from this event).
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/
function localStoreApplyRemoteEventToLocalCache(localStore, remoteEvent) {
var localStoreImpl = debugCast(localStore);
var remoteVersion = remoteEvent.snapshotVersion;
var newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
return localStoreImpl.persistence
.runTransaction('Apply remote event', 'readwrite-primary', function (txn) {
var documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
});
// Reset newTargetDataByTargetMap in case this transaction gets re-run.
newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
var promises = [];
remoteEvent.targetChanges.forEach(function (change, targetId) {
var oldTargetData = newTargetDataByTargetMap.get(targetId);
if (!oldTargetData) {
return;
}
// Only update the remote keys if the target is still active. This
// ensures that we can persist the updated target data along with
// the updated assignment.
promises.push(localStoreImpl.targetCache
.removeMatchingKeys(txn, change.removedDocuments, targetId)
.next(function () {
return localStoreImpl.targetCache.addMatchingKeys(txn, change.addedDocuments, targetId);
}));
var resumeToken = change.resumeToken;
// Update the resume token if the change includes one.
if (resumeToken.approximateByteSize() > 0) {
var newTargetData = oldTargetData
.withResumeToken(resumeToken, remoteVersion)
.withSequenceNumber(txn.currentSequenceNumber);
newTargetDataByTargetMap = newTargetDataByTargetMap.insert(targetId, newTargetData);
// Update the target data if there are target changes (or if
// sufficient time has passed since the last update).
if (shouldPersistTargetData(oldTargetData, newTargetData, change)) {
promises.push(localStoreImpl.targetCache.updateTargetData(txn, newTargetData));
}
}
});
var changedDocs = mutableDocumentMap();
remoteEvent.documentUpdates.forEach(function (key, doc) {
if (remoteEvent.resolvedLimboDocuments.has(key)) {
promises.push(localStoreImpl.persistence.referenceDelegate.updateLimboDocument(txn, key));
}
});
// Each loop iteration only affects its "own" doc, so it's safe to get all the remote
// documents in advance in a single call.
promises.push(populateDocumentChangeBuffer(txn, documentBuffer, remoteEvent.documentUpdates, remoteVersion, undefined).next(function (result) {
changedDocs = result;
}));
// HACK: The only reason we allow a null snapshot version is so that we
// can synthesize remote events when we get permission denied errors while
// trying to resolve the state of a locally cached document that is in
// limbo.
if (!remoteVersion.isEqual(SnapshotVersion.min())) {
var updateRemoteVersion = localStoreImpl.targetCache
.getLastRemoteSnapshotVersion(txn)
.next(function (lastRemoteSnapshotVersion) {
return localStoreImpl.targetCache.setTargetsMetadata(txn, txn.currentSequenceNumber, remoteVersion);
});
promises.push(updateRemoteVersion);
}
return PersistencePromise.waitFor(promises)
.next(function () { return documentBuffer.apply(txn); })
.next(function () { return localStoreImpl.localDocuments.applyLocalViewToDocuments(txn, changedDocs); })
.next(function () { return changedDocs; });
})
.then(function (changedDocs) {
localStoreImpl.targetDataByTarget = newTargetDataByTargetMap;
return changedDocs;
});
}
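/*
 * Sketch of the remote-event path, assuming a hypothetical `localStore` and a
 * RemoteEvent `remoteEvent` produced by the watch stream. Target metadata and
 * matching-key sets are updated, document updates are staged in a change
 * buffer, and the resolved value is the re-computed local view of every
 * changed document:
 *
 *   localStoreApplyRemoteEventToLocalCache(localStore, remoteEvent)
 *     .then(function (changedDocs) {
 *       // changedDocs: map from DocumentKey to the updated local document view.
 *     });
 */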
/**
* Populates the document change buffer with documents from the backend or a bundle.
* Returns the document changes resulting from applying those documents.
*
* @param txn - Transaction to use to read existing documents from storage.
* @param documentBuffer - Document buffer to collect the resulting changes to be
* applied to storage.
* @param documents - Documents to be applied.
* @param globalVersion - A `SnapshotVersion` representing the read time if all
* documents have the same read time.
* @param documentVersions - A DocumentKey-to-SnapshotVersion map if documents
* have their own read time.
*
* Note: this function will use `documentVersions` if it is defined;
* when it is not defined, it falls back to `globalVersion`.
*/
function populateDocumentChangeBuffer(txn, documentBuffer, documents, globalVersion,
// TODO(wuandy): We could add `readTime` to MaybeDocument instead to remove
// this parameter.
documentVersions) {
var updatedKeys = documentKeySet();
documents.forEach(function (k) { return (updatedKeys = updatedKeys.add(k)); });
return documentBuffer.getEntries(txn, updatedKeys).next(function (existingDocs) {
var changedDocs = mutableDocumentMap();
documents.forEach(function (key, doc) {
var existingDoc = existingDocs.get(key);
var docReadTime = (documentVersions === null || documentVersions === void 0 ? void 0 : documentVersions.get(key)) || globalVersion;
// Note: The order of the steps below is important, since we want
// to ensure that rejected limbo resolutions (which fabricate
// NoDocuments with SnapshotVersion.min()) never add documents to
// cache.
if (doc.isNoDocument() && doc.version.isEqual(SnapshotVersion.min())) {
// NoDocuments with SnapshotVersion.min() are used in manufactured
// events. We remove these documents from cache since we lost
// access.
documentBuffer.removeEntry(key, docReadTime);
changedDocs = changedDocs.insert(key, doc);
}
else if (!existingDoc.isValidDocument() ||
doc.version.compareTo(existingDoc.version) > 0 ||
(doc.version.compareTo(existingDoc.version) === 0 &&
existingDoc.hasPendingWrites)) {
documentBuffer.addEntry(doc, docReadTime);
changedDocs = changedDocs.insert(key, doc);
}
else {
logDebug(LOG_TAG$c, 'Ignoring outdated watch update for ', key, '. Current version:', existingDoc.version, ' Watch version:', doc.version);
}
});
return changedDocs;
});
}
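/*
 * The acceptance rule used above, written out as a standalone predicate (a
 * sketch; `existingDoc` is the cached entry and `doc` the incoming document).
 * An incoming document is applied when the cache entry is invalid, when the
 * incoming version is strictly newer, or when the versions tie but the cached
 * entry still has pending writes; rejected limbo resolutions (a NoDocument at
 * SnapshotVersion.min()) instead remove the cache entry:
 *
 *   function shouldApplyWatchUpdate(existingDoc, doc) {
 *     return (!existingDoc.isValidDocument() ||
 *       doc.version.compareTo(existingDoc.version) > 0 ||
 *       (doc.version.compareTo(existingDoc.version) === 0 &&
 *         existingDoc.hasPendingWrites));
 *   }
 */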
/**
* Returns true if the newTargetData should be persisted during an update of
* an active target. TargetData should always be persisted when a target is
* being released, so this function should not be called in that case.
*
* While the target is active, TargetData updates can be omitted when nothing
* about the target has changed except metadata like the resume token or
* snapshot version. Occasionally it's worth the extra write to prevent these
* values from getting too stale after a crash, but this doesn't have to be
* too frequent.
*/
function shouldPersistTargetData(oldTargetData, newTargetData, change) {
hardAssert(newTargetData.resumeToken.approximateByteSize() > 0);
// Always persist target data if we don't already have a resume token.
if (oldTargetData.resumeToken.approximateByteSize() === 0) {
return true;
}
// Don't allow resume token changes to be buffered indefinitely. This
// allows us to be reasonably up-to-date after a crash and avoids needing
// to loop over all active queries on shutdown. Especially in the browser
// we may not get time to do anything interesting while the current tab is
// closing.
var timeDelta = newTargetData.snapshotVersion.toMicroseconds() -
oldTargetData.snapshotVersion.toMicroseconds();
if (timeDelta >= RESUME_TOKEN_MAX_AGE_MICROS) {
return true;
}
// Otherwise if the only thing that has changed about a target is its resume
// token it's not worth persisting. Note that the RemoteStore keeps an
// in-memory view of the currently active targets which includes the current
// resume token, so stream failure or user changes will still use an
// up-to-date resume token regardless of what we do here.
var changes = change.addedDocuments.size +
change.modifiedDocuments.size +
change.removedDocuments.size;
return changes > 0;
}
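/*
 * Worked example of the buffering window: RESUME_TOKEN_MAX_AGE_MICROS is
 * 5 * 60 * 1e6 = 300,000,000 microseconds (five minutes). If the new snapshot
 * version is ten minutes (600,000,000 micros) past the previously persisted
 * one, timeDelta >= RESUME_TOKEN_MAX_AGE_MICROS holds and the resume token is
 * written out even when the change contains no document updates.
 */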
/**
* Notifies local store of the changed views to locally pin documents.
*/
function localStoreNotifyLocalViewChanges(localStore, viewChanges) {
return tslib.__awaiter(this, void 0, void 0, function () {
var localStoreImpl, e_2, _i, viewChanges_1, viewChange, targetId, targetData, lastLimboFreeSnapshotVersion, updatedTargetData;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
localStoreImpl = debugCast(localStore);
_d.label = 1;
case 1:
_d.trys.push([1, 3, , 4]);
return [4 /*yield*/, localStoreImpl.persistence.runTransaction('notifyLocalViewChanges', 'readwrite', function (txn) {
return PersistencePromise.forEach(viewChanges, function (viewChange) {
return PersistencePromise.forEach(viewChange.addedKeys, function (key) { return localStoreImpl.persistence.referenceDelegate.addReference(txn, viewChange.targetId, key); }).next(function () { return PersistencePromise.forEach(viewChange.removedKeys, function (key) { return localStoreImpl.persistence.referenceDelegate.removeReference(txn, viewChange.targetId, key); }); });
});
})];
case 2:
_d.sent();
return [3 /*break*/, 4];
case 3:
e_2 = _d.sent();
if (isIndexedDbTransactionError(e_2)) {
// If `notifyLocalViewChanges` fails, we did not advance the sequence
// number for the documents that were included in this transaction.
// This might trigger them to be deleted earlier than they otherwise
// would have, but it should not invalidate the integrity of the data.
logDebug(LOG_TAG$c, 'Failed to update sequence numbers: ' + e_2);
}
else {
throw e_2;
}
return [3 /*break*/, 4];
case 4:
for (_i = 0, viewChanges_1 = viewChanges; _i < viewChanges_1.length; _i++) {
viewChange = viewChanges_1[_i];
targetId = viewChange.targetId;
if (!viewChange.fromCache) {
targetData = localStoreImpl.targetDataByTarget.get(targetId);
lastLimboFreeSnapshotVersion = targetData.snapshotVersion;
updatedTargetData = targetData.withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion);
localStoreImpl.targetDataByTarget = localStoreImpl.targetDataByTarget.insert(targetId, updatedTargetData);
}
}
return [2 /*return*/];
}
});
});
}
/**
* Gets the mutation batch after the passed-in batchId in the mutation queue,
* or null if there is none.
* @param afterBatchId - If provided, the batch ID to search after.
* @returns The next mutation batch, or null if there wasn't one.
*/
function localStoreGetNextMutationBatch(localStore, afterBatchId) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get next mutation batch', 'readonly', function (txn) {
if (afterBatchId === undefined) {
afterBatchId = BATCHID_UNKNOWN;
}
return localStoreImpl.mutationQueue.getNextMutationBatchAfterBatchId(txn, afterBatchId);
});
}
/**
* Reads the current value of a Document with a given key or null if not
* found - used for testing.
*/
function localStoreReadDocument(localStore, key) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('read document', 'readonly', function (txn) { return localStoreImpl.localDocuments.getDocument(txn, key); });
}
/**
* Assigns the given target an internal ID so that its results can be pinned and
* don't get GC'd. A target must be allocated in the local store before
* the store can be used to manage its view.
*
* Allocating an already allocated `Target` will return the existing `TargetData`
* for that `Target`.
*/
function localStoreAllocateTarget(localStore, target) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence
.runTransaction('Allocate target', 'readwrite', function (txn) {
var targetData;
return localStoreImpl.targetCache
.getTargetData(txn, target)
.next(function (cached) {
if (cached) {
// This target has been listened to previously, so reuse the
// previous targetID.
// TODO(mcg): freshen last accessed date?
targetData = cached;
return PersistencePromise.resolve(targetData);
}
else {
return localStoreImpl.targetCache
.allocateTargetId(txn)
.next(function (targetId) {
targetData = new TargetData(target, targetId, 0 /* Listen */, txn.currentSequenceNumber);
return localStoreImpl.targetCache
.addTargetData(txn, targetData)
.next(function () { return targetData; });
});
}
});
})
.then(function (targetData) {
// If Multi-Tab is enabled, the existing target data may be newer than
// the in-memory data
var cachedTargetData = localStoreImpl.targetDataByTarget.get(targetData.targetId);
if (cachedTargetData === null ||
targetData.snapshotVersion.compareTo(cachedTargetData.snapshotVersion) >
0) {
localStoreImpl.targetDataByTarget = localStoreImpl.targetDataByTarget.insert(targetData.targetId, targetData);
localStoreImpl.targetIdByTarget.set(target, targetData.targetId);
}
return targetData;
});
}
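/*
 * Sketch of target allocation, assuming a hypothetical `localStore` and a query
 * built elsewhere. Allocating the same Target twice returns the existing
 * TargetData (and target ID) rather than creating a new one:
 *
 *   var target = queryToTarget(newQueryForPath(ResourcePath.fromString('rooms')));
 *   localStoreAllocateTarget(localStore, target).then(function (targetData) {
 *     // targetData.targetId pins this target's documents against GC until
 *     // localStoreReleaseTarget(localStore, targetData.targetId, false) is called.
 *   });
 */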
/**
* Returns the TargetData as seen by the LocalStore, including updates that may
* have not yet been persisted to the TargetCache.
*/
// Visible for testing.
function localStoreGetTargetData(localStore, transaction, target) {
var localStoreImpl = debugCast(localStore);
var targetId = localStoreImpl.targetIdByTarget.get(target);
if (targetId !== undefined) {
return PersistencePromise.resolve(localStoreImpl.targetDataByTarget.get(targetId));
}
else {
return localStoreImpl.targetCache.getTargetData(transaction, target);
}
}
/**
* Unpins all the documents associated with the given target. If
* `keepPersistedTargetData` is set to false and Eager GC is enabled, the method
* directly removes the associated target data from the target cache.
*
* Releasing a non-existing `Target` is a no-op.
*/
// PORTING NOTE: `keepPersistedTargetData` is multi-tab only.
function localStoreReleaseTarget(localStore, targetId, keepPersistedTargetData) {
return tslib.__awaiter(this, void 0, void 0, function () {
var localStoreImpl, targetData, mode, e_3;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
localStoreImpl = debugCast(localStore);
targetData = localStoreImpl.targetDataByTarget.get(targetId);
mode = keepPersistedTargetData ? 'readwrite' : 'readwrite-primary';
_d.label = 1;
case 1:
_d.trys.push([1, 4, , 5]);
if (!!keepPersistedTargetData) return [3 /*break*/, 3];
return [4 /*yield*/, localStoreImpl.persistence.runTransaction('Release target', mode, function (txn) {
return localStoreImpl.persistence.referenceDelegate.removeTarget(txn, targetData);
})];
case 2:
_d.sent();
_d.label = 3;
case 3: return [3 /*break*/, 5];
case 4:
e_3 = _d.sent();
if (isIndexedDbTransactionError(e_3)) {
// All `releaseTarget` does is record the final metadata state for the
// target, but we've been recording this periodically during target
// activity. If we lose this write this could cause a very slight
// difference in the order of target deletion during GC, but we
// don't define exact LRU semantics so this is acceptable.
logDebug(LOG_TAG$c, "Failed to update sequence numbers for target " + targetId + ": " + e_3);
}
else {
throw e_3;
}
return [3 /*break*/, 5];
case 5:
localStoreImpl.targetDataByTarget = localStoreImpl.targetDataByTarget.remove(targetId);
localStoreImpl.targetIdByTarget.delete(targetData.target);
return [2 /*return*/];
}
});
});
}
/**
* Runs the specified query against the local store and returns the results,
* potentially taking advantage of query data from previous executions (such
* as the set of remote keys).
*
* @param usePreviousResults - Whether results from previous executions can
* be used to optimize this query execution.
*/
function localStoreExecuteQuery(localStore, query, usePreviousResults) {
var localStoreImpl = debugCast(localStore);
var lastLimboFreeSnapshotVersion = SnapshotVersion.min();
var remoteKeys = documentKeySet();
return localStoreImpl.persistence.runTransaction('Execute query', 'readonly', function (txn) {
return localStoreGetTargetData(localStoreImpl, txn, queryToTarget(query))
.next(function (targetData) {
if (targetData) {
lastLimboFreeSnapshotVersion =
targetData.lastLimboFreeSnapshotVersion;
return localStoreImpl.targetCache
.getMatchingKeysForTargetId(txn, targetData.targetId)
.next(function (result) {
remoteKeys = result;
});
}
})
.next(function () { return localStoreImpl.queryEngine.getDocumentsMatchingQuery(txn, query, usePreviousResults
? lastLimboFreeSnapshotVersion
: SnapshotVersion.min(), usePreviousResults ? remoteKeys : documentKeySet()); })
.next(function (documents) {
return { documents: documents, remoteKeys: remoteKeys };
});
});
}
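/*
 * Sketch of executing a query against the local store, assuming a hypothetical
 * `localStore` and `query`. With usePreviousResults set to true the query
 * engine may start from the target's last limbo-free snapshot version and its
 * previously matching remote keys instead of scanning the full collection:
 *
 *   localStoreExecuteQuery(localStore, query, true).then(function (r) {
 *     // r.documents: local view of the matching documents.
 *     // r.remoteKeys: keys the backend last reported as matching this target.
 *   });
 */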
function applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer) {
var batch = batchResult.batch;
var docKeys = batch.keys();
var promiseChain = PersistencePromise.resolve();
docKeys.forEach(function (docKey) {
promiseChain = promiseChain
.next(function () { return documentBuffer.getEntry(txn, docKey); })
.next(function (doc) {
var ackVersion = batchResult.docVersions.get(docKey);
hardAssert(ackVersion !== null);
if (doc.version.compareTo(ackVersion) < 0) {
batch.applyToRemoteDocument(doc, batchResult);
if (doc.isValidDocument()) {
// We use the commitVersion as the readTime rather than the
// document's updateTime since the updateTime is not advanced
// for updates that do not modify the underlying document.
documentBuffer.addEntry(doc, batchResult.commitVersion);
}
}
});
});
return promiseChain.next(function () { return localStoreImpl.mutationQueue.removeMutationBatch(txn, batch); });
}
/** Returns the local view of the documents affected by a mutation batch. */
// PORTING NOTE: Multi-Tab only.
function localStoreLookupMutationDocuments(localStore, batchId) {
var localStoreImpl = debugCast(localStore);
var mutationQueueImpl = debugCast(localStoreImpl.mutationQueue);
return localStoreImpl.persistence.runTransaction('Lookup mutation documents', 'readonly', function (txn) {
return mutationQueueImpl.lookupMutationKeys(txn, batchId).next(function (keys) {
if (keys) {
return localStoreImpl.localDocuments.getDocuments(txn, keys);
}
else {
return PersistencePromise.resolve(null);
}
});
});
}
// PORTING NOTE: Multi-Tab only.
function localStoreRemoveCachedMutationBatchMetadata(localStore, batchId) {
var mutationQueueImpl = debugCast(debugCast(localStore, LocalStoreImpl).mutationQueue);
mutationQueueImpl.removeCachedMutationKeys(batchId);
}
// PORTING NOTE: Multi-Tab only.
function localStoreGetActiveClients(localStore) {
var persistenceImpl = debugCast(debugCast(localStore, LocalStoreImpl).persistence);
return persistenceImpl.getActiveClients();
}
// PORTING NOTE: Multi-Tab only.
function localStoreGetCachedTarget(localStore, targetId) {
var localStoreImpl = debugCast(localStore);
var targetCacheImpl = debugCast(localStoreImpl.targetCache);
var cachedTargetData = localStoreImpl.targetDataByTarget.get(targetId);
if (cachedTargetData) {
return Promise.resolve(cachedTargetData.target);
}
else {
return localStoreImpl.persistence.runTransaction('Get target data', 'readonly', function (txn) {
return targetCacheImpl
.getTargetDataForTarget(txn, targetId)
.next(function (targetData) { return (targetData ? targetData.target : null); });
});
}
}
/**
* Returns the set of documents that have been updated since the last call.
* If this is the first call, returns the set of changes since client
* initialization. Further invocations will return documents that have changed
* since the prior call.
*/
// PORTING NOTE: Multi-Tab only.
function localStoreGetNewDocumentChanges(localStore) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence
.runTransaction('Get new document changes', 'readonly', function (txn) { return remoteDocumentCacheGetNewDocumentChanges(localStoreImpl.remoteDocuments, txn, localStoreImpl.lastDocumentChangeReadTime); })
.then(function (_d) {
var changedDocs = _d.changedDocs, readTime = _d.readTime;
localStoreImpl.lastDocumentChangeReadTime = readTime;
return changedDocs;
});
}
/**
* Reads the newest document change from persistence and moves the internal
* synchronization marker forward so that calls to `getNewDocumentChanges()`
* only return changes that happened after client initialization.
*/
// PORTING NOTE: Multi-Tab only.
function localStoreSynchronizeLastDocumentChangeReadTime(localStore) {
return tslib.__awaiter(this, void 0, void 0, function () {
var localStoreImpl;
return tslib.__generator(this, function (_d) {
localStoreImpl = debugCast(localStore);
return [2 /*return*/, localStoreImpl.persistence
.runTransaction('Synchronize last document change read time', 'readonly', function (txn) { return remoteDocumentCacheGetLastReadTime(txn); })
.then(function (readTime) {
localStoreImpl.lastDocumentChangeReadTime = readTime;
})];
});
});
}
/**
* Creates a new target using the given bundle name, which will be used to
* hold the keys of all documents from the bundle in query-document mappings.
* This ensures that the loaded documents do not get garbage collected
* right away.
*/
function umbrellaTarget(bundleName) {
// It is OK that the path used for the query is not valid, because this will
// not be read and queried.
return queryToTarget(newQueryForPath(ResourcePath.fromString("__bundle__/docs/" + bundleName)));
}
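/*
 * For example, loading a bundle named "sales-2021" (a hypothetical name) pins
 * its documents under a synthetic target rooted at "__bundle__/docs/sales-2021":
 *
 *   var target = umbrellaTarget('sales-2021');
 *   // equivalent to queryToTarget(newQueryForPath(
 *   //   ResourcePath.fromString('__bundle__/docs/sales-2021')))
 */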
/**
* Applies the documents from a bundle to the "ground-state" (remote)
* documents.
*
* LocalDocuments are re-calculated if there are remaining mutations in the
* queue.
*/
function localStoreApplyBundledDocuments(localStore, bundleConverter, documents, bundleName) {
return tslib.__awaiter(this, void 0, void 0, function () {
var localStoreImpl, documentKeys, documentMap, versionMap, _i, documents_1, bundleDoc, documentKey, documentBuffer, umbrellaTargetData;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
localStoreImpl = debugCast(localStore);
documentKeys = documentKeySet();
documentMap = mutableDocumentMap();
versionMap = documentVersionMap();
for (_i = 0, documents_1 = documents; _i < documents_1.length; _i++) {
bundleDoc = documents_1[_i];
documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
if (bundleDoc.document) {
documentKeys = documentKeys.add(documentKey);
}
documentMap = documentMap.insert(documentKey, bundleConverter.toMutableDocument(bundleDoc));
versionMap = versionMap.insert(documentKey, bundleConverter.toSnapshotVersion(bundleDoc.metadata.readTime));
}
documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
});
return [4 /*yield*/, localStoreAllocateTarget(localStoreImpl, umbrellaTarget(bundleName))];
case 1:
umbrellaTargetData = _d.sent();
return [2 /*return*/, localStoreImpl.persistence.runTransaction('Apply bundle documents', 'readwrite', function (txn) {
return populateDocumentChangeBuffer(txn, documentBuffer, documentMap, SnapshotVersion.min(), versionMap)
.next(function (changedDocs) {
documentBuffer.apply(txn);
return changedDocs;
})
.next(function (changedDocs) {
return localStoreImpl.targetCache
.removeMatchingKeysForTargetId(txn, umbrellaTargetData.targetId)
.next(function () { return localStoreImpl.targetCache.addMatchingKeys(txn, documentKeys, umbrellaTargetData.targetId); })
.next(function () { return localStoreImpl.localDocuments.applyLocalViewToDocuments(txn, changedDocs); })
.next(function () { return changedDocs; });
});
})];
}
});
});
}
/**
* Returns a promise of a boolean indicating whether the given bundle has already
* been loaded and its create time is newer than that of the bundle currently
* being loaded.
*/
function localStoreHasNewerBundle(localStore, bundleMetadata) {
var localStoreImpl = debugCast(localStore);
var currentReadTime = fromVersion(bundleMetadata.createTime);
return localStoreImpl.persistence
.runTransaction('hasNewerBundle', 'readonly', function (transaction) {
return localStoreImpl.bundleCache.getBundleMetadata(transaction, bundleMetadata.id);
})
.then(function (cached) {
return !!cached && cached.createTime.compareTo(currentReadTime) >= 0;
});
}
/**
* Saves the given `BundleMetadata` to local persistence.
*/
function localStoreSaveBundle(localStore, bundleMetadata) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Save bundle', 'readwrite', function (transaction) {
return localStoreImpl.bundleCache.saveBundleMetadata(transaction, bundleMetadata);
});
}
/**
* Returns a promise of a `NamedQuery` associated with the given query name. The
* promise resolves to undefined if no persisted data can be found.
*/
function localStoreGetNamedQuery(localStore, queryName) {
var localStoreImpl = debugCast(localStore);
return localStoreImpl.persistence.runTransaction('Get named query', 'readonly', function (transaction) { return localStoreImpl.bundleCache.getNamedQuery(transaction, queryName); });
}
/**
* Saves the given `NamedQuery` to local persistence.
*/
function localStoreSaveNamedQuery(localStore, query, documents) {
if (documents === void 0) { documents = documentKeySet(); }
return tslib.__awaiter(this, void 0, void 0, function () {
var allocated, localStoreImpl;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, localStoreAllocateTarget(localStore, queryToTarget(fromBundledQuery(query.bundledQuery)))];
case 1:
allocated = _d.sent();
localStoreImpl = debugCast(localStore);
return [2 /*return*/, localStoreImpl.persistence.runTransaction('Save named query', 'readwrite', function (transaction) {
var readTime = fromVersion(query.readTime);
// Simply save the query itself if it is older than what the SDK already
// has.
if (allocated.snapshotVersion.compareTo(readTime) >= 0) {
return localStoreImpl.bundleCache.saveNamedQuery(transaction, query);
}
// Update existing target data because the query from the bundle is newer.
var newTargetData = allocated.withResumeToken(ByteString.EMPTY_BYTE_STRING, readTime);
localStoreImpl.targetDataByTarget = localStoreImpl.targetDataByTarget.insert(newTargetData.targetId, newTargetData);
return localStoreImpl.targetCache
.updateTargetData(transaction, newTargetData)
.next(function () { return localStoreImpl.targetCache.removeMatchingKeysForTargetId(transaction, allocated.targetId); })
.next(function () { return localStoreImpl.targetCache.addMatchingKeys(transaction, documents, allocated.targetId); })
.next(function () { return localStoreImpl.bundleCache.saveNamedQuery(transaction, query); });
})];
}
});
});
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var MemoryBundleCache = /** @class */ (function () {
function MemoryBundleCache(serializer) {
this.serializer = serializer;
this.bundles = new Map();
this.namedQueries = new Map();
}
MemoryBundleCache.prototype.getBundleMetadata = function (transaction, bundleId) {
return PersistencePromise.resolve(this.bundles.get(bundleId));
};
MemoryBundleCache.prototype.saveBundleMetadata = function (transaction, bundleMetadata) {
this.bundles.set(bundleMetadata.id, fromBundleMetadata(bundleMetadata));
return PersistencePromise.resolve();
};
MemoryBundleCache.prototype.getNamedQuery = function (transaction, queryName) {
return PersistencePromise.resolve(this.namedQueries.get(queryName));
};
MemoryBundleCache.prototype.saveNamedQuery = function (transaction, query) {
this.namedQueries.set(query.name, fromProtoNamedQuery(query));
return PersistencePromise.resolve();
};
return MemoryBundleCache;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A collection of references to a document from some kind of numbered entity
* (either a target ID or batch ID). As references are added to or removed from
* the set corresponding events are emitted to a registered garbage collector.
*
* Each reference is represented by a DocumentReference object. Each of them
* contains enough information to uniquely identify the reference. They are all
* stored primarily in a set sorted by key. A document is considered garbage if
* there are no references to it in that set (this can be efficiently checked
* thanks to sorting by key).
*
* ReferenceSet also keeps a secondary set that contains references sorted by
* IDs. This one is used to efficiently implement removal of all references by
* some target ID.
*/
var ReferenceSet = /** @class */ (function () {
function ReferenceSet() {
// A set of outstanding references to a document sorted by key.
this.refsByKey = new SortedSet(DocReference.compareByKey);
// A set of outstanding references to a document sorted by target id.
this.refsByTarget = new SortedSet(DocReference.compareByTargetId);
}
/** Returns true if the reference set contains no references. */
ReferenceSet.prototype.isEmpty = function () {
return this.refsByKey.isEmpty();
};
/** Adds a reference to the given document key for the given ID. */
ReferenceSet.prototype.addReference = function (key, id) {
var ref = new DocReference(key, id);
this.refsByKey = this.refsByKey.add(ref);
this.refsByTarget = this.refsByTarget.add(ref);
};
/** Adds references to the given document keys for the given ID. */
ReferenceSet.prototype.addReferences = function (keys, id) {
var _this = this;
keys.forEach(function (key) { return _this.addReference(key, id); });
};
/**
* Removes a reference to the given document key for the given
* ID.
*/
ReferenceSet.prototype.removeReference = function (key, id) {
this.removeRef(new DocReference(key, id));
};
ReferenceSet.prototype.removeReferences = function (keys, id) {
var _this = this;
keys.forEach(function (key) { return _this.removeReference(key, id); });
};
/**
* Clears all references with a given ID. Calls removeRef() for each key
* removed.
*/
ReferenceSet.prototype.removeReferencesForId = function (id) {
var _this = this;
var emptyKey = new DocumentKey(new ResourcePath([]));
var startRef = new DocReference(emptyKey, id);
var endRef = new DocReference(emptyKey, id + 1);
var keys = [];
this.refsByTarget.forEachInRange([startRef, endRef], function (ref) {
_this.removeRef(ref);
keys.push(ref.key);
});
return keys;
};
ReferenceSet.prototype.removeAllReferences = function () {
var _this = this;
this.refsByKey.forEach(function (ref) { return _this.removeRef(ref); });
};
ReferenceSet.prototype.removeRef = function (ref) {
this.refsByKey = this.refsByKey.delete(ref);
this.refsByTarget = this.refsByTarget.delete(ref);
};
ReferenceSet.prototype.referencesForId = function (id) {
var emptyKey = new DocumentKey(new ResourcePath([]));
var startRef = new DocReference(emptyKey, id);
var endRef = new DocReference(emptyKey, id + 1);
var keys = documentKeySet();
this.refsByTarget.forEachInRange([startRef, endRef], function (ref) {
keys = keys.add(ref.key);
});
return keys;
};
ReferenceSet.prototype.containsKey = function (key) {
var ref = new DocReference(key, 0);
var firstRef = this.refsByKey.firstAfterOrEqual(ref);
return firstRef !== null && key.isEqual(firstRef.key);
};
return ReferenceSet;
}());
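/*
 * Usage sketch for ReferenceSet, with hypothetical document keys keyA and keyB.
 * References are indexed both by key (for containsKey) and by target/batch ID
 * (for bulk removal by ID):
 *
 *   var refs = new ReferenceSet();
 *   refs.addReference(keyA, 2);
 *   refs.addReference(keyB, 2);
 *   refs.containsKey(keyA);            // true
 *   refs.removeReferencesForId(2);     // returns [keyA, keyB] (key order)
 *   refs.isEmpty();                    // true
 */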
var DocReference = /** @class */ (function () {
function DocReference(key, targetOrBatchId) {
this.key = key;
this.targetOrBatchId = targetOrBatchId;
}
/** Compare by key then by ID */
DocReference.compareByKey = function (left, right) {
return (DocumentKey.comparator(left.key, right.key) ||
primitiveComparator(left.targetOrBatchId, right.targetOrBatchId));
};
/** Compare by ID then by key */
DocReference.compareByTargetId = function (left, right) {
return (primitiveComparator(left.targetOrBatchId, right.targetOrBatchId) ||
DocumentKey.comparator(left.key, right.key));
};
return DocReference;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var MemoryMutationQueue = /** @class */ (function () {
function MemoryMutationQueue(indexManager, referenceDelegate) {
this.indexManager = indexManager;
this.referenceDelegate = referenceDelegate;
/**
* The set of all mutations that have been sent but not yet been applied to
* the backend.
*/
this.mutationQueue = [];
/** Next value to use when assigning sequential IDs to each mutation batch. */
this.nextBatchId = 1;
/** An ordered mapping between document keys and mutation batch IDs. */
this.batchesByDocumentKey = new SortedSet(DocReference.compareByKey);
}
MemoryMutationQueue.prototype.checkEmpty = function (transaction) {
return PersistencePromise.resolve(this.mutationQueue.length === 0);
};
MemoryMutationQueue.prototype.addMutationBatch = function (transaction, localWriteTime, baseMutations, mutations) {
var batchId = this.nextBatchId;
this.nextBatchId++;
var batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
this.mutationQueue.push(batch);
// Track references by document key and index collection parents.
for (var _i = 0, mutations_3 = mutations; _i < mutations_3.length; _i++) {
var mutation = mutations_3[_i];
this.batchesByDocumentKey = this.batchesByDocumentKey.add(new DocReference(mutation.key, batchId));
this.indexManager.addToCollectionParentIndex(transaction, mutation.key.path.popLast());
}
return PersistencePromise.resolve(batch);
};
MemoryMutationQueue.prototype.lookupMutationBatch = function (transaction, batchId) {
return PersistencePromise.resolve(this.findMutationBatch(batchId));
};
MemoryMutationQueue.prototype.getNextMutationBatchAfterBatchId = function (transaction, batchId) {
var nextBatchId = batchId + 1;
// The requested batchId may still be out of range so normalize it to the
// start of the queue.
var rawIndex = this.indexOfBatchId(nextBatchId);
var index = rawIndex < 0 ? 0 : rawIndex;
return PersistencePromise.resolve(this.mutationQueue.length > index ? this.mutationQueue[index] : null);
};
MemoryMutationQueue.prototype.getHighestUnacknowledgedBatchId = function () {
return PersistencePromise.resolve(this.mutationQueue.length === 0 ? BATCHID_UNKNOWN : this.nextBatchId - 1);
};
MemoryMutationQueue.prototype.getAllMutationBatches = function (transaction) {
return PersistencePromise.resolve(this.mutationQueue.slice());
};
MemoryMutationQueue.prototype.getAllMutationBatchesAffectingDocumentKey = function (transaction, documentKey) {
var _this = this;
var start = new DocReference(documentKey, 0);
var end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
var result = [];
this.batchesByDocumentKey.forEachInRange([start, end], function (ref) {
var batch = _this.findMutationBatch(ref.targetOrBatchId);
result.push(batch);
});
return PersistencePromise.resolve(result);
};
MemoryMutationQueue.prototype.getAllMutationBatchesAffectingDocumentKeys = function (transaction, documentKeys) {
var _this = this;
var uniqueBatchIDs = new SortedSet(primitiveComparator);
documentKeys.forEach(function (documentKey) {
var start = new DocReference(documentKey, 0);
var end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
_this.batchesByDocumentKey.forEachInRange([start, end], function (ref) {
uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
});
});
return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
};
MemoryMutationQueue.prototype.getAllMutationBatchesAffectingQuery = function (transaction, query) {
// Use the query path as a prefix for testing if a document matches the
// query.
var prefix = query.path;
var immediateChildrenPathLength = prefix.length + 1;
// Construct a document reference for actually scanning the index. Unlike
// the prefix, the document key in this reference must have an even number of
// segments. The empty segment can be used as a suffix of the query path
// because it precedes all other segments in an ordered traversal.
var startPath = prefix;
if (!DocumentKey.isDocumentKey(startPath)) {
startPath = startPath.child('');
}
var start = new DocReference(new DocumentKey(startPath), 0);
// Find unique batchIDs referenced by all documents potentially matching the
// query.
var uniqueBatchIDs = new SortedSet(primitiveComparator);
this.batchesByDocumentKey.forEachWhile(function (ref) {
var rowKeyPath = ref.key.path;
if (!prefix.isPrefixOf(rowKeyPath)) {
return false;
}
else {
// Rows with document keys more than one segment longer than the query
// path can't be matches. For example, a query on 'rooms' can't match
// the document /rooms/abc/messages/xyx.
// TODO(mcg): we'll need a different scanner when we implement
// ancestor queries.
if (rowKeyPath.length === immediateChildrenPathLength) {
uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
}
return true;
}
}, start);
return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
};
MemoryMutationQueue.prototype.findMutationBatches = function (batchIDs) {
var _this = this;
// Construct an array of matching batches, sorted by batchID to ensure that
// multiple mutations affecting the same document key are applied in order.
var result = [];
batchIDs.forEach(function (batchId) {
var batch = _this.findMutationBatch(batchId);
if (batch !== null) {
result.push(batch);
}
});
return result;
};
MemoryMutationQueue.prototype.removeMutationBatch = function (transaction, batch) {
var _this = this;
// Find the position of the first batch for removal.
var batchIndex = this.indexOfExistingBatchId(batch.batchId, 'removed');
hardAssert(batchIndex === 0);
this.mutationQueue.shift();
var references = this.batchesByDocumentKey;
return PersistencePromise.forEach(batch.mutations, function (mutation) {
var ref = new DocReference(mutation.key, batch.batchId);
references = references.delete(ref);
return _this.referenceDelegate.markPotentiallyOrphaned(transaction, mutation.key);
}).next(function () {
_this.batchesByDocumentKey = references;
});
};
MemoryMutationQueue.prototype.removeCachedMutationKeys = function (batchId) {
// No-op since the memory mutation queue does not maintain a separate cache.
};
MemoryMutationQueue.prototype.containsKey = function (txn, key) {
var ref = new DocReference(key, 0);
var firstRef = this.batchesByDocumentKey.firstAfterOrEqual(ref);
return PersistencePromise.resolve(key.isEqual(firstRef && firstRef.key));
};
MemoryMutationQueue.prototype.performConsistencyCheck = function (txn) {
return PersistencePromise.resolve();
};
/**
* Finds the index of the given batchId in the mutation queue and asserts that
* the resulting index is within the bounds of the queue.
*
* @param batchId - The batchId to search for
* @param action - A description of what the caller is doing, phrased in passive
* form (e.g. "acknowledged" in a routine that acknowledges batches).
*/
MemoryMutationQueue.prototype.indexOfExistingBatchId = function (batchId, action) {
var index = this.indexOfBatchId(batchId);
return index;
};
/**
* Finds the index of the given batchId in the mutation queue. This operation
* is O(1).
*
* @returns The computed index of the batch with the given batchId, based on
* the state of the queue. Note that this index can be negative if the requested
* batchId has already been removed from the queue, or past the end of the
* queue if the batchId is larger than that of the last added batch.
*/
MemoryMutationQueue.prototype.indexOfBatchId = function (batchId) {
if (this.mutationQueue.length === 0) {
// As an index this is past the end of the queue
return 0;
}
// Examine the front of the queue to figure out the difference between the
// batchId and indexes in the array. Note that since the queue is ordered
// by batchId, if the first batch has a larger batchId then the requested
// batchId doesn't exist in the queue.
var firstBatchId = this.mutationQueue[0].batchId;
return batchId - firstBatchId;
};
/**
* A version of lookupMutationBatch that doesn't return a promise; this makes
* other functions that use this code easier to read and more efficient.
*/
MemoryMutationQueue.prototype.findMutationBatch = function (batchId) {
var index = this.indexOfBatchId(batchId);
if (index < 0 || index >= this.mutationQueue.length) {
return null;
}
var batch = this.mutationQueue[index];
return batch;
};
return MemoryMutationQueue;
}());
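/*
 * Worked example of indexOfBatchId: batch IDs are assigned sequentially and the
 * queue is only ever shifted from the front, so a batch's index is simply
 * batchId - firstBatchId. With a queue holding batch IDs [4, 5, 6], batch 6 is
 * at index 6 - 4 = 2, batch 3 yields -1 (already removed) and batch 9 yields 5
 * (past the end); findMutationBatch() maps both out-of-range cases to null.
 */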
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function documentEntryMap() {
return new SortedMap(DocumentKey.comparator);
}
/**
* The memory-only RemoteDocumentCache for IndexedDb. To construct, invoke
* `newMemoryRemoteDocumentCache()`.
*/
var MemoryRemoteDocumentCacheImpl = /** @class */ (function () {
/**
* @param sizer - Used to assess the size of a document. For eager GC, this is
* expected to just return 0 to avoid unnecessarily doing the work of
* calculating the size.
*/
function MemoryRemoteDocumentCacheImpl(indexManager, sizer) {
this.indexManager = indexManager;
this.sizer = sizer;
/** Underlying cache of documents and their read times. */
this.docs = documentEntryMap();
/** Size of all cached documents. */
this.size = 0;
}
/**
* Adds the supplied entry to the cache and updates the cache size as appropriate.
*
* All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/
MemoryRemoteDocumentCacheImpl.prototype.addEntry = function (transaction, doc, readTime) {
var key = doc.key;
var entry = this.docs.get(key);
var previousSize = entry ? entry.size : 0;
var currentSize = this.sizer(doc);
this.docs = this.docs.insert(key, {
document: doc.clone(),
size: currentSize,
readTime: readTime
});
this.size += currentSize - previousSize;
return this.indexManager.addToCollectionParentIndex(transaction, key.path.popLast());
};
/**
* Removes the specified entry from the cache and updates the cache size as appropriate.
*
* All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
* returned by `newChangeBuffer()`.
*/
MemoryRemoteDocumentCacheImpl.prototype.removeEntry = function (documentKey) {
var entry = this.docs.get(documentKey);
if (entry) {
this.docs = this.docs.remove(documentKey);
this.size -= entry.size;
}
};
MemoryRemoteDocumentCacheImpl.prototype.getEntry = function (transaction, documentKey) {
var entry = this.docs.get(documentKey);
return PersistencePromise.resolve(entry
? entry.document.clone()
: MutableDocument.newInvalidDocument(documentKey));
};
MemoryRemoteDocumentCacheImpl.prototype.getEntries = function (transaction, documentKeys) {
var _this = this;
var results = mutableDocumentMap();
documentKeys.forEach(function (documentKey) {
var entry = _this.docs.get(documentKey);
results = results.insert(documentKey, entry
? entry.document.clone()
: MutableDocument.newInvalidDocument(documentKey));
});
return PersistencePromise.resolve(results);
};
MemoryRemoteDocumentCacheImpl.prototype.getDocumentsMatchingQuery = function (transaction, query, sinceReadTime) {
var results = mutableDocumentMap();
// Documents are ordered by key, so we can use a prefix scan to narrow down
// the documents we need to match the query against.
var prefix = new DocumentKey(query.path.child(''));
var iterator = this.docs.getIteratorFrom(prefix);
while (iterator.hasNext()) {
var _d = iterator.getNext(), key = _d.key, _e = _d.value, document_3 = _e.document, readTime = _e.readTime;
if (!query.path.isPrefixOf(key.path)) {
break;
}
if (readTime.compareTo(sinceReadTime) <= 0) {
continue;
}
if (!queryMatches(query, document_3)) {
continue;
}
results = results.insert(document_3.key, document_3.clone());
}
return PersistencePromise.resolve(results);
};
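/**
* Illustration (not part of the SDK source): how the prefix scan above narrows
* the key range. For a query over a collection 'rooms', the iterator starts at
* the smallest key at or after 'rooms/' and the loop breaks at the first key
* whose path is not prefixed by 'rooms', so e.g. 'rooms/eros' is examined but
* 'users/alice' is never visited.
*/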
MemoryRemoteDocumentCacheImpl.prototype.forEachDocumentKey = function (transaction, f) {
return PersistencePromise.forEach(this.docs, function (key) { return f(key); });
};
MemoryRemoteDocumentCacheImpl.prototype.newChangeBuffer = function (options) {
// `trackRemovals` is ignored since the MemoryRemoteDocumentCache keeps
// a separate changelog and does not need special handling for removals.
return new MemoryRemoteDocumentChangeBuffer(this);
};
MemoryRemoteDocumentCacheImpl.prototype.getSize = function (txn) {
return PersistencePromise.resolve(this.size);
};
return MemoryRemoteDocumentCacheImpl;
}());
/**
* Creates a new memory-only RemoteDocumentCache.
*
* @param indexManager - A class that manages collection group indices.
* @param sizer - Used to assess the size of a document. For eager GC, this is
* expected to just return 0 to avoid unnecessarily doing the work of
* calculating the size.
*/
function newMemoryRemoteDocumentCache(indexManager, sizer) {
return new MemoryRemoteDocumentCacheImpl(indexManager, sizer);
}
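/**
* Usage sketch (illustrative only; the SDK wires this up internally). As the
* comment above notes, an eager-GC sizer can simply return 0.
*
* @example
* var cache = newMemoryRemoteDocumentCache(new MemoryIndexManager(), function (doc) { return 0; });
*/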
/**
* Handles the details of adding and updating documents in the MemoryRemoteDocumentCache.
*/
var MemoryRemoteDocumentChangeBuffer = /** @class */ (function (_super) {
tslib.__extends(MemoryRemoteDocumentChangeBuffer, _super);
function MemoryRemoteDocumentChangeBuffer(documentCache) {
var _this = _super.call(this) || this;
_this.documentCache = documentCache;
return _this;
}
MemoryRemoteDocumentChangeBuffer.prototype.applyChanges = function (transaction) {
var _this = this;
var promises = [];
this.changes.forEach(function (key, doc) {
if (doc.document.isValidDocument()) {
promises.push(_this.documentCache.addEntry(transaction, doc.document, _this.getReadTime(key)));
}
else {
_this.documentCache.removeEntry(key);
}
});
return PersistencePromise.waitFor(promises);
};
MemoryRemoteDocumentChangeBuffer.prototype.getFromCache = function (transaction, documentKey) {
return this.documentCache.getEntry(transaction, documentKey);
};
MemoryRemoteDocumentChangeBuffer.prototype.getAllFromCache = function (transaction, documentKeys) {
return this.documentCache.getEntries(transaction, documentKeys);
};
return MemoryRemoteDocumentChangeBuffer;
}(RemoteDocumentChangeBuffer));
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var MemoryTargetCache = /** @class */ (function () {
function MemoryTargetCache(persistence) {
this.persistence = persistence;
/**
* Maps a target to the data about that target
*/
this.targets = new ObjectMap(function (t) { return canonifyTarget(t); }, targetEquals);
/** The last received snapshot version. */
this.lastRemoteSnapshotVersion = SnapshotVersion.min();
/** The highest numbered target ID encountered. */
this.highestTargetId = 0;
/** The highest sequence number encountered. */
this.highestSequenceNumber = 0;
/**
* An ordered bidirectional mapping between documents and the remote target
* IDs.
*/
this.references = new ReferenceSet();
this.targetCount = 0;
this.targetIdGenerator = TargetIdGenerator.forTargetCache();
}
MemoryTargetCache.prototype.forEachTarget = function (txn, f) {
this.targets.forEach(function (_, targetData) { return f(targetData); });
return PersistencePromise.resolve();
};
MemoryTargetCache.prototype.getLastRemoteSnapshotVersion = function (transaction) {
return PersistencePromise.resolve(this.lastRemoteSnapshotVersion);
};
MemoryTargetCache.prototype.getHighestSequenceNumber = function (transaction) {
return PersistencePromise.resolve(this.highestSequenceNumber);
};
MemoryTargetCache.prototype.allocateTargetId = function (transaction) {
this.highestTargetId = this.targetIdGenerator.next();
return PersistencePromise.resolve(this.highestTargetId);
};
MemoryTargetCache.prototype.setTargetsMetadata = function (transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
if (lastRemoteSnapshotVersion) {
this.lastRemoteSnapshotVersion = lastRemoteSnapshotVersion;
}
if (highestListenSequenceNumber > this.highestSequenceNumber) {
this.highestSequenceNumber = highestListenSequenceNumber;
}
return PersistencePromise.resolve();
};
MemoryTargetCache.prototype.saveTargetData = function (targetData) {
this.targets.set(targetData.target, targetData);
var targetId = targetData.targetId;
if (targetId > this.highestTargetId) {
this.targetIdGenerator = new TargetIdGenerator(targetId);
this.highestTargetId = targetId;
}
if (targetData.sequenceNumber > this.highestSequenceNumber) {
this.highestSequenceNumber = targetData.sequenceNumber;
}
};
MemoryTargetCache.prototype.addTargetData = function (transaction, targetData) {
this.saveTargetData(targetData);
this.targetCount += 1;
return PersistencePromise.resolve();
};
MemoryTargetCache.prototype.updateTargetData = function (transaction, targetData) {
this.saveTargetData(targetData);
return PersistencePromise.resolve();
};
MemoryTargetCache.prototype.removeTargetData = function (transaction, targetData) {
this.targets.delete(targetData.target);
this.references.removeReferencesForId(targetData.targetId);
this.targetCount -= 1;
return PersistencePromise.resolve();
};
MemoryTargetCache.prototype.removeTargets = function (transaction, upperBound, activeTargetIds) {
var _this = this;
var count = 0;
var removals = [];
this.targets.forEach(function (key, targetData) {
if (targetData.sequenceNumber <= upperBound &&
activeTargetIds.get(targetData.targetId) === null) {
_this.targets.delete(key);
removals.push(_this.removeMatchingKeysForTargetId(transaction, targetData.targetId));
count++;
}
});
return PersistencePromise.waitFor(removals).next(function () { return count; });
};
MemoryTargetCache.prototype.getTargetCount = function (transaction) {
return PersistencePromise.resolve(this.targetCount);
};
MemoryTargetCache.prototype.getTargetData = function (transaction, target) {
var targetData = this.targets.get(target) || null;
return PersistencePromise.resolve(targetData);
};
MemoryTargetCache.prototype.addMatchingKeys = function (txn, keys, targetId) {
this.references.addReferences(keys, targetId);
return PersistencePromise.resolve();
};
MemoryTargetCache.prototype.removeMatchingKeys = function (txn, keys, targetId) {
this.references.removeReferences(keys, targetId);
var referenceDelegate = this.persistence.referenceDelegate;
var promises = [];
if (referenceDelegate) {
keys.forEach(function (key) {
promises.push(referenceDelegate.markPotentiallyOrphaned(txn, key));
});
}
return PersistencePromise.waitFor(promises);
};
MemoryTargetCache.prototype.removeMatchingKeysForTargetId = function (txn, targetId) {
this.references.removeReferencesForId(targetId);
return PersistencePromise.resolve();
};
MemoryTargetCache.prototype.getMatchingKeysForTargetId = function (txn, targetId) {
var matchingKeys = this.references.referencesForId(targetId);
return PersistencePromise.resolve(matchingKeys);
};
MemoryTargetCache.prototype.containsKey = function (txn, key) {
return PersistencePromise.resolve(this.references.containsKey(key));
};
return MemoryTargetCache;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$b = 'MemoryPersistence';
/**
* A memory-backed instance of Persistence. Data is stored only in RAM and
* not persisted across sessions.
*/
var MemoryPersistence = /** @class */ (function () {
/**
* The constructor accepts a factory for creating a reference delegate. This
* allows both the delegate and this instance to have strong references to
* each other without having nullable fields that would then need to be
* checked or asserted on every access.
*/
function MemoryPersistence(referenceDelegateFactory, serializer) {
var _this = this;
this.mutationQueues = {};
this.listenSequence = new ListenSequence(0);
// Memory persistence is ready for use as soon as it is constructed.
this._started = true;
this.referenceDelegate = referenceDelegateFactory(this);
this.targetCache = new MemoryTargetCache(this);
var sizer = function (doc) { return _this.referenceDelegate.documentSize(doc); };
this.indexManager = new MemoryIndexManager();
this.remoteDocumentCache = newMemoryRemoteDocumentCache(this.indexManager, sizer);
this.serializer = new LocalSerializer(serializer);
this.bundleCache = new MemoryBundleCache(this.serializer);
}
MemoryPersistence.prototype.start = function () {
return Promise.resolve();
};
MemoryPersistence.prototype.shutdown = function () {
// No durable state to ensure is closed on shutdown.
this._started = false;
return Promise.resolve();
};
Object.defineProperty(MemoryPersistence.prototype, "started", {
get: function () {
return this._started;
},
enumerable: false,
configurable: true
});
MemoryPersistence.prototype.setDatabaseDeletedListener = function () {
// No op.
};
MemoryPersistence.prototype.setNetworkEnabled = function () {
// No op.
};
MemoryPersistence.prototype.getIndexManager = function () {
return this.indexManager;
};
MemoryPersistence.prototype.getMutationQueue = function (user) {
var queue = this.mutationQueues[user.toKey()];
if (!queue) {
queue = new MemoryMutationQueue(this.indexManager, this.referenceDelegate);
this.mutationQueues[user.toKey()] = queue;
}
return queue;
};
MemoryPersistence.prototype.getTargetCache = function () {
return this.targetCache;
};
MemoryPersistence.prototype.getRemoteDocumentCache = function () {
return this.remoteDocumentCache;
};
MemoryPersistence.prototype.getBundleCache = function () {
return this.bundleCache;
};
MemoryPersistence.prototype.runTransaction = function (action, mode, transactionOperation) {
var _this = this;
logDebug(LOG_TAG$b, 'Starting transaction:', action);
var txn = new MemoryTransaction(this.listenSequence.next());
this.referenceDelegate.onTransactionStarted();
return transactionOperation(txn)
.next(function (result) {
return _this.referenceDelegate
.onTransactionCommitted(txn)
.next(function () { return result; });
})
.toPromise()
.then(function (result) {
txn.raiseOnCommittedEvent();
return result;
});
};
MemoryPersistence.prototype.mutationQueuesContainKey = function (transaction, key) {
return PersistencePromise.or(Object.values(this.mutationQueues).map(function (queue) { return function () { return queue.containsKey(transaction, key); }; }));
};
return MemoryPersistence;
}());
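/**
* Construction sketch (illustrative only; the public API builds this
* internally). `remoteSerializer` is assumed to be the serializer instance
* that is passed to `LocalSerializer` elsewhere in this bundle.
*
* @example
* var persistence = new MemoryPersistence(MemoryEagerDelegate.factory, remoteSerializer);
* persistence.start().then(function () {
*   // persistence.started === true
* });
*/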
/**
* Memory persistence is not actually transactional, but future implementations
* may have transaction-scoped state.
*/
var MemoryTransaction = /** @class */ (function (_super) {
tslib.__extends(MemoryTransaction, _super);
function MemoryTransaction(currentSequenceNumber) {
var _this = _super.call(this) || this;
_this.currentSequenceNumber = currentSequenceNumber;
return _this;
}
return MemoryTransaction;
}(PersistenceTransaction));
var MemoryEagerDelegate = /** @class */ (function () {
function MemoryEagerDelegate(persistence) {
this.persistence = persistence;
/** Tracks all documents that are active in Query views. */
this.localViewReferences = new ReferenceSet();
/** The set of documents that are potentially GCed after each transaction. */
this._orphanedDocuments = null;
}
MemoryEagerDelegate.factory = function (persistence) {
return new MemoryEagerDelegate(persistence);
};
Object.defineProperty(MemoryEagerDelegate.prototype, "orphanedDocuments", {
get: function () {
if (!this._orphanedDocuments) {
throw fail();
}
else {
return this._orphanedDocuments;
}
},
enumerable: false,
configurable: true
});
MemoryEagerDelegate.prototype.addReference = function (txn, targetId, key) {
this.localViewReferences.addReference(key, targetId);
this.orphanedDocuments.delete(key.toString());
return PersistencePromise.resolve();
};
MemoryEagerDelegate.prototype.removeReference = function (txn, targetId, key) {
this.localViewReferences.removeReference(key, targetId);
this.orphanedDocuments.add(key.toString());
return PersistencePromise.resolve();
};
MemoryEagerDelegate.prototype.markPotentiallyOrphaned = function (txn, key) {
this.orphanedDocuments.add(key.toString());
return PersistencePromise.resolve();
};
MemoryEagerDelegate.prototype.removeTarget = function (txn, targetData) {
var _this = this;
var orphaned = this.localViewReferences.removeReferencesForId(targetData.targetId);
orphaned.forEach(function (key) { return _this.orphanedDocuments.add(key.toString()); });
var cache = this.persistence.getTargetCache();
return cache
.getMatchingKeysForTargetId(txn, targetData.targetId)
.next(function (keys) {
keys.forEach(function (key) { return _this.orphanedDocuments.add(key.toString()); });
})
.next(function () { return cache.removeTargetData(txn, targetData); });
};
MemoryEagerDelegate.prototype.onTransactionStarted = function () {
this._orphanedDocuments = new Set();
};
MemoryEagerDelegate.prototype.onTransactionCommitted = function (txn) {
var _this = this;
// Remove newly orphaned documents.
var cache = this.persistence.getRemoteDocumentCache();
var changeBuffer = cache.newChangeBuffer();
return PersistencePromise.forEach(this.orphanedDocuments, function (path) {
var key = DocumentKey.fromPath(path);
return _this.isReferenced(txn, key).next(function (isReferenced) {
if (!isReferenced) {
changeBuffer.removeEntry(key);
}
});
}).next(function () {
_this._orphanedDocuments = null;
return changeBuffer.apply(txn);
});
};
MemoryEagerDelegate.prototype.updateLimboDocument = function (txn, key) {
var _this = this;
return this.isReferenced(txn, key).next(function (isReferenced) {
if (isReferenced) {
_this.orphanedDocuments.delete(key.toString());
}
else {
_this.orphanedDocuments.add(key.toString());
}
});
};
MemoryEagerDelegate.prototype.documentSize = function (doc) {
// For eager GC, we don't care about the document size; there are no size thresholds.
return 0;
};
MemoryEagerDelegate.prototype.isReferenced = function (txn, key) {
var _this = this;
return PersistencePromise.or([
function () { return PersistencePromise.resolve(_this.localViewReferences.containsKey(key)); },
function () { return _this.persistence.getTargetCache().containsKey(txn, key); },
function () { return _this.persistence.mutationQueuesContainKey(txn, key); }
]);
};
return MemoryEagerDelegate;
}());
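/**
* Lifecycle sketch (descriptive, not additional behavior): the eager delegate
* garbage-collects as follows. `onTransactionStarted()` resets the
* orphaned-document set; during the transaction, removed references and
* `markPotentiallyOrphaned()` calls add keys to that set; and
* `onTransactionCommitted()` drops every collected document that is no longer
* referenced by a local view, a target or a pending mutation.
*/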
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A query engine that takes advantage of the target document mapping in the
* QueryCache. Query execution is optimized by only reading the documents that
* previously matched a query plus any documents that were edited after the
* query was last listened to.
*
* There are some cases when this optimization is not guaranteed to produce
* the same results as full collection scans. In these cases, query
* processing falls back to full scans. These cases are:
*
* - Limit queries where a document that matched the query previously no longer
* matches the query.
*
* - Limit queries where a document edit may cause the document to sort below
* another document that is in the local cache.
*
* - Queries that have never been CURRENT or free of limbo documents.
*/
var QueryEngine = /** @class */ (function () {
function QueryEngine() {
}
/** Sets the document view to query against. */
QueryEngine.prototype.setLocalDocumentsView = function (localDocuments) {
this.localDocumentsView = localDocuments;
};
/** Returns all local documents matching the specified query. */
QueryEngine.prototype.getDocumentsMatchingQuery = function (transaction, query, lastLimboFreeSnapshotVersion, remoteKeys) {
var _this = this;
// Queries that match all documents don't benefit from using
// key-based lookups. It is more efficient to scan all documents in a
// collection, rather than to perform individual lookups.
if (matchesAllDocuments(query)) {
return this.executeFullCollectionScan(transaction, query);
}
// Queries that have never seen a limbo-free snapshot should also be run as
// a full collection scan.
if (lastLimboFreeSnapshotVersion.isEqual(SnapshotVersion.min())) {
return this.executeFullCollectionScan(transaction, query);
}
return this.localDocumentsView.getDocuments(transaction, remoteKeys).next(function (documents) {
var previousResults = _this.applyQuery(query, documents);
if ((hasLimitToFirst(query) || hasLimitToLast(query)) &&
_this.needsRefill(query.limitType, previousResults, remoteKeys, lastLimboFreeSnapshotVersion)) {
return _this.executeFullCollectionScan(transaction, query);
}
if (getLogLevel() <= logger.LogLevel.DEBUG) {
logDebug('QueryEngine', 'Re-using previous result from %s to execute query: %s', lastLimboFreeSnapshotVersion.toString(), stringifyQuery(query));
}
// Retrieve all results for documents that were updated since the last
// limbo-document free remote snapshot.
return _this.localDocumentsView.getDocumentsMatchingQuery(transaction, query, lastLimboFreeSnapshotVersion).next(function (updatedResults) {
// We merge `previousResults` into `updatedResults`, since
// `updatedResults` is already a DocumentMap. If a document is
// contained in both lists, then its contents are the same.
previousResults.forEach(function (doc) {
updatedResults = updatedResults.insert(doc.key, doc);
});
return updatedResults;
});
});
};
/** Applies the query filter and sorting to the provided documents. */
QueryEngine.prototype.applyQuery = function (query, documents) {
// Sort the documents and re-apply the query filter since previously
// matching documents do not necessarily still match the query.
var queryResults = new SortedSet(newQueryComparator(query));
documents.forEach(function (_, maybeDoc) {
if (queryMatches(query, maybeDoc)) {
queryResults = queryResults.add(maybeDoc);
}
});
return queryResults;
};
/**
* Determines if a limit query needs to be refilled from cache, making it
* ineligible for index-free execution.
*
* @param sortedPreviousResults - The documents that matched the query when it
* was last synchronized, sorted by the query's comparator.
* @param remoteKeys - The document keys that matched the query at the last
* snapshot.
* @param limboFreeSnapshotVersion - The version of the snapshot when the
* query was last synchronized.
*/
QueryEngine.prototype.needsRefill = function (limitType, sortedPreviousResults, remoteKeys, limboFreeSnapshotVersion) {
// The query needs to be refilled if a previously matching document no
// longer matches.
if (remoteKeys.size !== sortedPreviousResults.size) {
return true;
}
// Limit queries are not eligible for index-free query execution if there is
// a potential that an older document from cache now sorts before a document
// that was previously part of the limit. This, however, can only happen if
// the document at the edge of the limit goes out of limit.
// If a document that is not the limit boundary sorts differently,
// the boundary of the limit itself did not change and documents from cache
// will continue to be "rejected" by this boundary. Therefore, we can ignore
// any modifications that don't affect the last document.
var docAtLimitEdge = limitType === "F" /* First */
? sortedPreviousResults.last()
: sortedPreviousResults.first();
if (!docAtLimitEdge) {
// We don't need to refill the query if there were already no documents.
return false;
}
return (docAtLimitEdge.hasPendingWrites ||
docAtLimitEdge.version.compareTo(limboFreeSnapshotVersion) > 0);
};
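/**
* Illustration (not part of the SDK source): for a limit-to-first(2) query
* whose previous results were [docA, docB] in query order, the query can reuse
* the cached results only if the remote key set still has size 2 and docB (the
* document at the limit edge) has neither pending writes nor a version newer
* than the limbo-free snapshot; otherwise execution falls back to a full
* collection scan.
*/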
QueryEngine.prototype.executeFullCollectionScan = function (transaction, query) {
if (getLogLevel() <= logger.LogLevel.DEBUG) {
logDebug('QueryEngine', 'Using full collection scan to execute query:', stringifyQuery(query));
}
return this.localDocumentsView.getDocumentsMatchingQuery(transaction, query, SnapshotVersion.min());
};
return QueryEngine;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Simple wrapper around a nullable UID. Mostly exists to make code more
* readable.
*/
var User = /** @class */ (function () {
function User(uid) {
this.uid = uid;
}
User.prototype.isAuthenticated = function () {
return this.uid != null;
};
/**
* Returns a key representing this user, suitable for inclusion in a
* dictionary.
*/
User.prototype.toKey = function () {
if (this.isAuthenticated()) {
return 'uid:' + this.uid;
}
else {
return 'anonymous-user';
}
};
User.prototype.isEqual = function (otherUser) {
return otherUser.uid === this.uid;
};
return User;
}());
/** A user with a null UID. */
User.UNAUTHENTICATED = new User(null);
// TODO(mikelehen): Look into getting a proper uid-equivalent for
// non-FirebaseAuth providers.
User.GOOGLE_CREDENTIALS = new User('google-credentials-uid');
User.FIRST_PARTY = new User('first-party-uid');
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The format of the LocalStorage key that stores the client state is:
//   firestore_clients_<persistence_key>_<client_id>
var CLIENT_STATE_KEY_PREFIX = 'firestore_clients';
/** Assembles the key for a client state in WebStorage */
function createWebStorageClientStateKey(persistenceKey, clientId) {
return CLIENT_STATE_KEY_PREFIX + "_" + persistenceKey + "_" + clientId;
}
// The format of the WebStorage key that stores the mutation state is:
//   firestore_mutations_<persistence_key>_<batch_id>
//   (for unauthenticated users)
// or:
//   firestore_mutations_<persistence_key>_<batch_id>_<user_uid>
//
// 'user_uid' is last to avoid needing to escape '_' characters that it might
// contain.
var MUTATION_BATCH_KEY_PREFIX = 'firestore_mutations';
/** Assembles the key for a mutation batch in WebStorage */
function createWebStorageMutationBatchKey(persistenceKey, user, batchId) {
var mutationKey = MUTATION_BATCH_KEY_PREFIX + "_" + persistenceKey + "_" + batchId;
if (user.isAuthenticated()) {
mutationKey += "_" + user.uid;
}
return mutationKey;
}
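/**
* Illustration (hypothetical values): keys produced by the helpers above.
*
* @example
* createWebStorageClientStateKey('my-db', 'client42');
* // => 'firestore_clients_my-db_client42'
* createWebStorageMutationBatchKey('my-db', User.UNAUTHENTICATED, 7);
* // => 'firestore_mutations_my-db_7'
* createWebStorageMutationBatchKey('my-db', new User('alice'), 7);
* // => 'firestore_mutations_my-db_7_alice'
*/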
// The format of the WebStorage key that stores a query target's metadata is:
//   firestore_targets_<persistence_key>_<target_id>
var QUERY_TARGET_KEY_PREFIX = 'firestore_targets';
/** Assembles the key for a query state in WebStorage */
function createWebStorageQueryTargetMetadataKey(persistenceKey, targetId) {
return QUERY_TARGET_KEY_PREFIX + "_" + persistenceKey + "_" + targetId;
}
// The WebStorage prefix that stores the primary tab's online state. The
// format of the key is:
//   firestore_online_state_<persistence_key>
var ONLINE_STATE_KEY_PREFIX = 'firestore_online_state';
/** Assembles the key for the online state of the primary tab. */
function createWebStorageOnlineStateKey(persistenceKey) {
return ONLINE_STATE_KEY_PREFIX + "_" + persistenceKey;
}
// The WebStorage prefix that acts as an event to indicate that the remote
// documents might have changed due to a secondary tab loading a bundle. The
// format of the key is:
//   firestore_bundle_loaded_<persistence_key>
var BUNDLE_LOADED_KEY_PREFIX = 'firestore_bundle_loaded';
function createBundleLoadedKey(persistenceKey) {
return BUNDLE_LOADED_KEY_PREFIX + "_" + persistenceKey;
}
// The WebStorage key prefix for the key that stores the last sequence number
// allocated. The key looks like 'firestore_sequence_number_<persistence_key>'.
var SEQUENCE_NUMBER_KEY_PREFIX = 'firestore_sequence_number';
/** Assembles the key for the current sequence number. */
function createWebStorageSequenceNumberKey(persistenceKey) {
return SEQUENCE_NUMBER_KEY_PREFIX + "_" + persistenceKey;
}
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$a = 'SharedClientState';
/**
* Holds the state of a mutation batch, including its user ID, batch ID and
* whether the batch is 'pending', 'acknowledged' or 'rejected'.
*/
// Visible for testing
var MutationMetadata = /** @class */ (function () {
function MutationMetadata(user, batchId, state, error) {
this.user = user;
this.batchId = batchId;
this.state = state;
this.error = error;
}
/**
* Parses a MutationMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
MutationMetadata.fromWebStorageEntry = function (user, batchId, value) {
var mutationBatch = JSON.parse(value);
var validData = typeof mutationBatch === 'object' &&
['pending', 'acknowledged', 'rejected'].indexOf(mutationBatch.state) !==
-1 &&
(mutationBatch.error === undefined ||
typeof mutationBatch.error === 'object');
var firestoreError = undefined;
if (validData && mutationBatch.error) {
validData =
typeof mutationBatch.error.message === 'string' &&
typeof mutationBatch.error.code === 'string';
if (validData) {
firestoreError = new FirestoreError(mutationBatch.error.code, mutationBatch.error.message);
}
}
if (validData) {
return new MutationMetadata(user, batchId, mutationBatch.state, firestoreError);
}
else {
logError(LOG_TAG$a, "Failed to parse mutation state for ID '" + batchId + "': " + value);
return null;
}
};
MutationMetadata.prototype.toWebStorageJSON = function () {
var batchMetadata = {
state: this.state,
updateTimeMs: Date.now() // Modify the existing value to trigger update.
};
if (this.error) {
batchMetadata.error = {
code: this.error.code,
message: this.error.message
};
}
return JSON.stringify(batchMetadata);
};
return MutationMetadata;
}());
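/**
* Illustration (hypothetical values): the WebStorage payload written for a
* pending mutation batch; `updateTimeMs` varies per call.
*
* @example
* new MutationMetadata(new User('alice'), 7, 'pending').toWebStorageJSON();
* // => '{"state":"pending","updateTimeMs":1620000000000}'
*/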
/**
* Holds the state of a query target, including its target ID and whether the
* target is 'not-current', 'current' or 'rejected'.
*/
// Visible for testing
var QueryTargetMetadata = /** @class */ (function () {
function QueryTargetMetadata(targetId, state, error) {
this.targetId = targetId;
this.state = state;
this.error = error;
}
/**
* Parses a QueryTargetMetadata from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
QueryTargetMetadata.fromWebStorageEntry = function (targetId, value) {
var targetState = JSON.parse(value);
var validData = typeof targetState === 'object' &&
['not-current', 'current', 'rejected'].indexOf(targetState.state) !==
-1 &&
(targetState.error === undefined ||
typeof targetState.error === 'object');
var firestoreError = undefined;
if (validData && targetState.error) {
validData =
typeof targetState.error.message === 'string' &&
typeof targetState.error.code === 'string';
if (validData) {
firestoreError = new FirestoreError(targetState.error.code, targetState.error.message);
}
}
if (validData) {
return new QueryTargetMetadata(targetId, targetState.state, firestoreError);
}
else {
logError(LOG_TAG$a, "Failed to parse target state for ID '" + targetId + "': " + value);
return null;
}
};
QueryTargetMetadata.prototype.toWebStorageJSON = function () {
var targetState = {
state: this.state,
updateTimeMs: Date.now() // Modify the existing value to trigger update.
};
if (this.error) {
targetState.error = {
code: this.error.code,
message: this.error.message
};
}
return JSON.stringify(targetState);
};
return QueryTargetMetadata;
}());
/**
* This class represents the immutable ClientState for a client read from
* WebStorage, containing the list of active query targets.
*/
var RemoteClientState = /** @class */ (function () {
function RemoteClientState(clientId, activeTargetIds) {
this.clientId = clientId;
this.activeTargetIds = activeTargetIds;
}
/**
* Parses a RemoteClientState from the JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
RemoteClientState.fromWebStorageEntry = function (clientId, value) {
var clientState = JSON.parse(value);
var validData = typeof clientState === 'object' &&
clientState.activeTargetIds instanceof Array;
var activeTargetIdsSet = targetIdSet();
for (var i = 0; validData && i < clientState.activeTargetIds.length; ++i) {
validData = isSafeInteger(clientState.activeTargetIds[i]);
activeTargetIdsSet = activeTargetIdsSet.add(clientState.activeTargetIds[i]);
}
if (validData) {
return new RemoteClientState(clientId, activeTargetIdsSet);
}
else {
logError(LOG_TAG$a, "Failed to parse client data for instance '" + clientId + "': " + value);
return null;
}
};
return RemoteClientState;
}());
/**
* This class represents the online state for all clients participating in
* multi-tab. The online state is only written to by the primary client, and
* used in secondary clients to update their query views.
*/
var SharedOnlineState = /** @class */ (function () {
function SharedOnlineState(clientId, onlineState) {
this.clientId = clientId;
this.onlineState = onlineState;
}
/**
* Parses a SharedOnlineState from its JSON representation in WebStorage.
* Logs a warning and returns null if the format of the data is not valid.
*/
SharedOnlineState.fromWebStorageEntry = function (value) {
var onlineState = JSON.parse(value);
var validData = typeof onlineState === 'object' &&
['Unknown', 'Online', 'Offline'].indexOf(onlineState.onlineState) !==
-1 &&
typeof onlineState.clientId === 'string';
if (validData) {
return new SharedOnlineState(onlineState.clientId, onlineState.onlineState);
}
else {
logError(LOG_TAG$a, "Failed to parse online state: " + value);
return null;
}
};
return SharedOnlineState;
}());
/**
* Metadata state of the local client. Unlike `RemoteClientState`, this class is
* mutable and keeps track of all pending mutations, which allows us to
* update the range of pending mutation batch IDs as new mutations are added or
* removed.
*
* The data in `LocalClientState` is not read from WebStorage and instead
* updated via its instance methods. The updated state can be serialized via
* `toWebStorageJSON()`.
*/
// Visible for testing.
var LocalClientState = /** @class */ (function () {
function LocalClientState() {
this.activeTargetIds = targetIdSet();
}
LocalClientState.prototype.addQueryTarget = function (targetId) {
this.activeTargetIds = this.activeTargetIds.add(targetId);
};
LocalClientState.prototype.removeQueryTarget = function (targetId) {
this.activeTargetIds = this.activeTargetIds.delete(targetId);
};
/**
* Converts this entry into a JSON-encoded format we can use for WebStorage.
* Does not encode `clientId` as it is part of the key in WebStorage.
*/
LocalClientState.prototype.toWebStorageJSON = function () {
var data = {
activeTargetIds: this.activeTargetIds.toArray(),
updateTimeMs: Date.now() // Modify the existing value to trigger update.
};
return JSON.stringify(data);
};
return LocalClientState;
}());
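/**
* Illustration (hypothetical values): the serialized local client state after
* addQueryTarget(2) and addQueryTarget(4) have been called.
*
* @example
* // toWebStorageJSON() => '{"activeTargetIds":[2,4],"updateTimeMs":1620000000000}'
*/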
/**
* `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the
* backing store for the SharedClientState. It keeps track of all active
* clients and supports modifications of the local client's data.
*/
var WebStorageSharedClientState = /** @class */ (function () {
function WebStorageSharedClientState(window, queue, persistenceKey, localClientId, initialUser) {
this.window = window;
this.queue = queue;
this.persistenceKey = persistenceKey;
this.localClientId = localClientId;
this.syncEngine = null;
this.onlineStateHandler = null;
this.sequenceNumberHandler = null;
this.storageListener = this.handleWebStorageEvent.bind(this);
this.activeClients = new SortedMap(primitiveComparator);
this.started = false;
/**
* Captures WebStorage events that occur before `start()` is called. These
* events are replayed once `WebStorageSharedClientState` is started.
*/
this.earlyEvents = [];
// Escape the special characters mentioned here:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
var escapedPersistenceKey = persistenceKey.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
this.storage = this.window.localStorage;
this.currentUser = initialUser;
this.localClientStorageKey = createWebStorageClientStateKey(this.persistenceKey, this.localClientId);
this.sequenceNumberKey = createWebStorageSequenceNumberKey(this.persistenceKey);
this.activeClients = this.activeClients.insert(this.localClientId, new LocalClientState());
this.clientStateKeyRe = new RegExp("^" + CLIENT_STATE_KEY_PREFIX + "_" + escapedPersistenceKey + "_([^_]*)$");
this.mutationBatchKeyRe = new RegExp("^" + MUTATION_BATCH_KEY_PREFIX + "_" + escapedPersistenceKey + "_(\\d+)(?:_(.*))?$");
this.queryTargetKeyRe = new RegExp("^" + QUERY_TARGET_KEY_PREFIX + "_" + escapedPersistenceKey + "_(\\d+)$");
this.onlineStateKey = createWebStorageOnlineStateKey(this.persistenceKey);
this.bundleLoadedKey = createBundleLoadedKey(this.persistenceKey);
// Rather than adding the storage observer during start(), we add the
// storage observer during initialization. This ensures that we collect
// events before other components populate their initial state (during their
// respective start() calls). Otherwise, we might for example miss a
// mutation that is added after LocalStore's start() processed the existing
// mutations but before we observe WebStorage events.
this.window.addEventListener('storage', this.storageListener);
}
/** Returns 'true' if WebStorage is available in the current environment. */
WebStorageSharedClientState.isAvailable = function (window) {
return !!(window && window.localStorage);
};
WebStorageSharedClientState.prototype.start = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var existingClients, _i, existingClients_1, clientId, storageItem, clientState, onlineStateJSON, onlineState, _d, _e, event_1;
var _this = this;
return tslib.__generator(this, function (_f) {
switch (_f.label) {
case 0: return [4 /*yield*/, this.syncEngine.getActiveClients()];
case 1:
existingClients = _f.sent();
for (_i = 0, existingClients_1 = existingClients; _i < existingClients_1.length; _i++) {
clientId = existingClients_1[_i];
if (clientId === this.localClientId) {
continue;
}
storageItem = this.getItem(createWebStorageClientStateKey(this.persistenceKey, clientId));
if (storageItem) {
clientState = RemoteClientState.fromWebStorageEntry(clientId, storageItem);
if (clientState) {
this.activeClients = this.activeClients.insert(clientState.clientId, clientState);
}
}
}
this.persistClientState();
onlineStateJSON = this.storage.getItem(this.onlineStateKey);
if (onlineStateJSON) {
onlineState = this.fromWebStorageOnlineState(onlineStateJSON);
if (onlineState) {
this.handleOnlineStateEvent(onlineState);
}
}
for (_d = 0, _e = this.earlyEvents; _d < _e.length; _d++) {
event_1 = _e[_d];
this.handleWebStorageEvent(event_1);
}
this.earlyEvents = [];
// Register a window unload hook to remove the client metadata entry from
// WebStorage even if `shutdown()` was not called.
this.window.addEventListener('pagehide', function () { return _this.shutdown(); });
this.started = true;
return [2 /*return*/];
}
});
});
};
WebStorageSharedClientState.prototype.writeSequenceNumber = function (sequenceNumber) {
this.setItem(this.sequenceNumberKey, JSON.stringify(sequenceNumber));
};
WebStorageSharedClientState.prototype.getAllActiveQueryTargets = function () {
return this.extractActiveQueryTargets(this.activeClients);
};
WebStorageSharedClientState.prototype.isActiveQueryTarget = function (targetId) {
var found = false;
this.activeClients.forEach(function (key, value) {
if (value.activeTargetIds.has(targetId)) {
found = true;
}
});
return found;
};
WebStorageSharedClientState.prototype.addPendingMutation = function (batchId) {
this.persistMutationState(batchId, 'pending');
};
WebStorageSharedClientState.prototype.updateMutationState = function (batchId, state, error) {
this.persistMutationState(batchId, state, error);
// Once a final mutation result is observed by other clients, they no longer
// access the mutation's metadata entry. Since WebStorage replays events
// in order, it is safe to delete the entry right after updating it.
this.removeMutationState(batchId);
};
WebStorageSharedClientState.prototype.addLocalQueryTarget = function (targetId) {
var queryState = 'not-current';
// Look up an existing query state if the target ID was already registered
// by another tab
if (this.isActiveQueryTarget(targetId)) {
var storageItem = this.storage.getItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
if (storageItem) {
var metadata = QueryTargetMetadata.fromWebStorageEntry(targetId, storageItem);
if (metadata) {
queryState = metadata.state;
}
}
}
this.localClientState.addQueryTarget(targetId);
this.persistClientState();
return queryState;
};
WebStorageSharedClientState.prototype.removeLocalQueryTarget = function (targetId) {
this.localClientState.removeQueryTarget(targetId);
this.persistClientState();
};
WebStorageSharedClientState.prototype.isLocalQueryTarget = function (targetId) {
return this.localClientState.activeTargetIds.has(targetId);
};
WebStorageSharedClientState.prototype.clearQueryState = function (targetId) {
this.removeItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
};
WebStorageSharedClientState.prototype.updateQueryState = function (targetId, state, error) {
this.persistQueryTargetState(targetId, state, error);
};
WebStorageSharedClientState.prototype.handleUserChange = function (user, removedBatchIds, addedBatchIds) {
var _this = this;
removedBatchIds.forEach(function (batchId) {
_this.removeMutationState(batchId);
});
this.currentUser = user;
addedBatchIds.forEach(function (batchId) {
_this.addPendingMutation(batchId);
});
};
WebStorageSharedClientState.prototype.setOnlineState = function (onlineState) {
this.persistOnlineState(onlineState);
};
WebStorageSharedClientState.prototype.notifyBundleLoaded = function () {
this.persistBundleLoadedState();
};
WebStorageSharedClientState.prototype.shutdown = function () {
if (this.started) {
this.window.removeEventListener('storage', this.storageListener);
this.removeItem(this.localClientStorageKey);
this.started = false;
}
};
WebStorageSharedClientState.prototype.getItem = function (key) {
var value = this.storage.getItem(key);
logDebug(LOG_TAG$a, 'READ', key, value);
return value;
};
WebStorageSharedClientState.prototype.setItem = function (key, value) {
logDebug(LOG_TAG$a, 'SET', key, value);
this.storage.setItem(key, value);
};
WebStorageSharedClientState.prototype.removeItem = function (key) {
logDebug(LOG_TAG$a, 'REMOVE', key);
this.storage.removeItem(key);
};
WebStorageSharedClientState.prototype.handleWebStorageEvent = function (event) {
var _this = this;
// Note: The function is typed to take Event to be interface-compatible with
// `Window.addEventListener`.
var storageEvent = event;
if (storageEvent.storageArea === this.storage) {
logDebug(LOG_TAG$a, 'EVENT', storageEvent.key, storageEvent.newValue);
if (storageEvent.key === this.localClientStorageKey) {
logError('Received WebStorage notification for local change. Another client might have ' +
'garbage-collected our state');
return;
}
this.queue.enqueueRetryable(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var clientState, clientId, mutationMetadata, queryTargetMetadata, onlineState, sequenceNumber;
return tslib.__generator(this, function (_d) {
if (!this.started) {
this.earlyEvents.push(storageEvent);
return [2 /*return*/];
}
if (storageEvent.key === null) {
return [2 /*return*/];
}
if (this.clientStateKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue != null) {
clientState = this.fromWebStorageClientState(storageEvent.key, storageEvent.newValue);
if (clientState) {
return [2 /*return*/, this.handleClientStateEvent(clientState.clientId, clientState)];
}
}
else {
clientId = this.fromWebStorageClientStateKey(storageEvent.key);
return [2 /*return*/, this.handleClientStateEvent(clientId, null)];
}
}
else if (this.mutationBatchKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue !== null) {
mutationMetadata = this.fromWebStorageMutationMetadata(storageEvent.key, storageEvent.newValue);
if (mutationMetadata) {
return [2 /*return*/, this.handleMutationBatchEvent(mutationMetadata)];
}
}
}
else if (this.queryTargetKeyRe.test(storageEvent.key)) {
if (storageEvent.newValue !== null) {
queryTargetMetadata = this.fromWebStorageQueryTargetMetadata(storageEvent.key, storageEvent.newValue);
if (queryTargetMetadata) {
return [2 /*return*/, this.handleQueryTargetEvent(queryTargetMetadata)];
}
}
}
else if (storageEvent.key === this.onlineStateKey) {
if (storageEvent.newValue !== null) {
onlineState = this.fromWebStorageOnlineState(storageEvent.newValue);
if (onlineState) {
return [2 /*return*/, this.handleOnlineStateEvent(onlineState)];
}
}
}
else if (storageEvent.key === this.sequenceNumberKey) {
sequenceNumber = fromWebStorageSequenceNumber(storageEvent.newValue);
if (sequenceNumber !== ListenSequence.INVALID) {
this.sequenceNumberHandler(sequenceNumber);
}
}
else if (storageEvent.key === this.bundleLoadedKey) {
return [2 /*return*/, this.syncEngine.synchronizeWithChangedDocuments()];
}
return [2 /*return*/];
});
}); });
}
};
Object.defineProperty(WebStorageSharedClientState.prototype, "localClientState", {
get: function () {
return this.activeClients.get(this.localClientId);
},
enumerable: false,
configurable: true
});
WebStorageSharedClientState.prototype.persistClientState = function () {
this.setItem(this.localClientStorageKey, this.localClientState.toWebStorageJSON());
};
WebStorageSharedClientState.prototype.persistMutationState = function (batchId, state, error) {
var mutationState = new MutationMetadata(this.currentUser, batchId, state, error);
var mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
this.setItem(mutationKey, mutationState.toWebStorageJSON());
};
WebStorageSharedClientState.prototype.removeMutationState = function (batchId) {
var mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
this.removeItem(mutationKey);
};
WebStorageSharedClientState.prototype.persistOnlineState = function (onlineState) {
var entry = {
clientId: this.localClientId,
onlineState: onlineState
};
this.storage.setItem(this.onlineStateKey, JSON.stringify(entry));
};
WebStorageSharedClientState.prototype.persistQueryTargetState = function (targetId, state, error) {
var targetKey = createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId);
var targetMetadata = new QueryTargetMetadata(targetId, state, error);
this.setItem(targetKey, targetMetadata.toWebStorageJSON());
};
WebStorageSharedClientState.prototype.persistBundleLoadedState = function () {
this.setItem(this.bundleLoadedKey, 'value-not-used');
};
/**
* Parses a client state key in WebStorage. Returns null if the key does not
* match the expected key format.
*/
WebStorageSharedClientState.prototype.fromWebStorageClientStateKey = function (key) {
var match = this.clientStateKeyRe.exec(key);
return match ? match[1] : null;
};
/**
* Parses a client state in WebStorage. Returns 'null' if the value could not
* be parsed.
*/
WebStorageSharedClientState.prototype.fromWebStorageClientState = function (key, value) {
var clientId = this.fromWebStorageClientStateKey(key);
return RemoteClientState.fromWebStorageEntry(clientId, value);
};
/**
* Parses a mutation batch state in WebStorage. Returns 'null' if the value
* could not be parsed.
*/
WebStorageSharedClientState.prototype.fromWebStorageMutationMetadata = function (key, value) {
var match = this.mutationBatchKeyRe.exec(key);
var batchId = Number(match[1]);
var userId = match[2] !== undefined ? match[2] : null;
return MutationMetadata.fromWebStorageEntry(new User(userId), batchId, value);
};
/**
* Parses a query target state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
WebStorageSharedClientState.prototype.fromWebStorageQueryTargetMetadata = function (key, value) {
var match = this.queryTargetKeyRe.exec(key);
var targetId = Number(match[1]);
return QueryTargetMetadata.fromWebStorageEntry(targetId, value);
};
/**
* Parses an online state from WebStorage. Returns 'null' if the value
* could not be parsed.
*/
WebStorageSharedClientState.prototype.fromWebStorageOnlineState = function (value) {
return SharedOnlineState.fromWebStorageEntry(value);
};
WebStorageSharedClientState.prototype.handleMutationBatchEvent = function (mutationBatch) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
if (mutationBatch.user.uid !== this.currentUser.uid) {
logDebug(LOG_TAG$a, "Ignoring mutation for non-active user " + mutationBatch.user.uid);
return [2 /*return*/];
}
return [2 /*return*/, this.syncEngine.applyBatchState(mutationBatch.batchId, mutationBatch.state, mutationBatch.error)];
});
});
};
WebStorageSharedClientState.prototype.handleQueryTargetEvent = function (targetMetadata) {
return this.syncEngine.applyTargetState(targetMetadata.targetId, targetMetadata.state, targetMetadata.error);
};
WebStorageSharedClientState.prototype.handleClientStateEvent = function (clientId, clientState) {
var _this = this;
var updatedClients = clientState
? this.activeClients.insert(clientId, clientState)
: this.activeClients.remove(clientId);
var existingTargets = this.extractActiveQueryTargets(this.activeClients);
var newTargets = this.extractActiveQueryTargets(updatedClients);
var addedTargets = [];
var removedTargets = [];
newTargets.forEach(function (targetId) {
if (!existingTargets.has(targetId)) {
addedTargets.push(targetId);
}
});
existingTargets.forEach(function (targetId) {
if (!newTargets.has(targetId)) {
removedTargets.push(targetId);
}
});
return this.syncEngine.applyActiveTargetsChange(addedTargets, removedTargets).then(function () {
_this.activeClients = updatedClients;
});
};
WebStorageSharedClientState.prototype.handleOnlineStateEvent = function (onlineState) {
// We check whether the client that wrote this online state is still active
// by comparing its client ID to the list of clients kept active in
// IndexedDb. If a client does not update their IndexedDb client state
// within 5 seconds, it is considered inactive and we don't emit an online
// state event.
if (this.activeClients.get(onlineState.clientId)) {
this.onlineStateHandler(onlineState.onlineState);
}
};
WebStorageSharedClientState.prototype.extractActiveQueryTargets = function (clients) {
var activeTargets = targetIdSet();
clients.forEach(function (key, value) {
activeTargets = activeTargets.unionWith(value.activeTargetIds);
});
return activeTargets;
};
return WebStorageSharedClientState;
}());
function fromWebStorageSequenceNumber(seqString) {
var sequenceNumber = ListenSequence.INVALID;
if (seqString != null) {
try {
var parsed = JSON.parse(seqString);
hardAssert(typeof parsed === 'number');
sequenceNumber = parsed;
}
catch (e) {
logError(LOG_TAG$a, 'Failed to read sequence number from WebStorage', e);
}
}
return sequenceNumber;
}
/**
* `MemorySharedClientState` is a simple implementation of SharedClientState for
* clients using memory persistence. The state in this class remains fully
* isolated and no synchronization is performed.
*/
var MemorySharedClientState = /** @class */ (function () {
function MemorySharedClientState() {
this.localState = new LocalClientState();
this.queryState = {};
this.onlineStateHandler = null;
this.sequenceNumberHandler = null;
}
MemorySharedClientState.prototype.addPendingMutation = function (batchId) {
// No op.
};
MemorySharedClientState.prototype.updateMutationState = function (batchId, state, error) {
// No op.
};
MemorySharedClientState.prototype.addLocalQueryTarget = function (targetId) {
this.localState.addQueryTarget(targetId);
return this.queryState[targetId] || 'not-current';
};
MemorySharedClientState.prototype.updateQueryState = function (targetId, state, error) {
this.queryState[targetId] = state;
};
MemorySharedClientState.prototype.removeLocalQueryTarget = function (targetId) {
this.localState.removeQueryTarget(targetId);
};
MemorySharedClientState.prototype.isLocalQueryTarget = function (targetId) {
return this.localState.activeTargetIds.has(targetId);
};
MemorySharedClientState.prototype.clearQueryState = function (targetId) {
delete this.queryState[targetId];
};
MemorySharedClientState.prototype.getAllActiveQueryTargets = function () {
return this.localState.activeTargetIds;
};
MemorySharedClientState.prototype.isActiveQueryTarget = function (targetId) {
return this.localState.activeTargetIds.has(targetId);
};
MemorySharedClientState.prototype.start = function () {
this.localState = new LocalClientState();
return Promise.resolve();
};
MemorySharedClientState.prototype.handleUserChange = function (user, removedBatchIds, addedBatchIds) {
// No op.
};
MemorySharedClientState.prototype.setOnlineState = function (onlineState) {
// No op.
};
MemorySharedClientState.prototype.shutdown = function () { };
MemorySharedClientState.prototype.writeSequenceNumber = function (sequenceNumber) { };
MemorySharedClientState.prototype.notifyBundleLoaded = function () {
// No op.
};
return MemorySharedClientState;
}());
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var NoopConnectivityMonitor = /** @class */ (function () {
function NoopConnectivityMonitor() {
}
NoopConnectivityMonitor.prototype.addCallback = function (callback) {
// No-op.
};
NoopConnectivityMonitor.prototype.shutdown = function () {
// No-op.
};
return NoopConnectivityMonitor;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides a simple helper class that implements the Stream interface to
* bridge to other implementations that are streams but do not implement the
* interface. The stream callbacks are invoked with the callOn... methods.
*/
var StreamBridge = /** @class */ (function () {
function StreamBridge(args) {
this.sendFn = args.sendFn;
this.closeFn = args.closeFn;
}
StreamBridge.prototype.onOpen = function (callback) {
this.wrappedOnOpen = callback;
};
StreamBridge.prototype.onClose = function (callback) {
this.wrappedOnClose = callback;
};
StreamBridge.prototype.onMessage = function (callback) {
this.wrappedOnMessage = callback;
};
StreamBridge.prototype.close = function () {
this.closeFn();
};
StreamBridge.prototype.send = function (msg) {
this.sendFn(msg);
};
StreamBridge.prototype.callOnOpen = function () {
this.wrappedOnOpen();
};
StreamBridge.prototype.callOnClose = function (err) {
this.wrappedOnClose(err);
};
StreamBridge.prototype.callOnMessage = function (msg) {
this.wrappedOnMessage(msg);
};
return StreamBridge;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Utilities for dealing with node.js-style APIs. See nodePromise for more
* details.
*/
/**
* Creates a node-style callback that resolves or rejects a new Promise. The
* callback is passed to the given action which can then use the callback as
* a parameter to a node-style function.
*
* The intent is to directly bridge a node-style function (which takes a
* callback) into a Promise without manually converting between the node-style
* callback and the promise at each call.
*
* In effect it allows you to convert:
*
* @example
* new Promise((resolve: (value?: fs.Stats) => void,
* reject: (error?: any) => void) => {
* fs.stat(path, (error?: any, stat?: fs.Stats) => {
* if (error) {
* reject(error);
* } else {
* resolve(stat);
* }
* });
* });
*
* Into
* @example
* nodePromise((callback: NodeCallback) => {
* fs.stat(path, callback);
* });
*
* @param action - a function that takes a node-style callback as an argument
* and then uses that callback to invoke some node-style API.
* @returns a new Promise which will be rejected if the callback is given the
* first Error parameter or will resolve to the value given otherwise.
*/
function nodePromise(action) {
return new Promise(function (resolve, reject) {
action(function (error, value) {
if (error) {
reject(error);
}
else {
resolve(value);
}
});
});
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$9 = 'Connection';
var X_GOOG_API_CLIENT_VALUE = "gl-node/" + process.versions.node + " fire/" + SDK_VERSION + " grpc/" + package_json.version;
function createMetadata(databasePath, token, appId) {
hardAssert(token === null || token.type === 'OAuth');
var metadata = new grpcJs.Metadata();
if (token) {
for (var header in token.authHeaders) {
if (token.authHeaders.hasOwnProperty(header)) {
metadata.set(header, token.authHeaders[header]);
}
}
}
if (appId) {
metadata.set('X-Firebase-GMPID', appId);
}
metadata.set('X-Goog-Api-Client', X_GOOG_API_CLIENT_VALUE);
// This header is used to improve routing and project isolation by the
// backend.
metadata.set('Google-Cloud-Resource-Prefix', databasePath);
return metadata;
}
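/**
* Illustrative call to createMetadata() above (the database path, token and
* app ID shown are assumptions, not real values):
*
* @example
* var metadata = createMetadata(
*   'projects/my-project/databases/(default)',
*   { type: 'OAuth', authHeaders: { Authorization: 'Bearer <access-token>' } },
*   'my-app-id');
* // metadata now carries the Authorization header plus X-Firebase-GMPID,
* // X-Goog-Api-Client and Google-Cloud-Resource-Prefix.
*/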
/**
* A Connection implemented by GRPC-Node.
*/
var GrpcConnection = /** @class */ (function () {
function GrpcConnection(protos, databaseInfo) {
this.databaseInfo = databaseInfo;
// We cache stubs for the most-recently-used token.
this.cachedStub = null;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
this.firestore = protos['google']['firestore']['v1'];
this.databasePath = "projects/" + databaseInfo.databaseId.projectId + "/databases/" + databaseInfo.databaseId.database;
}
GrpcConnection.prototype.ensureActiveStub = function () {
if (!this.cachedStub) {
logDebug(LOG_TAG$9, 'Creating Firestore stub.');
var credentials$1 = this.databaseInfo.ssl
? grpcJs.credentials.createSsl()
: grpcJs.credentials.createInsecure();
this.cachedStub = new this.firestore.Firestore(this.databaseInfo.host, credentials$1);
}
return this.cachedStub;
};
GrpcConnection.prototype.invokeRPC = function (rpcName, path, request, token) {
var stub = this.ensureActiveStub();
var metadata = createMetadata(this.databasePath, token, this.databaseInfo.appId);
var jsonRequest = Object.assign({ database: this.databasePath }, request);
return nodePromise(function (callback) {
logDebug(LOG_TAG$9, "RPC '" + rpcName + "' invoked with request:", request);
return stub[rpcName](jsonRequest, metadata, function (grpcError, value) {
if (grpcError) {
logDebug(LOG_TAG$9, "RPC '" + rpcName + "' failed with error:", grpcError);
callback(new FirestoreError(mapCodeFromRpcCode(grpcError.code), grpcError.message));
}
else {
logDebug(LOG_TAG$9, "RPC '" + rpcName + "' completed with response:", value);
callback(undefined, value);
}
});
});
};
GrpcConnection.prototype.invokeStreamingRPC = function (rpcName, path, request, token) {
var results = [];
var responseDeferred = new Deferred();
logDebug(LOG_TAG$9, "RPC '" + rpcName + "' invoked (streaming) with request:", request);
var stub = this.ensureActiveStub();
var metadata = createMetadata(this.databasePath, token, this.databaseInfo.appId);
var jsonRequest = Object.assign(Object.assign({}, request), { database: this.databasePath });
var stream = stub[rpcName](jsonRequest, metadata);
stream.on('data', function (response) {
logDebug(LOG_TAG$9, "RPC " + rpcName + " received result:", response);
results.push(response);
});
stream.on('end', function () {
logDebug(LOG_TAG$9, "RPC '" + rpcName + "' completed.");
responseDeferred.resolve(results);
});
stream.on('error', function (grpcError) {
logDebug(LOG_TAG$9, "RPC '" + rpcName + "' failed with error:", grpcError);
var code = mapCodeFromRpcCode(grpcError.code);
responseDeferred.reject(new FirestoreError(code, grpcError.message));
});
return responseDeferred.promise;
};
// TODO(mikelehen): This "method" is a monster. Should be refactored.
GrpcConnection.prototype.openStream = function (rpcName, token) {
var stub = this.ensureActiveStub();
var metadata = createMetadata(this.databasePath, token, this.databaseInfo.appId);
var grpcStream = stub[rpcName](metadata);
var closed = false;
var close = function (err) {
if (!closed) {
closed = true;
stream.callOnClose(err);
grpcStream.end();
}
};
var stream = new StreamBridge({
sendFn: function (msg) {
if (!closed) {
logDebug(LOG_TAG$9, 'GRPC stream sending:', msg);
try {
grpcStream.write(msg);
}
catch (e) {
// This probably means we didn't conform to the proto. Make sure to
// log the message we sent.
logError('Failure sending:', msg);
logError('Error:', e);
throw e;
}
}
else {
logDebug(LOG_TAG$9, 'Not sending because gRPC stream is closed:', msg);
}
},
closeFn: function () {
logDebug(LOG_TAG$9, 'GRPC stream closed locally via close().');
close();
}
});
grpcStream.on('data', function (msg) {
if (!closed) {
logDebug(LOG_TAG$9, 'GRPC stream received:', msg);
stream.callOnMessage(msg);
}
});
grpcStream.on('end', function () {
logDebug(LOG_TAG$9, 'GRPC stream ended.');
close();
});
grpcStream.on('error', function (grpcError) {
if (!closed) {
logWarn(LOG_TAG$9, 'GRPC stream error. Code:', grpcError.code, 'Message:', grpcError.message);
var code = mapCodeFromRpcCode(grpcError.code);
close(new FirestoreError(code, grpcError.message));
}
});
logDebug(LOG_TAG$9, 'Opening GRPC stream');
// TODO(dimond): Since grpc has no explicit open status (or does it?) we
// simulate an onOpen in the next loop after the stream has had its listeners
// registered.
setTimeout(function () {
stream.callOnOpen();
}, 0);
return stream;
};
return GrpcConnection;
}());
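/**
* Minimal sketch (assumed values, not part of the SDK) of constructing the
* GrpcConnection above and invoking a unary RPC. Note that the `path`
* argument of invokeRPC is not used by this implementation; the database is
* attached to the request instead.
*
* @example
* var connection = new GrpcConnection(loadProtos(), {
*   databaseId: { projectId: 'my-project', database: '(default)' },
*   host: 'firestore.googleapis.com',
*   ssl: true,
*   appId: null
* });
* connection
*   .invokeRPC('Commit', 'unused-path', { writes: [] }, null)
*   .then(function (response) { console.log('commit response', response); });
*/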
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Used by tests so we can match @grpc/proto-loader behavior. */
var protoLoaderOptions = {
longs: String,
enums: String,
defaults: true,
oneofs: false
};
/**
* Loads the protocol buffer definitions for Firestore.
*
* @returns The GrpcObject representing our protos.
*/
function loadProtos() {
var root = path.resolve(__dirname, "src/protos");
var firestoreProtoFile = path.join(root, 'google/firestore/v1/firestore.proto');
var packageDefinition = protoLoader.loadSync(firestoreProtoFile, Object.assign(Object.assign({}, protoLoaderOptions), { includeDirs: [root] }));
return grpcJs.loadPackageDefinition(packageDefinition);
}
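/**
* Example (illustrative) of consuming the loaded definitions, mirroring what
* GrpcConnection.ensureActiveStub() does above:
*
* @example
* var protos = loadProtos();
* var Firestore = protos['google']['firestore']['v1'].Firestore;
* var stub = new Firestore('firestore.googleapis.com', grpcJs.credentials.createSsl());
*/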
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Loads the GRPC stack */
function newConnection(databaseInfo) {
var protos = loadProtos();
return new GrpcConnection(protos, databaseInfo);
}
/** Return the Platform-specific connectivity monitor. */
function newConnectivityMonitor() {
return new NoopConnectivityMonitor();
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** The Platform's 'window' implementation or null if not available. */
function getWindow() {
if (process.env.USE_MOCK_PERSISTENCE === 'YES') {
// eslint-disable-next-line no-restricted-globals
return window;
}
return null;
}
/** The Platform's 'document' implementation or null if not available. */
function getDocument() {
return null;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function newSerializer(databaseId) {
return new JsonProtoSerializer(databaseId, /* useProto3Json= */ false);
}
/**
* An instance of the Platform's 'TextEncoder' implementation.
*/
function newTextEncoder() {
return new util$1.TextEncoder();
}
/**
* An instance of the Platform's 'TextDecoder' implementation.
*/
function newTextDecoder() {
return new util$1.TextDecoder('utf-8');
}
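/**
* Round-trip example (illustrative): the two helpers above are inverses for
* UTF-8 text.
*
* @example
* var bytes = newTextEncoder().encode('firestore'); // Uint8Array
* var text = newTextDecoder().decode(bytes);        // 'firestore'
*/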
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$8 = 'ExponentialBackoff';
/**
* Initial backoff time in milliseconds after an error.
* Set to 1s according to https://cloud.google.com/apis/design/errors.
*/
var DEFAULT_BACKOFF_INITIAL_DELAY_MS = 1000;
var DEFAULT_BACKOFF_FACTOR = 1.5;
/** Maximum backoff time in milliseconds */
var DEFAULT_BACKOFF_MAX_DELAY_MS = 60 * 1000;
/**
* A helper for running delayed tasks following an exponential backoff curve
* between attempts.
*
* Each delay is made up of a "base" delay which follows the exponential
* backoff curve, and a +/- 50% "jitter" that is calculated and added to the
* base delay. This prevents clients from accidentally synchronizing their
* delays causing spikes of load to the backend.
*/
var ExponentialBackoff = /** @class */ (function () {
function ExponentialBackoff(
/**
* The AsyncQueue to run backoff operations on.
*/
queue,
/**
* The ID to use when scheduling backoff operations on the AsyncQueue.
*/
timerId,
/**
* The initial delay (used as the base delay on the first retry attempt).
* Note that jitter will still be applied, so the actual delay could be as
* little as 0.5*initialDelayMs.
*/
initialDelayMs,
/**
* The multiplier to use to determine the extended base delay after each
* attempt.
*/
backoffFactor,
/**
* The maximum base delay after which no further backoff is performed.
* Note that jitter will still be applied, so the actual delay could be as
* much as 1.5*maxDelayMs.
*/
maxDelayMs) {
if (initialDelayMs === void 0) { initialDelayMs = DEFAULT_BACKOFF_INITIAL_DELAY_MS; }
if (backoffFactor === void 0) { backoffFactor = DEFAULT_BACKOFF_FACTOR; }
if (maxDelayMs === void 0) { maxDelayMs = DEFAULT_BACKOFF_MAX_DELAY_MS; }
this.queue = queue;
this.timerId = timerId;
this.initialDelayMs = initialDelayMs;
this.backoffFactor = backoffFactor;
this.maxDelayMs = maxDelayMs;
this.currentBaseMs = 0;
this.timerPromise = null;
/** The last backoff attempt, as epoch milliseconds. */
this.lastAttemptTime = Date.now();
this.reset();
}
/**
* Resets the backoff delay.
*
* The very next backoffAndWait() will have no delay. If it is called again
* (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
* subsequent ones will increase according to the backoffFactor.
*/
ExponentialBackoff.prototype.reset = function () {
this.currentBaseMs = 0;
};
/**
* Resets the backoff delay to the maximum delay (e.g. for use after a
* RESOURCE_EXHAUSTED error).
*/
ExponentialBackoff.prototype.resetToMax = function () {
this.currentBaseMs = this.maxDelayMs;
};
/**
* Schedules `op` to run after the current backoff delay (plus jitter) and
* increases the delay for any subsequent attempts. If there was a pending
* backoff operation already, it will be canceled.
*/
ExponentialBackoff.prototype.backoffAndRun = function (op) {
var _this = this;
// Cancel any pending backoff operation.
this.cancel();
// First schedule using the current base (which may be 0 and should be
// honored as such).
var desiredDelayWithJitterMs = Math.floor(this.currentBaseMs + this.jitterDelayMs());
// Guard against lastAttemptTime being in the future due to a clock change.
var delaySoFarMs = Math.max(0, Date.now() - this.lastAttemptTime);
// Guard against the backoff delay already being past.
var remainingDelayMs = Math.max(0, desiredDelayWithJitterMs - delaySoFarMs);
if (remainingDelayMs > 0) {
logDebug(LOG_TAG$8, "Backing off for " + remainingDelayMs + " ms " +
("(base delay: " + this.currentBaseMs + " ms, ") +
("delay with jitter: " + desiredDelayWithJitterMs + " ms, ") +
("last attempt: " + delaySoFarMs + " ms ago)"));
}
this.timerPromise = this.queue.enqueueAfterDelay(this.timerId, remainingDelayMs, function () {
_this.lastAttemptTime = Date.now();
return op();
});
// Apply backoff factor to determine next delay and ensure it is within
// bounds.
this.currentBaseMs *= this.backoffFactor;
if (this.currentBaseMs < this.initialDelayMs) {
this.currentBaseMs = this.initialDelayMs;
}
if (this.currentBaseMs > this.maxDelayMs) {
this.currentBaseMs = this.maxDelayMs;
}
};
ExponentialBackoff.prototype.skipBackoff = function () {
if (this.timerPromise !== null) {
this.timerPromise.skipDelay();
this.timerPromise = null;
}
};
ExponentialBackoff.prototype.cancel = function () {
if (this.timerPromise !== null) {
this.timerPromise.cancel();
this.timerPromise = null;
}
};
/** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */
ExponentialBackoff.prototype.jitterDelayMs = function () {
return (Math.random() - 0.5) * this.currentBaseMs;
};
return ExponentialBackoff;
}());
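/**
* Illustrative delay progression with the defaults above, ignoring jitter:
* after reset() the first backoffAndRun() uses a 0 ms base; the base then
* becomes 1000 ms (clamped to initialDelayMs), then 1500 ms, 2250 ms, ...,
* capped at 60000 ms. Each scheduled delay additionally gets +/-50% jitter.
*
* @example
* // `queue`, `timerId` and `attemptConnection` are assumed to exist.
* var backoff = new ExponentialBackoff(queue, timerId);
* backoff.backoffAndRun(function () { return attemptConnection(); });
* backoff.reset(); // the next attempt runs without delay again
*/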
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$7 = 'PersistentStream';
/** The time a stream stays open after it is marked idle. */
var IDLE_TIMEOUT_MS = 60 * 1000;
/**
* A PersistentStream is an abstract base class that represents a streaming RPC
* to the Firestore backend. It's built on top of the connection's own support
* for streaming RPCs, and adds several critical features for our clients:
*
* - Exponential backoff on failure
* - Authentication via CredentialsProvider
* - Dispatching all callbacks into the shared worker queue
* - Closing idle streams after 60 seconds of inactivity
*
* Subclasses of PersistentStream implement serialization of models to and
* from the JSON representation of the protocol buffers for a specific
* streaming RPC.
*
* ## Starting and Stopping
*
* Streaming RPCs are stateful and need to be start()ed before messages can
* be sent and received. The PersistentStream will call the onOpen() function
* of the listener once the stream is ready to accept requests.
*
* Should a start() fail, PersistentStream will call the registered onClose()
* listener with a FirestoreError indicating what went wrong.
*
* A PersistentStream can be started and stopped repeatedly.
*
* Generic types:
* SendType: The type of the outgoing message of the underlying
* connection stream
* ReceiveType: The type of the incoming message of the underlying
* connection stream
* ListenerType: The type of the listener that will be used for callbacks
*/
var PersistentStream = /** @class */ (function () {
function PersistentStream(queue, connectionTimerId, idleTimerId, connection, credentialsProvider, listener) {
this.queue = queue;
this.idleTimerId = idleTimerId;
this.connection = connection;
this.credentialsProvider = credentialsProvider;
this.listener = listener;
this.state = 0 /* Initial */;
/**
* A close count that's incremented every time the stream is closed; used by
* getCloseGuardedDispatcher() to invalidate callbacks that happen after
* close.
*/
this.closeCount = 0;
this.idleTimer = null;
this.stream = null;
this.backoff = new ExponentialBackoff(queue, connectionTimerId);
}
/**
* Returns true if start() has been called and no error has occurred. True
* indicates the stream is open or in the process of opening (which
* encompasses respecting backoff, getting auth tokens, and starting the
* actual RPC). Use isOpen() to determine if the stream is open and ready for
* outbound requests.
*/
PersistentStream.prototype.isStarted = function () {
return (this.state === 1 /* Starting */ ||
this.state === 2 /* Open */ ||
this.state === 4 /* Backoff */);
};
/**
* Returns true if the underlying RPC is open (the onOpen() listener has been
* called) and the stream is ready for outbound requests.
*/
PersistentStream.prototype.isOpen = function () {
return this.state === 2 /* Open */;
};
/**
* Starts the RPC. Only allowed if isStarted() returns false. The stream is
* not immediately ready for use: onOpen() will be invoked when the RPC is
* ready for outbound requests, at which point isOpen() will return true.
*
* When start returns, isStarted() will return true.
*/
PersistentStream.prototype.start = function () {
if (this.state === 3 /* Error */) {
this.performBackoff();
return;
}
this.auth();
};
/**
* Stops the RPC. This call is idempotent and allowed regardless of the
* current isStarted() state.
*
* When stop returns, isStarted() and isOpen() will both return false.
*/
PersistentStream.prototype.stop = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!this.isStarted()) return [3 /*break*/, 2];
return [4 /*yield*/, this.close(0 /* Initial */)];
case 1:
_d.sent();
_d.label = 2;
case 2: return [2 /*return*/];
}
});
});
};
/**
* After an error the stream will usually back off on the next attempt to
* start it. If the error warrants an immediate restart of the stream, the
* sender can use this to indicate that the receiver should not back off.
*
* Each error will call the onClose() listener. That function can decide to
* inhibit backoff if required.
*/
PersistentStream.prototype.inhibitBackoff = function () {
this.state = 0 /* Initial */;
this.backoff.reset();
};
/**
* Marks this stream as idle. If no further actions are performed on the
* stream for one minute, the stream will automatically close itself and
* notify the stream's onClose() handler with Status.OK. The stream will then
* be in a !isStarted() state, requiring the caller to start the stream again
* before further use.
*
* Only streams that are in state 'Open' can be marked idle, as all other
* states imply pending network operations.
*/
PersistentStream.prototype.markIdle = function () {
var _this = this;
// Starts the idle timer if we are in state 'Open' and not already
// running a timer (in which case the previous idle timeout still applies).
if (this.isOpen() && this.idleTimer === null) {
this.idleTimer = this.queue.enqueueAfterDelay(this.idleTimerId, IDLE_TIMEOUT_MS, function () { return _this.handleIdleCloseTimer(); });
}
};
/** Sends a message to the underlying stream. */
PersistentStream.prototype.sendRequest = function (msg) {
this.cancelIdleCheck();
this.stream.send(msg);
};
/** Called by the idle timer when the stream should close due to inactivity. */
PersistentStream.prototype.handleIdleCloseTimer = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
if (this.isOpen()) {
// When timing out an idle stream there's no reason to force the stream into backoff when
// it restarts, so set the stream state to Initial instead of Error.
return [2 /*return*/, this.close(0 /* Initial */)];
}
return [2 /*return*/];
});
});
};
/** Marks the stream as active again. */
PersistentStream.prototype.cancelIdleCheck = function () {
if (this.idleTimer) {
this.idleTimer.cancel();
this.idleTimer = null;
}
};
/**
* Closes the stream and cleans up as necessary:
*
* * closes the underlying GRPC stream;
* * calls the onClose handler with the given 'error';
* * sets internal stream state to 'finalState';
* * adjusts the backoff timer based on the error
*
* A new stream can be opened by calling start().
*
* @param finalState - the intended state of the stream after closing.
* @param error - the error the connection was closed with.
*/
PersistentStream.prototype.close = function (finalState, error) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
// Cancel any outstanding timers (they're guaranteed not to execute).
this.cancelIdleCheck();
this.backoff.cancel();
// Invalidates any stream-related callbacks (e.g. from auth or the
// underlying stream), guaranteeing they won't execute.
this.closeCount++;
if (finalState !== 3 /* Error */) {
// If this is an intentional close ensure we don't delay our next connection attempt.
this.backoff.reset();
}
else if (error && error.code === Code.RESOURCE_EXHAUSTED) {
// Log the error. (Probably either 'quota exceeded' or 'max queue length reached'.)
logError(error.toString());
logError('Using maximum backoff delay to prevent overloading the backend.');
this.backoff.resetToMax();
}
else if (error && error.code === Code.UNAUTHENTICATED) {
// "unauthenticated" error means the token was rejected. Try force refreshing it in case it
// just expired.
this.credentialsProvider.invalidateToken();
}
// Clean up the underlying stream because we are no longer interested in events.
if (this.stream !== null) {
this.tearDown();
this.stream.close();
this.stream = null;
}
// This state must be assigned before calling onClose() to allow the callback to
// inhibit backoff or otherwise manipulate the state in its non-started state.
this.state = finalState;
// Notify the listener that the stream closed.
return [4 /*yield*/, this.listener.onClose(error)];
case 1:
_d.sent();
return [2 /*return*/];
}
});
});
};
/**
* Can be overridden to perform additional cleanup before the stream is closed.
* Calling super.tearDown() is not required.
*/
PersistentStream.prototype.tearDown = function () { };
PersistentStream.prototype.auth = function () {
var _this = this;
this.state = 1 /* Starting */;
var dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
// TODO(mikelehen): Just use dispatchIfNotClosed, but see TODO below.
var closeCount = this.closeCount;
this.credentialsProvider.getToken().then(function (token) {
// Stream can be stopped while waiting for authentication.
// TODO(mikelehen): We really should just use dispatchIfNotClosed
// and let this dispatch onto the queue, but that opened a spec test can
// of worms that I don't want to deal with in this PR.
if (_this.closeCount === closeCount) {
// Normally we'd have to schedule the callback on the AsyncQueue.
// However, the following calls are safe to be called outside the
// AsyncQueue since they don't chain asynchronous calls
_this.startStream(token);
}
}, function (error) {
dispatchIfNotClosed(function () {
var rpcError = new FirestoreError(Code.UNKNOWN, 'Fetching auth token failed: ' + error.message);
return _this.handleStreamClose(rpcError);
});
});
};
PersistentStream.prototype.startStream = function (token) {
var _this = this;
var dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
this.stream = this.startRpc(token);
this.stream.onOpen(function () {
dispatchIfNotClosed(function () {
_this.state = 2 /* Open */;
return _this.listener.onOpen();
});
});
this.stream.onClose(function (error) {
dispatchIfNotClosed(function () {
return _this.handleStreamClose(error);
});
});
this.stream.onMessage(function (msg) {
dispatchIfNotClosed(function () {
return _this.onMessage(msg);
});
});
};
PersistentStream.prototype.performBackoff = function () {
var _this = this;
this.state = 4 /* Backoff */;
this.backoff.backoffAndRun(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
this.state = 0 /* Initial */;
this.start();
return [2 /*return*/];
});
}); });
};
// Visible for tests
PersistentStream.prototype.handleStreamClose = function (error) {
logDebug(LOG_TAG$7, "close with error: " + error);
this.stream = null;
// In theory the stream could close cleanly; however, in our current model
// we never expect this to happen because if we stop a stream ourselves,
// this callback will never be called. To prevent cases where we retry
// without a backoff accidentally, we set the stream to error in all cases.
return this.close(3 /* Error */, error);
};
/**
* Returns a "dispatcher" function that dispatches operations onto the
* AsyncQueue but only runs them if closeCount remains unchanged. This allows
* us to turn auth / stream callbacks into no-ops if the stream is closed /
* re-opened, etc.
*/
PersistentStream.prototype.getCloseGuardedDispatcher = function (startCloseCount) {
var _this = this;
return function (fn) {
_this.queue.enqueueAndForget(function () {
if (_this.closeCount === startCloseCount) {
return fn();
}
else {
logDebug(LOG_TAG$7, 'stream callback skipped by getCloseGuardedDispatcher.');
return Promise.resolve();
}
});
};
};
return PersistentStream;
}());
/**
* A PersistentStream that implements the Listen RPC.
*
* Once the Listen stream has called the onOpen() listener, any number of
* listen() and unlisten() calls can be made to control what changes will be
* sent from the server for ListenResponses.
*/
var PersistentListenStream = /** @class */ (function (_super) {
tslib.__extends(PersistentListenStream, _super);
function PersistentListenStream(queue, connection, credentials, serializer, listener) {
var _this = _super.call(this, queue, "listen_stream_connection_backoff" /* ListenStreamConnectionBackoff */, "listen_stream_idle" /* ListenStreamIdle */, connection, credentials, listener) || this;
_this.serializer = serializer;
return _this;
}
PersistentListenStream.prototype.startRpc = function (token) {
return this.connection.openStream('Listen', token);
};
PersistentListenStream.prototype.onMessage = function (watchChangeProto) {
// A successful response means the stream is healthy
this.backoff.reset();
var watchChange = fromWatchChange(this.serializer, watchChangeProto);
var snapshot = versionFromListenResponse(watchChangeProto);
return this.listener.onWatchChange(watchChange, snapshot);
};
/**
* Registers interest in the results of the given target. If the target
* includes a resumeToken it will be included in the request. Results that
* affect the target will be streamed back as WatchChange messages that
* reference the targetId.
*/
PersistentListenStream.prototype.watch = function (targetData) {
var request = {};
request.database = getEncodedDatabaseId(this.serializer);
request.addTarget = toTarget(this.serializer, targetData);
var labels = toListenRequestLabels(this.serializer, targetData);
if (labels) {
request.labels = labels;
}
this.sendRequest(request);
};
/**
* Unregisters interest in the results of the target associated with the
* given targetId.
*/
PersistentListenStream.prototype.unwatch = function (targetId) {
var request = {};
request.database = getEncodedDatabaseId(this.serializer);
request.removeTarget = targetId;
this.sendRequest(request);
};
return PersistentListenStream;
}(PersistentStream));
/**
* A Stream that implements the Write RPC.
*
* The Write RPC requires the caller to maintain special streamToken
* state in between calls, to help the server understand which responses the
* client has processed by the time the next request is made. Every response
* will contain a streamToken; this value must be passed to the next
* request.
*
* After calling start() on this stream, the next request must be a handshake,
* containing whatever streamToken is on hand. Once a response to this
* request is received, all pending mutations may be submitted. When
* submitting multiple batches of mutations at the same time, it's
* okay to use the same streamToken for the calls to writeMutations.
*
* TODO(b/33271235): Use proto types
*/
var PersistentWriteStream = /** @class */ (function (_super) {
tslib.__extends(PersistentWriteStream, _super);
function PersistentWriteStream(queue, connection, credentials, serializer, listener) {
var _this = _super.call(this, queue, "write_stream_connection_backoff" /* WriteStreamConnectionBackoff */, "write_stream_idle" /* WriteStreamIdle */, connection, credentials, listener) || this;
_this.serializer = serializer;
_this.handshakeComplete_ = false;
return _this;
}
Object.defineProperty(PersistentWriteStream.prototype, "handshakeComplete", {
/**
* Tracks whether or not a handshake has been successfully exchanged and
* the stream is ready to accept mutations.
*/
get: function () {
return this.handshakeComplete_;
},
enumerable: false,
configurable: true
});
// Override of PersistentStream.start
PersistentWriteStream.prototype.start = function () {
this.handshakeComplete_ = false;
this.lastStreamToken = undefined;
_super.prototype.start.call(this);
};
PersistentWriteStream.prototype.tearDown = function () {
if (this.handshakeComplete_) {
this.writeMutations([]);
}
};
PersistentWriteStream.prototype.startRpc = function (token) {
return this.connection.openStream('Write', token);
};
PersistentWriteStream.prototype.onMessage = function (responseProto) {
// Always capture the last stream token.
hardAssert(!!responseProto.streamToken);
this.lastStreamToken = responseProto.streamToken;
if (!this.handshakeComplete_) {
// The first response is always the handshake response
hardAssert(!responseProto.writeResults || responseProto.writeResults.length === 0);
this.handshakeComplete_ = true;
return this.listener.onHandshakeComplete();
}
else {
// A successful first write response means the stream is healthy.
// Note that we could consider a successful handshake healthy; however,
// the write itself might be causing an error we want to back off from.
this.backoff.reset();
var results = fromWriteResults(responseProto.writeResults, responseProto.commitTime);
var commitVersion = fromVersion(responseProto.commitTime);
return this.listener.onMutationResult(commitVersion, results);
}
};
/**
* Sends an initial streamToken to the server, performing the handshake
* required to make the StreamingWrite RPC work. Subsequent
* calls should wait until onHandshakeComplete has been called.
*/
PersistentWriteStream.prototype.writeHandshake = function () {
// TODO(dimond): Support stream resumption. We intentionally do not set the
// stream token on the handshake, ignoring any stream token we might have.
var request = {};
request.database = getEncodedDatabaseId(this.serializer);
this.sendRequest(request);
};
/** Sends a group of mutations to the Firestore backend to apply. */
PersistentWriteStream.prototype.writeMutations = function (mutations) {
var _this = this;
var request = {
streamToken: this.lastStreamToken,
writes: mutations.map(function (mutation) { return toMutation(_this.serializer, mutation); })
};
this.sendRequest(request);
};
return PersistentWriteStream;
}(PersistentStream));
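/**
* Sketch of the Write stream protocol described above (illustrative;
* `datastore`, `asyncQueue` and `mutations` are assumed to exist, and
* newPersistentWriteStream is defined further below):
*
* @example
* var writeStream = newPersistentWriteStream(datastore, asyncQueue, {
*   onOpen: function () {
*     writeStream.writeHandshake(); // the handshake must be the first request
*     return Promise.resolve();
*   },
*   onHandshakeComplete: function () {
*     writeStream.writeMutations(mutations); // mutations may now be submitted
*     return Promise.resolve();
*   },
*   onMutationResult: function (version, results) { return Promise.resolve(); },
*   onClose: function (error) { return Promise.resolve(); }
* });
* writeStream.start();
*/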
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Datastore and its related methods are a wrapper around the external Google
* Cloud Datastore grpc API, which provides an interface that is more convenient
* for the rest of the client SDK architecture to consume.
*/
var Datastore = /** @class */ (function () {
function Datastore() {
}
return Datastore;
}());
/**
* An implementation of Datastore that exposes additional state for internal
* consumption.
*/
var DatastoreImpl = /** @class */ (function (_super) {
tslib.__extends(DatastoreImpl, _super);
function DatastoreImpl(credentials, connection, serializer) {
var _this = _super.call(this) || this;
_this.credentials = credentials;
_this.connection = connection;
_this.serializer = serializer;
_this.terminated = false;
return _this;
}
DatastoreImpl.prototype.verifyInitialized = function () {
if (this.terminated) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
}
};
/** Gets an auth token and invokes the provided RPC. */
DatastoreImpl.prototype.invokeRPC = function (rpcName, path, request) {
var _this = this;
this.verifyInitialized();
return this.credentials
.getToken()
.then(function (token) {
return _this.connection.invokeRPC(rpcName, path, request, token);
})
.catch(function (error) {
if (error.name === 'FirebaseError') {
if (error.code === Code.UNAUTHENTICATED) {
_this.credentials.invalidateToken();
}
throw error;
}
else {
throw new FirestoreError(Code.UNKNOWN, error.toString());
}
});
};
/** Gets an auth token and invokes the provided RPC with streamed results. */
DatastoreImpl.prototype.invokeStreamingRPC = function (rpcName, path, request) {
var _this = this;
this.verifyInitialized();
return this.credentials
.getToken()
.then(function (token) {
return _this.connection.invokeStreamingRPC(rpcName, path, request, token);
})
.catch(function (error) {
if (error.name === 'FirebaseError') {
if (error.code === Code.UNAUTHENTICATED) {
_this.credentials.invalidateToken();
}
throw error;
}
else {
throw new FirestoreError(Code.UNKNOWN, error.toString());
}
});
};
DatastoreImpl.prototype.terminate = function () {
this.terminated = true;
};
return DatastoreImpl;
}(Datastore));
// TODO(firestorexp): Make sure there is only one Datastore instance per
// firestore-exp client.
function newDatastore(credentials, connection, serializer) {
return new DatastoreImpl(credentials, connection, serializer);
}
function invokeCommitRpc(datastore, mutations) {
return tslib.__awaiter(this, void 0, void 0, function () {
var datastoreImpl, path, request;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
datastoreImpl = debugCast(datastore);
path = getEncodedDatabaseId(datastoreImpl.serializer) + '/documents';
request = {
writes: mutations.map(function (m) { return toMutation(datastoreImpl.serializer, m); })
};
return [4 /*yield*/, datastoreImpl.invokeRPC('Commit', path, request)];
case 1:
_d.sent();
return [2 /*return*/];
}
});
});
}
function invokeBatchGetDocumentsRpc(datastore, keys) {
return tslib.__awaiter(this, void 0, void 0, function () {
var datastoreImpl, path, request, response, docs, result;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
datastoreImpl = debugCast(datastore);
path = getEncodedDatabaseId(datastoreImpl.serializer) + '/documents';
request = {
documents: keys.map(function (k) { return toName(datastoreImpl.serializer, k); })
};
return [4 /*yield*/, datastoreImpl.invokeStreamingRPC('BatchGetDocuments', path, request)];
case 1:
response = _d.sent();
docs = new Map();
response.forEach(function (proto) {
var doc = fromBatchGetDocumentsResponse(datastoreImpl.serializer, proto);
docs.set(doc.key.toString(), doc);
});
result = [];
keys.forEach(function (key) {
var doc = docs.get(key.toString());
hardAssert(!!doc);
result.push(doc);
});
return [2 /*return*/, result];
}
});
});
}
function newPersistentWriteStream(datastore, queue, listener) {
var datastoreImpl = debugCast(datastore);
datastoreImpl.verifyInitialized();
return new PersistentWriteStream(queue, datastoreImpl.connection, datastoreImpl.credentials, datastoreImpl.serializer, listener);
}
function newPersistentWatchStream(datastore, queue, listener) {
var datastoreImpl = debugCast(datastore);
datastoreImpl.verifyInitialized();
return new PersistentListenStream(queue, datastoreImpl.connection, datastoreImpl.credentials, datastoreImpl.serializer, listener);
}
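/**
* Illustrative wiring (assumed variable names such as `credentials`,
* `databaseInfo`, `mutations` and `asyncQueue`) of the factories above:
*
* @example
* var datastore = newDatastore(credentials, newConnection(databaseInfo),
*   newSerializer(databaseInfo.databaseId));
* invokeCommitRpc(datastore, mutations).then(function () {
*   console.log('commit acknowledged');
* });
* var watchStream = newPersistentWatchStream(datastore, asyncQueue, {
*   onOpen: function () { return Promise.resolve(); },
*   onClose: function (error) { return Promise.resolve(); },
*   onWatchChange: function (change, snapshot) { return Promise.resolve(); }
* });
* watchStream.start();
*/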
/**
* @license
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$6 = 'OnlineStateTracker';
// To deal with transient failures, we allow multiple stream attempts before
// giving up and transitioning from OnlineState.Unknown to Offline.
// TODO(mikelehen): This used to be set to 2 as a mitigation for b/66228394.
// @jdimond thinks that bug is sufficiently fixed so that we can set this back
// to 1. If that works okay, we could potentially remove this logic entirely.
var MAX_WATCH_STREAM_FAILURES = 1;
// To deal with stream attempts that don't succeed or fail in a timely manner,
// we have a timeout for OnlineState to reach Online or Offline.
// If the timeout is reached, we transition to Offline rather than waiting
// indefinitely.
var ONLINE_STATE_TIMEOUT_MS = 10 * 1000;
/**
* A component used by the RemoteStore to track the OnlineState (that is,
* whether or not the client as a whole should be considered to be online or
* offline), implementing the appropriate heuristics.
*
* In particular, when the client is trying to connect to the backend, we
* allow up to MAX_WATCH_STREAM_FAILURES within ONLINE_STATE_TIMEOUT_MS for
* a connection to succeed. If we have too many failures or the timeout elapses,
* then we set the OnlineState to Offline, and the client will behave as if
* it is offline (get()s will return cached data, etc.).
*/
var OnlineStateTracker = /** @class */ (function () {
function OnlineStateTracker(asyncQueue, onlineStateHandler) {
this.asyncQueue = asyncQueue;
this.onlineStateHandler = onlineStateHandler;
/** The current OnlineState. */
this.state = "Unknown" /* Unknown */;
/**
* A count of consecutive failures to open the stream. If it reaches the
* maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
* Offline.
*/
this.watchStreamFailures = 0;
/**
* A timer that elapses after ONLINE_STATE_TIMEOUT_MS, at which point we
* transition from OnlineState.Unknown to OnlineState.Offline without waiting
* for the stream to actually fail (MAX_WATCH_STREAM_FAILURES times).
*/
this.onlineStateTimer = null;
/**
* Whether the client should log a warning message if it fails to connect to
* the backend (initially true, cleared after a successful stream, or if we've
* logged the message already).
*/
this.shouldWarnClientIsOffline = true;
}
/**
* Called by RemoteStore when a watch stream is started (including on each
* backoff attempt).
*
* If this is the first attempt, it sets the OnlineState to Unknown and starts
* the onlineStateTimer.
*/
OnlineStateTracker.prototype.handleWatchStreamStart = function () {
var _this = this;
if (this.watchStreamFailures === 0) {
this.setAndBroadcast("Unknown" /* Unknown */);
this.onlineStateTimer = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* OnlineStateTimeout */, ONLINE_STATE_TIMEOUT_MS, function () {
_this.onlineStateTimer = null;
_this.logClientOfflineWarningIfNecessary("Backend didn't respond within " + ONLINE_STATE_TIMEOUT_MS / 1000 + " " +
"seconds.");
_this.setAndBroadcast("Offline" /* Offline */);
// NOTE: handleWatchStreamFailure() will continue to increment
// watchStreamFailures even though we are already marked Offline,
// but this is non-harmful.
return Promise.resolve();
});
}
};
/**
* Updates our OnlineState as appropriate after the watch stream reports a
* failure. The first failure moves us to the 'Unknown' state. We then may
* allow multiple failures (based on MAX_WATCH_STREAM_FAILURES) before we
* actually transition to the 'Offline' state.
*/
OnlineStateTracker.prototype.handleWatchStreamFailure = function (error) {
if (this.state === "Online" /* Online */) {
this.setAndBroadcast("Unknown" /* Unknown */);
}
else {
this.watchStreamFailures++;
if (this.watchStreamFailures >= MAX_WATCH_STREAM_FAILURES) {
this.clearOnlineStateTimer();
this.logClientOfflineWarningIfNecessary("Connection failed " + MAX_WATCH_STREAM_FAILURES + " " +
("times. Most recent error: " + error.toString()));
this.setAndBroadcast("Offline" /* Offline */);
}
}
};
/**
* Explicitly sets the OnlineState to the specified state.
*
* Note that this resets our timers / failure counters, etc. used by our
* Offline heuristics, so must not be used in place of
* handleWatchStreamStart() and handleWatchStreamFailure().
*/
OnlineStateTracker.prototype.set = function (newState) {
this.clearOnlineStateTimer();
this.watchStreamFailures = 0;
if (newState === "Online" /* Online */) {
// We've connected to watch at least once. Don't warn the developer
// about being offline going forward.
this.shouldWarnClientIsOffline = false;
}
this.setAndBroadcast(newState);
};
OnlineStateTracker.prototype.setAndBroadcast = function (newState) {
if (newState !== this.state) {
this.state = newState;
this.onlineStateHandler(newState);
}
};
OnlineStateTracker.prototype.logClientOfflineWarningIfNecessary = function (details) {
var message = "Could not reach Cloud Firestore backend. " + details + "\n" +
"This typically indicates that your device does not have a healthy " +
"Internet connection at the moment. The client will operate in offline " +
"mode until it is able to successfully connect to the backend.";
if (this.shouldWarnClientIsOffline) {
logError(message);
this.shouldWarnClientIsOffline = false;
}
else {
logDebug(LOG_TAG$6, message);
}
};
OnlineStateTracker.prototype.clearOnlineStateTimer = function () {
if (this.onlineStateTimer !== null) {
this.onlineStateTimer.cancel();
this.onlineStateTimer = null;
}
};
return OnlineStateTracker;
}());
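/**
* Illustrative walkthrough of the heuristic above. The inline asyncQueue
* stand-in is hypothetical and only implements what the tracker needs.
*
* @example
* var tracker = new OnlineStateTracker({
*   enqueueAfterDelay: function (timerId, delayMs, op) {
*     return { cancel: function () {} };
*   }
* }, function (state) { console.log('OnlineState is now', state); });
* tracker.handleWatchStreamStart();  // state stays Unknown, timeout armed
* tracker.handleWatchStreamFailure(new Error('unavailable'));
* // With MAX_WATCH_STREAM_FAILURES = 1, this first failure already
* // broadcasts 'Offline' (and logs the offline warning once).
*/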
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$5 = 'RemoteStore';
// TODO(b/35853402): Negotiate this with the stream.
var MAX_PENDING_WRITES = 10;
var RemoteStoreImpl = /** @class */ (function () {
function RemoteStoreImpl(
/**
* The local store, used to fill the write pipeline with outbound mutations.
*/
localStore,
/** The client-side proxy for interacting with the backend. */
datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
var _this = this;
this.localStore = localStore;
this.datastore = datastore;
this.asyncQueue = asyncQueue;
this.remoteSyncer = {};
/**
* A list of up to MAX_PENDING_WRITES writes that we have fetched from the
* LocalStore via fillWritePipeline() and have or will send to the write
* stream.
*
* Whenever writePipeline.length > 0 the RemoteStore will attempt to start or
* restart the write stream. When the stream is established the writes in the
* pipeline will be sent in order.
*
* Writes remain in writePipeline until they are acknowledged by the backend
* and thus will automatically be re-sent if the stream is interrupted /
* restarted before they're acknowledged.
*
* Write responses from the backend are linked to their originating request
* purely based on order, and so we can just shift() writes from the front of
* the writePipeline as we receive responses.
*/
this.writePipeline = [];
/**
* A mapping of watched targets that the client cares about tracking; the
* user has explicitly called 'listen' for each of these targets.
*
* These targets may or may not have been sent to or acknowledged by the
* server. On re-establishing the listen stream, these targets should be sent
* to the server. The targets removed with unlistens are removed eagerly
* without waiting for confirmation from the listen stream.
*/
this.listenTargets = new Map();
/**
* A set of reasons for why the RemoteStore may be offline. If empty, the
* RemoteStore may start its network connections.
*/
this.offlineCauses = new Set();
/**
* Event handlers that get called when the network is disabled or enabled.
*
* PORTING NOTE: These functions are used on the Web client to create the
* underlying streams (to support tree-shakeable streams). On Android and iOS,
* the streams are created during construction of RemoteStore.
*/
this.onNetworkStatusChange = [];
this.connectivityMonitor = connectivityMonitor;
this.connectivityMonitor.addCallback(function (_) {
asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!canUseNetwork(this)) return [3 /*break*/, 2];
logDebug(LOG_TAG$5, 'Restarting streams for network reachability change.');
return [4 /*yield*/, restartNetwork(this)];
case 1:
_d.sent();
_d.label = 2;
case 2: return [2 /*return*/];
}
});
}); });
});
this.onlineStateTracker = new OnlineStateTracker(asyncQueue, onlineStateHandler);
}
return RemoteStoreImpl;
}());
function newRemoteStore(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
return new RemoteStoreImpl(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor);
}
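/**
* Illustrative use of the RemoteStore entry points defined below (assumed
* names; in the SDK, RemoteStore is driven by SyncEngine):
*
* @example
* var remoteStore = newRemoteStore(localStore, datastore, asyncQueue,
*   onlineStateHandler, newConnectivityMonitor());
* remoteStoreEnableNetwork(remoteStore)
*   .then(function () { return remoteStoreDisableNetwork(remoteStore); })
*   .then(function () { return remoteStoreShutdown(remoteStore); });
*/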
/** Re-enables the network. Idempotent. */
function remoteStoreEnableNetwork(remoteStore) {
var remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.offlineCauses.delete(0 /* UserDisabled */);
return enableNetworkInternal(remoteStoreImpl);
}
function enableNetworkInternal(remoteStoreImpl) {
return tslib.__awaiter(this, void 0, void 0, function () {
var _i, _d, networkStatusHandler;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
if (!canUseNetwork(remoteStoreImpl)) return [3 /*break*/, 4];
_i = 0, _d = remoteStoreImpl.onNetworkStatusChange;
_e.label = 1;
case 1:
if (!(_i < _d.length)) return [3 /*break*/, 4];
networkStatusHandler = _d[_i];
return [4 /*yield*/, networkStatusHandler(/* enabled= */ true)];
case 2:
_e.sent();
_e.label = 3;
case 3:
_i++;
return [3 /*break*/, 1];
case 4: return [2 /*return*/];
}
});
});
}
/**
* Temporarily disables the network. The network can be re-enabled using
* enableNetwork().
*/
function remoteStoreDisableNetwork(remoteStore) {
return tslib.__awaiter(this, void 0, void 0, function () {
var remoteStoreImpl;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.offlineCauses.add(0 /* UserDisabled */);
return [4 /*yield*/, disableNetworkInternal(remoteStoreImpl)];
case 1:
_d.sent();
// Set the OnlineState to Offline so get()s return from cache, etc.
remoteStoreImpl.onlineStateTracker.set("Offline" /* Offline */);
return [2 /*return*/];
}
});
});
}
function disableNetworkInternal(remoteStoreImpl) {
return tslib.__awaiter(this, void 0, void 0, function () {
var _i, _d, networkStatusHandler;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
_i = 0, _d = remoteStoreImpl.onNetworkStatusChange;
_e.label = 1;
case 1:
if (!(_i < _d.length)) return [3 /*break*/, 4];
networkStatusHandler = _d[_i];
return [4 /*yield*/, networkStatusHandler(/* enabled= */ false)];
case 2:
_e.sent();
_e.label = 3;
case 3:
_i++;
return [3 /*break*/, 1];
case 4: return [2 /*return*/];
}
});
});
}
function remoteStoreShutdown(remoteStore) {
return tslib.__awaiter(this, void 0, void 0, function () {
var remoteStoreImpl;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
remoteStoreImpl = debugCast(remoteStore);
logDebug(LOG_TAG$5, 'RemoteStore shutting down.');
remoteStoreImpl.offlineCauses.add(5 /* Shutdown */);
return [4 /*yield*/, disableNetworkInternal(remoteStoreImpl)];
case 1:
_d.sent();
remoteStoreImpl.connectivityMonitor.shutdown();
// Set the OnlineState to Unknown (rather than Offline) to avoid potentially
// triggering spurious listener events with cached data, etc.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* Unknown */);
return [2 /*return*/];
}
});
});
}
/**
* Starts a new listen for the given target. Uses the resume token if provided.
* It is a no-op if the target of the given `TargetData` is already being
* listened to.
*/
function remoteStoreListen(remoteStore, targetData) {
var remoteStoreImpl = debugCast(remoteStore);
if (remoteStoreImpl.listenTargets.has(targetData.targetId)) {
return;
}
// Mark this as something the client is currently listening for.
remoteStoreImpl.listenTargets.set(targetData.targetId, targetData);
if (shouldStartWatchStream(remoteStoreImpl)) {
// The listen will be sent in onWatchStreamOpen
startWatchStream(remoteStoreImpl);
}
else if (ensureWatchStream(remoteStoreImpl).isOpen()) {
sendWatchRequest(remoteStoreImpl, targetData);
}
}
/**
* Removes the listen from the server. It is a no-op if the given target id is
* not being listened to.
*/
function remoteStoreUnlisten(remoteStore, targetId) {
var remoteStoreImpl = debugCast(remoteStore);
var watchStream = ensureWatchStream(remoteStoreImpl);
remoteStoreImpl.listenTargets.delete(targetId);
if (watchStream.isOpen()) {
sendUnwatchRequest(remoteStoreImpl, targetId);
}
if (remoteStoreImpl.listenTargets.size === 0) {
if (watchStream.isOpen()) {
watchStream.markIdle();
}
else if (canUseNetwork(remoteStoreImpl)) {
// Revert to OnlineState.Unknown if the watch stream is not open and we
// have no listeners, since without any listens to send we cannot
// confirm if the stream is healthy and upgrade to OnlineState.Online.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* Unknown */);
}
}
}
/**
* We need to increment the expected number of pending responses we're due
* from watch so we wait for the ack to process any messages from this target.
*/
function sendWatchRequest(remoteStoreImpl, targetData) {
remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetData.targetId);
ensureWatchStream(remoteStoreImpl).watch(targetData);
}
/**
* We need to increment the expected number of pending responses we're due
* from watch so we wait for the removal on the server before we process any
* messages from this target.
*/
function sendUnwatchRequest(remoteStoreImpl, targetId) {
remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetId);
ensureWatchStream(remoteStoreImpl).unwatch(targetId);
}
function startWatchStream(remoteStoreImpl) {
remoteStoreImpl.watchChangeAggregator = new WatchChangeAggregator({
getRemoteKeysForTarget: function (targetId) { return remoteStoreImpl.remoteSyncer.getRemoteKeysForTarget(targetId); },
getTargetDataForTarget: function (targetId) { return remoteStoreImpl.listenTargets.get(targetId) || null; }
});
ensureWatchStream(remoteStoreImpl).start();
remoteStoreImpl.onlineStateTracker.handleWatchStreamStart();
}
/**
* Returns whether the watch stream should be started because it's necessary
* and has not yet been started.
*/
function shouldStartWatchStream(remoteStoreImpl) {
return (canUseNetwork(remoteStoreImpl) &&
!ensureWatchStream(remoteStoreImpl).isStarted() &&
remoteStoreImpl.listenTargets.size > 0);
}
function canUseNetwork(remoteStore) {
var remoteStoreImpl = debugCast(remoteStore);
return remoteStoreImpl.offlineCauses.size === 0;
}
function cleanUpWatchStreamState(remoteStoreImpl) {
remoteStoreImpl.watchChangeAggregator = undefined;
}
function onWatchStreamOpen(remoteStoreImpl) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
remoteStoreImpl.listenTargets.forEach(function (targetData, targetId) {
sendWatchRequest(remoteStoreImpl, targetData);
});
return [2 /*return*/];
});
});
}
function onWatchStreamClose(remoteStoreImpl, error) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
cleanUpWatchStreamState(remoteStoreImpl);
// If we still need the watch stream, retry the connection.
if (shouldStartWatchStream(remoteStoreImpl)) {
remoteStoreImpl.onlineStateTracker.handleWatchStreamFailure(error);
startWatchStream(remoteStoreImpl);
}
else {
// No need to restart watch stream because there are no active targets.
// The online state is set to unknown because there is no active attempt
// at establishing a connection.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* Unknown */);
}
return [2 /*return*/];
});
});
}
function onWatchStreamChange(remoteStoreImpl, watchChange, snapshotVersion) {
return tslib.__awaiter(this, void 0, void 0, function () {
var e_4, lastRemoteSnapshotVersion, e_5;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
// Mark the client as online since we got a message from the server
remoteStoreImpl.onlineStateTracker.set("Online" /* Online */);
if (!(watchChange instanceof WatchTargetChange &&
watchChange.state === 2 /* Removed */ &&
watchChange.cause)) return [3 /*break*/, 6];
_d.label = 1;
case 1:
_d.trys.push([1, 3, , 5]);
return [4 /*yield*/, handleTargetError(remoteStoreImpl, watchChange)];
case 2:
_d.sent();
return [3 /*break*/, 5];
case 3:
e_4 = _d.sent();
logDebug(LOG_TAG$5, 'Failed to remove targets %s: %s ', watchChange.targetIds.join(','), e_4);
return [4 /*yield*/, disableNetworkUntilRecovery(remoteStoreImpl, e_4)];
case 4:
_d.sent();
return [3 /*break*/, 5];
case 5: return [2 /*return*/];
case 6:
if (watchChange instanceof DocumentWatchChange) {
remoteStoreImpl.watchChangeAggregator.handleDocumentChange(watchChange);
}
else if (watchChange instanceof ExistenceFilterChange) {
remoteStoreImpl.watchChangeAggregator.handleExistenceFilter(watchChange);
}
else {
remoteStoreImpl.watchChangeAggregator.handleTargetChange(watchChange);
}
if (!!snapshotVersion.isEqual(SnapshotVersion.min())) return [3 /*break*/, 13];
_d.label = 7;
case 7:
_d.trys.push([7, 11, , 13]);
return [4 /*yield*/, localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore)];
case 8:
lastRemoteSnapshotVersion = _d.sent();
if (!(snapshotVersion.compareTo(lastRemoteSnapshotVersion) >= 0)) return [3 /*break*/, 10];
// We have received a target change with a global snapshot if the snapshot
// version is not equal to SnapshotVersion.min().
return [4 /*yield*/, raiseWatchSnapshot(remoteStoreImpl, snapshotVersion)];
case 9:
_d.sent();
_d.label = 10;
case 10: return [3 /*break*/, 13];
case 11:
e_5 = _d.sent();
logDebug(LOG_TAG$5, 'Failed to raise snapshot:', e_5);
return [4 /*yield*/, disableNetworkUntilRecovery(remoteStoreImpl, e_5)];
case 12:
_d.sent();
return [3 /*break*/, 13];
case 13: return [2 /*return*/];
}
});
});
}
/**
* Recovery logic for IndexedDB errors that takes the network offline until
* `op` succeeds. Retries are scheduled with backoff using
* `enqueueRetryable()`. If `op()` is not provided, IndexedDB access is
* validated via a generic operation.
*
* The returned Promise is resolved once the network is disabled and before
* any retry attempt.
*/
function disableNetworkUntilRecovery(remoteStoreImpl, e, op) {
return tslib.__awaiter(this, void 0, void 0, function () {
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!isIndexedDbTransactionError(e)) return [3 /*break*/, 2];
remoteStoreImpl.offlineCauses.add(1 /* IndexedDbFailed */);
// Disable network and raise offline snapshots
return [4 /*yield*/, disableNetworkInternal(remoteStoreImpl)];
case 1:
_d.sent();
remoteStoreImpl.onlineStateTracker.set("Offline" /* Offline */);
if (!op) {
// Use a simple read operation to determine if IndexedDB recovered.
// Ideally, we would expose a health check directly on SimpleDb, but
// RemoteStore only has access to persistence through LocalStore.
op = function () { return localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore); };
}
// Probe IndexedDB periodically and re-enable network
remoteStoreImpl.asyncQueue.enqueueRetryable(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
logDebug(LOG_TAG$5, 'Retrying IndexedDB access');
return [4 /*yield*/, op()];
case 1:
_d.sent();
remoteStoreImpl.offlineCauses.delete(1 /* IndexedDbFailed */);
return [4 /*yield*/, enableNetworkInternal(remoteStoreImpl)];
case 2:
_d.sent();
return [2 /*return*/];
}
});
}); });
return [3 /*break*/, 3];
case 2: throw e;
case 3: return [2 /*return*/];
}
});
});
}
/**
* Executes `op`. If `op` fails, takes the network offline until `op`
* succeeds. Returns after the first attempt.
*/
function executeWithRecovery(remoteStoreImpl, op) {
return op().catch(function (e) { return disableNetworkUntilRecovery(remoteStoreImpl, e, op); });
}
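/*
 * Illustrative sketch: callers wrap LocalStore interactions with
 * executeWithRecovery() so that an IndexedDB transaction failure takes the
 * network offline and retries the operation with backoff, while any other
 * error is rethrown. For example (mirroring the call sites further below):
 *
 *   executeWithRecovery(remoteStoreImpl, function () {
 *     return remoteStoreImpl.remoteSyncer.applySuccessfulWrite(success);
 *   });
 */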
/**
* Takes a batch of changes from the Datastore, repackages them as a
* RemoteEvent, and passes that on to the listener, which is typically the
* SyncEngine.
*/
function raiseWatchSnapshot(remoteStoreImpl, snapshotVersion) {
var remoteEvent = remoteStoreImpl.watchChangeAggregator.createRemoteEvent(snapshotVersion);
// Update in-memory resume tokens. LocalStore will update the
// persistent view of these when applying the completed RemoteEvent.
remoteEvent.targetChanges.forEach(function (change, targetId) {
if (change.resumeToken.approximateByteSize() > 0) {
var targetData = remoteStoreImpl.listenTargets.get(targetId);
// A watched target might have been removed already.
if (targetData) {
remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(change.resumeToken, snapshotVersion));
}
}
});
// Re-establish listens for the targets that have been invalidated by
// existence filter mismatches.
remoteEvent.targetMismatches.forEach(function (targetId) {
var targetData = remoteStoreImpl.listenTargets.get(targetId);
if (!targetData) {
// A watched target might have been removed already.
return;
}
// Clear the resume token for the target, since we're in a known mismatch
// state.
remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(ByteString.EMPTY_BYTE_STRING, targetData.snapshotVersion));
// Cause a hard reset by unwatching and rewatching immediately, but
// deliberately don't send a resume token so that we get a full update.
sendUnwatchRequest(remoteStoreImpl, targetId);
// Mark the target we send as being on behalf of an existence filter
// mismatch, but don't actually retain that in listenTargets. This ensures
// that we flag the first re-listen this way without impacting future
// listens of this target (that might happen e.g. on reconnect).
var requestTargetData = new TargetData(targetData.target, targetId, 1 /* ExistenceFilterMismatch */, targetData.sequenceNumber);
sendWatchRequest(remoteStoreImpl, requestTargetData);
});
return remoteStoreImpl.remoteSyncer.applyRemoteEvent(remoteEvent);
}
/** Handles an error on a target */
function handleTargetError(remoteStoreImpl, watchChange) {
return tslib.__awaiter(this, void 0, void 0, function () {
var error, _i, _d, targetId;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
error = watchChange.cause;
_i = 0, _d = watchChange.targetIds;
_e.label = 1;
case 1:
if (!(_i < _d.length)) return [3 /*break*/, 4];
targetId = _d[_i];
if (!remoteStoreImpl.listenTargets.has(targetId)) return [3 /*break*/, 3];
return [4 /*yield*/, remoteStoreImpl.remoteSyncer.rejectListen(targetId, error)];
case 2:
_e.sent();
remoteStoreImpl.listenTargets.delete(targetId);
remoteStoreImpl.watchChangeAggregator.removeTarget(targetId);
_e.label = 3;
case 3:
_i++;
return [3 /*break*/, 1];
case 4: return [2 /*return*/];
}
});
});
}
/**
* Attempts to fill our write pipeline with writes from the LocalStore.
*
* Called internally to bootstrap or refill the write pipeline and by
* SyncEngine whenever there are new mutations to process.
*
* Starts the write stream if necessary.
*/
function fillWritePipeline(remoteStore) {
return tslib.__awaiter(this, void 0, void 0, function () {
var remoteStoreImpl, writeStream, lastBatchIdRetrieved, batch, e_6;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
remoteStoreImpl = debugCast(remoteStore);
writeStream = ensureWriteStream(remoteStoreImpl);
lastBatchIdRetrieved = remoteStoreImpl.writePipeline.length > 0
? remoteStoreImpl.writePipeline[remoteStoreImpl.writePipeline.length - 1]
.batchId
: BATCHID_UNKNOWN;
_d.label = 1;
case 1:
if (!canAddToWritePipeline(remoteStoreImpl)) return [3 /*break*/, 7];
_d.label = 2;
case 2:
_d.trys.push([2, 4, , 6]);
return [4 /*yield*/, localStoreGetNextMutationBatch(remoteStoreImpl.localStore, lastBatchIdRetrieved)];
case 3:
batch = _d.sent();
if (batch === null) {
if (remoteStoreImpl.writePipeline.length === 0) {
writeStream.markIdle();
}
return [3 /*break*/, 7];
}
else {
lastBatchIdRetrieved = batch.batchId;
addToWritePipeline(remoteStoreImpl, batch);
}
return [3 /*break*/, 6];
case 4:
e_6 = _d.sent();
return [4 /*yield*/, disableNetworkUntilRecovery(remoteStoreImpl, e_6)];
case 5:
_d.sent();
return [3 /*break*/, 6];
case 6: return [3 /*break*/, 1];
case 7:
if (shouldStartWriteStream(remoteStoreImpl)) {
startWriteStream(remoteStoreImpl);
}
return [2 /*return*/];
}
});
});
}
/**
* Returns true if we can add to the write pipeline (i.e. the network is
* enabled and the write pipeline is not full).
*/
function canAddToWritePipeline(remoteStoreImpl) {
return (canUseNetwork(remoteStoreImpl) &&
remoteStoreImpl.writePipeline.length < MAX_PENDING_WRITES);
}
/**
* Queues additional writes to be sent to the write stream, sending them
* immediately if the write stream is established.
*/
function addToWritePipeline(remoteStoreImpl, batch) {
remoteStoreImpl.writePipeline.push(batch);
var writeStream = ensureWriteStream(remoteStoreImpl);
if (writeStream.isOpen() && writeStream.handshakeComplete) {
writeStream.writeMutations(batch.mutations);
}
}
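/*
 * Illustrative note: the write pipeline is a FIFO of mutation batches capped
 * at MAX_PENDING_WRITES. fillWritePipeline() above pulls batches from the
 * LocalStore in batchId order and queues them here; once the stream handshake
 * has completed (see onWriteHandshakeComplete below), each queued batch's
 * mutations are written to the stream, and acknowledged batches are shifted
 * off the front in onMutationResult.
 */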
function shouldStartWriteStream(remoteStoreImpl) {
return (canUseNetwork(remoteStoreImpl) &&
!ensureWriteStream(remoteStoreImpl).isStarted() &&
remoteStoreImpl.writePipeline.length > 0);
}
function startWriteStream(remoteStoreImpl) {
ensureWriteStream(remoteStoreImpl).start();
}
function onWriteStreamOpen(remoteStoreImpl) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
ensureWriteStream(remoteStoreImpl).writeHandshake();
return [2 /*return*/];
});
});
}
function onWriteHandshakeComplete(remoteStoreImpl) {
return tslib.__awaiter(this, void 0, void 0, function () {
var writeStream, _i, _d, batch;
return tslib.__generator(this, function (_e) {
writeStream = ensureWriteStream(remoteStoreImpl);
// Send the write pipeline now that the stream is established.
for (_i = 0, _d = remoteStoreImpl.writePipeline; _i < _d.length; _i++) {
batch = _d[_i];
writeStream.writeMutations(batch.mutations);
}
return [2 /*return*/];
});
});
}
function onMutationResult(remoteStoreImpl, commitVersion, results) {
return tslib.__awaiter(this, void 0, void 0, function () {
var batch, success;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
batch = remoteStoreImpl.writePipeline.shift();
success = MutationBatchResult.from(batch, commitVersion, results);
return [4 /*yield*/, executeWithRecovery(remoteStoreImpl, function () { return remoteStoreImpl.remoteSyncer.applySuccessfulWrite(success); })];
case 1:
_d.sent();
// It's possible that with the completion of this mutation another
// slot has freed up.
return [4 /*yield*/, fillWritePipeline(remoteStoreImpl)];
case 2:
_d.sent();
return [2 /*return*/];
}
});
});
}
function onWriteStreamClose(remoteStoreImpl, error) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!(error && ensureWriteStream(remoteStoreImpl).handshakeComplete)) return [3 /*break*/, 2];
// This error affects the actual write.
return [4 /*yield*/, handleWriteError(remoteStoreImpl, error)];
case 1:
_d.sent();
_d.label = 2;
case 2:
// The write stream might have been started by refilling the write
// pipeline for failed writes.
if (shouldStartWriteStream(remoteStoreImpl)) {
startWriteStream(remoteStoreImpl);
}
return [2 /*return*/];
}
});
});
}
function handleWriteError(remoteStoreImpl, error) {
return tslib.__awaiter(this, void 0, void 0, function () {
var batch_1;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!isPermanentWriteError(error.code)) return [3 /*break*/, 3];
batch_1 = remoteStoreImpl.writePipeline.shift();
// In this case it's also unlikely that the server itself is melting
// down -- this was just a bad request so inhibit backoff on the next
// restart.
ensureWriteStream(remoteStoreImpl).inhibitBackoff();
return [4 /*yield*/, executeWithRecovery(remoteStoreImpl, function () { return remoteStoreImpl.remoteSyncer.rejectFailedWrite(batch_1.batchId, error); })];
case 1:
_d.sent();
// It's possible that with the completion of this mutation
// another slot has freed up.
return [4 /*yield*/, fillWritePipeline(remoteStoreImpl)];
case 2:
_d.sent();
_d.label = 3;
case 3: return [2 /*return*/];
}
});
});
}
function restartNetwork(remoteStore) {
return tslib.__awaiter(this, void 0, void 0, function () {
var remoteStoreImpl;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.offlineCauses.add(4 /* ConnectivityChange */);
return [4 /*yield*/, disableNetworkInternal(remoteStoreImpl)];
case 1:
_d.sent();
remoteStoreImpl.onlineStateTracker.set("Unknown" /* Unknown */);
remoteStoreImpl.offlineCauses.delete(4 /* ConnectivityChange */);
return [4 /*yield*/, enableNetworkInternal(remoteStoreImpl)];
case 2:
_d.sent();
return [2 /*return*/];
}
});
});
}
function remoteStoreHandleCredentialChange(remoteStore, user) {
return tslib.__awaiter(this, void 0, void 0, function () {
var remoteStoreImpl, usesNetwork;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
remoteStoreImpl = debugCast(remoteStore);
remoteStoreImpl.asyncQueue.verifyOperationInProgress();
logDebug(LOG_TAG$5, 'RemoteStore received new credentials');
usesNetwork = canUseNetwork(remoteStoreImpl);
// Tear down and re-create our network streams. This will ensure we get a
// fresh auth token for the new user and re-fill the write pipeline with
// new mutations from the LocalStore (since mutations are per-user).
remoteStoreImpl.offlineCauses.add(3 /* CredentialChange */);
return [4 /*yield*/, disableNetworkInternal(remoteStoreImpl)];
case 1:
_d.sent();
if (usesNetwork) {
// Don't set the network status to Unknown if we are offline.
remoteStoreImpl.onlineStateTracker.set("Unknown" /* Unknown */);
}
return [4 /*yield*/, remoteStoreImpl.remoteSyncer.handleCredentialChange(user)];
case 2:
_d.sent();
remoteStoreImpl.offlineCauses.delete(3 /* CredentialChange */);
return [4 /*yield*/, enableNetworkInternal(remoteStoreImpl)];
case 3:
_d.sent();
return [2 /*return*/];
}
});
});
}
/**
* Toggles the network state when the client gains or loses its primary lease.
*/
function remoteStoreApplyPrimaryState(remoteStore, isPrimary) {
return tslib.__awaiter(this, void 0, void 0, function () {
var remoteStoreImpl;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
remoteStoreImpl = debugCast(remoteStore);
if (!isPrimary) return [3 /*break*/, 2];
remoteStoreImpl.offlineCauses.delete(2 /* IsSecondary */);
return [4 /*yield*/, enableNetworkInternal(remoteStoreImpl)];
case 1:
_d.sent();
return [3 /*break*/, 4];
case 2:
if (!!isPrimary) return [3 /*break*/, 4];
remoteStoreImpl.offlineCauses.add(2 /* IsSecondary */);
return [4 /*yield*/, disableNetworkInternal(remoteStoreImpl)];
case 3:
_d.sent();
remoteStoreImpl.onlineStateTracker.set("Unknown" /* Unknown */);
_d.label = 4;
case 4: return [2 /*return*/];
}
});
});
}
/**
* If not yet initialized, registers the WatchStream and its network state
* callback with `remoteStoreImpl`. Returns the existing stream if one is
* already available.
*
* PORTING NOTE: On iOS and Android, the WatchStream gets registered on startup.
* This is not done on Web to allow it to be tree-shaken.
*/
function ensureWatchStream(remoteStoreImpl) {
var _this = this;
if (!remoteStoreImpl.watchStream) {
// Create stream (but note that it is not started yet).
remoteStoreImpl.watchStream = newPersistentWatchStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
onOpen: onWatchStreamOpen.bind(null, remoteStoreImpl),
onClose: onWatchStreamClose.bind(null, remoteStoreImpl),
onWatchChange: onWatchStreamChange.bind(null, remoteStoreImpl)
});
remoteStoreImpl.onNetworkStatusChange.push(function (enabled) { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!enabled) return [3 /*break*/, 1];
remoteStoreImpl.watchStream.inhibitBackoff();
if (shouldStartWatchStream(remoteStoreImpl)) {
startWatchStream(remoteStoreImpl);
}
else {
remoteStoreImpl.onlineStateTracker.set("Unknown" /* Unknown */);
}
return [3 /*break*/, 3];
case 1: return [4 /*yield*/, remoteStoreImpl.watchStream.stop()];
case 2:
_d.sent();
cleanUpWatchStreamState(remoteStoreImpl);
_d.label = 3;
case 3: return [2 /*return*/];
}
});
}); });
}
return remoteStoreImpl.watchStream;
}
/**
* If not yet initialized, registers the WriteStream and its network state
* callback with `remoteStoreImpl`. Returns the existing stream if one is
* already available.
*
* PORTING NOTE: On iOS and Android, the WriteStream gets registered on startup.
* This is not done on Web to allow it to be tree-shaken.
*/
function ensureWriteStream(remoteStoreImpl) {
var _this = this;
if (!remoteStoreImpl.writeStream) {
// Create stream (but note that it is not started yet).
remoteStoreImpl.writeStream = newPersistentWriteStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
onOpen: onWriteStreamOpen.bind(null, remoteStoreImpl),
onClose: onWriteStreamClose.bind(null, remoteStoreImpl),
onHandshakeComplete: onWriteHandshakeComplete.bind(null, remoteStoreImpl),
onMutationResult: onMutationResult.bind(null, remoteStoreImpl)
});
remoteStoreImpl.onNetworkStatusChange.push(function (enabled) { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!enabled) return [3 /*break*/, 2];
remoteStoreImpl.writeStream.inhibitBackoff();
// This will start the write stream if necessary.
return [4 /*yield*/, fillWritePipeline(remoteStoreImpl)];
case 1:
_d.sent();
return [3 /*break*/, 4];
case 2: return [4 /*yield*/, remoteStoreImpl.writeStream.stop()];
case 3:
_d.sent();
if (remoteStoreImpl.writePipeline.length > 0) {
logDebug(LOG_TAG$5, "Stopping write stream with " + remoteStoreImpl.writePipeline.length + " pending writes");
remoteStoreImpl.writePipeline = [];
}
_d.label = 4;
case 4: return [2 /*return*/];
}
});
}); });
}
return remoteStoreImpl.writeStream;
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$4 = 'AsyncQueue';
/**
* Represents an operation scheduled to be run in the future on an AsyncQueue.
*
* It is created via DelayedOperation.createAndSchedule().
*
* Supports cancellation (via cancel()) and early execution (via skipDelay()).
*
* Note: We implement `PromiseLike` instead of `Promise`, as the `Promise` type
* in newer versions of TypeScript defines `finally`, which is not available in
* IE.
*/
var DelayedOperation = /** @class */ (function () {
function DelayedOperation(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
this.asyncQueue = asyncQueue;
this.timerId = timerId;
this.targetTimeMs = targetTimeMs;
this.op = op;
this.removalCallback = removalCallback;
this.deferred = new Deferred();
this.then = this.deferred.promise.then.bind(this.deferred.promise);
// It's normal for the deferred promise to be rejected (due to cancellation),
// so we attach a dummy catch callback to avoid
// 'UnhandledPromiseRejectionWarning' log spam.
this.deferred.promise.catch(function (err) { });
}
/**
* Creates and returns a DelayedOperation that has been scheduled to be
* executed on the provided asyncQueue after the provided delayMs.
*
* @param asyncQueue - The queue to schedule the operation on.
* @param id - A Timer ID identifying the type of operation this is.
* @param delayMs - The delay (ms) before the operation should be scheduled.
* @param op - The operation to run.
* @param removalCallback - A callback to be called synchronously once the
* operation is executed or canceled, notifying the AsyncQueue to remove it
* from its delayedOperations list.
* PORTING NOTE: This exists to prevent making removeDelayedOperation() and
* the DelayedOperation class public.
*/
DelayedOperation.createAndSchedule = function (asyncQueue, timerId, delayMs, op, removalCallback) {
var targetTime = Date.now() + delayMs;
var delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
delayedOp.start(delayMs);
return delayedOp;
};
/**
* Starts the timer. This is called immediately after construction by
* createAndSchedule().
*/
DelayedOperation.prototype.start = function (delayMs) {
var _this = this;
this.timerHandle = setTimeout(function () { return _this.handleDelayElapsed(); }, delayMs);
};
/**
* Queues the operation to run immediately (if it hasn't already been run or
* canceled).
*/
DelayedOperation.prototype.skipDelay = function () {
return this.handleDelayElapsed();
};
/**
* Cancels the operation if it hasn't already been executed or canceled. The
* promise will be rejected.
*
* As long as the operation has not yet been run, calling cancel() provides a
* guarantee that the operation will not be run.
*/
DelayedOperation.prototype.cancel = function (reason) {
if (this.timerHandle !== null) {
this.clearTimeout();
this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
}
};
DelayedOperation.prototype.handleDelayElapsed = function () {
var _this = this;
this.asyncQueue.enqueueAndForget(function () {
if (_this.timerHandle !== null) {
_this.clearTimeout();
return _this.op().then(function (result) {
return _this.deferred.resolve(result);
});
}
else {
return Promise.resolve();
}
});
};
DelayedOperation.prototype.clearTimeout = function () {
if (this.timerHandle !== null) {
this.removalCallback(this);
clearTimeout(this.timerHandle);
this.timerHandle = null;
}
};
return DelayedOperation;
}());
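/*
 * Illustrative sketch (hypothetical arguments): a DelayedOperation is created
 * through createAndSchedule() and then either fires after its delay, is run
 * early via skipDelay(), or is cancelled:
 *
 *   var op = DelayedOperation.createAndSchedule(
 *     asyncQueue,                        // the owning AsyncQueue
 *     timerId,                           // TimerId identifying the operation
 *     1000,                              // delay in milliseconds
 *     function () { return doWork(); },  // hypothetical async operation
 *     function (removedOp) { }           // removal callback for the queue
 *   );
 *   op.skipDelay();         // or: op.cancel('shutting down');
 */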
/**
* Returns a FirestoreError that can be surfaced to the user if the provided
* error is an IndexedDbTransactionError. Re-throws the error otherwise.
*/
function wrapInUserErrorIfRecoverable(e, msg) {
logError(LOG_TAG$4, msg + ": " + e);
if (isIndexedDbTransactionError(e)) {
return new FirestoreError(Code.UNAVAILABLE, msg + ": " + e);
}
else {
throw e;
}
}
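/*
 * Illustrative sketch: call sites surface recoverable IndexedDB failures to
 * the user as UNAVAILABLE and let all other errors propagate, e.g. (compare
 * eventManagerListen further below):
 *
 *   somePromise.catch(function (e) {
 *     listener.onError(wrapInUserErrorIfRecoverable(e, 'Initialization of query failed'));
 *   });
 */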
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentSet is an immutable (copy-on-write) collection that holds documents
* in order specified by the provided comparator. We always add a document key
* comparator on top of what is provided to guarantee document equality based on
* the key.
*/
var DocumentSet = /** @class */ (function () {
/** The default ordering is by key if the comparator is omitted */
function DocumentSet(comp) {
// We are adding document key comparator to the end as it's the only
// guaranteed unique property of a document.
if (comp) {
this.comparator = function (d1, d2) { return comp(d1, d2) || DocumentKey.comparator(d1.key, d2.key); };
}
else {
this.comparator = function (d1, d2) { return DocumentKey.comparator(d1.key, d2.key); };
}
this.keyedMap = documentMap();
this.sortedSet = new SortedMap(this.comparator);
}
/**
* Returns an empty copy of the existing DocumentSet, using the same
* comparator.
*/
DocumentSet.emptySet = function (oldSet) {
return new DocumentSet(oldSet.comparator);
};
DocumentSet.prototype.has = function (key) {
return this.keyedMap.get(key) != null;
};
DocumentSet.prototype.get = function (key) {
return this.keyedMap.get(key);
};
DocumentSet.prototype.first = function () {
return this.sortedSet.minKey();
};
DocumentSet.prototype.last = function () {
return this.sortedSet.maxKey();
};
DocumentSet.prototype.isEmpty = function () {
return this.sortedSet.isEmpty();
};
/**
* Returns the index of the provided key in the document set, or -1 if the
* document key is not present in the set.
*/
DocumentSet.prototype.indexOf = function (key) {
var doc = this.keyedMap.get(key);
return doc ? this.sortedSet.indexOf(doc) : -1;
};
Object.defineProperty(DocumentSet.prototype, "size", {
get: function () {
return this.sortedSet.size;
},
enumerable: false,
configurable: true
});
/** Iterates documents in order defined by "comparator" */
DocumentSet.prototype.forEach = function (cb) {
this.sortedSet.inorderTraversal(function (k, v) {
cb(k);
return false;
});
};
/** Inserts or updates a document with the same key */
DocumentSet.prototype.add = function (doc) {
// First remove the element if we have it.
var set = this.delete(doc.key);
return set.copy(set.keyedMap.insert(doc.key, doc), set.sortedSet.insert(doc, null));
};
/** Deletes a document with a given key */
DocumentSet.prototype.delete = function (key) {
var doc = this.get(key);
if (!doc) {
return this;
}
return this.copy(this.keyedMap.remove(key), this.sortedSet.remove(doc));
};
DocumentSet.prototype.isEqual = function (other) {
if (!(other instanceof DocumentSet)) {
return false;
}
if (this.size !== other.size) {
return false;
}
var thisIt = this.sortedSet.getIterator();
var otherIt = other.sortedSet.getIterator();
while (thisIt.hasNext()) {
var thisDoc = thisIt.getNext().key;
var otherDoc = otherIt.getNext().key;
if (!thisDoc.isEqual(otherDoc)) {
return false;
}
}
return true;
};
DocumentSet.prototype.toString = function () {
var docStrings = [];
this.forEach(function (doc) {
docStrings.push(doc.toString());
});
if (docStrings.length === 0) {
return 'DocumentSet ()';
}
else {
return 'DocumentSet (\n ' + docStrings.join(' \n') + '\n)';
}
};
DocumentSet.prototype.copy = function (keyedMap, sortedSet) {
var newSet = new DocumentSet();
newSet.comparator = this.comparator;
newSet.keyedMap = keyedMap;
newSet.sortedSet = sortedSet;
return newSet;
};
return DocumentSet;
}());
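/*
 * Illustrative sketch (hypothetical documents docA/docB): DocumentSet is
 * copy-on-write, so add() and delete() return new sets and never mutate the
 * receiver:
 *
 *   var empty = new DocumentSet();        // ordered by document key
 *   var two = empty.add(docA).add(docB);
 *   empty.size;                // still 0
 *   two.size;                  // 2
 *   two.has(docA.key);         // true
 *   two.delete(docA.key) === two;   // false: deletion yields a new set
 */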
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* DocumentChangeSet keeps track of a set of changes to docs in a query, merging
* duplicate events for the same doc.
*/
var DocumentChangeSet = /** @class */ (function () {
function DocumentChangeSet() {
this.changeMap = new SortedMap(DocumentKey.comparator);
}
DocumentChangeSet.prototype.track = function (change) {
var key = change.doc.key;
var oldChange = this.changeMap.get(key);
if (!oldChange) {
this.changeMap = this.changeMap.insert(key, change);
return;
}
// Merge the new change with the existing change.
if (change.type !== 0 /* Added */ &&
oldChange.type === 3 /* Metadata */) {
this.changeMap = this.changeMap.insert(key, change);
}
else if (change.type === 3 /* Metadata */ &&
oldChange.type !== 1 /* Removed */) {
this.changeMap = this.changeMap.insert(key, {
type: oldChange.type,
doc: change.doc
});
}
else if (change.type === 2 /* Modified */ &&
oldChange.type === 2 /* Modified */) {
this.changeMap = this.changeMap.insert(key, {
type: 2 /* Modified */,
doc: change.doc
});
}
else if (change.type === 2 /* Modified */ &&
oldChange.type === 0 /* Added */) {
this.changeMap = this.changeMap.insert(key, {
type: 0 /* Added */,
doc: change.doc
});
}
else if (change.type === 1 /* Removed */ &&
oldChange.type === 0 /* Added */) {
this.changeMap = this.changeMap.remove(key);
}
else if (change.type === 1 /* Removed */ &&
oldChange.type === 2 /* Modified */) {
this.changeMap = this.changeMap.insert(key, {
type: 1 /* Removed */,
doc: oldChange.doc
});
}
else if (change.type === 0 /* Added */ &&
oldChange.type === 1 /* Removed */) {
this.changeMap = this.changeMap.insert(key, {
type: 2 /* Modified */,
doc: change.doc
});
}
else {
// This includes these cases, which don't make sense:
// Added->Added
// Removed->Removed
// Modified->Added
// Removed->Modified
// Metadata->Added
// Removed->Metadata
fail();
}
};
DocumentChangeSet.prototype.getChanges = function () {
var changes = [];
this.changeMap.inorderTraversal(function (key, change) {
changes.push(change);
});
return changes;
};
return DocumentChangeSet;
}());
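/*
 * Summary of the merge rules implemented by DocumentChangeSet.track() above,
 * written as "existing change + incoming change => stored change":
 *
 *   Metadata + any non-Added change   => incoming change
 *   (Added | Modified) + Metadata     => existing type, incoming doc
 *   Modified + Modified               => Modified
 *   Added    + Modified               => Added
 *   Added    + Removed                => entry dropped
 *   Modified + Removed                => Removed (keeps the existing doc)
 *   Removed  + Added                  => Modified
 *
 * All other combinations (e.g. Added + Added) are invalid and hit fail().
 */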
var ViewSnapshot = /** @class */ (function () {
function ViewSnapshot(query, docs, oldDocs, docChanges, mutatedKeys, fromCache, syncStateChanged, excludesMetadataChanges) {
this.query = query;
this.docs = docs;
this.oldDocs = oldDocs;
this.docChanges = docChanges;
this.mutatedKeys = mutatedKeys;
this.fromCache = fromCache;
this.syncStateChanged = syncStateChanged;
this.excludesMetadataChanges = excludesMetadataChanges;
}
/** Returns a view snapshot as if all documents in the snapshot were added. */
ViewSnapshot.fromInitialDocuments = function (query, documents, mutatedKeys, fromCache) {
var changes = [];
documents.forEach(function (doc) {
changes.push({ type: 0 /* Added */, doc: doc });
});
return new ViewSnapshot(query, documents, DocumentSet.emptySet(documents), changes, mutatedKeys, fromCache,
/* syncStateChanged= */ true,
/* excludesMetadataChanges= */ false);
};
Object.defineProperty(ViewSnapshot.prototype, "hasPendingWrites", {
get: function () {
return !this.mutatedKeys.isEmpty();
},
enumerable: false,
configurable: true
});
ViewSnapshot.prototype.isEqual = function (other) {
if (this.fromCache !== other.fromCache ||
this.syncStateChanged !== other.syncStateChanged ||
!this.mutatedKeys.isEqual(other.mutatedKeys) ||
!queryEquals(this.query, other.query) ||
!this.docs.isEqual(other.docs) ||
!this.oldDocs.isEqual(other.oldDocs)) {
return false;
}
var changes = this.docChanges;
var otherChanges = other.docChanges;
if (changes.length !== otherChanges.length) {
return false;
}
for (var i = 0; i < changes.length; i++) {
if (changes[i].type !== otherChanges[i].type ||
!changes[i].doc.isEqual(otherChanges[i].doc)) {
return false;
}
}
return true;
};
return ViewSnapshot;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Holds the listeners and the last received ViewSnapshot for a query being
* tracked by EventManager.
*/
var QueryListenersInfo = /** @class */ (function () {
function QueryListenersInfo() {
this.viewSnap = undefined;
this.listeners = [];
}
return QueryListenersInfo;
}());
function newEventManager() {
return new EventManagerImpl();
}
var EventManagerImpl = /** @class */ (function () {
function EventManagerImpl() {
this.queries = new ObjectMap(function (q) { return canonifyQuery(q); }, queryEquals);
this.onlineState = "Unknown" /* Unknown */;
this.snapshotsInSyncListeners = new Set();
}
return EventManagerImpl;
}());
function eventManagerListen(eventManager, listener) {
return tslib.__awaiter(this, void 0, void 0, function () {
var eventManagerImpl, query, firstListen, queryInfo, _d, e_7, firestoreError, raisedEvent;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
eventManagerImpl = debugCast(eventManager);
query = listener.query;
firstListen = false;
queryInfo = eventManagerImpl.queries.get(query);
if (!queryInfo) {
firstListen = true;
queryInfo = new QueryListenersInfo();
}
if (!firstListen) return [3 /*break*/, 4];
_e.label = 1;
case 1:
_e.trys.push([1, 3, , 4]);
_d = queryInfo;
return [4 /*yield*/, eventManagerImpl.onListen(query)];
case 2:
_d.viewSnap = _e.sent();
return [3 /*break*/, 4];
case 3:
e_7 = _e.sent();
firestoreError = wrapInUserErrorIfRecoverable(e_7, "Initialization of query '" + stringifyQuery(listener.query) + "' failed");
listener.onError(firestoreError);
return [2 /*return*/];
case 4:
eventManagerImpl.queries.set(query, queryInfo);
queryInfo.listeners.push(listener);
// Run global snapshot listeners if a consistent snapshot has been emitted.
listener.applyOnlineStateChange(eventManagerImpl.onlineState);
if (queryInfo.viewSnap) {
raisedEvent = listener.onViewSnapshot(queryInfo.viewSnap);
if (raisedEvent) {
raiseSnapshotsInSyncEvent(eventManagerImpl);
}
}
return [2 /*return*/];
}
});
});
}
function eventManagerUnlisten(eventManager, listener) {
return tslib.__awaiter(this, void 0, void 0, function () {
var eventManagerImpl, query, lastListen, queryInfo, i;
return tslib.__generator(this, function (_d) {
eventManagerImpl = debugCast(eventManager);
query = listener.query;
lastListen = false;
queryInfo = eventManagerImpl.queries.get(query);
if (queryInfo) {
i = queryInfo.listeners.indexOf(listener);
if (i >= 0) {
queryInfo.listeners.splice(i, 1);
lastListen = queryInfo.listeners.length === 0;
}
}
if (lastListen) {
eventManagerImpl.queries.delete(query);
return [2 /*return*/, eventManagerImpl.onUnlisten(query)];
}
return [2 /*return*/];
});
});
}
function eventManagerOnWatchChange(eventManager, viewSnaps) {
var eventManagerImpl = debugCast(eventManager);
var raisedEvent = false;
for (var _i = 0, viewSnaps_1 = viewSnaps; _i < viewSnaps_1.length; _i++) {
var viewSnap = viewSnaps_1[_i];
var query_1 = viewSnap.query;
var queryInfo = eventManagerImpl.queries.get(query_1);
if (queryInfo) {
for (var _d = 0, _e = queryInfo.listeners; _d < _e.length; _d++) {
var listener = _e[_d];
if (listener.onViewSnapshot(viewSnap)) {
raisedEvent = true;
}
}
queryInfo.viewSnap = viewSnap;
}
}
if (raisedEvent) {
raiseSnapshotsInSyncEvent(eventManagerImpl);
}
}
function eventManagerOnWatchError(eventManager, query, error) {
var eventManagerImpl = debugCast(eventManager);
var queryInfo = eventManagerImpl.queries.get(query);
if (queryInfo) {
for (var _i = 0, _d = queryInfo.listeners; _i < _d.length; _i++) {
var listener = _d[_i];
listener.onError(error);
}
}
// Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
// after an error.
eventManagerImpl.queries.delete(query);
}
function eventManagerOnOnlineStateChange(eventManager, onlineState) {
var eventManagerImpl = debugCast(eventManager);
eventManagerImpl.onlineState = onlineState;
var raisedEvent = false;
eventManagerImpl.queries.forEach(function (_, queryInfo) {
for (var _i = 0, _d = queryInfo.listeners; _i < _d.length; _i++) {
var listener = _d[_i];
// Run global snapshot listeners if a consistent snapshot has been emitted.
if (listener.applyOnlineStateChange(onlineState)) {
raisedEvent = true;
}
}
});
if (raisedEvent) {
raiseSnapshotsInSyncEvent(eventManagerImpl);
}
}
function addSnapshotsInSyncListener(eventManager, observer) {
var eventManagerImpl = debugCast(eventManager);
eventManagerImpl.snapshotsInSyncListeners.add(observer);
// Immediately fire an initial event, indicating all existing listeners
// are in-sync.
observer.next();
}
function removeSnapshotsInSyncListener(eventManager, observer) {
var eventManagerImpl = debugCast(eventManager);
eventManagerImpl.snapshotsInSyncListeners.delete(observer);
}
// Call all global snapshot listeners that have been set.
function raiseSnapshotsInSyncEvent(eventManagerImpl) {
eventManagerImpl.snapshotsInSyncListeners.forEach(function (observer) {
observer.next();
});
}
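/*
 * Illustrative sketch (hypothetical observer): a snapshots-in-sync listener is
 * a plain observer whose next() is called once on registration and again each
 * time any query listener raises an event:
 *
 *   var observer = { next: function () { console.log('listeners in sync'); } };
 *   addSnapshotsInSyncListener(eventManager, observer);
 *   // ... later ...
 *   removeSnapshotsInSyncListener(eventManager, observer);
 */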
/**
* QueryListener takes a series of internal view snapshots and determines
* when to raise the event.
*
* It uses an Observer to dispatch events.
*/
var QueryListener = /** @class */ (function () {
function QueryListener(query, queryObserver, options) {
this.query = query;
this.queryObserver = queryObserver;
/**
* Initial snapshots (e.g. from cache) may not be propagated to the wrapped
* observer. This flag is set to true once we've actually raised an event.
*/
this.raisedInitialEvent = false;
this.snap = null;
this.onlineState = "Unknown" /* Unknown */;
this.options = options || {};
}
/**
* Applies the new ViewSnapshot to this listener, raising a user-facing event
* if applicable (depending on what changed, whether the user has opted into
* metadata-only changes, etc.). Returns true if a user-facing event was
* indeed raised.
*/
QueryListener.prototype.onViewSnapshot = function (snap) {
if (!this.options.includeMetadataChanges) {
// Remove the metadata only changes.
var docChanges = [];
for (var _i = 0, _d = snap.docChanges; _i < _d.length; _i++) {
var docChange = _d[_i];
if (docChange.type !== 3 /* Metadata */) {
docChanges.push(docChange);
}
}
snap = new ViewSnapshot(snap.query, snap.docs, snap.oldDocs, docChanges, snap.mutatedKeys, snap.fromCache, snap.syncStateChanged,
/* excludesMetadataChanges= */ true);
}
var raisedEvent = false;
if (!this.raisedInitialEvent) {
if (this.shouldRaiseInitialEvent(snap, this.onlineState)) {
this.raiseInitialEvent(snap);
raisedEvent = true;
}
}
else if (this.shouldRaiseEvent(snap)) {
this.queryObserver.next(snap);
raisedEvent = true;
}
this.snap = snap;
return raisedEvent;
};
QueryListener.prototype.onError = function (error) {
this.queryObserver.error(error);
};
/** Returns whether a snapshot was raised. */
QueryListener.prototype.applyOnlineStateChange = function (onlineState) {
this.onlineState = onlineState;
var raisedEvent = false;
if (this.snap &&
!this.raisedInitialEvent &&
this.shouldRaiseInitialEvent(this.snap, onlineState)) {
this.raiseInitialEvent(this.snap);
raisedEvent = true;
}
return raisedEvent;
};
QueryListener.prototype.shouldRaiseInitialEvent = function (snap, onlineState) {
// Always raise the first event when we're synced
if (!snap.fromCache) {
return true;
}
// NOTE: We consider OnlineState.Unknown as online (it should become Offline
// or Online if we wait long enough).
var maybeOnline = onlineState !== "Offline" /* Offline */;
// Don't raise the event if we're online, aren't synced yet (checked
// above) and are waiting for a sync.
if (this.options.waitForSyncWhenOnline && maybeOnline) {
return false;
}
// Raise data from cache if we have any documents or we are offline
return !snap.docs.isEmpty() || onlineState === "Offline" /* Offline */;
};
QueryListener.prototype.shouldRaiseEvent = function (snap) {
// We don't need to handle includeDocumentMetadataChanges here because
// the Metadata only changes have already been stripped out if needed.
// At this point the only changes we will see are the ones we should
// propagate.
if (snap.docChanges.length > 0) {
return true;
}
var hasPendingWritesChanged = this.snap && this.snap.hasPendingWrites !== snap.hasPendingWrites;
if (snap.syncStateChanged || hasPendingWritesChanged) {
return this.options.includeMetadataChanges === true;
}
// Generally we should have hit one of the cases above, but it's possible
// to get here if there were only metadata docChanges and they got
// stripped out.
return false;
};
QueryListener.prototype.raiseInitialEvent = function (snap) {
snap = ViewSnapshot.fromInitialDocuments(snap.query, snap.docs, snap.mutatedKeys, snap.fromCache);
this.raisedInitialEvent = true;
this.queryObserver.next(snap);
};
return QueryListener;
}());
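/*
 * Illustrative sketch (hypothetical observer callbacks): the options passed to
 * QueryListener control which snapshots reach the observer:
 *
 *   var listener = new QueryListener(query, {
 *     next: function (snap) { render(snap); },     // hypothetical callback
 *     error: function (err) { reportError(err); }  // hypothetical callback
 *   }, {
 *     includeMetadataChanges: true,   // also raise metadata-only changes
 *     waitForSyncWhenOnline: true     // hold cached results until synced
 *   });
 */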
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A set of changes to what documents are currently in view and out of view for
* a given query. These changes are sent to the LocalStore by the View (via
* the SyncEngine) and are used to pin / unpin documents as appropriate.
*/
var LocalViewChanges = /** @class */ (function () {
function LocalViewChanges(targetId, fromCache, addedKeys, removedKeys) {
this.targetId = targetId;
this.fromCache = fromCache;
this.addedKeys = addedKeys;
this.removedKeys = removedKeys;
}
LocalViewChanges.fromSnapshot = function (targetId, viewSnapshot) {
var addedKeys = documentKeySet();
var removedKeys = documentKeySet();
for (var _i = 0, _d = viewSnapshot.docChanges; _i < _d.length; _i++) {
var docChange = _d[_i];
switch (docChange.type) {
case 0 /* Added */:
addedKeys = addedKeys.add(docChange.doc.key);
break;
case 1 /* Removed */:
removedKeys = removedKeys.add(docChange.doc.key);
break;
// Other change types (Modified, Metadata) do not affect pinned keys.
}
}
return new LocalViewChanges(targetId, viewSnapshot.fromCache, addedKeys, removedKeys);
};
return LocalViewChanges;
}());
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var BundleLoadResult = /** @class */ (function () {
function BundleLoadResult(progress, changedDocs) {
this.progress = progress;
this.changedDocs = changedDocs;
}
return BundleLoadResult;
}());
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Helper to convert objects from bundles to model objects in the SDK.
*/
var BundleConverterImpl = /** @class */ (function () {
function BundleConverterImpl(serializer) {
this.serializer = serializer;
}
BundleConverterImpl.prototype.toDocumentKey = function (name) {
return fromName(this.serializer, name);
};
/**
* Converts a BundleDocument to a MutableDocument.
*/
BundleConverterImpl.prototype.toMutableDocument = function (bundledDoc) {
if (bundledDoc.metadata.exists) {
return fromDocument(this.serializer, bundledDoc.document, false);
}
else {
return MutableDocument.newNoDocument(this.toDocumentKey(bundledDoc.metadata.name), this.toSnapshotVersion(bundledDoc.metadata.readTime));
}
};
BundleConverterImpl.prototype.toSnapshotVersion = function (time) {
return fromVersion(time);
};
return BundleConverterImpl;
}());
/**
* A class to process the elements from a bundle, load them into local
* storage and provide progress update while loading.
*/
var BundleLoader = /** @class */ (function () {
function BundleLoader(bundleMetadata, localStore, serializer) {
this.bundleMetadata = bundleMetadata;
this.localStore = localStore;
this.serializer = serializer;
/** Batched queries to be saved into storage */
this.queries = [];
/** Batched documents to be saved into storage */
this.documents = [];
this.progress = bundleInitialProgress(bundleMetadata);
}
/**
* Adds an element from the bundle to the loader.
*
* Returns an updated progress snapshot if adding the element changes the
* number of loaded documents; otherwise returns null.
*/
BundleLoader.prototype.addSizedElement = function (element) {
this.progress.bytesLoaded += element.byteLength;
var documentsLoaded = this.progress.documentsLoaded;
if (element.payload.namedQuery) {
this.queries.push(element.payload.namedQuery);
}
else if (element.payload.documentMetadata) {
this.documents.push({ metadata: element.payload.documentMetadata });
if (!element.payload.documentMetadata.exists) {
++documentsLoaded;
}
}
else if (element.payload.document) {
this.documents[this.documents.length - 1].document =
element.payload.document;
++documentsLoaded;
}
if (documentsLoaded !== this.progress.documentsLoaded) {
this.progress.documentsLoaded = documentsLoaded;
return Object.assign({}, this.progress);
}
return null;
};
BundleLoader.prototype.getQueryDocumentMapping = function (documents) {
var queryDocumentMap = new Map();
var bundleConverter = new BundleConverterImpl(this.serializer);
for (var _i = 0, documents_2 = documents; _i < documents_2.length; _i++) {
var bundleDoc = documents_2[_i];
if (bundleDoc.metadata.queries) {
var documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
for (var _d = 0, _e = bundleDoc.metadata.queries; _d < _e.length; _d++) {
var queryName = _e[_d];
var documentKeys = (queryDocumentMap.get(queryName) || documentKeySet()).add(documentKey);
queryDocumentMap.set(queryName, documentKeys);
}
}
}
return queryDocumentMap;
};
/**
* Applies the batched documents and named queries to the LocalStore, marks
* the progress as 'Success', and returns the final load result.
*/
BundleLoader.prototype.complete = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var changedDocuments, queryDocumentMap, _i, _d, q;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0: return [4 /*yield*/, localStoreApplyBundledDocuments(this.localStore, new BundleConverterImpl(this.serializer), this.documents, this.bundleMetadata.id)];
case 1:
changedDocuments = _e.sent();
queryDocumentMap = this.getQueryDocumentMapping(this.documents);
_i = 0, _d = this.queries;
_e.label = 2;
case 2:
if (!(_i < _d.length)) return [3 /*break*/, 5];
q = _d[_i];
return [4 /*yield*/, localStoreSaveNamedQuery(this.localStore, q, queryDocumentMap.get(q.name))];
case 3:
_e.sent();
_e.label = 4;
case 4:
_i++;
return [3 /*break*/, 2];
case 5:
this.progress.taskState = 'Success';
return [2 /*return*/, new BundleLoadResult(Object.assign({}, this.progress), changedDocuments)];
}
});
});
};
return BundleLoader;
}());
/**
* Returns a `LoadBundleTaskProgress` representing the initial progress of
* loading a bundle.
*/
function bundleInitialProgress(metadata) {
return {
taskState: 'Running',
documentsLoaded: 0,
bytesLoaded: 0,
totalDocuments: metadata.totalDocuments,
totalBytes: metadata.totalBytes
};
}
/**
* Returns a `LoadBundleTaskProgress` representing a successfully completed
* bundle load.
*/
function bundleSuccessProgress(metadata) {
return {
taskState: 'Success',
documentsLoaded: metadata.totalDocuments,
bytesLoaded: metadata.totalBytes,
totalDocuments: metadata.totalDocuments,
totalBytes: metadata.totalBytes
};
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var AddedLimboDocument = /** @class */ (function () {
function AddedLimboDocument(key) {
this.key = key;
}
return AddedLimboDocument;
}());
var RemovedLimboDocument = /** @class */ (function () {
function RemovedLimboDocument(key) {
this.key = key;
}
return RemovedLimboDocument;
}());
/**
* View is responsible for computing the final merged truth of what docs are in
* a query. It gets notified of local and remote changes to docs, and applies
* the query filters and limits to determine the most correct possible results.
*/
var View = /** @class */ (function () {
function View(query,
/** Documents included in the remote target */
_syncedDocuments) {
this.query = query;
this._syncedDocuments = _syncedDocuments;
this.syncState = null;
/**
* Whether the view is current with the backend. A view is considered
* current after it has seen the current flag from the backend and did not
* lose consistency within the watch stream (e.g. because of an existence
* filter mismatch).
*/
this.current = false;
/** Documents in the view but not in the remote target */
this.limboDocuments = documentKeySet();
/** Document Keys that have local changes */
this.mutatedKeys = documentKeySet();
this.docComparator = newQueryComparator(query);
this.documentSet = new DocumentSet(this.docComparator);
}
Object.defineProperty(View.prototype, "syncedDocuments", {
/**
* The set of remote documents that the server has told us belongs to the target associated with
* this view.
*/
get: function () {
return this._syncedDocuments;
},
enumerable: false,
configurable: true
});
/**
* Iterates over a set of doc changes, applies the query limit, and computes
* what the new results should be, what the changes were, and whether we may
* need to go back to the local cache for more results. Does not make any
* changes to the view.
* @param docChanges - The doc changes to apply to this view.
* @param previousChanges - If this is being called with a refill, then start
* with this set of docs and changes instead of the current view.
* @returns a new set of docs, changes, and refill flag.
*/
View.prototype.computeDocChanges = function (docChanges, previousChanges) {
var _this = this;
var changeSet = previousChanges
? previousChanges.changeSet
: new DocumentChangeSet();
var oldDocumentSet = previousChanges
? previousChanges.documentSet
: this.documentSet;
var newMutatedKeys = previousChanges
? previousChanges.mutatedKeys
: this.mutatedKeys;
var newDocumentSet = oldDocumentSet;
var needsRefill = false;
// Track the last doc in a (full) limit. This is necessary, because some
// update (a delete, or an update moving a doc past the old limit) might
// mean there is some other document in the local cache that either should
// come (1) between the old last limit doc and the new last document, in the
// case of updates, or (2) after the new last document, in the case of
// deletes. So we keep this doc at the old limit to compare the updates to.
//
// Note that this should never get used in a refill (when previousChanges is
// set), because there will only be adds -- no deletes or updates.
var lastDocInLimit = hasLimitToFirst(this.query) && oldDocumentSet.size === this.query.limit
? oldDocumentSet.last()
: null;
var firstDocInLimit = hasLimitToLast(this.query) && oldDocumentSet.size === this.query.limit
? oldDocumentSet.first()
: null;
docChanges.inorderTraversal(function (key, entry) {
var oldDoc = oldDocumentSet.get(key);
var newDoc = queryMatches(_this.query, entry) ? entry : null;
var oldDocHadPendingMutations = oldDoc
? _this.mutatedKeys.has(oldDoc.key)
: false;
var newDocHasPendingMutations = newDoc
? newDoc.hasLocalMutations ||
// We only consider committed mutations for documents that were
// mutated during the lifetime of the view.
(_this.mutatedKeys.has(newDoc.key) && newDoc.hasCommittedMutations)
: false;
var changeApplied = false;
// Calculate change
if (oldDoc && newDoc) {
var docsEqual = oldDoc.data.isEqual(newDoc.data);
if (!docsEqual) {
if (!_this.shouldWaitForSyncedDocument(oldDoc, newDoc)) {
changeSet.track({
type: 2 /* Modified */,
doc: newDoc
});
changeApplied = true;
if ((lastDocInLimit &&
_this.docComparator(newDoc, lastDocInLimit) > 0) ||
(firstDocInLimit &&
_this.docComparator(newDoc, firstDocInLimit) < 0)) {
// This doc moved from inside the limit to outside the limit.
// That means there may be some other doc in the local cache
// that should be included instead.
needsRefill = true;
}
}
}
else if (oldDocHadPendingMutations !== newDocHasPendingMutations) {
changeSet.track({ type: 3 /* Metadata */, doc: newDoc });
changeApplied = true;
}
}
else if (!oldDoc && newDoc) {
changeSet.track({ type: 0 /* Added */, doc: newDoc });
changeApplied = true;
}
else if (oldDoc && !newDoc) {
changeSet.track({ type: 1 /* Removed */, doc: oldDoc });
changeApplied = true;
if (lastDocInLimit || firstDocInLimit) {
// A doc was removed from a full limit query. We'll need to
// requery from the local cache to see if we know about some other
// doc that should be in the results.
needsRefill = true;
}
}
if (changeApplied) {
if (newDoc) {
newDocumentSet = newDocumentSet.add(newDoc);
if (newDocHasPendingMutations) {
newMutatedKeys = newMutatedKeys.add(key);
}
else {
newMutatedKeys = newMutatedKeys.delete(key);
}
}
else {
newDocumentSet = newDocumentSet.delete(key);
newMutatedKeys = newMutatedKeys.delete(key);
}
}
});
// Drop documents out to meet limit/limitToLast requirement.
if (hasLimitToFirst(this.query) || hasLimitToLast(this.query)) {
while (newDocumentSet.size > this.query.limit) {
var oldDoc = hasLimitToFirst(this.query)
? newDocumentSet.last()
: newDocumentSet.first();
newDocumentSet = newDocumentSet.delete(oldDoc.key);
newMutatedKeys = newMutatedKeys.delete(oldDoc.key);
changeSet.track({ type: 1 /* Removed */, doc: oldDoc });
}
}
return {
documentSet: newDocumentSet,
changeSet: changeSet,
needsRefill: needsRefill,
mutatedKeys: newMutatedKeys
};
};
View.prototype.shouldWaitForSyncedDocument = function (oldDoc, newDoc) {
// We suppress the initial change event for documents that were modified as
// part of a write acknowledgment (e.g. when the value of a server transform
// is applied) as Watch will send us the same document again.
// By suppressing the event, we only raise two user visible events (one with
// `hasPendingWrites` and the final state of the document) instead of three
// (one with `hasPendingWrites`, the modified document with
// `hasPendingWrites` and the final state of the document).
return (oldDoc.hasLocalMutations &&
newDoc.hasCommittedMutations &&
!newDoc.hasLocalMutations);
};
/**
* Updates the view with the given ViewDocumentChanges and optionally updates
* limbo docs and sync state from the provided target change.
* @param docChanges - The set of changes to make to the view's docs.
* @param updateLimboDocuments - Whether to update limbo documents based on
* this change.
* @param targetChange - A target change to apply for computing limbo docs and
* sync state.
* @returns A new ViewChange with the given docs, changes, and sync state.
*/
// PORTING NOTE: The iOS/Android clients always compute limbo document changes.
View.prototype.applyChanges = function (docChanges, updateLimboDocuments, targetChange) {
var _this = this;
var oldDocs = this.documentSet;
this.documentSet = docChanges.documentSet;
this.mutatedKeys = docChanges.mutatedKeys;
// Sort changes based on type and query comparator
var changes = docChanges.changeSet.getChanges();
changes.sort(function (c1, c2) {
return (compareChangeType(c1.type, c2.type) ||
_this.docComparator(c1.doc, c2.doc));
});
this.applyTargetChange(targetChange);
var limboChanges = updateLimboDocuments
? this.updateLimboDocuments()
: [];
var synced = this.limboDocuments.size === 0 && this.current;
var newSyncState = synced ? 1 /* Synced */ : 0 /* Local */;
var syncStateChanged = newSyncState !== this.syncState;
this.syncState = newSyncState;
if (changes.length === 0 && !syncStateChanged) {
// no changes
return { limboChanges: limboChanges };
}
else {
var snap = new ViewSnapshot(this.query, docChanges.documentSet, oldDocs, changes, docChanges.mutatedKeys, newSyncState === 0 /* Local */, syncStateChanged,
/* excludesMetadataChanges= */ false);
return {
snapshot: snap,
limboChanges: limboChanges
};
}
};
/**
* Applies an OnlineState change to the view, potentially generating a
* ViewChange if the view's syncState changes as a result.
*/
View.prototype.applyOnlineStateChange = function (onlineState) {
if (this.current && onlineState === "Offline" /* Offline */) {
// If we're offline, set `current` to false and then call applyChanges()
// to refresh our syncState and generate a ViewChange as appropriate. We
// are guaranteed to get a new TargetChange that sets `current` back to
// true once the client is back online.
this.current = false;
return this.applyChanges({
documentSet: this.documentSet,
changeSet: new DocumentChangeSet(),
mutatedKeys: this.mutatedKeys,
needsRefill: false
},
/* updateLimboDocuments= */ false);
}
else {
// No effect, just return a no-op ViewChange.
return { limboChanges: [] };
}
};
/**
* Returns whether the doc for the given key should be in limbo.
*/
View.prototype.shouldBeInLimbo = function (key) {
// If the remote end says it's part of this query, it's not in limbo.
if (this._syncedDocuments.has(key)) {
return false;
}
// The local store doesn't think it's a result, so it shouldn't be in limbo.
if (!this.documentSet.has(key)) {
return false;
}
// If there are local changes to the doc, they might explain why the server
// doesn't know that it's part of the query. So don't put it in limbo.
// TODO(klimt): Ideally, we would only consider changes that might actually
// affect this specific query.
if (this.documentSet.get(key).hasLocalMutations) {
return false;
}
// Everything else is in limbo.
return true;
};
/**
* Updates syncedDocuments, current, and limbo docs based on the given change.
* Returns the list of changes to the set of docs that are in limbo.
*/
View.prototype.applyTargetChange = function (targetChange) {
var _this = this;
if (targetChange) {
targetChange.addedDocuments.forEach(function (key) { return (_this._syncedDocuments = _this._syncedDocuments.add(key)); });
targetChange.modifiedDocuments.forEach(function (key) {
// Modified documents were already added to `_syncedDocuments` when they
// first appeared in the target, so no bookkeeping is needed here.
});
targetChange.removedDocuments.forEach(function (key) { return (_this._syncedDocuments = _this._syncedDocuments.delete(key)); });
this.current = targetChange.current;
}
};
View.prototype.updateLimboDocuments = function () {
var _this = this;
// We can only determine limbo documents when we're in-sync with the server.
if (!this.current) {
return [];
}
// TODO(klimt): Do this incrementally so that it's not quadratic when
// updating many documents.
var oldLimboDocuments = this.limboDocuments;
this.limboDocuments = documentKeySet();
this.documentSet.forEach(function (doc) {
if (_this.shouldBeInLimbo(doc.key)) {
_this.limboDocuments = _this.limboDocuments.add(doc.key);
}
});
// Diff the new limbo docs with the old limbo docs.
var changes = [];
oldLimboDocuments.forEach(function (key) {
if (!_this.limboDocuments.has(key)) {
changes.push(new RemovedLimboDocument(key));
}
});
this.limboDocuments.forEach(function (key) {
if (!oldLimboDocuments.has(key)) {
changes.push(new AddedLimboDocument(key));
}
});
return changes;
};
/**
* Update the in-memory state of the current view with the state read from
* persistence.
*
* We update the query view whenever a client's primary status changes:
* - When a client transitions from primary to secondary, it can miss
* LocalStorage updates and its query views may temporarily not be
* synchronized with the state on disk.
* - For secondary to primary transitions, the client needs to update the list
* of `syncedDocuments` since secondary clients update their query views
* based purely on synthesized RemoteEvents.
*
* @param queryResult.documents - The documents that match the query according
* to the LocalStore.
* @param queryResult.remoteKeys - The keys of the documents that match the
* query according to the backend.
*
* @returns The ViewChange that resulted from this synchronization.
*/
// PORTING NOTE: Multi-tab only.
View.prototype.synchronizeWithPersistedState = function (queryResult) {
this._syncedDocuments = queryResult.remoteKeys;
this.limboDocuments = documentKeySet();
var docChanges = this.computeDocChanges(queryResult.documents);
return this.applyChanges(docChanges, /*updateLimboDocuments=*/ true);
};
/**
* Returns a view snapshot as if this query was just listened to. Contains
* a document add for every existing document and the `fromCache` and
* `hasPendingWrites` status of the already established view.
*/
// PORTING NOTE: Multi-tab only.
View.prototype.computeInitialSnapshot = function () {
return ViewSnapshot.fromInitialDocuments(this.query, this.documentSet, this.mutatedKeys, this.syncState === 0 /* Local */);
};
return View;
}());
function compareChangeType(c1, c2) {
var order = function (change) {
switch (change) {
case 0 /* Added */:
return 1;
case 2 /* Modified */:
return 2;
case 3 /* Metadata */:
// A metadata change is converted to a modified change at the public
// API layer. Since changes are sorted by change type and then by
// document key, metadata and modified changes must sort equivalently.
return 2;
case 1 /* Removed */:
return 0;
default:
return fail();
}
};
return order(c1) - order(c2);
}
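// Example of the resulting order (illustrative only): with the enum values
// 0 = Added, 1 = Removed, 2 = Modified, 3 = Metadata, a stable sort with this
// comparator puts removals first, then additions, then modifications, and
// orders metadata changes the same as modifications.
//
//   [2, 0, 3, 1].sort(compareChangeType); // -> [1, 0, 2, 3]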
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$3 = 'SyncEngine';
/**
* QueryView contains all of the data that SyncEngine needs to keep track of for
* a particular query.
*/
var QueryView = /** @class */ (function () {
function QueryView(
/**
* The query itself.
*/
query,
/**
* The target number created by the client that is used in the watch
* stream to identify this query.
*/
targetId,
/**
* The view is responsible for computing the final merged truth of what
* docs are in the query. It gets notified of local and remote changes,
* and applies the query filters and limits to determine the most correct
* possible results.
*/
view) {
this.query = query;
this.targetId = targetId;
this.view = view;
}
return QueryView;
}());
/** Tracks a limbo resolution. */
var LimboResolution = /** @class */ (function () {
function LimboResolution(key) {
this.key = key;
/**
* Set to true once we've received a document. This is used in
* getRemoteKeysForTarget() and ultimately used by WatchChangeAggregator to
* decide whether it needs to manufacture a delete event for the target once
* the target is CURRENT.
*/
this.receivedDocument = false;
}
return LimboResolution;
}());
/**
* An implementation of `SyncEngine` coordinating with other parts of the SDK.
*
* The parts of SyncEngine that act as a callback to RemoteStore need to be
* registered individually. This is done in `syncEngineWrite()` and
* `syncEngineListen()` (as well as `applyPrimaryState()`) as these methods
* serve as entry points to RemoteStore's functionality.
*
* Note: some fields defined in this class have a public access level, but
* since the class is not exported they are only accessible from this module.
* This is useful to implement optional features (like bundles) in free
* functions, such that they are tree-shakeable.
*/
var SyncEngineImpl = /** @class */ (function () {
function SyncEngineImpl(localStore, remoteStore, eventManager,
// PORTING NOTE: Manages state synchronization in multi-tab environments.
sharedClientState, currentUser, maxConcurrentLimboResolutions) {
this.localStore = localStore;
this.remoteStore = remoteStore;
this.eventManager = eventManager;
this.sharedClientState = sharedClientState;
this.currentUser = currentUser;
this.maxConcurrentLimboResolutions = maxConcurrentLimboResolutions;
this.syncEngineListener = {};
this.queryViewsByQuery = new ObjectMap(function (q) { return canonifyQuery(q); }, queryEquals);
this.queriesByTarget = new Map();
/**
* The keys of documents that are in limbo for which we haven't yet started a
* limbo resolution query. The strings in this set are the result of calling
* `key.path.canonicalString()` where `key` is a `DocumentKey` object.
*
* The `Set` type was chosen because it provides efficient lookup and removal
* of arbitrary elements and it also maintains insertion order, providing the
* desired queue-like FIFO semantics.
*/
this.enqueuedLimboResolutions = new Set();
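// A minimal illustration of the queue-like behavior relied on here: a JS
// `Set` iterates in insertion order, so the oldest enqueued key is always
// handed out first (see `pumpEnqueuedLimboResolutions()` below).
//
//   var queue = new Set();
//   queue.add('a'); queue.add('b');
//   queue.values().next().value; // 'a'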
/**
* Keeps track of the target ID for each document that is in limbo with an
* active target.
*/
this.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
/**
* Keeps track of the information about an active limbo resolution for each
* active target ID that was started for the purpose of limbo resolution.
*/
this.activeLimboResolutionsByTarget = new Map();
this.limboDocumentRefs = new ReferenceSet();
/** Stores user completion handlers, indexed by User and BatchId. */
this.mutationUserCallbacks = {};
/** Stores user callbacks waiting for all pending writes to be acknowledged. */
this.pendingWritesCallbacks = new Map();
this.limboTargetIdGenerator = TargetIdGenerator.forSyncEngine();
this.onlineState = "Unknown" /* Unknown */;
// The primary state is set to `true` or `false` immediately after Firestore
// startup. In the interim, a client should only be considered primary if
// `isPrimary` is true.
this._isPrimaryClient = undefined;
}
Object.defineProperty(SyncEngineImpl.prototype, "isPrimaryClient", {
get: function () {
return this._isPrimaryClient === true;
},
enumerable: false,
configurable: true
});
return SyncEngineImpl;
}());
function newSyncEngine(localStore, remoteStore, eventManager,
// PORTING NOTE: Manages state synchronization in multi-tab environments.
sharedClientState, currentUser, maxConcurrentLimboResolutions, isPrimary) {
var syncEngine = new SyncEngineImpl(localStore, remoteStore, eventManager, sharedClientState, currentUser, maxConcurrentLimboResolutions);
if (isPrimary) {
syncEngine._isPrimaryClient = true;
}
return syncEngine;
}
/**
* Initiates a new listen and resolves the returned promise once the listen has
* been enqueued to the server. All subsequent view snapshots or errors are
* sent to the subscribed handlers. Returns the initial snapshot.
*/
function syncEngineListen(syncEngine, query) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, targetId, viewSnapshot, queryView, targetData, status_1;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = ensureWatchCallbacks(syncEngine);
queryView = syncEngineImpl.queryViewsByQuery.get(query);
if (!queryView) return [3 /*break*/, 1];
// PORTING NOTE: With Multi-Tab Web, it is possible that a query view
// already exists when EventManager calls us for the first time. This
// happens when the primary tab is already listening to this query on
// behalf of another tab and the user of the primary tab also starts listening
// to the query. EventManager will not have an assigned target ID in this
// case and calls `listen` to obtain this ID.
targetId = queryView.targetId;
syncEngineImpl.sharedClientState.addLocalQueryTarget(targetId);
viewSnapshot = queryView.view.computeInitialSnapshot();
return [3 /*break*/, 4];
case 1: return [4 /*yield*/, localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(query))];
case 2:
targetData = _d.sent();
status_1 = syncEngineImpl.sharedClientState.addLocalQueryTarget(targetData.targetId);
targetId = targetData.targetId;
return [4 /*yield*/, initializeViewAndComputeSnapshot(syncEngineImpl, query, targetId, status_1 === 'current')];
case 3:
viewSnapshot = _d.sent();
if (syncEngineImpl.isPrimaryClient) {
remoteStoreListen(syncEngineImpl.remoteStore, targetData);
}
_d.label = 4;
case 4: return [2 /*return*/, viewSnapshot];
}
});
});
}
/**
* Registers a view for a previously unknown query and computes its initial
* snapshot.
*/
function initializeViewAndComputeSnapshot(syncEngineImpl, query, targetId, current) {
return tslib.__awaiter(this, void 0, void 0, function () {
var queryResult, view, viewDocChanges, synthesizedTargetChange, viewChange, data;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
// PORTING NOTE: On Web only, we inject the code that registers new Limbo
// targets based on view changes. This allows us to only depend on Limbo
// changes when user code includes queries.
syncEngineImpl.applyDocChanges = function (queryView, changes, remoteEvent) { return applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent); };
return [4 /*yield*/, localStoreExecuteQuery(syncEngineImpl.localStore, query,
/* usePreviousResults= */ true)];
case 1:
queryResult = _d.sent();
view = new View(query, queryResult.remoteKeys);
viewDocChanges = view.computeDocChanges(queryResult.documents);
synthesizedTargetChange = TargetChange.createSynthesizedTargetChangeForCurrentChange(targetId, current && syncEngineImpl.onlineState !== "Offline" /* Offline */);
viewChange = view.applyChanges(viewDocChanges,
/* updateLimboDocuments= */ syncEngineImpl.isPrimaryClient, synthesizedTargetChange);
updateTrackedLimbos(syncEngineImpl, targetId, viewChange.limboChanges);
data = new QueryView(query, targetId, view);
syncEngineImpl.queryViewsByQuery.set(query, data);
if (syncEngineImpl.queriesByTarget.has(targetId)) {
syncEngineImpl.queriesByTarget.get(targetId).push(query);
}
else {
syncEngineImpl.queriesByTarget.set(targetId, [query]);
}
return [2 /*return*/, viewChange.snapshot];
}
});
});
}
/** Stops listening to the query. */
function syncEngineUnlisten(syncEngine, query) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, queryView, queries, targetRemainsActive;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
queryView = syncEngineImpl.queryViewsByQuery.get(query);
queries = syncEngineImpl.queriesByTarget.get(queryView.targetId);
if (queries.length > 1) {
syncEngineImpl.queriesByTarget.set(queryView.targetId, queries.filter(function (q) { return !queryEquals(q, query); }));
syncEngineImpl.queryViewsByQuery.delete(query);
return [2 /*return*/];
}
if (!syncEngineImpl.isPrimaryClient) return [3 /*break*/, 3];
// We need to remove the local query target first to allow us to verify
// whether any other client is still interested in this target.
syncEngineImpl.sharedClientState.removeLocalQueryTarget(queryView.targetId);
targetRemainsActive = syncEngineImpl.sharedClientState.isActiveQueryTarget(queryView.targetId);
if (!!targetRemainsActive) return [3 /*break*/, 2];
return [4 /*yield*/, localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
/*keepPersistedTargetData=*/ false)
.then(function () {
syncEngineImpl.sharedClientState.clearQueryState(queryView.targetId);
remoteStoreUnlisten(syncEngineImpl.remoteStore, queryView.targetId);
removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
})
.catch(ignoreIfPrimaryLeaseLoss)];
case 1:
_d.sent();
_d.label = 2;
case 2: return [3 /*break*/, 5];
case 3:
removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
return [4 /*yield*/, localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
/*keepPersistedTargetData=*/ true)];
case 4:
_d.sent();
_d.label = 5;
case 5: return [2 /*return*/];
}
});
});
}
/**
* Initiates the write of a local mutation batch, which involves adding the
* writes to the mutation queue, notifying the remote store about new
* mutations, and raising events for any changes this write caused.
*
* The promise returned by this call is resolved when the above steps
* have completed, *not* when the write was acked by the backend. The
* userCallback is resolved once the write was acked/rejected by the
* backend (or failed locally for any other reason).
*/
function syncEngineWrite(syncEngine, batch, userCallback) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, result, e_8, error;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = syncEngineEnsureWriteCallbacks(syncEngine);
_d.label = 1;
case 1:
_d.trys.push([1, 5, , 6]);
return [4 /*yield*/, localStoreWriteLocally(syncEngineImpl.localStore, batch)];
case 2:
result = _d.sent();
syncEngineImpl.sharedClientState.addPendingMutation(result.batchId);
addMutationCallback(syncEngineImpl, result.batchId, userCallback);
return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, result.changes)];
case 3:
_d.sent();
return [4 /*yield*/, fillWritePipeline(syncEngineImpl.remoteStore)];
case 4:
_d.sent();
return [3 /*break*/, 6];
case 5:
e_8 = _d.sent();
error = wrapInUserErrorIfRecoverable(e_8, "Failed to persist write");
userCallback.reject(error);
return [3 /*break*/, 6];
case 6: return [2 /*return*/];
}
});
});
}
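// Illustrative sketch of the two resolution points described above, using the
// public v8 API (`db` is a placeholder): a snapshot listener observes the
// latency-compensated result before the promise returned by `set()` resolves,
// because that promise is wired to the userCallback (backend acknowledgment).
//
//   var ref = db.collection('rooms').doc('a');
//   ref.onSnapshot(function (snap) {
//     console.log('hasPendingWrites:', snap.metadata.hasPendingWrites);
//   });
//   ref.set({ name: 'Eros' }).then(function () {
//     console.log('write acknowledged by the backend');
//   });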
/**
* Applies one remote event to the sync engine, notifying any views of the
* changes, and releasing any pending mutation batches that would become
* visible because of the snapshot version the remote event contains.
*/
function syncEngineApplyRemoteEvent(syncEngine, remoteEvent) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, changes, error_2;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
_d.label = 1;
case 1:
_d.trys.push([1, 4, , 6]);
return [4 /*yield*/, localStoreApplyRemoteEventToLocalCache(syncEngineImpl.localStore, remoteEvent)];
case 2:
changes = _d.sent();
// Update `receivedDocument` as appropriate for any limbo targets.
remoteEvent.targetChanges.forEach(function (targetChange, targetId) {
var limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
if (limboResolution) {
// Since this is a limbo resolution lookup, it's for a single document
// and it could be added, modified, or removed, but not a combination.
hardAssert(targetChange.addedDocuments.size +
targetChange.modifiedDocuments.size +
targetChange.removedDocuments.size <=
1);
if (targetChange.addedDocuments.size > 0) {
limboResolution.receivedDocument = true;
}
else if (targetChange.modifiedDocuments.size > 0) {
hardAssert(limboResolution.receivedDocument);
}
else if (targetChange.removedDocuments.size > 0) {
hardAssert(limboResolution.receivedDocument);
limboResolution.receivedDocument = false;
}
else {
// The target change contained no document updates for this limbo
// target (e.g. a current/resume-token-only change), so there is
// nothing to record.
}
}
});
return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, remoteEvent)];
case 3:
_d.sent();
return [3 /*break*/, 6];
case 4:
error_2 = _d.sent();
return [4 /*yield*/, ignoreIfPrimaryLeaseLoss(error_2)];
case 5:
_d.sent();
return [3 /*break*/, 6];
case 6: return [2 /*return*/];
}
});
});
}
/**
* Applies an OnlineState change to the sync engine and notifies any views of
* the change.
*/
function syncEngineApplyOnlineStateChange(syncEngine, onlineState, source) {
var syncEngineImpl = debugCast(syncEngine);
// If we are the secondary client, we explicitly ignore the remote store's
// online state (the local client may go offline, even though the primary
// tab remains online) and only apply the primary tab's online state from
// SharedClientState.
if ((syncEngineImpl.isPrimaryClient &&
source === 0 /* RemoteStore */) ||
(!syncEngineImpl.isPrimaryClient &&
source === 1 /* SharedClientState */)) {
var newViewSnapshots_1 = [];
syncEngineImpl.queryViewsByQuery.forEach(function (query, queryView) {
var viewChange = queryView.view.applyOnlineStateChange(onlineState);
if (viewChange.snapshot) {
newViewSnapshots_1.push(viewChange.snapshot);
}
});
eventManagerOnOnlineStateChange(syncEngineImpl.eventManager, onlineState);
if (newViewSnapshots_1.length) {
syncEngineImpl.syncEngineListener.onWatchChange(newViewSnapshots_1);
}
syncEngineImpl.onlineState = onlineState;
if (syncEngineImpl.isPrimaryClient) {
syncEngineImpl.sharedClientState.setOnlineState(onlineState);
}
}
}
/**
* Rejects the listen for the given targetID. This can be triggered by the
* backend for any active target.
*
* @param syncEngine - The sync engine implementation.
* @param targetId - The target ID corresponding to one previously initiated by
* the user as part of the TargetData passed to listen() on RemoteStore.
* @param err - A description of the condition that has forced the rejection.
* Nearly always this will be an indication that the user is no longer
* authorized to see the data matching the target.
*/
function syncEngineRejectListen(syncEngine, targetId, err) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, limboResolution, limboKey, documentUpdates, resolvedLimboDocuments, event_2;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
// PORTING NOTE: Multi-tab only.
syncEngineImpl.sharedClientState.updateQueryState(targetId, 'rejected', err);
limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
limboKey = limboResolution && limboResolution.key;
if (!limboKey) return [3 /*break*/, 2];
documentUpdates = new SortedMap(DocumentKey.comparator);
documentUpdates = documentUpdates.insert(limboKey, MutableDocument.newNoDocument(limboKey, SnapshotVersion.min()));
resolvedLimboDocuments = documentKeySet().add(limboKey);
event_2 = new RemoteEvent(SnapshotVersion.min(),
/* targetChanges= */ new Map(),
/* targetMismatches= */ new SortedSet(primitiveComparator), documentUpdates, resolvedLimboDocuments);
return [4 /*yield*/, syncEngineApplyRemoteEvent(syncEngineImpl, event_2)];
case 1:
_d.sent();
// Since this query failed, we won't want to manually unlisten to it.
// We only remove it from bookkeeping after we successfully applied the
// RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
// this query when the RemoteStore restarts the Watch stream, which should
// re-trigger the target failure.
syncEngineImpl.activeLimboTargetsByKey = syncEngineImpl.activeLimboTargetsByKey.remove(limboKey);
syncEngineImpl.activeLimboResolutionsByTarget.delete(targetId);
pumpEnqueuedLimboResolutions(syncEngineImpl);
return [3 /*break*/, 4];
case 2: return [4 /*yield*/, localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/* keepPersistedTargetData */ false)
.then(function () { return removeAndCleanupTarget(syncEngineImpl, targetId, err); })
.catch(ignoreIfPrimaryLeaseLoss)];
case 3:
_d.sent();
_d.label = 4;
case 4: return [2 /*return*/];
}
});
});
}
function syncEngineApplySuccessfulWrite(syncEngine, mutationBatchResult) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, batchId, changes, error_3;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
batchId = mutationBatchResult.batch.batchId;
_d.label = 1;
case 1:
_d.trys.push([1, 4, , 6]);
return [4 /*yield*/, localStoreAcknowledgeBatch(syncEngineImpl.localStore, mutationBatchResult)];
case 2:
changes = _d.sent();
// The local store may or may not be able to apply the write result and
// raise events immediately (depending on whether the watcher is caught
// up), so we raise user callbacks first so that they consistently happen
// before listen events.
processUserCallback(syncEngineImpl, batchId, /*error=*/ null);
triggerPendingWritesCallbacks(syncEngineImpl, batchId);
syncEngineImpl.sharedClientState.updateMutationState(batchId, 'acknowledged');
return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes)];
case 3:
_d.sent();
return [3 /*break*/, 6];
case 4:
error_3 = _d.sent();
return [4 /*yield*/, ignoreIfPrimaryLeaseLoss(error_3)];
case 5:
_d.sent();
return [3 /*break*/, 6];
case 6: return [2 /*return*/];
}
});
});
}
function syncEngineRejectFailedWrite(syncEngine, batchId, error) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, changes, error_4;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
_d.label = 1;
case 1:
_d.trys.push([1, 4, , 6]);
return [4 /*yield*/, localStoreRejectBatch(syncEngineImpl.localStore, batchId)];
case 2:
changes = _d.sent();
// The local store may or may not be able to apply the write result and
// raise events immediately (depending on whether the watcher is caught up),
// so we raise user callbacks first so that they consistently happen before
// listen events.
processUserCallback(syncEngineImpl, batchId, error);
triggerPendingWritesCallbacks(syncEngineImpl, batchId);
syncEngineImpl.sharedClientState.updateMutationState(batchId, 'rejected', error);
return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes)];
case 3:
_d.sent();
return [3 /*break*/, 6];
case 4:
error_4 = _d.sent();
return [4 /*yield*/, ignoreIfPrimaryLeaseLoss(error_4)];
case 5:
_d.sent();
return [3 /*break*/, 6];
case 6: return [2 /*return*/];
}
});
});
}
/**
* Registers a user callback that resolves when all pending mutations at the
* moment of calling are acknowledged.
*/
function syncEngineRegisterPendingWritesCallback(syncEngine, callback) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, highestBatchId, callbacks, e_9, firestoreError;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
if (!canUseNetwork(syncEngineImpl.remoteStore)) {
logDebug(LOG_TAG$3, 'The network is disabled. The task returned by ' +
"'awaitPendingWrites()' will not complete until the network is enabled.");
}
_d.label = 1;
case 1:
_d.trys.push([1, 3, , 4]);
return [4 /*yield*/, localStoreGetHighestUnacknowledgedBatchId(syncEngineImpl.localStore)];
case 2:
highestBatchId = _d.sent();
if (highestBatchId === BATCHID_UNKNOWN) {
// Trigger the callback right away if there are no pending writes at the moment.
callback.resolve();
return [2 /*return*/];
}
callbacks = syncEngineImpl.pendingWritesCallbacks.get(highestBatchId) || [];
callbacks.push(callback);
syncEngineImpl.pendingWritesCallbacks.set(highestBatchId, callbacks);
return [3 /*break*/, 4];
case 3:
e_9 = _d.sent();
firestoreError = wrapInUserErrorIfRecoverable(e_9, 'Initialization of waitForPendingWrites() operation failed');
callback.reject(firestoreError);
return [3 /*break*/, 4];
case 4: return [2 /*return*/];
}
});
});
}
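// Usage sketch for the public API backed by this function (v8 namespaced API;
// `db` is a placeholder for an initialized Firestore instance):
//
//   db.waitForPendingWrites().then(function () {
//     console.log('All writes issued so far have been acknowledged.');
//   });
//
// As noted below, outstanding callbacks are rejected if the current user
// changes before the writes are acknowledged.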
/**
* Triggers the callbacks that are waiting for this batch ID to get acknowledged by the server,
* if there are any.
*/
function triggerPendingWritesCallbacks(syncEngineImpl, batchId) {
(syncEngineImpl.pendingWritesCallbacks.get(batchId) || []).forEach(function (callback) {
callback.resolve();
});
syncEngineImpl.pendingWritesCallbacks.delete(batchId);
}
/** Reject all outstanding callbacks waiting for pending writes to complete. */
function rejectOutstandingPendingWritesCallbacks(syncEngineImpl, errorMessage) {
syncEngineImpl.pendingWritesCallbacks.forEach(function (callbacks) {
callbacks.forEach(function (callback) {
callback.reject(new FirestoreError(Code.CANCELLED, errorMessage));
});
});
syncEngineImpl.pendingWritesCallbacks.clear();
}
function addMutationCallback(syncEngineImpl, batchId, callback) {
var newCallbacks = syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()];
if (!newCallbacks) {
newCallbacks = new SortedMap(primitiveComparator);
}
newCallbacks = newCallbacks.insert(batchId, callback);
syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()] = newCallbacks;
}
/**
* Resolves or rejects the user callback for the given batch and then discards
* it.
*/
function processUserCallback(syncEngine, batchId, error) {
var syncEngineImpl = debugCast(syncEngine);
var newCallbacks = syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()];
// NOTE: Mutations restored from persistence won't have callbacks, so it's
// okay for there to be no callback for this ID.
if (newCallbacks) {
var callback = newCallbacks.get(batchId);
if (callback) {
if (error) {
callback.reject(error);
}
else {
callback.resolve();
}
newCallbacks = newCallbacks.remove(batchId);
}
syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()] = newCallbacks;
}
}
function removeAndCleanupTarget(syncEngineImpl, targetId, error) {
if (error === void 0) { error = null; }
syncEngineImpl.sharedClientState.removeLocalQueryTarget(targetId);
for (var _i = 0, _d = syncEngineImpl.queriesByTarget.get(targetId); _i < _d.length; _i++) {
var query_2 = _d[_i];
syncEngineImpl.queryViewsByQuery.delete(query_2);
if (error) {
syncEngineImpl.syncEngineListener.onWatchError(query_2, error);
}
}
syncEngineImpl.queriesByTarget.delete(targetId);
if (syncEngineImpl.isPrimaryClient) {
var limboKeys = syncEngineImpl.limboDocumentRefs.removeReferencesForId(targetId);
limboKeys.forEach(function (limboKey) {
var isReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboKey);
if (!isReferenced) {
// We removed the last reference for this key
removeLimboTarget(syncEngineImpl, limboKey);
}
});
}
}
function removeLimboTarget(syncEngineImpl, key) {
syncEngineImpl.enqueuedLimboResolutions.delete(key.path.canonicalString());
// It's possible that the target already got removed because the query failed. In that case,
// the key won't exist in `activeLimboTargetsByKey`. Only do the cleanup if we still have the target.
var limboTargetId = syncEngineImpl.activeLimboTargetsByKey.get(key);
if (limboTargetId === null) {
// This target already got removed, because the query failed.
return;
}
remoteStoreUnlisten(syncEngineImpl.remoteStore, limboTargetId);
syncEngineImpl.activeLimboTargetsByKey = syncEngineImpl.activeLimboTargetsByKey.remove(key);
syncEngineImpl.activeLimboResolutionsByTarget.delete(limboTargetId);
pumpEnqueuedLimboResolutions(syncEngineImpl);
}
function updateTrackedLimbos(syncEngineImpl, targetId, limboChanges) {
for (var _i = 0, limboChanges_1 = limboChanges; _i < limboChanges_1.length; _i++) {
var limboChange = limboChanges_1[_i];
if (limboChange instanceof AddedLimboDocument) {
syncEngineImpl.limboDocumentRefs.addReference(limboChange.key, targetId);
trackLimboChange(syncEngineImpl, limboChange);
}
else if (limboChange instanceof RemovedLimboDocument) {
logDebug(LOG_TAG$3, 'Document no longer in limbo: ' + limboChange.key);
syncEngineImpl.limboDocumentRefs.removeReference(limboChange.key, targetId);
var isReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboChange.key);
if (!isReferenced) {
// We removed the last reference for this key
removeLimboTarget(syncEngineImpl, limboChange.key);
}
}
else {
fail();
}
}
}
function trackLimboChange(syncEngineImpl, limboChange) {
var key = limboChange.key;
var keyString = key.path.canonicalString();
if (!syncEngineImpl.activeLimboTargetsByKey.get(key) &&
!syncEngineImpl.enqueuedLimboResolutions.has(keyString)) {
logDebug(LOG_TAG$3, 'New document in limbo: ' + key);
syncEngineImpl.enqueuedLimboResolutions.add(keyString);
pumpEnqueuedLimboResolutions(syncEngineImpl);
}
}
/**
* Starts listens for documents in limbo that are enqueued for resolution,
* subject to a maximum number of concurrent resolutions.
*
* Without bounding the number of concurrent resolutions, the server can fail
* with "resource exhausted" errors which can lead to pathological client
* behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
*/
function pumpEnqueuedLimboResolutions(syncEngineImpl) {
while (syncEngineImpl.enqueuedLimboResolutions.size > 0 &&
syncEngineImpl.activeLimboTargetsByKey.size <
syncEngineImpl.maxConcurrentLimboResolutions) {
var keyString = syncEngineImpl.enqueuedLimboResolutions.values().next()
.value;
syncEngineImpl.enqueuedLimboResolutions.delete(keyString);
var key = new DocumentKey(ResourcePath.fromString(keyString));
var limboTargetId = syncEngineImpl.limboTargetIdGenerator.next();
syncEngineImpl.activeLimboResolutionsByTarget.set(limboTargetId, new LimboResolution(key));
syncEngineImpl.activeLimboTargetsByKey = syncEngineImpl.activeLimboTargetsByKey.insert(key, limboTargetId);
remoteStoreListen(syncEngineImpl.remoteStore, new TargetData(queryToTarget(newQueryForPath(key.path)), limboTargetId, 2 /* LimboResolution */, ListenSequence.INVALID));
}
}
function syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, changes, remoteEvent) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, newSnaps, docChangesInAllViews, queriesProcessed;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
newSnaps = [];
docChangesInAllViews = [];
queriesProcessed = [];
if (syncEngineImpl.queryViewsByQuery.isEmpty()) {
// Return early since `onWatchChange()` might not have been assigned yet.
return [2 /*return*/];
}
syncEngineImpl.queryViewsByQuery.forEach(function (_, queryView) {
queriesProcessed.push(syncEngineImpl
.applyDocChanges(queryView, changes, remoteEvent)
.then(function (viewSnapshot) {
if (viewSnapshot) {
if (syncEngineImpl.isPrimaryClient) {
syncEngineImpl.sharedClientState.updateQueryState(queryView.targetId, viewSnapshot.fromCache ? 'not-current' : 'current');
}
newSnaps.push(viewSnapshot);
var docChanges = LocalViewChanges.fromSnapshot(queryView.targetId, viewSnapshot);
docChangesInAllViews.push(docChanges);
}
}));
});
return [4 /*yield*/, Promise.all(queriesProcessed)];
case 1:
_d.sent();
syncEngineImpl.syncEngineListener.onWatchChange(newSnaps);
return [4 /*yield*/, localStoreNotifyLocalViewChanges(syncEngineImpl.localStore, docChangesInAllViews)];
case 2:
_d.sent();
return [2 /*return*/];
}
});
});
}
function applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent) {
return tslib.__awaiter(this, void 0, void 0, function () {
var viewDocChanges, targetChange, viewChange;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
viewDocChanges = queryView.view.computeDocChanges(changes);
if (!viewDocChanges.needsRefill) return [3 /*break*/, 2];
return [4 /*yield*/, localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
/* usePreviousResults= */ false).then(function (_d) {
var documents = _d.documents;
return queryView.view.computeDocChanges(documents, viewDocChanges);
})];
case 1:
// The query has a limit and some docs were removed, so we need
// to re-run the query against the local store to make sure we
// didn't lose any good docs that had been past the limit.
viewDocChanges = _d.sent();
_d.label = 2;
case 2:
targetChange = remoteEvent && remoteEvent.targetChanges.get(queryView.targetId);
viewChange = queryView.view.applyChanges(viewDocChanges,
/* updateLimboDocuments= */ syncEngineImpl.isPrimaryClient, targetChange);
updateTrackedLimbos(syncEngineImpl, queryView.targetId, viewChange.limboChanges);
return [2 /*return*/, viewChange.snapshot];
}
});
});
}
function syncEngineHandleCredentialChange(syncEngine, user) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, userChanged, result;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
userChanged = !syncEngineImpl.currentUser.isEqual(user);
if (!userChanged) return [3 /*break*/, 3];
logDebug(LOG_TAG$3, 'User change. New user:', user.toKey());
return [4 /*yield*/, localStoreHandleUserChange(syncEngineImpl.localStore, user)];
case 1:
result = _d.sent();
syncEngineImpl.currentUser = user;
// Fails tasks waiting for pending writes requested by previous user.
rejectOutstandingPendingWritesCallbacks(syncEngineImpl, "'waitForPendingWrites' promise is rejected due to a user change.");
// TODO(b/114226417): Consider calling this only in the primary tab.
syncEngineImpl.sharedClientState.handleUserChange(user, result.removedBatchIds, result.addedBatchIds);
return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, result.affectedDocuments)];
case 2:
_d.sent();
_d.label = 3;
case 3: return [2 /*return*/];
}
});
});
}
function syncEngineGetRemoteKeysForTarget(syncEngine, targetId) {
var syncEngineImpl = debugCast(syncEngine);
var limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
if (limboResolution && limboResolution.receivedDocument) {
return documentKeySet().add(limboResolution.key);
}
else {
var keySet = documentKeySet();
var queries = syncEngineImpl.queriesByTarget.get(targetId);
if (!queries) {
return keySet;
}
for (var _i = 0, queries_1 = queries; _i < queries_1.length; _i++) {
var query_3 = queries_1[_i];
var queryView = syncEngineImpl.queryViewsByQuery.get(query_3);
keySet = keySet.unionWith(queryView.view.syncedDocuments);
}
return keySet;
}
}
/**
* Reconcile the list of synced documents in an existing view with those
* from persistence.
*/
function synchronizeViewAndComputeSnapshot(syncEngine, queryView) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, queryResult, viewSnapshot;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
return [4 /*yield*/, localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
/* usePreviousResults= */ true)];
case 1:
queryResult = _d.sent();
viewSnapshot = queryView.view.synchronizeWithPersistedState(queryResult);
if (syncEngineImpl.isPrimaryClient) {
updateTrackedLimbos(syncEngineImpl, queryView.targetId, viewSnapshot.limboChanges);
}
return [2 /*return*/, viewSnapshot];
}
});
});
}
/**
* Retrieves newly changed documents from remote document cache and raises
* snapshots if needed.
*/
// PORTING NOTE: Multi-Tab only.
function syncEngineSynchronizeWithChangedDocuments(syncEngine) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl;
return tslib.__generator(this, function (_d) {
syncEngineImpl = debugCast(syncEngine);
return [2 /*return*/, localStoreGetNewDocumentChanges(syncEngineImpl.localStore).then(function (changes) { return syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes); })];
});
});
}
/** Applies a mutation state to an existing batch. */
// PORTING NOTE: Multi-Tab only.
function syncEngineApplyBatchState(syncEngine, batchId, batchState, error) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, documents;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
return [4 /*yield*/, localStoreLookupMutationDocuments(syncEngineImpl.localStore, batchId)];
case 1:
documents = _d.sent();
if (documents === null) {
// A throttled tab may not have seen the mutation before it was completed
// and removed from the mutation queue, in which case we won't have cached
// the affected documents. In this case we can safely ignore the update
// since that means we didn't apply the mutation locally at all (if we
// had, we would have cached the affected documents), and so we will just
// see any resulting document changes via normal remote document updates
// as applicable.
logDebug(LOG_TAG$3, 'Cannot apply mutation batch with id: ' + batchId);
return [2 /*return*/];
}
if (!(batchState === 'pending')) return [3 /*break*/, 3];
// If we are the primary client, we need to send this write to the
// backend. Secondary clients will ignore these writes since their remote
// connection is disabled.
return [4 /*yield*/, fillWritePipeline(syncEngineImpl.remoteStore)];
case 2:
// If we are the primary client, we need to send this write to the
// backend. Secondary clients will ignore these writes since their remote
// connection is disabled.
_d.sent();
return [3 /*break*/, 4];
case 3:
if (batchState === 'acknowledged' || batchState === 'rejected') {
// NOTE: Both these methods are no-ops for batches that originated from
// other clients.
processUserCallback(syncEngineImpl, batchId, error ? error : null);
triggerPendingWritesCallbacks(syncEngineImpl, batchId);
localStoreRemoveCachedMutationBatchMetadata(syncEngineImpl.localStore, batchId);
}
else {
fail();
}
_d.label = 4;
case 4: return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, documents)];
case 5:
_d.sent();
return [2 /*return*/];
}
});
});
}
/** Applies a primary-state change, transitioning this client between primary and secondary. */
// PORTING NOTE: Multi-Tab only.
function syncEngineApplyPrimaryState(syncEngine, isPrimary) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, activeTargets, activeQueries, _i, activeQueries_1, targetData, activeTargets_1, p_1;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
ensureWatchCallbacks(syncEngineImpl);
syncEngineEnsureWriteCallbacks(syncEngineImpl);
if (!(isPrimary === true && syncEngineImpl._isPrimaryClient !== true)) return [3 /*break*/, 3];
activeTargets = syncEngineImpl.sharedClientState.getAllActiveQueryTargets();
return [4 /*yield*/, synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets.toArray())];
case 1:
activeQueries = _d.sent();
syncEngineImpl._isPrimaryClient = true;
return [4 /*yield*/, remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, true)];
case 2:
_d.sent();
for (_i = 0, activeQueries_1 = activeQueries; _i < activeQueries_1.length; _i++) {
targetData = activeQueries_1[_i];
remoteStoreListen(syncEngineImpl.remoteStore, targetData);
}
return [3 /*break*/, 7];
case 3:
if (!(isPrimary === false && syncEngineImpl._isPrimaryClient !== false)) return [3 /*break*/, 7];
activeTargets_1 = [];
p_1 = Promise.resolve();
syncEngineImpl.queriesByTarget.forEach(function (_, targetId) {
if (syncEngineImpl.sharedClientState.isLocalQueryTarget(targetId)) {
activeTargets_1.push(targetId);
}
else {
p_1 = p_1.then(function () {
removeAndCleanupTarget(syncEngineImpl, targetId);
return localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/*keepPersistedTargetData=*/ true);
});
}
remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
});
return [4 /*yield*/, p_1];
case 4:
_d.sent();
return [4 /*yield*/, synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets_1)];
case 5:
_d.sent();
resetLimboDocuments(syncEngineImpl);
syncEngineImpl._isPrimaryClient = false;
return [4 /*yield*/, remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, false)];
case 6:
_d.sent();
_d.label = 7;
case 7: return [2 /*return*/];
}
});
});
}
// PORTING NOTE: Multi-Tab only.
function resetLimboDocuments(syncEngine) {
var syncEngineImpl = debugCast(syncEngine);
syncEngineImpl.activeLimboResolutionsByTarget.forEach(function (_, targetId) {
remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
});
syncEngineImpl.limboDocumentRefs.removeAllReferences();
syncEngineImpl.activeLimboResolutionsByTarget = new Map();
syncEngineImpl.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
}
/**
* Reconciles the query views of the provided query targets with the state from
* persistence. Raises snapshots for any changes that affect the local
* client and returns the updated state of all targets' query data.
*
* @param syncEngine - The sync engine implementation.
* @param targets - The list of targets with views that need to be recomputed.
* @param transitionToPrimary - `true` iff the tab transitions from a secondary
* tab to a primary tab.
*/
// PORTING NOTE: Multi-Tab only.
function synchronizeQueryViewsAndRaiseSnapshots(syncEngine, targets, transitionToPrimary) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, activeQueries, newViewSnapshots, _i, targets_1, targetId, targetData, queries, _d, queries_2, query_4, queryView, viewChange, target;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
activeQueries = [];
newViewSnapshots = [];
_i = 0, targets_1 = targets;
_e.label = 1;
case 1:
if (!(_i < targets_1.length)) return [3 /*break*/, 13];
targetId = targets_1[_i];
targetData = void 0;
queries = syncEngineImpl.queriesByTarget.get(targetId);
if (!(queries && queries.length !== 0)) return [3 /*break*/, 7];
return [4 /*yield*/, localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(queries[0]))];
case 2:
// For queries that have a local View, we fetch their current state
// from LocalStore (as the resume token and the snapshot version
// might have changed) and reconcile their views with the persisted
// state (the list of syncedDocuments may have gotten out of sync).
targetData = _e.sent();
_d = 0, queries_2 = queries;
_e.label = 3;
case 3:
if (!(_d < queries_2.length)) return [3 /*break*/, 6];
query_4 = queries_2[_d];
queryView = syncEngineImpl.queryViewsByQuery.get(query_4);
return [4 /*yield*/, synchronizeViewAndComputeSnapshot(syncEngineImpl, queryView)];
case 4:
viewChange = _e.sent();
if (viewChange.snapshot) {
newViewSnapshots.push(viewChange.snapshot);
}
_e.label = 5;
case 5:
_d++;
return [3 /*break*/, 3];
case 6: return [3 /*break*/, 11];
case 7: return [4 /*yield*/, localStoreGetCachedTarget(syncEngineImpl.localStore, targetId)];
case 8:
target = _e.sent();
return [4 /*yield*/, localStoreAllocateTarget(syncEngineImpl.localStore, target)];
case 9:
targetData = _e.sent();
return [4 /*yield*/, initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetId,
/*current=*/ false)];
case 10:
_e.sent();
_e.label = 11;
case 11:
activeQueries.push(targetData);
_e.label = 12;
case 12:
_i++;
return [3 /*break*/, 1];
case 13:
syncEngineImpl.syncEngineListener.onWatchChange(newViewSnapshots);
return [2 /*return*/, activeQueries];
}
});
});
}
/**
* Creates a `Query` object from the specified `Target`. There is no way to
* obtain the original `Query`, so we synthesize a `Query` from the `Target`
* object.
*
* The synthesized result might be different from the original `Query`, but
* since the synthesized `Query` should return the same results as the
* original one (only the presentation of results might differ), the potential
* difference will not cause issues.
*/
// PORTING NOTE: Multi-Tab only.
function synthesizeTargetToQuery(target) {
return newQuery(target.path, target.collectionGroup, target.orderBy, target.filters, target.limit, "F" /* First */, target.startAt, target.endAt);
}
/** Returns the IDs of the clients that are currently active. */
// PORTING NOTE: Multi-Tab only.
function syncEngineGetActiveClients(syncEngine) {
var syncEngineImpl = debugCast(syncEngine);
return localStoreGetActiveClients(syncEngineImpl.localStore);
}
/** Applies a query target change from a different tab. */
// PORTING NOTE: Multi-Tab only.
function syncEngineApplyTargetState(syncEngine, targetId, state, error) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, _d, changes, synthesizedRemoteEvent;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
syncEngineImpl = debugCast(syncEngine);
if (syncEngineImpl._isPrimaryClient) {
// If we receive a target state notification via WebStorage, we are
// either already secondary or another tab has taken the primary lease.
logDebug(LOG_TAG$3, 'Ignoring unexpected query state notification.');
return [2 /*return*/];
}
if (!syncEngineImpl.queriesByTarget.has(targetId)) return [3 /*break*/, 7];
_d = state;
switch (_d) {
case 'current': return [3 /*break*/, 1];
case 'not-current': return [3 /*break*/, 1];
case 'rejected': return [3 /*break*/, 4];
}
return [3 /*break*/, 6];
case 1: return [4 /*yield*/, localStoreGetNewDocumentChanges(syncEngineImpl.localStore)];
case 2:
changes = _e.sent();
synthesizedRemoteEvent = RemoteEvent.createSynthesizedRemoteEventForCurrentChange(targetId, state === 'current');
return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, synthesizedRemoteEvent)];
case 3:
_e.sent();
return [3 /*break*/, 7];
case 4: return [4 /*yield*/, localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/* keepPersistedTargetData */ true)];
case 5:
_e.sent();
removeAndCleanupTarget(syncEngineImpl, targetId, error);
return [3 /*break*/, 7];
case 6:
fail();
_e.label = 7;
case 7: return [2 /*return*/];
}
});
});
}
/** Adds or removes Watch targets for queries from different tabs. */
function syncEngineApplyActiveTargetsChange(syncEngine, added, removed) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngineImpl, _i, added_1, targetId, target, targetData, _loop_5, _d, removed_1, targetId;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
syncEngineImpl = ensureWatchCallbacks(syncEngine);
if (!syncEngineImpl._isPrimaryClient) {
return [2 /*return*/];
}
_i = 0, added_1 = added;
_e.label = 1;
case 1:
if (!(_i < added_1.length)) return [3 /*break*/, 6];
targetId = added_1[_i];
if (syncEngineImpl.queriesByTarget.has(targetId)) {
// A target might have been added in a previous attempt
logDebug(LOG_TAG$3, 'Adding an already active target ' + targetId);
return [3 /*break*/, 5];
}
return [4 /*yield*/, localStoreGetCachedTarget(syncEngineImpl.localStore, targetId)];
case 2:
target = _e.sent();
return [4 /*yield*/, localStoreAllocateTarget(syncEngineImpl.localStore, target)];
case 3:
targetData = _e.sent();
return [4 /*yield*/, initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetData.targetId,
/*current=*/ false)];
case 4:
_e.sent();
remoteStoreListen(syncEngineImpl.remoteStore, targetData);
_e.label = 5;
case 5:
_i++;
return [3 /*break*/, 1];
case 6:
_loop_5 = function (targetId) {
return tslib.__generator(this, function (_f) {
switch (_f.label) {
case 0:
// Check that the target is still active since the target might have been
// removed if it has been rejected by the backend.
if (!syncEngineImpl.queriesByTarget.has(targetId)) {
return [2 /*return*/, "continue"];
}
// Release queries that are still active.
return [4 /*yield*/, localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
/* keepPersistedTargetData */ false)
.then(function () {
remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
removeAndCleanupTarget(syncEngineImpl, targetId);
})
.catch(ignoreIfPrimaryLeaseLoss)];
case 1:
// Release queries that are still active.
_f.sent();
return [2 /*return*/];
}
});
};
_d = 0, removed_1 = removed;
_e.label = 7;
case 7:
if (!(_d < removed_1.length)) return [3 /*break*/, 10];
targetId = removed_1[_d];
return [5 /*yield**/, _loop_5(targetId)];
case 8:
_e.sent();
_e.label = 9;
case 9:
_d++;
return [3 /*break*/, 7];
case 10: return [2 /*return*/];
}
});
});
}
function ensureWatchCallbacks(syncEngine) {
var syncEngineImpl = debugCast(syncEngine);
syncEngineImpl.remoteStore.remoteSyncer.applyRemoteEvent = syncEngineApplyRemoteEvent.bind(null, syncEngineImpl);
syncEngineImpl.remoteStore.remoteSyncer.getRemoteKeysForTarget = syncEngineGetRemoteKeysForTarget.bind(null, syncEngineImpl);
syncEngineImpl.remoteStore.remoteSyncer.rejectListen = syncEngineRejectListen.bind(null, syncEngineImpl);
syncEngineImpl.syncEngineListener.onWatchChange = eventManagerOnWatchChange.bind(null, syncEngineImpl.eventManager);
syncEngineImpl.syncEngineListener.onWatchError = eventManagerOnWatchError.bind(null, syncEngineImpl.eventManager);
return syncEngineImpl;
}
function syncEngineEnsureWriteCallbacks(syncEngine) {
var syncEngineImpl = debugCast(syncEngine);
syncEngineImpl.remoteStore.remoteSyncer.applySuccessfulWrite = syncEngineApplySuccessfulWrite.bind(null, syncEngineImpl);
syncEngineImpl.remoteStore.remoteSyncer.rejectFailedWrite = syncEngineRejectFailedWrite.bind(null, syncEngineImpl);
return syncEngineImpl;
}
/**
* Loads a Firestore bundle into the SDK. The returned promise resolves when
* the bundle finished loading.
*
* @param syncEngine - SyncEngine to use.
* @param bundleReader - Bundle to load into the SDK.
* @param task - LoadBundleTask used to update the loading progress to public API.
*/
function syncEngineLoadBundle(syncEngine, bundleReader, task) {
var syncEngineImpl = debugCast(syncEngine);
// eslint-disable-next-line @typescript-eslint/no-floating-promises
loadBundleImpl(syncEngineImpl, bundleReader, task).then(function () {
syncEngineImpl.sharedClientState.notifyBundleLoaded();
});
}
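// Usage sketch for the corresponding public API (v8 namespaced API; `db` and
// `bundleData` are placeholders):
//
//   var task = db.loadBundle(bundleData);
//   task.onProgress(function (progress) {
//     console.log(progress.documentsLoaded + '/' + progress.totalDocuments);
//   });
//   task.then(function () {
//     console.log('Bundle loaded.');
//   });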
function loadBundleImpl(syncEngine, reader, task) {
return tslib.__awaiter(this, void 0, void 0, function () {
var metadata, skip, loader, element, progress, result, e_10;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
_d.trys.push([0, 13, , 14]);
return [4 /*yield*/, reader.getMetadata()];
case 1:
metadata = _d.sent();
return [4 /*yield*/, localStoreHasNewerBundle(syncEngine.localStore, metadata)];
case 2:
skip = _d.sent();
if (!skip) return [3 /*break*/, 4];
return [4 /*yield*/, reader.close()];
case 3:
_d.sent();
task._completeWith(bundleSuccessProgress(metadata));
return [2 /*return*/];
case 4:
task._updateProgress(bundleInitialProgress(metadata));
loader = new BundleLoader(metadata, syncEngine.localStore, reader.serializer);
return [4 /*yield*/, reader.nextElement()];
case 5:
element = _d.sent();
_d.label = 6;
case 6:
if (!element) return [3 /*break*/, 9];
return [4 /*yield*/, loader.addSizedElement(element)];
case 7:
progress = _d.sent();
if (progress) {
task._updateProgress(progress);
}
return [4 /*yield*/, reader.nextElement()];
case 8:
element = _d.sent();
return [3 /*break*/, 6];
case 9: return [4 /*yield*/, loader.complete()];
case 10:
result = _d.sent();
// TODO(b/160876443): This currently raises snapshots with
// `fromCache=false` if users already listen to some queries and the
// bundle has a newer version.
return [4 /*yield*/, syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, result.changedDocs,
/* remoteEvent */ undefined)];
case 11:
// TODO(b/160876443): This currently raises snapshots with
// `fromCache=false` if users already listen to some queries and the
// bundle has a newer version.
_d.sent();
// Save metadata so that loading the same bundle again is skipped.
return [4 /*yield*/, localStoreSaveBundle(syncEngine.localStore, metadata)];
case 12:
// Save metadata so that loading the same bundle again is skipped.
_d.sent();
task._completeWith(result.progress);
return [3 /*break*/, 14];
case 13:
e_10 = _d.sent();
logWarn(LOG_TAG$3, "Loading bundle failed with " + e_10);
task._failWith(e_10);
return [3 /*break*/, 14];
case 14: return [2 /*return*/];
}
});
});
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides all components needed for Firestore with in-memory persistence.
* Uses EagerGC garbage collection.
*/
var MemoryOfflineComponentProvider = /** @class */ (function () {
function MemoryOfflineComponentProvider() {
this.synchronizeTabs = false;
}
MemoryOfflineComponentProvider.prototype.initialize = function (cfg) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
this.serializer = newSerializer(cfg.databaseInfo.databaseId);
this.sharedClientState = this.createSharedClientState(cfg);
this.persistence = this.createPersistence(cfg);
return [4 /*yield*/, this.persistence.start()];
case 1:
_d.sent();
this.gcScheduler = this.createGarbageCollectionScheduler(cfg);
this.localStore = this.createLocalStore(cfg);
return [2 /*return*/];
}
});
});
};
MemoryOfflineComponentProvider.prototype.createGarbageCollectionScheduler = function (cfg) {
return null;
};
MemoryOfflineComponentProvider.prototype.createLocalStore = function (cfg) {
return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
};
MemoryOfflineComponentProvider.prototype.createPersistence = function (cfg) {
return new MemoryPersistence(MemoryEagerDelegate.factory, this.serializer);
};
MemoryOfflineComponentProvider.prototype.createSharedClientState = function (cfg) {
return new MemorySharedClientState();
};
MemoryOfflineComponentProvider.prototype.terminate = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (this.gcScheduler) {
this.gcScheduler.stop();
}
return [4 /*yield*/, this.sharedClientState.shutdown()];
case 1:
_d.sent();
return [4 /*yield*/, this.persistence.shutdown()];
case 2:
_d.sent();
return [2 /*return*/];
}
});
});
};
return MemoryOfflineComponentProvider;
}());
/**
* Provides all components needed for Firestore with IndexedDB persistence.
*/
var IndexedDbOfflineComponentProvider = /** @class */ (function (_super) {
tslib.__extends(IndexedDbOfflineComponentProvider, _super);
function IndexedDbOfflineComponentProvider(onlineComponentProvider, cacheSizeBytes, forceOwnership) {
var _this = _super.call(this) || this;
_this.onlineComponentProvider = onlineComponentProvider;
_this.cacheSizeBytes = cacheSizeBytes;
_this.forceOwnership = forceOwnership;
_this.synchronizeTabs = false;
return _this;
}
IndexedDbOfflineComponentProvider.prototype.initialize = function (cfg) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, _super.prototype.initialize.call(this, cfg)];
case 1:
_d.sent();
return [4 /*yield*/, localStoreSynchronizeLastDocumentChangeReadTime(this.localStore)];
case 2:
_d.sent();
return [4 /*yield*/, this.onlineComponentProvider.initialize(this, cfg)];
case 3:
_d.sent();
// Enqueue writes from a previous session
return [4 /*yield*/, syncEngineEnsureWriteCallbacks(this.onlineComponentProvider.syncEngine)];
case 4:
// Enqueue writes from a previous session
_d.sent();
return [4 /*yield*/, fillWritePipeline(this.onlineComponentProvider.remoteStore)];
case 5:
_d.sent();
return [2 /*return*/];
}
});
});
};
IndexedDbOfflineComponentProvider.prototype.createLocalStore = function (cfg) {
return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
};
IndexedDbOfflineComponentProvider.prototype.createGarbageCollectionScheduler = function (cfg) {
var garbageCollector = this.persistence.referenceDelegate
.garbageCollector;
return new LruScheduler(garbageCollector, cfg.asyncQueue);
};
IndexedDbOfflineComponentProvider.prototype.createPersistence = function (cfg) {
var persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
var lruParams = this.cacheSizeBytes !== undefined
? LruParams.withCacheSize(this.cacheSizeBytes)
: LruParams.DEFAULT;
return new IndexedDbPersistence(this.synchronizeTabs, persistenceKey, cfg.clientId, lruParams, cfg.asyncQueue, getWindow(), getDocument(), this.serializer, this.sharedClientState, !!this.forceOwnership);
};
IndexedDbOfflineComponentProvider.prototype.createSharedClientState = function (cfg) {
return new MemorySharedClientState();
};
return IndexedDbOfflineComponentProvider;
}(MemoryOfflineComponentProvider));
/**
* Provides all components needed for Firestore with multi-tab IndexedDB
* persistence.
*
* In the legacy client, this provider is used to provide both multi-tab and
* non-multi-tab persistence since we cannot tell at build time whether
* `synchronizeTabs` will be enabled.
*/
var MultiTabOfflineComponentProvider = /** @class */ (function (_super) {
tslib.__extends(MultiTabOfflineComponentProvider, _super);
function MultiTabOfflineComponentProvider(onlineComponentProvider, cacheSizeBytes) {
var _this = _super.call(this, onlineComponentProvider, cacheSizeBytes, /* forceOwnership= */ false) || this;
_this.onlineComponentProvider = onlineComponentProvider;
_this.cacheSizeBytes = cacheSizeBytes;
_this.synchronizeTabs = true;
return _this;
}
MultiTabOfflineComponentProvider.prototype.initialize = function (cfg) {
return tslib.__awaiter(this, void 0, void 0, function () {
var syncEngine;
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, _super.prototype.initialize.call(this, cfg)];
case 1:
_d.sent();
syncEngine = this.onlineComponentProvider.syncEngine;
if (!(this.sharedClientState instanceof WebStorageSharedClientState)) return [3 /*break*/, 3];
this.sharedClientState.syncEngine = {
applyBatchState: syncEngineApplyBatchState.bind(null, syncEngine),
applyTargetState: syncEngineApplyTargetState.bind(null, syncEngine),
applyActiveTargetsChange: syncEngineApplyActiveTargetsChange.bind(null, syncEngine),
getActiveClients: syncEngineGetActiveClients.bind(null, syncEngine),
synchronizeWithChangedDocuments: syncEngineSynchronizeWithChangedDocuments.bind(null, syncEngine)
};
return [4 /*yield*/, this.sharedClientState.start()];
case 2:
_d.sent();
_d.label = 3;
case 3:
// NOTE: This will immediately call the listener, so we make sure to
// set it after localStore / remoteStore are started.
return [4 /*yield*/, this.persistence.setPrimaryStateListener(function (isPrimary) { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, syncEngineApplyPrimaryState(this.onlineComponentProvider.syncEngine, isPrimary)];
case 1:
_d.sent();
if (this.gcScheduler) {
if (isPrimary && !this.gcScheduler.started) {
this.gcScheduler.start(this.localStore);
}
else if (!isPrimary) {
this.gcScheduler.stop();
}
}
return [2 /*return*/];
}
});
}); })];
case 4:
// NOTE: This will immediately call the listener, so we make sure to
// set it after localStore / remoteStore are started.
_d.sent();
return [2 /*return*/];
}
});
});
};
MultiTabOfflineComponentProvider.prototype.createSharedClientState = function (cfg) {
var window = getWindow();
if (!WebStorageSharedClientState.isAvailable(window)) {
throw new FirestoreError(Code.UNIMPLEMENTED, 'IndexedDB persistence is only available on platforms that support LocalStorage.');
}
var persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
return new WebStorageSharedClientState(window, cfg.asyncQueue, persistenceKey, cfg.clientId, cfg.initialUser);
};
return MultiTabOfflineComponentProvider;
}(IndexedDbOfflineComponentProvider));
/**
* Initializes and wires the components that are needed to interface with the
* network.
*/
var OnlineComponentProvider = /** @class */ (function () {
function OnlineComponentProvider() {
}
OnlineComponentProvider.prototype.initialize = function (offlineComponentProvider, cfg) {
return tslib.__awaiter(this, void 0, void 0, function () {
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (this.localStore) {
// OnlineComponentProvider may get initialized multiple times if
// multi-tab persistence is used.
return [2 /*return*/];
}
this.localStore = offlineComponentProvider.localStore;
this.sharedClientState = offlineComponentProvider.sharedClientState;
this.datastore = this.createDatastore(cfg);
this.remoteStore = this.createRemoteStore(cfg);
this.eventManager = this.createEventManager(cfg);
this.syncEngine = this.createSyncEngine(cfg,
/* startAsPrimary=*/ !offlineComponentProvider.synchronizeTabs);
this.sharedClientState.onlineStateHandler = function (onlineState) { return syncEngineApplyOnlineStateChange(_this.syncEngine, onlineState, 1 /* SharedClientState */); };
this.remoteStore.remoteSyncer.handleCredentialChange = syncEngineHandleCredentialChange.bind(null, this.syncEngine);
return [4 /*yield*/, remoteStoreApplyPrimaryState(this.remoteStore, this.syncEngine.isPrimaryClient)];
case 1:
_d.sent();
return [2 /*return*/];
}
});
});
};
OnlineComponentProvider.prototype.createEventManager = function (cfg) {
return newEventManager();
};
OnlineComponentProvider.prototype.createDatastore = function (cfg) {
var serializer = newSerializer(cfg.databaseInfo.databaseId);
var connection = newConnection(cfg.databaseInfo);
return newDatastore(cfg.credentials, connection, serializer);
};
OnlineComponentProvider.prototype.createRemoteStore = function (cfg) {
var _this = this;
return newRemoteStore(this.localStore, this.datastore, cfg.asyncQueue, function (onlineState) { return syncEngineApplyOnlineStateChange(_this.syncEngine, onlineState, 0 /* RemoteStore */); }, newConnectivityMonitor());
};
OnlineComponentProvider.prototype.createSyncEngine = function (cfg, startAsPrimary) {
return newSyncEngine(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, cfg.initialUser, cfg.maxConcurrentLimboResolutions, startAsPrimary);
};
OnlineComponentProvider.prototype.terminate = function () {
return remoteStoreShutdown(this.remoteStore);
};
return OnlineComponentProvider;
}());
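/*
* Wiring sketch (assumes the same `cfg` as above and an already-constructed
* offline provider; setOnlineComponentProvider() further below performs the
* real wiring):
*
*   var online = new OnlineComponentProvider();
*   offline.initialize(cfg)
*     .then(function () { return online.initialize(offline, cfg); })
*     .then(function () {
*       // online.datastore, online.remoteStore, online.syncEngine and
*       // online.eventManager are now connected to the offline components.
*     });
*/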
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* How many bytes to read each time when `ReadableStreamReader.read()` is
* called. Only applicable for byte streams that we control (e.g. those backed
* by a Uint8Array).
*/
var DEFAULT_BYTES_PER_READ = 10240;
/**
* Builds a `ByteStreamReader` from a Uint8Array.
* @param source - The data source to use.
* @param bytesPerRead - How many bytes each `read()` from the returned reader
* will read.
*/
function toByteStreamReaderHelper(source, bytesPerRead) {
if (bytesPerRead === void 0) { bytesPerRead = DEFAULT_BYTES_PER_READ; }
var readFrom = 0;
var reader = {
read: function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var result;
return tslib.__generator(this, function (_d) {
if (readFrom < source.byteLength) {
result = {
value: source.slice(readFrom, readFrom + bytesPerRead),
done: false
};
readFrom += bytesPerRead;
return [2 /*return*/, result];
}
return [2 /*return*/, { done: true }];
});
});
},
cancel: function () {
return tslib.__awaiter(this, void 0, void 0, function () { return tslib.__generator(this, function (_d) {
return [2 /*return*/];
}); });
},
releaseLock: function () { },
closed: Promise.reject('unimplemented')
};
return reader;
}
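/*
* Example (illustrative): reading a 6-byte Uint8Array in chunks of 4 bytes.
*
*   var byteReader = toByteStreamReaderHelper(new Uint8Array([1, 2, 3, 4, 5, 6]), 4);
*   byteReader.read().then(function (chunk) {
*     // chunk.value is Uint8Array [1, 2, 3, 4], chunk.done is false
*     return byteReader.read();
*   }).then(function (chunk) {
*     // chunk.value is Uint8Array [5, 6]; the next read resolves with { done: true }
*   });
*/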
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function validateNonEmptyArgument(functionName, argumentName, argument) {
if (!argument) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Function " + functionName + "() cannot be called with an empty " + argumentName + ".");
}
}
/**
* Validates that two boolean options are not set at the same time.
*/
function validateIsNotUsedTogether(optionName1, argument1, optionName2, argument2) {
if (argument1 === true && argument2 === true) {
throw new FirestoreError(Code.INVALID_ARGUMENT, optionName1 + " and " + optionName2 + " cannot be used together.");
}
}
/**
* Validates that `path` refers to a document (indicated by the fact it contains
* an even number of segments).
*/
function validateDocumentPath(path) {
if (!DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid document reference. Document references must have an even number of segments, but " + path + " has " + path.length + ".");
}
}
/**
* Validates that `path` refers to a collection (indicated by the fact it
* contains an odd number of segments).
*/
function validateCollectionPath(path) {
if (DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid collection reference. Collection references must have an odd number of segments, but " + path + " has " + path.length + ".");
}
}
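/*
* Example: "users/alice" has 2 segments and is a document path, while "users"
* has 1 segment and is a collection path. `path` is assumed to be an internal
* path value (e.g. a ResourcePath) whose `length` is its segment count:
*
*   validateDocumentPath(path);     // ok when path.length is even
*   validateCollectionPath(path);   // ok when path.length is odd, throws otherwise
*/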
/**
* Returns true if it's a non-null object without a custom prototype
* (i.e. excludes Array, Date, etc.).
*/
function isPlainObject(input) {
return (typeof input === 'object' &&
input !== null &&
(Object.getPrototypeOf(input) === Object.prototype ||
Object.getPrototypeOf(input) === null));
}
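/*
* Examples (illustrative):
*
*   isPlainObject({ a: 1 });            // true
*   isPlainObject(Object.create(null)); // true  (null prototype)
*   isPlainObject([1, 2, 3]);           // false (Array.prototype)
*   isPlainObject(new Date());          // false (custom prototype)
*   isPlainObject(null);                // false
*/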
/** Returns a string describing the type / value of the provided input. */
function valueDescription(input) {
if (input === undefined) {
return 'undefined';
}
else if (input === null) {
return 'null';
}
else if (typeof input === 'string') {
if (input.length > 20) {
input = input.substring(0, 20) + "...";
}
return JSON.stringify(input);
}
else if (typeof input === 'number' || typeof input === 'boolean') {
return '' + input;
}
else if (typeof input === 'object') {
if (input instanceof Array) {
return 'an array';
}
else {
var customObjectName = tryGetCustomObjectType(input);
if (customObjectName) {
return "a custom " + customObjectName + " object";
}
else {
return 'an object';
}
}
}
else if (typeof input === 'function') {
return 'a function';
}
else {
return fail();
}
}
/** Hacky method to try to get the constructor name for an object. */
function tryGetCustomObjectType(input) {
if (input.constructor) {
var funcNameRegex = /function\s+([^\s(]+)\s*\(/;
var results = funcNameRegex.exec(input.constructor.toString());
if (results && results.length > 1) {
return results[1];
}
}
return null;
}
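/*
* Example outputs of valueDescription() (illustrative):
*
*   valueDescription(undefined);        // 'undefined'
*   valueDescription('abc');            // '"abc"' (long strings are truncated to 20 chars + '...')
*   valueDescription(42);               // '42'
*   valueDescription([1, 2]);           // 'an array'
*   valueDescription(new Date());       // 'a custom Date object'
*   valueDescription(function () {});   // 'a function'
*/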
/**
* Casts `obj` to `T`, optionally unwrapping Compat types to expose the
* underlying instance. Throws if `obj` is not an instance of `T`.
*
* This cast is used in the Lite and Full SDK to verify instance types for
* arguments passed to the public API.
*/
function cast(obj,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
constructor) {
if ('_delegate' in obj) {
// Unwrap Compat types
// eslint-disable-next-line @typescript-eslint/no-explicit-any
obj = obj._delegate;
}
if (!(obj instanceof constructor)) {
if (constructor.name === obj.constructor.name) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Type does not match the expected instance. Did you pass a ' +
"reference from a different Firestore SDK?");
}
else {
var description = valueDescription(obj);
throw new FirestoreError(Code.INVALID_ARGUMENT, "Expected type '" + constructor.name + "', but it was: " + description);
}
}
return obj;
}
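/*
* Usage sketch (the `Query` class is assumed to be the one defined elsewhere
* in this file; a Compat wrapper exposing `_delegate` is unwrapped first):
*
*   var internalQuery = cast(publicQueryOrCompatWrapper, Query);
*   // throws INVALID_ARGUMENT when the value is not a Query instance
*/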
function validatePositiveNumber(functionName, n) {
if (n <= 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Function " + functionName + "() requires a positive number, but it was: " + n + ".");
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* On Node, the only supported data source is a `Uint8Array` for now.
*/
function toByteStreamReader(source, bytesPerRead) {
if (!(source instanceof Uint8Array)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "NodePlatform.toByteStreamReader expects source to be Uint8Array, got " + valueDescription(source));
}
return toByteStreamReaderHelper(source, bytesPerRead);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* A wrapper implementation of Observer that dispatches events
* asynchronously. To allow immediate silencing, a mute() call is provided
* that prevents scheduled events from being raised.
*/
var AsyncObserver = /** @class */ (function () {
function AsyncObserver(observer) {
this.observer = observer;
/**
* When set to true, will not raise future events. Necessary to deal with
* async detachment of listener.
*/
this.muted = false;
}
AsyncObserver.prototype.next = function (value) {
if (this.observer.next) {
this.scheduleEvent(this.observer.next, value);
}
};
AsyncObserver.prototype.error = function (error) {
if (this.observer.error) {
this.scheduleEvent(this.observer.error, error);
}
else {
console.error('Uncaught Error in snapshot listener:', error);
}
};
AsyncObserver.prototype.mute = function () {
this.muted = true;
};
AsyncObserver.prototype.scheduleEvent = function (eventHandler, event) {
var _this = this;
if (!this.muted) {
setTimeout(function () {
if (!_this.muted) {
eventHandler(event);
}
}, 0);
}
};
return AsyncObserver;
}());
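/*
* Usage sketch: events are delivered on a later tick via setTimeout(), and
* mute() silences both future events and events that are scheduled but not
* yet delivered.
*
*   var observer = new AsyncObserver({
*     next: function (value) { console.log('next:', value); },
*     error: function (e) { console.error(e); }
*   });
*   observer.next(1);   // delivered asynchronously (unless muted before the tick)
*   observer.mute();
*   observer.next(2);   // never scheduled
*/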
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A complete element in the bundle stream, together with the byte length it
* occupies in the stream.
*/
var SizedBundleElement = /** @class */ (function () {
function SizedBundleElement(payload,
// How many bytes this element takes to store in the bundle.
byteLength) {
this.payload = payload;
this.byteLength = byteLength;
}
SizedBundleElement.prototype.isBundleMetadata = function () {
return 'metadata' in this.payload;
};
return SizedBundleElement;
}());
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A class representing a bundle.
*
* Takes a bundle stream or buffer, and presents abstractions to read bundled
* elements out of the underlying content.
*/
var BundleReaderImpl = /** @class */ (function () {
function BundleReaderImpl(
/** The reader to read from underlying binary bundle data source. */
reader, serializer) {
var _this = this;
this.reader = reader;
this.serializer = serializer;
/** Cached bundle metadata. */
this.metadata = new Deferred();
/**
* Internal buffer to hold bundle content, accumulating incomplete element
* content.
*/
this.buffer = new Uint8Array();
this.textDecoder = newTextDecoder();
// Read the metadata (which is the first element).
this.nextElementImpl().then(function (element) {
if (element && element.isBundleMetadata()) {
_this.metadata.resolve(element.payload.metadata);
}
else {
_this.metadata.reject(new Error("The first element of the bundle is not a metadata, it is\n " + JSON.stringify(element === null || element === void 0 ? void 0 : element.payload)));
}
}, function (error) { return _this.metadata.reject(error); });
}
BundleReaderImpl.prototype.close = function () {
return this.reader.cancel();
};
BundleReaderImpl.prototype.getMetadata = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
return [2 /*return*/, this.metadata.promise];
});
});
};
BundleReaderImpl.prototype.nextElement = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
// Makes sure metadata is read before proceeding.
return [4 /*yield*/, this.getMetadata()];
case 1:
// Makes sure metadata is read before proceeding.
_d.sent();
return [2 /*return*/, this.nextElementImpl()];
}
});
});
};
/**
* Reads from the head of the internal buffer, pulling more data from the
* underlying stream if a complete element cannot be found, until an
* element (including the prefixed length and the JSON string) is found.
*
* Once a complete element is read, it is dropped from the internal buffer.
*
* Returns either the bundled element, or null if we have reached the end of
* the stream.
*/
BundleReaderImpl.prototype.nextElementImpl = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var lengthBuffer, lengthString, length, jsonString;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, this.readLength()];
case 1:
lengthBuffer = _d.sent();
if (lengthBuffer === null) {
return [2 /*return*/, null];
}
lengthString = this.textDecoder.decode(lengthBuffer);
length = Number(lengthString);
if (isNaN(length)) {
this.raiseError("length string (" + lengthString + ") is not valid number");
}
return [4 /*yield*/, this.readJsonString(length)];
case 2:
jsonString = _d.sent();
return [2 /*return*/, new SizedBundleElement(JSON.parse(jsonString), lengthBuffer.length + length)];
}
});
});
};
/** First index of '{' in the internal buffer. */
BundleReaderImpl.prototype.indexOfOpenBracket = function () {
return this.buffer.findIndex(function (v) { return v === '{'.charCodeAt(0); });
};
/**
* Reads from the beginning of the internal buffer up to the first '{' and
* returns the content.
*
* If the end of the stream has been reached, returns null.
*/
BundleReaderImpl.prototype.readLength = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var done, position, result;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!(this.indexOfOpenBracket() < 0)) return [3 /*break*/, 2];
return [4 /*yield*/, this.pullMoreDataToBuffer()];
case 1:
done = _d.sent();
if (done) {
return [3 /*break*/, 2];
}
return [3 /*break*/, 0];
case 2:
// Broke out of the loop because underlying stream is closed, and there
// happens to be no more data to process.
if (this.buffer.length === 0) {
return [2 /*return*/, null];
}
position = this.indexOfOpenBracket();
// Broke out of the loop because underlying stream is closed, but still
// cannot find an open bracket.
if (position < 0) {
this.raiseError('Reached the end of bundle when a length string is expected.');
}
result = this.buffer.slice(0, position);
// Update the internal buffer to drop the read length.
this.buffer = this.buffer.slice(position);
return [2 /*return*/, result];
}
});
});
};
/**
* Reads a specified number of bytes from the head of the internal buffer,
* pulling more data from the underlying stream if needed.
*
* Returns a string decoded from the read bytes.
*/
BundleReaderImpl.prototype.readJsonString = function (length) {
return tslib.__awaiter(this, void 0, void 0, function () {
var done, result;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!(this.buffer.length < length)) return [3 /*break*/, 2];
return [4 /*yield*/, this.pullMoreDataToBuffer()];
case 1:
done = _d.sent();
if (done) {
this.raiseError('Reached the end of bundle when more is expected.');
}
return [3 /*break*/, 0];
case 2:
result = this.textDecoder.decode(this.buffer.slice(0, length));
// Update the internal buffer to drop the read json string.
this.buffer = this.buffer.slice(length);
return [2 /*return*/, result];
}
});
});
};
BundleReaderImpl.prototype.raiseError = function (message) {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.reader.cancel();
throw new Error("Invalid bundle format: " + message);
};
/**
* Pulls more data from the underlying stream into the internal buffer.
* Returns a boolean indicating whether the stream is finished.
*/
BundleReaderImpl.prototype.pullMoreDataToBuffer = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var result, newBuffer;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, this.reader.read()];
case 1:
result = _d.sent();
if (!result.done) {
newBuffer = new Uint8Array(this.buffer.length + result.value.length);
newBuffer.set(this.buffer);
newBuffer.set(result.value, this.buffer.length);
this.buffer = newBuffer;
}
return [2 /*return*/, result.done];
}
});
});
};
return BundleReaderImpl;
}());
function newBundleReader(reader, serializer) {
return new BundleReaderImpl(reader, serializer);
}
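/*
* Framing sketch: a bundle is a sequence of elements, each encoded as the
* decimal byte length of a JSON string immediately followed by that JSON
* string, with the metadata element first, e.g.
*
*   <length>{"metadata":{...}}<length>{"namedQuery":{...}}...
*
* A reader over an in-memory bundle string could be built like this (the
* `serializer` argument is assumed to come from newSerializer(), as done in
* createBundleReader() further below):
*
*   var bundleReader = newBundleReader(
*     toByteStreamReaderHelper(newTextEncoder().encode(bundleString)),
*     serializer);
*   bundleReader.getMetadata()
*     .then(function (metadata) { return bundleReader.nextElement(); })
*     .then(function (element) {
*       // a SizedBundleElement, or null at the end of the stream
*     });
*/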
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal transaction object responsible for accumulating the mutations to
* perform and the base versions for any documents read.
*/
var Transaction$2 = /** @class */ (function () {
function Transaction$2(datastore) {
this.datastore = datastore;
// The version of each document that was read during this transaction.
this.readVersions = new Map();
this.mutations = [];
this.committed = false;
/**
* A deferred usage error that occurred previously in this transaction that
* will cause the transaction to fail once it actually commits.
*/
this.lastWriteError = null;
/**
* Set of documents that have been written in the transaction.
*
* When there's more than one write to the same key in a transaction, any
* writes after the first are handled differently.
*/
this.writtenDocs = new Set();
}
Transaction$2.prototype.lookup = function (keys) {
return tslib.__awaiter(this, void 0, void 0, function () {
var docs;
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
this.ensureCommitNotCalled();
if (this.mutations.length > 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Firestore transactions require all reads to be executed before all writes.');
}
return [4 /*yield*/, invokeBatchGetDocumentsRpc(this.datastore, keys)];
case 1:
docs = _d.sent();
docs.forEach(function (doc) { return _this.recordVersion(doc); });
return [2 /*return*/, docs];
}
});
});
};
Transaction$2.prototype.set = function (key, data) {
this.write(data.toMutation(key, this.precondition(key)));
this.writtenDocs.add(key.toString());
};
Transaction$2.prototype.update = function (key, data) {
try {
this.write(data.toMutation(key, this.preconditionForUpdate(key)));
}
catch (e) {
this.lastWriteError = e;
}
this.writtenDocs.add(key.toString());
};
Transaction$2.prototype.delete = function (key) {
this.write(new DeleteMutation(key, this.precondition(key)));
this.writtenDocs.add(key.toString());
};
Transaction$2.prototype.commit = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var unwritten;
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
this.ensureCommitNotCalled();
if (this.lastWriteError) {
throw this.lastWriteError;
}
unwritten = this.readVersions;
// For each mutation, note that the doc was written.
this.mutations.forEach(function (mutation) {
unwritten.delete(mutation.key.toString());
});
// For each document that was read but not written to, we want to perform
// a `verify` operation.
unwritten.forEach(function (_, path) {
var key = DocumentKey.fromPath(path);
_this.mutations.push(new VerifyMutation(key, _this.precondition(key)));
});
return [4 /*yield*/, invokeCommitRpc(this.datastore, this.mutations)];
case 1:
_d.sent();
this.committed = true;
return [2 /*return*/];
}
});
});
};
Transaction$2.prototype.recordVersion = function (doc) {
var docVersion;
if (doc.isFoundDocument()) {
docVersion = doc.version;
}
else if (doc.isNoDocument()) {
// For deleted docs, we must use baseVersion 0 when we overwrite them.
docVersion = SnapshotVersion.min();
}
else {
throw fail();
}
var existingVersion = this.readVersions.get(doc.key.toString());
if (existingVersion) {
if (!docVersion.isEqual(existingVersion)) {
// This transaction will fail no matter what.
throw new FirestoreError(Code.ABORTED, 'Document version changed between two reads.');
}
}
else {
this.readVersions.set(doc.key.toString(), docVersion);
}
};
/**
* Returns the version of this document when it was read in this transaction,
* as a precondition, or no precondition if it was not read.
*/
Transaction$2.prototype.precondition = function (key) {
var version = this.readVersions.get(key.toString());
if (!this.writtenDocs.has(key.toString()) && version) {
return Precondition.updateTime(version);
}
else {
return Precondition.none();
}
};
/**
* Returns the precondition for a document if the operation is an update.
*/
Transaction$2.prototype.preconditionForUpdate = function (key) {
var version = this.readVersions.get(key.toString());
// The first time a document is written, we want to take into account the
// read time and existence
if (!this.writtenDocs.has(key.toString()) && version) {
if (version.isEqual(SnapshotVersion.min())) {
// The document doesn't exist, so fail the transaction.
// This has to be validated locally because you can't send a
// precondition that a document does not exist without changing the
// semantics of the backend write to be an insert. This is the reverse
// of what we want, since we want to assert that the document doesn't
// exist but then send the update and have it fail. Since we can't
// express that to the backend, we have to validate locally.
// Note: this can change once we can send separate verify writes in the
// transaction.
throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't update a document that doesn't exist.");
}
// Document exists, base precondition on document update time.
return Precondition.updateTime(version);
}
else {
// Document was not read, so we just use the preconditions for a blind
// update.
return Precondition.exists(true);
}
};
Transaction$2.prototype.write = function (mutation) {
this.ensureCommitNotCalled();
this.mutations.push(mutation);
};
Transaction$2.prototype.ensureCommitNotCalled = function () {
};
return Transaction$2;
}());
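/*
* Usage sketch (assumes a FirestoreClient `client` as defined further below;
* `docKey` is an internal DocumentKey and `parsedSetData` stands for a parsed
* write that exposes toMutation(), produced by the user-data conversion layer
* elsewhere in this file). All reads must happen before any write:
*
*   getDatastore(client).then(function (datastore) {
*     var txn = new Transaction$2(datastore);
*     return txn.lookup([docKey]).then(function (docs) {
*       txn.set(docKey, parsedSetData);
*       return txn.commit();   // docs read but not written get verify mutations
*     });
*   });
*/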
/**
* @license
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var DEFAULT_MAX_ATTEMPTS_COUNT = 5;
/**
* TransactionRunner encapsulates the logic needed to run and retry transactions
* with backoff.
*/
var TransactionRunner = /** @class */ (function () {
function TransactionRunner(asyncQueue, datastore, updateFunction, deferred) {
this.asyncQueue = asyncQueue;
this.datastore = datastore;
this.updateFunction = updateFunction;
this.deferred = deferred;
this.attemptsRemaining = DEFAULT_MAX_ATTEMPTS_COUNT;
this.backoff = new ExponentialBackoff(this.asyncQueue, "transaction_retry" /* TransactionRetry */);
}
/** Runs the transaction and sets the result on deferred. */
TransactionRunner.prototype.run = function () {
this.attemptsRemaining -= 1;
this.runWithBackOff();
};
TransactionRunner.prototype.runWithBackOff = function () {
var _this = this;
this.backoff.backoffAndRun(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var transaction, userPromise;
var _this = this;
return tslib.__generator(this, function (_d) {
transaction = new Transaction$2(this.datastore);
userPromise = this.tryRunUpdateFunction(transaction);
if (userPromise) {
userPromise
.then(function (result) {
_this.asyncQueue.enqueueAndForget(function () {
return transaction
.commit()
.then(function () {
_this.deferred.resolve(result);
})
.catch(function (commitError) {
_this.handleTransactionError(commitError);
});
});
})
.catch(function (userPromiseError) {
_this.handleTransactionError(userPromiseError);
});
}
return [2 /*return*/];
});
}); });
};
TransactionRunner.prototype.tryRunUpdateFunction = function (transaction) {
try {
var userPromise = this.updateFunction(transaction);
if (isNullOrUndefined(userPromise) ||
!userPromise.catch ||
!userPromise.then) {
this.deferred.reject(Error('Transaction callback must return a Promise'));
return null;
}
return userPromise;
}
catch (error) {
// Do not retry errors thrown by user provided updateFunction.
this.deferred.reject(error);
return null;
}
};
TransactionRunner.prototype.handleTransactionError = function (error) {
var _this = this;
if (this.attemptsRemaining > 0 && this.isRetryableTransactionError(error)) {
this.attemptsRemaining -= 1;
this.asyncQueue.enqueueAndForget(function () {
_this.runWithBackOff();
return Promise.resolve();
});
}
else {
this.deferred.reject(error);
}
};
TransactionRunner.prototype.isRetryableTransactionError = function (error) {
if (error.name === 'FirebaseError') {
// In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
// non-matching document versions with ABORTED. These errors should be retried.
var code = error.code;
return (code === 'aborted' ||
code === 'failed-precondition' ||
!isPermanentError(code));
}
return false;
};
return TransactionRunner;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$2 = 'FirestoreClient';
var MAX_CONCURRENT_LIMBO_RESOLUTIONS = 100;
/**
* FirestoreClient is a top-level class that constructs and owns all of the
* pieces of the client SDK architecture. It is responsible for creating the
* async queue that is shared by all of the other components in the system.
*/
var FirestoreClient = /** @class */ (function () {
function FirestoreClient(credentials,
/**
* Asynchronous queue responsible for all of our internal processing. When
* we get incoming work from the user (via public API) or the network
* (incoming GRPC messages), we should always schedule onto this queue.
* This ensures all of our work is properly serialized (e.g. we don't
* start processing a new operation while the previous one is waiting for
* an async I/O to complete).
*/
asyncQueue, databaseInfo) {
var _this = this;
this.credentials = credentials;
this.asyncQueue = asyncQueue;
this.databaseInfo = databaseInfo;
this.user = User.UNAUTHENTICATED;
this.clientId = AutoId.newId();
this.credentialListener = function () { return Promise.resolve(); };
this.credentials.setChangeListener(asyncQueue, function (user) { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
logDebug(LOG_TAG$2, 'Received user=', user.uid);
return [4 /*yield*/, this.credentialListener(user)];
case 1:
_d.sent();
this.user = user;
return [2 /*return*/];
}
});
}); });
}
FirestoreClient.prototype.getConfiguration = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
return [2 /*return*/, {
asyncQueue: this.asyncQueue,
databaseInfo: this.databaseInfo,
clientId: this.clientId,
credentials: this.credentials,
initialUser: this.user,
maxConcurrentLimboResolutions: MAX_CONCURRENT_LIMBO_RESOLUTIONS
}];
});
});
};
FirestoreClient.prototype.setCredentialChangeListener = function (listener) {
this.credentialListener = listener;
};
/**
* Checks that the client has not been terminated. Ensures that other methods on
* this class cannot be called after the client is terminated.
*/
FirestoreClient.prototype.verifyNotTerminated = function () {
if (this.asyncQueue.isShuttingDown) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
}
};
FirestoreClient.prototype.terminate = function () {
var _this = this;
this.asyncQueue.enterRestrictedMode();
var deferred = new Deferred();
this.asyncQueue.enqueueAndForgetEvenWhileRestricted(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var e_11, firestoreError;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
_d.trys.push([0, 5, , 6]);
if (!this.onlineComponents) return [3 /*break*/, 2];
return [4 /*yield*/, this.onlineComponents.terminate()];
case 1:
_d.sent();
_d.label = 2;
case 2:
if (!this.offlineComponents) return [3 /*break*/, 4];
return [4 /*yield*/, this.offlineComponents.terminate()];
case 3:
_d.sent();
_d.label = 4;
case 4:
// `removeChangeListener` must be called after shutting down the
// RemoteStore as it will prevent the RemoteStore from retrieving
// auth tokens.
this.credentials.removeChangeListener();
deferred.resolve();
return [3 /*break*/, 6];
case 5:
e_11 = _d.sent();
firestoreError = wrapInUserErrorIfRecoverable(e_11, "Failed to shutdown persistence");
deferred.reject(firestoreError);
return [3 /*break*/, 6];
case 6: return [2 /*return*/];
}
});
}); });
return deferred.promise;
};
return FirestoreClient;
}());
function setOfflineComponentProvider(client, offlineComponentProvider) {
return tslib.__awaiter(this, void 0, void 0, function () {
var configuration, currentUser;
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
client.asyncQueue.verifyOperationInProgress();
logDebug(LOG_TAG$2, 'Initializing OfflineComponentProvider');
return [4 /*yield*/, client.getConfiguration()];
case 1:
configuration = _d.sent();
return [4 /*yield*/, offlineComponentProvider.initialize(configuration)];
case 2:
_d.sent();
currentUser = configuration.initialUser;
client.setCredentialChangeListener(function (user) { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!!currentUser.isEqual(user)) return [3 /*break*/, 2];
return [4 /*yield*/, localStoreHandleUserChange(offlineComponentProvider.localStore, user)];
case 1:
_d.sent();
currentUser = user;
_d.label = 2;
case 2: return [2 /*return*/];
}
});
}); });
// When a user calls clearPersistence() in one client, all other clients
// need to be terminated to allow the delete to succeed.
offlineComponentProvider.persistence.setDatabaseDeletedListener(function () { return client.terminate(); });
client.offlineComponents = offlineComponentProvider;
return [2 /*return*/];
}
});
});
}
function setOnlineComponentProvider(client, onlineComponentProvider) {
return tslib.__awaiter(this, void 0, void 0, function () {
var offlineComponentProvider, configuration;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
client.asyncQueue.verifyOperationInProgress();
return [4 /*yield*/, ensureOfflineComponents(client)];
case 1:
offlineComponentProvider = _d.sent();
logDebug(LOG_TAG$2, 'Initializing OnlineComponentProvider');
return [4 /*yield*/, client.getConfiguration()];
case 2:
configuration = _d.sent();
return [4 /*yield*/, onlineComponentProvider.initialize(offlineComponentProvider, configuration)];
case 3:
_d.sent();
// The CredentialChangeListener of the online component provider takes
// precedence over the offline component provider.
client.setCredentialChangeListener(function (user) { return remoteStoreHandleCredentialChange(onlineComponentProvider.remoteStore, user); });
client.onlineComponents = onlineComponentProvider;
return [2 /*return*/];
}
});
});
}
function ensureOfflineComponents(client) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!!client.offlineComponents) return [3 /*break*/, 2];
logDebug(LOG_TAG$2, 'Using default OfflineComponentProvider');
return [4 /*yield*/, setOfflineComponentProvider(client, new MemoryOfflineComponentProvider())];
case 1:
_d.sent();
_d.label = 2;
case 2: return [2 /*return*/, client.offlineComponents];
}
});
});
}
function ensureOnlineComponents(client) {
return tslib.__awaiter(this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (!!client.onlineComponents) return [3 /*break*/, 2];
logDebug(LOG_TAG$2, 'Using default OnlineComponentProvider');
return [4 /*yield*/, setOnlineComponentProvider(client, new OnlineComponentProvider())];
case 1:
_d.sent();
_d.label = 2;
case 2: return [2 /*return*/, client.onlineComponents];
}
});
});
}
function getPersistence(client) {
return ensureOfflineComponents(client).then(function (c) { return c.persistence; });
}
function getLocalStore(client) {
return ensureOfflineComponents(client).then(function (c) { return c.localStore; });
}
function getRemoteStore(client) {
return ensureOnlineComponents(client).then(function (c) { return c.remoteStore; });
}
function getSyncEngine(client) {
return ensureOnlineComponents(client).then(function (c) { return c.syncEngine; });
}
function getDatastore(client) {
return ensureOnlineComponents(client).then(function (c) { return c.datastore; });
}
function getEventManager(client) {
return tslib.__awaiter(this, void 0, void 0, function () {
var onlineComponentProvider, eventManager;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, ensureOnlineComponents(client)];
case 1:
onlineComponentProvider = _d.sent();
eventManager = onlineComponentProvider.eventManager;
eventManager.onListen = syncEngineListen.bind(null, onlineComponentProvider.syncEngine);
eventManager.onUnlisten = syncEngineUnlisten.bind(null, onlineComponentProvider.syncEngine);
return [2 /*return*/, eventManager];
}
});
});
}
/** Enables the network connection and re-enqueues all pending operations. */
function firestoreClientEnableNetwork(client) {
var _this = this;
return client.asyncQueue.enqueue(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var persistence, remoteStore;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getPersistence(client)];
case 1:
persistence = _d.sent();
return [4 /*yield*/, getRemoteStore(client)];
case 2:
remoteStore = _d.sent();
persistence.setNetworkEnabled(true);
return [2 /*return*/, remoteStoreEnableNetwork(remoteStore)];
}
});
}); });
}
/** Disables the network connection. Pending operations will not complete. */
function firestoreClientDisableNetwork(client) {
var _this = this;
return client.asyncQueue.enqueue(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var persistence, remoteStore;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getPersistence(client)];
case 1:
persistence = _d.sent();
return [4 /*yield*/, getRemoteStore(client)];
case 2:
remoteStore = _d.sent();
persistence.setNetworkEnabled(false);
return [2 /*return*/, remoteStoreDisableNetwork(remoteStore)];
}
});
}); });
}
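/*
* Usage sketch (assumes a constructed FirestoreClient `client`):
*
*   firestoreClientDisableNetwork(client)
*     .then(function () {
*       // reads are now served from cache and writes are queued locally
*       return firestoreClientEnableNetwork(client);
*     });
*/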
/**
* Returns a Promise that resolves when all writes that were pending at the time
* this method was called have received server acknowledgement. An acknowledgement
* can be either acceptance or rejection.
*/
function firestoreClientWaitForPendingWrites(client) {
var _this = this;
var deferred = new Deferred();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var syncEngine;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getSyncEngine(client)];
case 1:
syncEngine = _d.sent();
return [2 /*return*/, syncEngineRegisterPendingWritesCallback(syncEngine, deferred)];
}
});
}); });
return deferred.promise;
}
function firestoreClientListen(client, query, options, observer) {
var _this = this;
var wrappedObserver = new AsyncObserver(observer);
var listener = new QueryListener(query, wrappedObserver, options);
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var eventManager;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getEventManager(client)];
case 1:
eventManager = _d.sent();
return [2 /*return*/, eventManagerListen(eventManager, listener)];
}
});
}); });
return function () {
wrappedObserver.mute();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var eventManager;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getEventManager(client)];
case 1:
eventManager = _d.sent();
return [2 /*return*/, eventManagerUnlisten(eventManager, listener)];
}
});
}); });
};
}
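/*
* Usage sketch (assumes a FirestoreClient `client` and an internal query
* `query`, e.g. built with newQueryForPath()):
*
*   var unsubscribe = firestoreClientListen(
*     client,
*     query,
*     { includeMetadataChanges: false },
*     {
*       next: function (snapshot) { console.log(snapshot.fromCache); },
*       error: function (e) { console.error(e); }
*     });
*   // Stops delivering events and removes the listener:
*   unsubscribe();
*/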
function firestoreClientGetDocumentFromLocalCache(client, docKey) {
var _this = this;
var deferred = new Deferred();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var localStore;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getLocalStore(client)];
case 1:
localStore = _d.sent();
return [2 /*return*/, readDocumentFromCache(localStore, docKey, deferred)];
}
});
}); });
return deferred.promise;
}
function firestoreClientGetDocumentViaSnapshotListener(client, key, options) {
var _this = this;
if (options === void 0) { options = {}; }
var deferred = new Deferred();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var eventManager;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getEventManager(client)];
case 1:
eventManager = _d.sent();
return [2 /*return*/, readDocumentViaSnapshotListener(eventManager, client.asyncQueue, key, options, deferred)];
}
});
}); });
return deferred.promise;
}
function firestoreClientGetDocumentsFromLocalCache(client, query) {
var _this = this;
var deferred = new Deferred();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var localStore;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getLocalStore(client)];
case 1:
localStore = _d.sent();
return [2 /*return*/, executeQueryFromCache(localStore, query, deferred)];
}
});
}); });
return deferred.promise;
}
function firestoreClientGetDocumentsViaSnapshotListener(client, query, options) {
var _this = this;
if (options === void 0) { options = {}; }
var deferred = new Deferred();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var eventManager;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getEventManager(client)];
case 1:
eventManager = _d.sent();
return [2 /*return*/, executeQueryViaSnapshotListener(eventManager, client.asyncQueue, query, options, deferred)];
}
});
}); });
return deferred.promise;
}
function firestoreClientWrite(client, mutations) {
var _this = this;
var deferred = new Deferred();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var syncEngine;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getSyncEngine(client)];
case 1:
syncEngine = _d.sent();
return [2 /*return*/, syncEngineWrite(syncEngine, mutations, deferred)];
}
});
}); });
return deferred.promise;
}
function firestoreClientAddSnapshotsInSyncListener(client, observer) {
var _this = this;
var wrappedObserver = new AsyncObserver(observer);
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var eventManager;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getEventManager(client)];
case 1:
eventManager = _d.sent();
return [2 /*return*/, addSnapshotsInSyncListener(eventManager, wrappedObserver)];
}
});
}); });
return function () {
wrappedObserver.mute();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var eventManager;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getEventManager(client)];
case 1:
eventManager = _d.sent();
return [2 /*return*/, removeSnapshotsInSyncListener(eventManager, wrappedObserver)];
}
});
}); });
};
}
/**
* Takes an updateFunction in which a set of reads and writes can be performed
* atomically. In the updateFunction, the client can read and write values
* using the supplied transaction object. After the updateFunction, all
* changes will be committed. If a retryable error occurs (e.g. some other
* client has changed any of the data referenced), then the updateFunction
* will be called again after a backoff. If the updateFunction still fails
* after all retries, then the transaction will be rejected.
*
* The transaction object passed to the updateFunction contains methods for
* accessing documents and collections. Unlike other datastore access, data
* accessed with the transaction will not reflect local changes that have not
* been committed. For this reason, it is required that all reads are
* performed before any writes. Transactions must be performed while online.
*/
function firestoreClientTransaction(client, updateFunction) {
var _this = this;
var deferred = new Deferred();
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var datastore;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, getDatastore(client)];
case 1:
datastore = _d.sent();
new TransactionRunner(client.asyncQueue, datastore, updateFunction, deferred).run();
return [2 /*return*/];
}
});
}); });
return deferred.promise;
}
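/*
* Usage sketch (assumes a FirestoreClient `client`; `docKey` and
* `parsedUpdateData` are placeholders for an internal document key and a
* parsed update). The update function must do all reads before writes and
* must return a Promise, otherwise the transaction is rejected:
*
*   firestoreClientTransaction(client, function (txn) {
*     return txn.lookup([docKey]).then(function (docs) {
*       txn.update(docKey, parsedUpdateData);
*       return docs[0];
*     });
*   }).then(function (docBeforeUpdate) {
*     // resolves with the update function's result after a successful commit
*   });
*/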
function readDocumentFromCache(localStore, docKey, result) {
return tslib.__awaiter(this, void 0, void 0, function () {
var document_4, e_12, firestoreError;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
_d.trys.push([0, 2, , 3]);
return [4 /*yield*/, localStoreReadDocument(localStore, docKey)];
case 1:
document_4 = _d.sent();
if (document_4.isFoundDocument()) {
result.resolve(document_4);
}
else if (document_4.isNoDocument()) {
result.resolve(null);
}
else {
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from cache. (However, this document may ' +
"exist on the server. Run again without setting 'source' in " +
'the GetOptions to attempt to retrieve the document from the ' +
'server.)'));
}
return [3 /*break*/, 3];
case 2:
e_12 = _d.sent();
firestoreError = wrapInUserErrorIfRecoverable(e_12, "Failed to get document '" + docKey + "' from cache");
result.reject(firestoreError);
return [3 /*break*/, 3];
case 3: return [2 /*return*/];
}
});
});
}
/**
* Retrieves a latency-compensated document from the backend via a
* SnapshotListener.
*/
function readDocumentViaSnapshotListener(eventManager, asyncQueue, key, options, result) {
var wrappedObserver = new AsyncObserver({
next: function (snap) {
// Remove the listener before passing the event to the user to avoid
// user actions affecting the now-stale query.
asyncQueue.enqueueAndForget(function () { return eventManagerUnlisten(eventManager, listener); });
var exists = snap.docs.has(key);
if (!exists && snap.fromCache) {
// TODO(dimond): If we're online and the document doesn't
// exist then we resolve with a doc.exists set to false. If
// we're offline however, we reject the Promise in this
// case. Two options: 1) Cache the negative response from
// the server so we can deliver that even when you're
// offline 2) Actually reject the Promise in the online case
// if the document doesn't exist.
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document because the client is offline.'));
}
else if (exists &&
snap.fromCache &&
options &&
options.source === 'server') {
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from server. (However, this ' +
'document does exist in the local cache. Run again ' +
'without setting source to "server" to ' +
'retrieve the cached document.)'));
}
else {
result.resolve(snap);
}
},
error: function (e) { return result.reject(e); }
});
var listener = new QueryListener(newQueryForPath(key.path), wrappedObserver, {
includeMetadataChanges: true,
waitForSyncWhenOnline: true
});
return eventManagerListen(eventManager, listener);
}
function executeQueryFromCache(localStore, query, result) {
return tslib.__awaiter(this, void 0, void 0, function () {
var queryResult, view, viewDocChanges, viewChange, e_13, firestoreError;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
_d.trys.push([0, 2, , 3]);
return [4 /*yield*/, localStoreExecuteQuery(localStore, query,
/* usePreviousResults= */ true)];
case 1:
queryResult = _d.sent();
view = new View(query, queryResult.remoteKeys);
viewDocChanges = view.computeDocChanges(queryResult.documents);
viewChange = view.applyChanges(viewDocChanges,
/* updateLimboDocuments= */ false);
result.resolve(viewChange.snapshot);
return [3 /*break*/, 3];
case 2:
e_13 = _d.sent();
firestoreError = wrapInUserErrorIfRecoverable(e_13, "Failed to execute query '" + query + "' against cache");
result.reject(firestoreError);
return [3 /*break*/, 3];
case 3: return [2 /*return*/];
}
});
});
}
/**
* Retrieves a latency-compensated query snapshot from the backend via a
* SnapshotListener.
*/
function executeQueryViaSnapshotListener(eventManager, asyncQueue, query, options, result) {
var wrappedObserver = new AsyncObserver({
next: function (snapshot) {
// Remove the listener before passing the event to the user to avoid
// user actions affecting the now-stale query.
asyncQueue.enqueueAndForget(function () { return eventManagerUnlisten(eventManager, listener); });
if (snapshot.fromCache && options.source === 'server') {
result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get documents from server. (However, these ' +
'documents may exist in the local cache. Run again ' +
'without setting source to "server" to ' +
'retrieve the cached documents.)'));
}
else {
result.resolve(snapshot);
}
},
error: function (e) { return result.reject(e); }
});
var listener = new QueryListener(query, wrappedObserver, {
includeMetadataChanges: true,
waitForSyncWhenOnline: true
});
return eventManagerListen(eventManager, listener);
}
function firestoreClientLoadBundle(client, databaseId, data, resultTask) {
var _this = this;
var reader = createBundleReader(data, newSerializer(databaseId));
client.asyncQueue.enqueueAndForget(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var _d;
return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
_d = syncEngineLoadBundle;
return [4 /*yield*/, getSyncEngine(client)];
case 1:
_d.apply(void 0, [_e.sent(), reader, resultTask]);
return [2 /*return*/];
}
});
}); });
}
function firestoreClientGetNamedQuery(client, queryName) {
var _this = this;
return client.asyncQueue.enqueue(function () { return tslib.__awaiter(_this, void 0, void 0, function () { var _d; return tslib.__generator(this, function (_e) {
switch (_e.label) {
case 0:
_d = localStoreGetNamedQuery;
return [4 /*yield*/, getLocalStore(client)];
case 1: return [2 /*return*/, _d.apply(void 0, [_e.sent(), queryName])];
}
}); }); });
}
function createBundleReader(data, serializer) {
var content;
if (typeof data === 'string') {
content = newTextEncoder().encode(data);
}
else {
content = data;
}
return newBundleReader(toByteStreamReader(content), serializer);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var DatabaseInfo = /** @class */ (function () {
/**
* Constructs a DatabaseInfo using the provided host, databaseId and
* persistenceKey.
*
* @param databaseId - The database to use.
* @param appId - The Firebase App Id.
* @param persistenceKey - A unique identifier for this Firestore's local
* storage (used in conjunction with the databaseId).
* @param host - The Firestore backend host to connect to.
* @param ssl - Whether to use SSL when connecting.
* @param forceLongPolling - Whether to use the forceLongPolling option
* when using WebChannel as the network transport.
* @param autoDetectLongPolling - Whether to use the detectBufferingProxy
* option when using WebChannel as the network transport.
* @param useFetchStreams - Whether to use the Fetch API instead of
* XMLHttpRequest.
*/
function DatabaseInfo(databaseId, appId, persistenceKey, host, ssl, forceLongPolling, autoDetectLongPolling, useFetchStreams) {
this.databaseId = databaseId;
this.appId = appId;
this.persistenceKey = persistenceKey;
this.host = host;
this.ssl = ssl;
this.forceLongPolling = forceLongPolling;
this.autoDetectLongPolling = autoDetectLongPolling;
this.useFetchStreams = useFetchStreams;
}
return DatabaseInfo;
}());
/** The default database name for a project. */
var DEFAULT_DATABASE_NAME = '(default)';
/** Represents the database ID a Firestore client is associated with. */
var DatabaseId = /** @class */ (function () {
function DatabaseId(projectId, database) {
this.projectId = projectId;
this.database = database ? database : DEFAULT_DATABASE_NAME;
}
Object.defineProperty(DatabaseId.prototype, "isDefaultDatabase", {
get: function () {
return this.database === DEFAULT_DATABASE_NAME;
},
enumerable: false,
configurable: true
});
DatabaseId.prototype.isEqual = function (other) {
return (other instanceof DatabaseId &&
other.projectId === this.projectId &&
other.database === this.database);
};
return DatabaseId;
}());
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG$1 = 'ComponentProvider';
/**
* An instance map that ensures only one Datastore exists per Firestore
* instance.
*/
var datastoreInstances = new Map();
/**
* Removes all components associated with the provided instance. Must be called
* when the `Firestore` instance is terminated.
*/
function removeComponents(firestore) {
var datastore = datastoreInstances.get(firestore);
if (datastore) {
logDebug(LOG_TAG$1, 'Removing Datastore');
datastoreInstances.delete(firestore);
datastore.terminate();
}
}
function makeDatabaseInfo(databaseId, appId, persistenceKey, settings) {
return new DatabaseInfo(databaseId, appId, persistenceKey, settings.host, settings.ssl, settings.experimentalForceLongPolling, settings.experimentalAutoDetectLongPolling, settings.useFetchStreams);
}
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var OAuthToken = /** @class */ (function () {
function OAuthToken(value, user) {
this.user = user;
this.type = 'OAuth';
this.authHeaders = {};
// Set the header using a string-literal key so the property name is not renamed by minification
this.authHeaders['Authorization'] = "Bearer " + value;
}
return OAuthToken;
}());
/** A CredentialsProvider that always yields an empty token. */
var EmptyCredentialsProvider = /** @class */ (function () {
function EmptyCredentialsProvider() {
/**
* Stores the listener registered with setChangeListener()
* This isn't actually necessary since the UID never changes, but we use this
* to verify the listen contract is adhered to in tests.
*/
this.changeListener = null;
}
EmptyCredentialsProvider.prototype.getToken = function () {
return Promise.resolve(null);
};
EmptyCredentialsProvider.prototype.invalidateToken = function () { };
EmptyCredentialsProvider.prototype.setChangeListener = function (asyncQueue, changeListener) {
this.changeListener = changeListener;
// Fire with initial user.
asyncQueue.enqueueRetryable(function () { return changeListener(User.UNAUTHENTICATED); });
};
EmptyCredentialsProvider.prototype.removeChangeListener = function () {
this.changeListener = null;
};
return EmptyCredentialsProvider;
}());
/**
* A CredentialsProvider that always returns a constant token. Used for
* emulator token mocking.
*/
var EmulatorCredentialsProvider = /** @class */ (function () {
function EmulatorCredentialsProvider(token) {
this.token = token;
/**
* Stores the listener registered with setChangeListener()
* This isn't actually necessary since the UID never changes, but we use this
* to verify the listen contract is adhered to in tests.
*/
this.changeListener = null;
}
EmulatorCredentialsProvider.prototype.getToken = function () {
return Promise.resolve(this.token);
};
EmulatorCredentialsProvider.prototype.invalidateToken = function () { };
EmulatorCredentialsProvider.prototype.setChangeListener = function (asyncQueue, changeListener) {
var _this = this;
this.changeListener = changeListener;
// Fire with initial user.
asyncQueue.enqueueRetryable(function () { return changeListener(_this.token.user); });
};
EmulatorCredentialsProvider.prototype.removeChangeListener = function () {
this.changeListener = null;
};
return EmulatorCredentialsProvider;
}());
var FirebaseCredentialsProvider = /** @class */ (function () {
function FirebaseCredentialsProvider(authProvider) {
var _this = this;
/** Tracks the current User. */
this.currentUser = User.UNAUTHENTICATED;
/** Promise that allows blocking on the initialization of Firebase Auth. */
this.authDeferred = new Deferred();
/**
* Counter used to detect if the token changed while a getToken request was
* outstanding.
*/
this.tokenCounter = 0;
this.forceRefresh = false;
this.auth = null;
this.asyncQueue = null;
this.tokenListener = function () {
_this.tokenCounter++;
_this.currentUser = _this.getUser();
_this.authDeferred.resolve();
if (_this.changeListener) {
_this.asyncQueue.enqueueRetryable(function () { return _this.changeListener(_this.currentUser); });
}
};
var registerAuth = function (auth) {
logDebug('FirebaseCredentialsProvider', 'Auth detected');
_this.auth = auth;
_this.auth.addAuthTokenListener(_this.tokenListener);
};
authProvider.onInit(function (auth) { return registerAuth(auth); });
// Our users can initialize Auth right after Firestore, so we give it
// a chance to register itself with the component framework before we
// determine whether to start up in unauthenticated mode.
setTimeout(function () {
if (!_this.auth) {
var auth = authProvider.getImmediate({ optional: true });
if (auth) {
registerAuth(auth);
}
else {
// If auth is still not available, proceed with `null` user
logDebug('FirebaseCredentialsProvider', 'Auth not yet detected');
_this.authDeferred.resolve();
}
}
}, 0);
}
FirebaseCredentialsProvider.prototype.getToken = function () {
var _this = this;
// Take note of the current value of the tokenCounter so that this method
// can fail (with an ABORTED error) if there is a token change while the
// request is outstanding.
var initialTokenCounter = this.tokenCounter;
var forceRefresh = this.forceRefresh;
this.forceRefresh = false;
if (!this.auth) {
return Promise.resolve(null);
}
return this.auth.getToken(forceRefresh).then(function (tokenData) {
// Cancel the request since the token changed while the request was
// outstanding so the response is potentially for a previous user (which
// user, we can't be sure).
if (_this.tokenCounter !== initialTokenCounter) {
logDebug('FirebaseCredentialsProvider', 'getToken aborted due to token change.');
return _this.getToken();
}
else {
if (tokenData) {
hardAssert(typeof tokenData.accessToken === 'string');
return new OAuthToken(tokenData.accessToken, _this.currentUser);
}
else {
return null;
}
}
});
};
FirebaseCredentialsProvider.prototype.invalidateToken = function () {
this.forceRefresh = true;
};
FirebaseCredentialsProvider.prototype.setChangeListener = function (asyncQueue, changeListener) {
var _this = this;
this.asyncQueue = asyncQueue;
// Blocks the AsyncQueue until the next user is available.
this.asyncQueue.enqueueRetryable(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0: return [4 /*yield*/, this.authDeferred.promise];
case 1:
_d.sent();
return [4 /*yield*/, changeListener(this.currentUser)];
case 2:
_d.sent();
this.changeListener = changeListener;
return [2 /*return*/];
}
});
}); });
};
FirebaseCredentialsProvider.prototype.removeChangeListener = function () {
if (this.auth) {
this.auth.removeAuthTokenListener(this.tokenListener);
}
this.changeListener = function () { return Promise.resolve(); };
};
// Auth.getUid() can return null even with a user logged in. This is because
// getUid() is synchronous, but the auth code that populates the UID is
// asynchronous. This method should only be called from the AuthTokenListener
// callback to guarantee that it returns the actual user.
FirebaseCredentialsProvider.prototype.getUser = function () {
var currentUid = this.auth && this.auth.getUid();
hardAssert(currentUid === null || typeof currentUid === 'string');
return new User(currentUid);
};
return FirebaseCredentialsProvider;
}());
/*
* FirstPartyToken provides a fresh token each time its value
* is requested, because if the token is too old, requests will be rejected.
* Technically this may no longer be necessary since the SDK should gracefully
* recover from unauthenticated errors (see b/33147818 for context), but it's
* safer to keep the implementation as-is.
*/
var FirstPartyToken = /** @class */ (function () {
function FirstPartyToken(gapi, sessionIndex, iamToken) {
this.gapi = gapi;
this.sessionIndex = sessionIndex;
this.iamToken = iamToken;
this.type = 'FirstParty';
this.user = User.FIRST_PARTY;
}
Object.defineProperty(FirstPartyToken.prototype, "authHeaders", {
get: function () {
var headers = {
'X-Goog-AuthUser': this.sessionIndex
};
// Use array notation to prevent minification
var authHeader = this.gapi['auth']['getAuthHeaderValueForFirstParty']([]);
if (authHeader) {
headers['Authorization'] = authHeader;
}
if (this.iamToken) {
headers['X-Goog-Iam-Authorization-Token'] = this.iamToken;
}
return headers;
},
enumerable: false,
configurable: true
});
return FirstPartyToken;
}());
/*
* Provides user credentials required for the Firestore JavaScript SDK
* to authenticate the user, using a technique that is only available
* to applications hosted by Google.
*/
var FirstPartyCredentialsProvider = /** @class */ (function () {
function FirstPartyCredentialsProvider(gapi, sessionIndex, iamToken) {
this.gapi = gapi;
this.sessionIndex = sessionIndex;
this.iamToken = iamToken;
}
FirstPartyCredentialsProvider.prototype.getToken = function () {
return Promise.resolve(new FirstPartyToken(this.gapi, this.sessionIndex, this.iamToken));
};
FirstPartyCredentialsProvider.prototype.setChangeListener = function (asyncQueue, changeListener) {
// Fire with initial uid.
asyncQueue.enqueueRetryable(function () { return changeListener(User.FIRST_PARTY); });
};
FirstPartyCredentialsProvider.prototype.removeChangeListener = function () { };
FirstPartyCredentialsProvider.prototype.invalidateToken = function () { };
return FirstPartyCredentialsProvider;
}());
/**
* Builds a CredentialsProvider depending on the type of
* the credentials passed in.
*/
function makeCredentialsProvider(credentials) {
if (!credentials) {
return new EmptyCredentialsProvider();
}
switch (credentials['type']) {
case 'gapi':
var client = credentials['client'];
// Make sure this really is a Gapi client.
hardAssert(!!(typeof client === 'object' &&
client !== null &&
client['auth'] &&
client['auth']['getAuthHeaderValueForFirstParty']));
return new FirstPartyCredentialsProvider(client, credentials['sessionIndex'] || '0', credentials['iamToken'] || null);
case 'provider':
return credentials['client'];
default:
throw new FirestoreError(Code.INVALID_ARGUMENT, 'makeCredentialsProvider failed due to invalid credential type');
}
}
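/*
* Usage sketch (illustrative only; never invoked by this bundle): the shape of
* a first-party credentials object accepted by makeCredentialsProvider() above,
* as implied by its 'gapi' branch. `gapiClient` is a hypothetical stand-in for
* a real Gapi client exposing auth.getAuthHeaderValueForFirstParty.
*/
function exampleMakeFirstPartyCredentials(gapiClient) {
return makeCredentialsProvider({
type: 'gapi',
client: gapiClient,
sessionIndex: '0',
iamToken: null
});
}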
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// settings() defaults:
var DEFAULT_HOST = 'firestore.googleapis.com';
var DEFAULT_SSL = true;
/**
* A concrete type describing all the values that can be applied via a
* user-supplied firestore.Settings object. This is a separate type so that
* defaults can be supplied and the value can be checked for equality.
*/
var FirestoreSettingsImpl = /** @class */ (function () {
function FirestoreSettingsImpl(settings) {
var _a;
if (settings.host === undefined) {
if (settings.ssl !== undefined) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't provide ssl option if host option is not set");
}
this.host = DEFAULT_HOST;
this.ssl = DEFAULT_SSL;
}
else {
this.host = settings.host;
this.ssl = (_a = settings.ssl) !== null && _a !== void 0 ? _a : DEFAULT_SSL;
}
this.credentials = settings.credentials;
this.ignoreUndefinedProperties = !!settings.ignoreUndefinedProperties;
if (settings.cacheSizeBytes === undefined) {
this.cacheSizeBytes = LRU_DEFAULT_CACHE_SIZE_BYTES;
}
else {
if (settings.cacheSizeBytes !== LRU_COLLECTION_DISABLED &&
settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "cacheSizeBytes must be at least " + LRU_MINIMUM_CACHE_SIZE_BYTES);
}
else {
this.cacheSizeBytes = settings.cacheSizeBytes;
}
}
this.experimentalForceLongPolling = !!settings.experimentalForceLongPolling;
this.experimentalAutoDetectLongPolling =
!!settings.experimentalAutoDetectLongPolling;
this.useFetchStreams = !!settings.useFetchStreams;
validateIsNotUsedTogether('experimentalForceLongPolling', settings.experimentalForceLongPolling, 'experimentalAutoDetectLongPolling', settings.experimentalAutoDetectLongPolling);
}
FirestoreSettingsImpl.prototype.isEqual = function (other) {
return (this.host === other.host &&
this.ssl === other.ssl &&
this.credentials === other.credentials &&
this.cacheSizeBytes === other.cacheSizeBytes &&
this.experimentalForceLongPolling ===
other.experimentalForceLongPolling &&
this.experimentalAutoDetectLongPolling ===
other.experimentalAutoDetectLongPolling &&
this.ignoreUndefinedProperties === other.ignoreUndefinedProperties &&
this.useFetchStreams === other.useFetchStreams);
};
return FirestoreSettingsImpl;
}());
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The Cloud Firestore service interface.
*
* Do not call this constructor directly. Instead, use {@link getFirestore}.
*/
var Firestore$1 = /** @class */ (function () {
/** @hideconstructor */
function Firestore$1(databaseIdOrApp, authProvider) {
/**
* Whether it's a Firestore or Firestore Lite instance.
*/
this.type = 'firestore-lite';
this._persistenceKey = '(lite)';
this._settings = new FirestoreSettingsImpl({});
this._settingsFrozen = false;
if (databaseIdOrApp instanceof DatabaseId) {
this._databaseId = databaseIdOrApp;
this._credentials = new EmptyCredentialsProvider();
}
else {
this._app = databaseIdOrApp;
this._databaseId = databaseIdFromApp(databaseIdOrApp);
this._credentials = new FirebaseCredentialsProvider(authProvider);
}
}
Object.defineProperty(Firestore$1.prototype, "app", {
/**
* The {@link @firebase/app#FirebaseApp} associated with this `Firestore` service
* instance.
*/
get: function () {
if (!this._app) {
throw new FirestoreError(Code.FAILED_PRECONDITION, "Firestore was not initialized using the Firebase SDK. 'app' is " +
'not available');
}
return this._app;
},
enumerable: false,
configurable: true
});
Object.defineProperty(Firestore$1.prototype, "_initialized", {
get: function () {
return this._settingsFrozen;
},
enumerable: false,
configurable: true
});
Object.defineProperty(Firestore$1.prototype, "_terminated", {
get: function () {
return this._terminateTask !== undefined;
},
enumerable: false,
configurable: true
});
Firestore$1.prototype._setSettings = function (settings) {
if (this._settingsFrozen) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and its settings can no longer ' +
'be changed. You can only modify settings before calling any other ' +
'methods on a Firestore object.');
}
this._settings = new FirestoreSettingsImpl(settings);
if (settings.credentials !== undefined) {
this._credentials = makeCredentialsProvider(settings.credentials);
}
};
Firestore$1.prototype._getSettings = function () {
return this._settings;
};
Firestore$1.prototype._freezeSettings = function () {
this._settingsFrozen = true;
return this._settings;
};
Firestore$1.prototype._delete = function () {
if (!this._terminateTask) {
this._terminateTask = this._terminate();
}
return this._terminateTask;
};
/** Returns a JSON-serializable representation of this Firestore instance. */
Firestore$1.prototype.toJSON = function () {
return {
app: this._app,
databaseId: this._databaseId,
settings: this._settings
};
};
/**
* Terminates all components used by this client. Subclasses can override
* this method to clean up their own dependencies, but must also call this
* method.
*
* Only ever called once.
*/
Firestore$1.prototype._terminate = function () {
removeComponents(this);
return Promise.resolve();
};
return Firestore$1;
}());
function databaseIdFromApp(app) {
if (!Object.prototype.hasOwnProperty.apply(app.options, ['projectId'])) {
throw new FirestoreError(Code.INVALID_ARGUMENT, '"projectId" not provided in firebase.initializeApp.');
}
return new DatabaseId(app.options.projectId);
}
/**
* Modify this instance to communicate with the Cloud Firestore emulator.
*
* Note: This must be called before this instance has been used to do any
* operations.
*
* @param firestore - The Firestore instance to configure to connect to the
* emulator.
* @param host - the emulator host (ex: localhost).
* @param port - the emulator port (ex: 9000).
* @param options.mockUserToken - the mock auth token to use for unit testing
* Security Rules.
*/
function connectFirestoreEmulator(firestore, host, port, options) {
if (options === void 0) { options = {}; }
firestore = cast(firestore, Firestore$1);
var settings = firestore._getSettings();
if (settings.host !== DEFAULT_HOST && settings.host !== host) {
logWarn('Host has been set in both settings() and useEmulator(), emulator host ' +
'will be used');
}
firestore._setSettings(Object.assign(Object.assign({}, settings), { host: host + ":" + port, ssl: false }));
if (options.mockUserToken) {
// Let createMockUserToken validate first (catches common mistakes like
// invalid field "uid" and missing field "sub" / "user_id".)
var token = util.createMockUserToken(options.mockUserToken);
var uid = options.mockUserToken.sub || options.mockUserToken.user_id;
if (!uid) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "mockUserToken must contain 'sub' or 'user_id' field!");
}
firestore._credentials = new EmulatorCredentialsProvider(new OAuthToken(token, new User(uid)));
}
}
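/*
* Usage sketch (illustrative only; never invoked by this bundle). `firebaseApp`
* is a hypothetical, already-initialized FirebaseApp; the host, port and mock
* token values are assumptions for a local emulator setup.
*/
function exampleConnectToEmulator(firebaseApp) {
var db = getFirestore(firebaseApp);
// Must run before any reads or writes, while the settings are still mutable.
connectFirestoreEmulator(db, 'localhost', 8080, {
mockUserToken: { sub: 'test-user' }
});
return db;
}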
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `DocumentReference` refers to a document location in a Firestore database
* and can be used to write, read, or listen to the location. The document at
* the referenced location may or may not exist.
*/
var DocumentReference = /** @class */ (function () {
/** @hideconstructor */
function DocumentReference(firestore,
/**
* If provided, the `FirestoreDataConverter` associated with this instance.
*/
converter, _key) {
this.converter = converter;
this._key = _key;
/** The type of this Firestore reference. */
this.type = 'document';
this.firestore = firestore;
}
Object.defineProperty(DocumentReference.prototype, "_path", {
get: function () {
return this._key.path;
},
enumerable: false,
configurable: true
});
Object.defineProperty(DocumentReference.prototype, "id", {
/**
* The document's identifier within its collection.
*/
get: function () {
return this._key.path.lastSegment();
},
enumerable: false,
configurable: true
});
Object.defineProperty(DocumentReference.prototype, "path", {
/**
* A string representing the path of the referenced document (relative
* to the root of the database).
*/
get: function () {
return this._key.path.canonicalString();
},
enumerable: false,
configurable: true
});
Object.defineProperty(DocumentReference.prototype, "parent", {
/**
* The collection this `DocumentReference` belongs to.
*/
get: function () {
return new CollectionReference(this.firestore, this.converter, this._key.path.popLast());
},
enumerable: false,
configurable: true
});
DocumentReference.prototype.withConverter = function (converter) {
return new DocumentReference(this.firestore, converter, this._key);
};
return DocumentReference;
}());
/**
* A `Query` refers to a query that you can read or listen to. You can also
* construct refined `Query` objects by adding filters and ordering.
*/
var Query = /** @class */ (function () {
// This is the lite version of the Query class in the main SDK.
/** @hideconstructor protected */
function Query(firestore,
/**
* If provided, the `FirestoreDataConverter` associated with this instance.
*/
converter, _query) {
this.converter = converter;
this._query = _query;
/** The type of this Firestore reference. */
this.type = 'query';
this.firestore = firestore;
}
Query.prototype.withConverter = function (converter) {
return new Query(this.firestore, converter, this._query);
};
return Query;
}());
/**
* A `CollectionReference` object can be used for adding documents, getting
* document references, and querying for documents (using {@link query}).
*/
var CollectionReference = /** @class */ (function (_super) {
tslib.__extends(CollectionReference, _super);
/** @hideconstructor */
function CollectionReference(firestore, converter, _path) {
var _this = _super.call(this, firestore, converter, newQueryForPath(_path)) || this;
_this._path = _path;
/** The type of this Firestore reference. */
_this.type = 'collection';
return _this;
}
Object.defineProperty(CollectionReference.prototype, "id", {
/** The collection's identifier. */
get: function () {
return this._query.path.lastSegment();
},
enumerable: false,
configurable: true
});
Object.defineProperty(CollectionReference.prototype, "path", {
/**
* A string representing the path of the referenced collection (relative
* to the root of the database).
*/
get: function () {
return this._query.path.canonicalString();
},
enumerable: false,
configurable: true
});
Object.defineProperty(CollectionReference.prototype, "parent", {
/**
* A reference to the containing `DocumentReference` if this is a
* subcollection. If this isn't a subcollection, the reference is null.
*/
get: function () {
var parentPath = this._path.popLast();
if (parentPath.isEmpty()) {
return null;
}
else {
return new DocumentReference(this.firestore,
/* converter= */ null, new DocumentKey(parentPath));
}
},
enumerable: false,
configurable: true
});
CollectionReference.prototype.withConverter = function (converter) {
return new CollectionReference(this.firestore, converter, this._path);
};
return CollectionReference;
}(Query));
function collection(parent, path) {
var pathSegments = [];
for (var _i = 2; _i < arguments.length; _i++) {
pathSegments[_i - 2] = arguments[_i];
}
parent = util.getModularInstance(parent);
validateNonEmptyArgument('collection', 'path', path);
if (parent instanceof Firestore$1) {
var absolutePath = ResourcePath.fromString.apply(ResourcePath, tslib.__spreadArray([path], pathSegments));
validateCollectionPath(absolutePath);
return new CollectionReference(parent, /* converter= */ null, absolutePath);
}
else {
if (!(parent instanceof DocumentReference) &&
!(parent instanceof CollectionReference)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to collection() to be a CollectionReference, ' +
'a DocumentReference or FirebaseFirestore');
}
var absolutePath = ResourcePath.fromString.apply(ResourcePath, tslib.__spreadArray([parent.path], pathSegments)).child(ResourcePath.fromString(path));
validateCollectionPath(absolutePath);
return new CollectionReference(parent.firestore,
/* converter= */ null, absolutePath);
}
}
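/*
* Usage sketch (illustrative only; never invoked by this bundle). `db` is a
* hypothetical Firestore instance and the 'users'/'orders' paths are made up;
* collection() accepts either a Firestore instance or an existing reference as
* its parent.
*/
function exampleCollectionRefs(db) {
// A top-level collection.
var users = collection(db, 'users');
// A subcollection, built from a document reference.
var orders = collection(doc(users, 'alice'), 'orders');
return { users: users, orders: orders };
}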
// TODO(firestorelite): Consider using ErrorFactory -
// https://github.com/firebase/firebase-js-sdk/blob/0131e1f/packages/util/src/errors.ts#L106
/**
* Creates and returns a new `Query` instance that includes all documents in the
* database that are contained in a collection or subcollection with the
* given `collectionId`.
*
* @param firestore - A reference to the root Firestore instance.
* @param collectionId - Identifies the collections to query over. Every
* collection or subcollection with this ID as the last segment of its path
* will be included. Cannot contain a slash.
* @returns The created `Query`.
*/
function collectionGroup(firestore, collectionId) {
firestore = cast(firestore, Firestore$1);
validateNonEmptyArgument('collectionGroup', 'collection id', collectionId);
if (collectionId.indexOf('/') >= 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid collection ID '" + collectionId + "' passed to function " +
"collectionGroup(). Collection IDs must not contain '/'.");
}
return new Query(firestore,
/* converter= */ null, newQueryForCollectionGroup(collectionId));
}
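/*
* Usage sketch (illustrative only; never invoked by this bundle). `db` is a
* hypothetical Firestore instance; 'orders' is an assumed collection ID shared
* by several subcollections.
*/
function exampleCollectionGroupQuery(db) {
// Matches every collection or subcollection whose last path segment is 'orders'.
return collectionGroup(db, 'orders');
}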
function doc(parent, path) {
var pathSegments = [];
for (var _i = 2; _i < arguments.length; _i++) {
pathSegments[_i - 2] = arguments[_i];
}
parent = util.getModularInstance(parent);
// We allow omission of 'pathString' but explicitly prohibit passing in both
// 'undefined' and 'null'.
if (arguments.length === 1) {
path = AutoId.newId();
}
validateNonEmptyArgument('doc', 'path', path);
if (parent instanceof Firestore$1) {
var absolutePath = ResourcePath.fromString.apply(ResourcePath, tslib.__spreadArray([path], pathSegments));
validateDocumentPath(absolutePath);
return new DocumentReference(parent,
/* converter= */ null, new DocumentKey(absolutePath));
}
else {
if (!(parent instanceof DocumentReference) &&
!(parent instanceof CollectionReference)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to doc() to be a CollectionReference, ' +
'a DocumentReference or FirebaseFirestore');
}
var absolutePath = parent._path.child(ResourcePath.fromString.apply(ResourcePath, tslib.__spreadArray([path], pathSegments)));
validateDocumentPath(absolutePath);
return new DocumentReference(parent.firestore, parent instanceof CollectionReference ? parent.converter : null, new DocumentKey(absolutePath));
}
}
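/*
* Usage sketch (illustrative only; never invoked by this bundle). `db` is a
* hypothetical Firestore instance; the paths are made up.
*/
function exampleDocRefs(db) {
// An explicit document path.
var alice = doc(db, 'users/alice');
// Omitting the path generates a reference with a fresh auto-ID.
var newUser = doc(collection(db, 'users'));
return { alice: alice, newUser: newUser };
}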
/**
* Returns true if the provided references are equal.
*
* @param left - A reference to compare.
* @param right - A reference to compare.
* @returns true if the references point to the same location in the same
* Firestore database.
*/
function refEqual(left, right) {
left = util.getModularInstance(left);
right = util.getModularInstance(right);
if ((left instanceof DocumentReference ||
left instanceof CollectionReference) &&
(right instanceof DocumentReference || right instanceof CollectionReference)) {
return (left.firestore === right.firestore &&
left.path === right.path &&
left.converter === right.converter);
}
return false;
}
/**
* Returns true if the provided queries point to the same collection and apply
* the same constraints.
*
* @param left - A `Query` to compare.
* @param right - A `Query` to compare.
* @returns true if the references point to the same location in the same
* Firestore database.
*/
function queryEqual(left, right) {
left = util.getModularInstance(left);
right = util.getModularInstance(right);
if (left instanceof Query && right instanceof Query) {
return (left.firestore === right.firestore &&
queryEquals(left._query, right._query) &&
left.converter === right.converter);
}
return false;
}
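/*
* Usage sketch (illustrative only; never invoked by this bundle) showing the
* equality helpers above. `db` is a hypothetical Firestore instance.
*/
function exampleReferenceEquality(db) {
var a = doc(db, 'users/alice');
var b = doc(collection(db, 'users'), 'alice');
// true: same instance, same path, same (null) converter.
var sameRef = refEqual(a, b);
// true: both queries target the same collection with no extra constraints.
var sameQuery = queryEqual(collection(db, 'users'), collection(db, 'users'));
return sameRef && sameQuery;
}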
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var LOG_TAG = 'AsyncQueue';
var AsyncQueueImpl = /** @class */ (function () {
function AsyncQueueImpl() {
var _this = this;
// The last promise in the queue.
this.tail = Promise.resolve();
// A list of retryable operations. Retryable operations are run in order and
// retried with backoff.
this.retryableOps = [];
// Is this AsyncQueue being shut down? Once it is set to true, it will not
// be changed again.
this._isShuttingDown = false;
// Operations scheduled to be queued in the future. Operations are
// automatically removed after they are run or canceled.
this.delayedOperations = [];
// visible for testing
this.failure = null;
// Flag set while there's an outstanding AsyncQueue operation, used for
// assertion sanity-checks.
this.operationInProgress = false;
// Enabled during shutdown on Safari to prevent future access to IndexedDB.
this.skipNonRestrictedTasks = false;
// List of TimerIds to fast-forward delays for.
this.timerIdsToSkip = [];
// Backoff timer used to schedule retries for retryable operations
this.backoff = new ExponentialBackoff(this, "async_queue_retry" /* AsyncQueueRetry */);
// Visibility handler that triggers an immediate retry of all retryable
// operations. Meant to speed up recovery when we regain file system access
// after page comes into foreground.
this.visibilityHandler = function () {
_this.backoff.skipBackoff();
};
}
Object.defineProperty(AsyncQueueImpl.prototype, "isShuttingDown", {
get: function () {
return this._isShuttingDown;
},
enumerable: false,
configurable: true
});
/**
* Adds a new operation to the queue without waiting for it to complete (i.e.
* we ignore the Promise result).
*/
AsyncQueueImpl.prototype.enqueueAndForget = function (op) {
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.enqueue(op);
};
AsyncQueueImpl.prototype.enqueueAndForgetEvenWhileRestricted = function (op) {
this.verifyNotFailed();
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.enqueueInternal(op);
};
AsyncQueueImpl.prototype.enterRestrictedMode = function (purgeExistingTasks) {
if (!this._isShuttingDown) {
this._isShuttingDown = true;
this.skipNonRestrictedTasks = purgeExistingTasks || false;
}
};
AsyncQueueImpl.prototype.enqueue = function (op) {
var _this = this;
this.verifyNotFailed();
if (this._isShuttingDown) {
// Return a Promise which never resolves.
return new Promise(function () { });
}
// Create a deferred Promise that we can return to the caller. This
// allows us to return a "hanging Promise" only to the caller and still
// advance the queue even when the operation is not run.
var task = new Deferred();
return this.enqueueInternal(function () {
if (_this._isShuttingDown && _this.skipNonRestrictedTasks) {
// We do not resolve 'task'
return Promise.resolve();
}
op().then(task.resolve, task.reject);
return task.promise;
}).then(function () { return task.promise; });
};
AsyncQueueImpl.prototype.enqueueRetryable = function (op) {
var _this = this;
this.enqueueAndForget(function () {
_this.retryableOps.push(op);
return _this.retryNextOp();
});
};
/**
* Runs the next operation from the retryable queue. If the operation fails,
* reschedules with backoff.
*/
AsyncQueueImpl.prototype.retryNextOp = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var e_14;
var _this = this;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
if (this.retryableOps.length === 0) {
return [2 /*return*/];
}
_d.label = 1;
case 1:
_d.trys.push([1, 3, , 4]);
return [4 /*yield*/, this.retryableOps[0]()];
case 2:
_d.sent();
this.retryableOps.shift();
this.backoff.reset();
return [3 /*break*/, 4];
case 3:
e_14 = _d.sent();
if (isIndexedDbTransactionError(e_14)) {
logDebug(LOG_TAG, 'Operation failed with retryable error: ' + e_14);
}
else {
throw e_14; // Failure will be handled by AsyncQueue
}
return [3 /*break*/, 4];
case 4:
if (this.retryableOps.length > 0) {
// If there are additional operations, we re-schedule `retryNextOp()`.
// This is necessary to run retryable operations that failed during
// their initial attempt since we don't know whether they are already
// enqueued. If, for example, `op1`, `op2`, `op3` are enqueued and `op1`
// needs to be re-run, we will run `op1`, `op1`, `op2` using the
// already enqueued calls to `retryNextOp()`. `op3()` will then run in the
// call scheduled here.
// Since `backoffAndRun()` cancels an existing backoff and schedules a
// new backoff on every call, there is only ever a single additional
// operation in the queue.
this.backoff.backoffAndRun(function () { return _this.retryNextOp(); });
}
return [2 /*return*/];
}
});
});
};
AsyncQueueImpl.prototype.enqueueInternal = function (op) {
var _this = this;
var newTail = this.tail.then(function () {
_this.operationInProgress = true;
return op()
.catch(function (error) {
_this.failure = error;
_this.operationInProgress = false;
var message = getMessageOrStack(error);
logError('INTERNAL UNHANDLED ERROR: ', message);
// Re-throw the error so that this.tail becomes a rejected Promise and
// all further attempts to chain (via .then) will just short-circuit
// and return the rejected Promise.
throw error;
})
.then(function (result) {
_this.operationInProgress = false;
return result;
});
});
this.tail = newTail;
return newTail;
};
AsyncQueueImpl.prototype.enqueueAfterDelay = function (timerId, delayMs, op) {
var _this = this;
this.verifyNotFailed();
// Fast-forward delays for timerIds that have been overridden.
if (this.timerIdsToSkip.indexOf(timerId) > -1) {
delayMs = 0;
}
var delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, function (removedOp) { return _this.removeDelayedOperation(removedOp); });
this.delayedOperations.push(delayedOp);
return delayedOp;
};
AsyncQueueImpl.prototype.verifyNotFailed = function () {
if (this.failure) {
fail();
}
};
AsyncQueueImpl.prototype.verifyOperationInProgress = function () {
};
/**
* Waits until all currently queued tasks are finished executing. Delayed
* operations are not run.
*/
AsyncQueueImpl.prototype.drain = function () {
return tslib.__awaiter(this, void 0, void 0, function () {
var currentTail;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
currentTail = this.tail;
return [4 /*yield*/, currentTail];
case 1:
_d.sent();
_d.label = 2;
case 2:
if (currentTail !== this.tail) return [3 /*break*/, 0];
_d.label = 3;
case 3: return [2 /*return*/];
}
});
});
};
/**
* For Tests: Determine if a delayed operation with a particular TimerId
* exists.
*/
AsyncQueueImpl.prototype.containsDelayedOperation = function (timerId) {
for (var _i = 0, _d = this.delayedOperations; _i < _d.length; _i++) {
var op = _d[_i];
if (op.timerId === timerId) {
return true;
}
}
return false;
};
/**
* For Tests: Runs some or all delayed operations early.
*
* @param lastTimerId - Delayed operations up to and including this TimerId
* will be drained. Pass TimerId.All to run all delayed operations.
* @returns a Promise that resolves once all operations have been run.
*/
AsyncQueueImpl.prototype.runAllDelayedOperationsUntil = function (lastTimerId) {
var _this = this;
// Note that draining may generate more delayed ops, so we do that first.
return this.drain().then(function () {
// Run ops in the same order they'd run if they ran naturally.
_this.delayedOperations.sort(function (a, b) { return a.targetTimeMs - b.targetTimeMs; });
for (var _i = 0, _d = _this.delayedOperations; _i < _d.length; _i++) {
var op = _d[_i];
op.skipDelay();
if (lastTimerId !== "all" /* All */ && op.timerId === lastTimerId) {
break;
}
}
return _this.drain();
});
};
/**
* For Tests: Skip all subsequent delays for a timer id.
*/
AsyncQueueImpl.prototype.skipDelaysForTimerId = function (timerId) {
this.timerIdsToSkip.push(timerId);
};
/** Called once a DelayedOperation is run or canceled. */
AsyncQueueImpl.prototype.removeDelayedOperation = function (op) {
// NOTE: indexOf / slice are O(n), but delayedOperations is expected to be small.
var index = this.delayedOperations.indexOf(op);
this.delayedOperations.splice(index, 1);
};
return AsyncQueueImpl;
}());
function newAsyncQueue() {
return new AsyncQueueImpl();
}
/**
* Chrome includes Error.message in Error.stack. Other browsers do not.
* This returns expected output of message + stack when available.
* @param error - Error or FirestoreError
*/
function getMessageOrStack(error) {
var message = error.message || '';
if (error.stack) {
if (error.stack.includes(error.message)) {
message = error.stack;
}
else {
message = error.message + '\n' + error.stack;
}
}
return message;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Represents the task of loading a Firestore bundle. It provides progress updates
* loading, as well as task completion and error events.
*
* The API is compatible with `Promise`.
*/
var LoadBundleTask = /** @class */ (function () {
function LoadBundleTask() {
this._progressObserver = {};
this._taskCompletionResolver = new Deferred();
this._lastProgress = {
taskState: 'Running',
totalBytes: 0,
totalDocuments: 0,
bytesLoaded: 0,
documentsLoaded: 0
};
}
/**
* Registers functions to listen to bundle loading progress events.
* @param next - Called when there is a progress update from bundle loading. Typically `next` calls occur
* each time a Firestore document is loaded from the bundle.
* @param error - Called when an error occurs during bundle loading. The task aborts after reporting the
* error, and there should be no more updates after this.
* @param complete - Called when the loading task is complete.
*/
LoadBundleTask.prototype.onProgress = function (next, error, complete) {
this._progressObserver = {
next: next,
error: error,
complete: complete
};
};
/**
* Implements the `Promise.catch` interface.
*
* @param onRejected - Called when an error occurs during bundle loading.
*/
LoadBundleTask.prototype.catch = function (onRejected) {
return this._taskCompletionResolver.promise.catch(onRejected);
};
/**
* Implements the `Promise.then` interface.
*
* @param onFulfilled - Called on the completion of the loading task with a final `LoadBundleTaskProgress` update.
* The update will always have its `taskState` set to `"Success"`.
* @param onRejected - Called when an error occurs during bundle loading.
*/
LoadBundleTask.prototype.then = function (onFulfilled, onRejected) {
return this._taskCompletionResolver.promise.then(onFulfilled, onRejected);
};
/**
* Notifies all observers that bundle loading has completed, with a provided
* `LoadBundleTaskProgress` object.
*
* @private
*/
LoadBundleTask.prototype._completeWith = function (progress) {
this._updateProgress(progress);
if (this._progressObserver.complete) {
this._progressObserver.complete();
}
this._taskCompletionResolver.resolve(progress);
};
/**
* Notifies all observers that bundle loading has failed, with a provided
* `Error` as the reason.
*
* @private
*/
LoadBundleTask.prototype._failWith = function (error) {
this._lastProgress.taskState = 'Error';
if (this._progressObserver.next) {
this._progressObserver.next(this._lastProgress);
}
if (this._progressObserver.error) {
this._progressObserver.error(error);
}
this._taskCompletionResolver.reject(error);
};
/**
* Notifies a progress update of loading a bundle.
* @param progress - The new progress.
*
* @private
*/
LoadBundleTask.prototype._updateProgress = function (progress) {
this._lastProgress = progress;
if (this._progressObserver.next) {
this._progressObserver.next(progress);
}
};
return LoadBundleTask;
}());
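/*
* Usage sketch (illustrative only; never invoked by this bundle): observing a
* LoadBundleTask such as the one returned by loadBundle() further below.
* `task` is assumed to be such a task.
*/
function exampleObserveBundleTask(task) {
task.onProgress(function (progress) {
console.log(progress.documentsLoaded + '/' + progress.totalDocuments + ' documents loaded');
}, function (error) {
console.error('Bundle loading failed', error);
}, function () {
console.log('Bundle loading complete');
});
// The task is also Promise-like.
return task.then(function (finalProgress) { return finalProgress.taskState; });
}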
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** DOMException error code constants. */
var DOM_EXCEPTION_INVALID_STATE = 11;
var DOM_EXCEPTION_ABORTED = 20;
var DOM_EXCEPTION_QUOTA_EXCEEDED = 22;
/**
* Constant used to indicate the LRU garbage collection should be disabled.
* Set this value as the `cacheSizeBytes` on the settings passed to the
* `Firestore` instance.
*/
var CACHE_SIZE_UNLIMITED = LRU_COLLECTION_DISABLED;
/**
* The Cloud Firestore service interface.
*
* Do not call this constructor directly. Instead, use {@link getFirestore}.
*/
var Firestore = /** @class */ (function (_super) {
tslib.__extends(Firestore, _super);
/** @hideconstructor */
function Firestore(databaseIdOrApp, authProvider) {
var _this = _super.call(this, databaseIdOrApp, authProvider) || this;
/**
* Whether it's a Firestore or Firestore Lite instance.
*/
_this.type = 'firestore';
_this._queue = newAsyncQueue();
_this._persistenceKey =
'name' in databaseIdOrApp ? databaseIdOrApp.name : '[DEFAULT]';
return _this;
}
Firestore.prototype._terminate = function () {
if (!this._firestoreClient) {
// The client must be initialized to ensure that all subsequent API
// usage throws an exception.
configureFirestore(this);
}
return this._firestoreClient.terminate();
};
return Firestore;
}(Firestore$1));
/**
* Initializes a new instance of Cloud Firestore with the provided settings.
* Can only be called before any other function, including
* {@link getFirestore}. If the custom settings are empty, this function is
* equivalent to calling {@link getFirestore}.
*
* @param app - The {@link @firebase/app#FirebaseApp} with which the `Firestore` instance will
* be associated.
* @param settings - A settings object to configure the `Firestore` instance.
* @returns A newly initialized `Firestore` instance.
*/
function initializeFirestore(app$1, settings) {
var provider = app._getProvider(app$1, 'firestore-exp');
if (provider.isInitialized()) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore can only be initialized once per app.');
}
if (settings.cacheSizeBytes !== undefined &&
settings.cacheSizeBytes !== CACHE_SIZE_UNLIMITED &&
settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "cacheSizeBytes must be at least " + LRU_MINIMUM_CACHE_SIZE_BYTES);
}
return provider.initialize({ options: settings });
}
/**
* Returns the existing instance of Firestore that is associated with the
* provided {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new
* instance with default settings.
*
* @param app - The {@link @firebase/app#FirebaseApp} instance that the returned Firestore
* instance is associated with.
* @returns The `Firestore` instance of the provided app.
*/
function getFirestore(app$1) {
if (app$1 === void 0) { app$1 = app.getApp(); }
return app._getProvider(app$1, 'firestore-exp').getImmediate();
}
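/*
* Usage sketch (illustrative only; never invoked by this bundle). `firebaseApp`
* is a hypothetical FirebaseApp created elsewhere with initializeApp(); the
* settings shown are merely examples of valid options.
*/
function exampleInitFirestore(firebaseApp) {
// Configure the instance explicitly, once, before any other call...
var db = initializeFirestore(firebaseApp, {
cacheSizeBytes: CACHE_SIZE_UNLIMITED,
ignoreUndefinedProperties: true
});
// ...or, later, retrieve the instance associated with the same app.
var sameDb = getFirestore(firebaseApp);
return { db: db, sameDb: sameDb };
}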
/**
* @internal
*/
function ensureFirestoreConfigured(firestore) {
if (!firestore._firestoreClient) {
configureFirestore(firestore);
}
firestore._firestoreClient.verifyNotTerminated();
return firestore._firestoreClient;
}
function configureFirestore(firestore) {
var _a;
var settings = firestore._freezeSettings();
var databaseInfo = makeDatabaseInfo(firestore._databaseId, ((_a = firestore._app) === null || _a === void 0 ? void 0 : _a.options.appId) || '', firestore._persistenceKey, settings);
firestore._firestoreClient = new FirestoreClient(firestore._credentials, firestore._queue, databaseInfo);
}
/**
* Attempts to enable persistent storage, if possible.
*
* Must be called before any other functions (other than
* {@link initializeFirestore}, {@link getFirestore} or
* {@link clearIndexedDbPersistence}).
*
* If this fails, `enableIndexedDbPersistence()` will reject the promise it
* returns. Note that even after this failure, the `Firestore` instance will
* remain usable, however offline persistence will be disabled.
*
* There are several reasons why this can fail, which can be identified by
* the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab.
* * unimplemented: The browser is incompatible with the offline
* persistence implementation.
*
* @param firestore - The `Firestore` instance to enable persistence for.
* @param persistenceSettings - Optional settings object to configure
* persistence.
* @returns A promise that represents successfully enabling persistent storage.
*/
function enableIndexedDbPersistence(firestore, persistenceSettings) {
firestore = cast(firestore, Firestore);
verifyNotInitialized(firestore);
var client = ensureFirestoreConfigured(firestore);
var settings = firestore._freezeSettings();
var onlineComponentProvider = new OnlineComponentProvider();
var offlineComponentProvider = new IndexedDbOfflineComponentProvider(onlineComponentProvider, settings.cacheSizeBytes, persistenceSettings === null || persistenceSettings === void 0 ? void 0 : persistenceSettings.forceOwnership);
return setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider);
}
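/*
* Usage sketch (illustrative only; never invoked by this bundle). `db` is a
* hypothetical Firestore instance on which no operations have run yet.
*/
function exampleEnablePersistence(db) {
return enableIndexedDbPersistence(db).catch(function (err) {
if (err.code === 'failed-precondition') {
// Another tab already owns persistence; continue without it.
} else if (err.code === 'unimplemented') {
// The browser lacks the required IndexedDB features.
}
// The instance stays usable either way, just without offline persistence.
});
}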
/**
* Attempts to enable multi-tab persistent storage, if possible. If enabled
* across all tabs, all operations share access to local persistence, including
* shared execution of queries and latency-compensated local document updates
* across all connected instances.
*
* If this fails, `enableMultiTabIndexedDbPersistence()` will reject the promise
* it returns. Note that even after this failure, the `Firestore` instance will
* remain usable, however offline persistence will be disabled.
*
* There are several reasons why this can fail, which can be identified by
* the `code` on the error.
*
* * failed-precondition: The app is already open in another browser tab and
* multi-tab is not enabled.
* * unimplemented: The browser is incompatible with the offline
* persistence implementation.
*
* @param firestore - The `Firestore` instance to enable persistence for.
* @returns A promise that represents successfully enabling persistent
* storage.
*/
function enableMultiTabIndexedDbPersistence(firestore) {
firestore = cast(firestore, Firestore);
verifyNotInitialized(firestore);
var client = ensureFirestoreConfigured(firestore);
var settings = firestore._freezeSettings();
var onlineComponentProvider = new OnlineComponentProvider();
var offlineComponentProvider = new MultiTabOfflineComponentProvider(onlineComponentProvider, settings.cacheSizeBytes);
return setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider);
}
/**
* Registers both the `OfflineComponentProvider` and `OnlineComponentProvider`.
* If the operation fails with a recoverable error (see
* `canFallbackFromIndexedDbError()` below), the returned Promise is rejected
* but the client remains usable.
*/
function setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider) {
var _this = this;
var persistenceResult = new Deferred();
return client.asyncQueue
.enqueue(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var e_15;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
_d.trys.push([0, 3, , 4]);
return [4 /*yield*/, setOfflineComponentProvider(client, offlineComponentProvider)];
case 1:
_d.sent();
return [4 /*yield*/, setOnlineComponentProvider(client, onlineComponentProvider)];
case 2:
_d.sent();
persistenceResult.resolve();
return [3 /*break*/, 4];
case 3:
e_15 = _d.sent();
if (!canFallbackFromIndexedDbError(e_15)) {
throw e_15;
}
console.warn('Error enabling offline persistence. Falling back to ' +
'persistence disabled: ' +
e_15);
persistenceResult.reject(e_15);
return [3 /*break*/, 4];
case 4: return [2 /*return*/];
}
});
}); })
.then(function () { return persistenceResult.promise; });
}
/**
* Decides whether the provided error allows us to gracefully disable
* persistence (as opposed to crashing the client).
*/
function canFallbackFromIndexedDbError(error) {
if (error.name === 'FirebaseError') {
return (error.code === Code.FAILED_PRECONDITION ||
error.code === Code.UNIMPLEMENTED);
}
else if (typeof DOMException !== 'undefined' &&
error instanceof DOMException) {
// There are a few known circumstances where we can open IndexedDb but
// trying to read/write will fail (e.g. quota exceeded). For
// well-understood cases, we attempt to detect these and then gracefully
// fall back to memory persistence.
// NOTE: Rather than continue to add to this list, we could decide to
// always fall back, with the risk that we might accidentally hide errors
// representing actual SDK bugs.
return (
// When the browser is out of quota we could get either quota exceeded
// or an aborted error depending on whether the error happened during
// schema migration.
error.code === DOM_EXCEPTION_QUOTA_EXCEEDED ||
error.code === DOM_EXCEPTION_ABORTED ||
// Firefox Private Browsing mode disables IndexedDb and returns
// INVALID_STATE for any usage.
error.code === DOM_EXCEPTION_INVALID_STATE);
}
return true;
}
/**
* Clears the persistent storage. This includes pending writes and cached
* documents.
*
* Must be called while the `Firestore` instance is not started (after the app is
* terminated or when the app is first initialized). On startup, this function
* must be called before other functions (other than {@link
* initializeFirestore} or {@link getFirestore}). If the `Firestore`
* instance is still running, the promise will be rejected with the error code
* of `failed-precondition`.
*
* Note: `clearIndexedDbPersistence()` is primarily intended to help write
* reliable tests that use Cloud Firestore. It uses an efficient mechanism for
* dropping existing data but does not attempt to securely overwrite or
* otherwise make cached data unrecoverable. For applications that are sensitive
* to the disclosure of cached data in between user sessions, we strongly
* recommend not enabling persistence at all.
*
* @param firestore - The `Firestore` instance to clear persistence for.
* @returns A promise that is resolved when the persistent storage is
* cleared. Otherwise, the promise is rejected with an error.
*/
function clearIndexedDbPersistence(firestore) {
var _this = this;
if (firestore._initialized && !firestore._terminated) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'Persistence can only be cleared before a Firestore instance is ' +
'initialized or after it is terminated.');
}
var deferred = new Deferred();
firestore._queue.enqueueAndForgetEvenWhileRestricted(function () { return tslib.__awaiter(_this, void 0, void 0, function () {
var e_16;
return tslib.__generator(this, function (_d) {
switch (_d.label) {
case 0:
_d.trys.push([0, 2, , 3]);
return [4 /*yield*/, indexedDbClearPersistence(indexedDbStoragePrefix(firestore._databaseId, firestore._persistenceKey))];
case 1:
_d.sent();
deferred.resolve();
return [3 /*break*/, 3];
case 2:
e_16 = _d.sent();
deferred.reject(e_16);
return [3 /*break*/, 3];
case 3: return [2 /*return*/];
}
});
}); });
return deferred.promise;
}
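/*
* Usage sketch (illustrative only; never invoked by this bundle): wiping cached
* data between test runs. `db` is a hypothetical Firestore instance.
*/
function exampleClearPersistence(db) {
// Terminate first so the instance is no longer considered "started".
return terminate(db).then(function () {
return clearIndexedDbPersistence(db);
});
}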
/**
* Waits until all currently pending writes for the active user have been
* acknowledged by the backend.
*
* The returned Promise resolves immediately if there are no outstanding writes.
* Otherwise, the Promise waits for all previously issued writes (including
* those written in a previous app session), but it does not wait for writes
* that were added after the function is called. If you want to wait for
* additional writes, call `waitForPendingWrites()` again.
*
* Any outstanding `waitForPendingWrites()` Promises are rejected during user
* changes.
*
* @returns A Promise which resolves when all currently pending writes have been
* acknowledged by the backend.
*/
function waitForPendingWrites(firestore) {
firestore = cast(firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
return firestoreClientWaitForPendingWrites(client);
}
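/*
* Usage sketch (illustrative only; never invoked by this bundle). `db` is a
* hypothetical Firestore instance with writes queued while offline.
*/
function exampleWaitForWrites(db) {
return waitForPendingWrites(db).then(function () {
// Every write issued before this call has now been acknowledged.
console.log('All pending writes acknowledged by the backend');
});
}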
/**
* Re-enables use of the network for this Firestore instance after a prior
* call to {@link disableNetwork}.
*
* @returns A promise that is resolved once the network has been enabled.
*/
function enableNetwork(firestore) {
firestore = cast(firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
return firestoreClientEnableNetwork(client);
}
/**
* Disables network usage for this instance. It can be re-enabled via {@link
* enableNetwork}. While the network is disabled, any snapshot listeners,
* `getDoc()` or `getDocs()` calls will return results from cache, and any write
* operations will be queued until the network is restored.
*
* @returns A promise that is resolved once the network has been disabled.
*/
function disableNetwork(firestore) {
firestore = cast(firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
return firestoreClientDisableNetwork(client);
}
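/*
* Usage sketch (illustrative only; never invoked by this bundle): forcing the
* client into cache-only mode and back. `db` is a hypothetical Firestore
* instance.
*/
function exampleToggleNetwork(db) {
return disableNetwork(db).then(function () {
// Reads are now served from cache and writes queue locally.
return enableNetwork(db);
});
}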
/**
* Terminates the provided Firestore instance.
*
* After calling `terminate()` only the `clearIndexedDbPersistence()` function
* may be used. Any other function will throw a `FirestoreError`.
*
* To restart after termination, create a new instance of FirebaseFirestore with
* {@link getFirestore}.
*
* Termination does not cancel any pending writes, and any promises that are
* awaiting a response from the server will not be resolved. If you have
* persistence enabled, the next time you start this instance, it will resume
* sending these writes to the server.
*
* Note: Under normal circumstances, calling `terminate()` is not required. This
* function is useful only when you want to force this instance to release all
* of its resources or in combination with `clearIndexedDbPersistence()` to
* ensure that all local state is destroyed between test runs.
*
* @returns A promise that is resolved when the instance has been successfully
* terminated.
*/
function terminate(firestore) {
app._removeServiceInstance(firestore.app, 'firestore-exp');
return firestore._delete();
}
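/*
* Usage sketch (illustrative only; never invoked by this bundle): releasing an
* instance's resources, e.g. at the end of a test. `db` is a hypothetical
* Firestore instance.
*/
function exampleTerminate(db) {
return terminate(db).then(function () {
// From here on, only clearIndexedDbPersistence(db) may be called on `db`.
console.log('Firestore instance terminated');
});
}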
/**
* Loads a Firestore bundle into the local cache.
*
* @param firestore - The `Firestore` instance to load bundles for.
* @param bundleData - An object representing the bundle to be loaded. Valid objects are
* `ArrayBuffer`, `ReadableStream` or `string`.
*
* @returns
* A `LoadBundleTask` object, which notifies callers with progress updates, and completion
* or error events. It can be used as a `Promise`.
*/
function loadBundle(firestore, bundleData) {
firestore = cast(firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
var resultTask = new LoadBundleTask();
firestoreClientLoadBundle(client, firestore._databaseId, bundleData, resultTask);
return resultTask;
}
/**
* Reads a Firestore `Query` from local cache, identified by the given name.
*
* The named queries are packaged into bundles on the server side (along
* with the resulting documents) and loaded into the local cache using `loadBundle`. Once in local
* cache, use this method to extract a `Query` by name.
*/
function namedQuery(firestore, name) {
firestore = cast(firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
return firestoreClientGetNamedQuery(client, name).then(function (namedQuery) {
if (!namedQuery) {
return null;
}
return new Query(firestore, null, namedQuery.query);
});
}
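/*
* Usage sketch (illustrative only; never invoked by this bundle). `db` is a
* hypothetical Firestore instance, `bundleData` a bundle payload (string,
* ArrayBuffer or ReadableStream) fetched elsewhere, and 'latest-stories' a
* made-up query name packaged into that bundle.
*/
function exampleLoadBundle(db, bundleData) {
return loadBundle(db, bundleData).then(function () {
return namedQuery(db, 'latest-stories');
}).then(function (storedQuery) {
// `storedQuery` is null if the bundle did not contain that name; otherwise
// it can be read from cache with the regular query APIs.
return storedQuery;
});
}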
function verifyNotInitialized(firestore) {
if (firestore._initialized || firestore._terminated) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and persistence can no longer be ' +
'enabled. You can only enable persistence before calling any other ' +
'methods on a Firestore object.');
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function registerFirestore(variant) {
setSDKVersion(app.SDK_VERSION);
app._registerComponent(new component.Component('firestore-exp', function (container, _d) {
var settings = _d.options;
var app = container.getProvider('app-exp').getImmediate();
var firestoreInstance = new Firestore(app, container.getProvider('auth-internal'));
settings = Object.assign({ useFetchStreams: false }, settings);
firestoreInstance._setSettings(settings);
return firestoreInstance;
}, "PUBLIC" /* PUBLIC */));
app.registerVersion(name, version$1, variant);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `FieldPath` refers to a field in a document. The path may consist of a
* single field name (referring to a top-level field in the document), or a
* list of field names (referring to a nested field in the document).
*
* Create a `FieldPath` by providing field names. If more than one field
* name is provided, the path will point to a nested field in a document.
*/
var FieldPath = /** @class */ (function () {
/**
* Creates a FieldPath from the provided field names. If more than one field
* name is provided, the path will point to a nested field in a document.
*
* @param fieldNames - A list of field names.
*/
function FieldPath() {
var fieldNames = [];
for (var _i = 0; _i < arguments.length; _i++) {
fieldNames[_i] = arguments[_i];
}
for (var i = 0; i < fieldNames.length; ++i) {
if (fieldNames[i].length === 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid field name at argument $(i + 1). " +
'Field names must not be empty.');
}
}
this._internalPath = new FieldPath$1(fieldNames);
}
/**
* Returns true if this `FieldPath` is equal to the provided one.
*
* @param other - The `FieldPath` to compare against.
* @returns true if this `FieldPath` is equal to the provided one.
*/
FieldPath.prototype.isEqual = function (other) {
return this._internalPath.isEqual(other._internalPath);
};
return FieldPath;
}());
/**
* Returns a special sentinel `FieldPath` to refer to the ID of a document.
* It can be used in queries to sort or filter by the document ID.
*/
function documentId() {
return new FieldPath(DOCUMENT_KEY_NAME);
}
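/*
 * Usage sketch (illustrative only): addressing a nested field with FieldPath
 * and filtering on the document ID with documentId(). The collection, document
 * and field names are hypothetical; `doc`, `collection`, `updateDoc`, `query`
 * and `where` are assumed to be available from the modular API.
 *
 *   await updateDoc(doc(db, 'users/alice'), new FieldPath('address', 'city'), 'Tokyo');
 *   const byId = query(collection(db, 'users'), where(documentId(), '==', 'alice'));
 */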
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An immutable object representing an array of bytes.
*/
var Bytes = /** @class */ (function () {
/** @hideconstructor */
function Bytes(byteString) {
this._byteString = byteString;
}
/**
* Creates a new `Bytes` object from the given Base64 string, converting it to
* bytes.
*
* @param base64 - The Base64 string used to create the `Bytes` object.
*/
Bytes.fromBase64String = function (base64) {
try {
return new Bytes(ByteString.fromBase64String(base64));
}
catch (e) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Failed to construct data from Base64 string: ' + e);
}
};
/**
* Creates a new `Bytes` object from the given Uint8Array.
*
* @param array - The Uint8Array used to create the `Bytes` object.
*/
Bytes.fromUint8Array = function (array) {
return new Bytes(ByteString.fromUint8Array(array));
};
/**
* Returns the underlying bytes as a Base64-encoded string.
*
* @returns The Base64-encoded string created from the `Bytes` object.
*/
Bytes.prototype.toBase64 = function () {
return this._byteString.toBase64();
};
/**
* Returns the underlying bytes in a new `Uint8Array`.
*
* @returns The Uint8Array created from the `Bytes` object.
*/
Bytes.prototype.toUint8Array = function () {
return this._byteString.toUint8Array();
};
/**
* Returns a string representation of the `Bytes` object.
*
* @returns A string representation of the `Bytes` object.
*/
Bytes.prototype.toString = function () {
return 'Bytes(base64: ' + this.toBase64() + ')';
};
/**
* Returns true if this `Bytes` object is equal to the provided one.
*
* @param other - The `Bytes` object to compare against.
* @returns true if this `Bytes` object is equal to the provided one.
*/
Bytes.prototype.isEqual = function (other) {
return this._byteString.isEqual(other._byteString);
};
return Bytes;
}());
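/*
 * Usage sketch (illustrative only): round-tripping binary data through Bytes.
 *
 *   const original = new Uint8Array([0xde, 0xad, 0xbe, 0xef]);
 *   const bytes = Bytes.fromUint8Array(original);
 *   const base64 = bytes.toBase64();          // Base64-encoded string
 *   const copy = Bytes.fromBase64String(base64);
 *   copy.isEqual(bytes);                      // true
 */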
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Sentinel values that can be used when writing document fields with `set()`
* or `update()`.
*/
var FieldValue = /** @class */ (function () {
/**
* @param _methodName - The public API endpoint that returns this class.
* @hideconstructor
*/
function FieldValue(_methodName) {
this._methodName = _methodName;
}
return FieldValue;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* An immutable object representing a geographic location in Firestore. The
* location is represented as a latitude/longitude pair.
*
* Latitude values are in the range of [-90, 90].
* Longitude values are in the range of [-180, 180].
*/
var GeoPoint = /** @class */ (function () {
/**
* Creates a new immutable `GeoPoint` object with the provided latitude and
* longitude values.
* @param latitude - The latitude as a number between -90 and 90.
* @param longitude - The longitude as a number between -180 and 180.
*/
function GeoPoint(latitude, longitude) {
if (!isFinite(latitude) || latitude < -90 || latitude > 90) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Latitude must be a number between -90 and 90, but was: ' + latitude);
}
if (!isFinite(longitude) || longitude < -180 || longitude > 180) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Longitude must be a number between -180 and 180, but was: ' + longitude);
}
this._lat = latitude;
this._long = longitude;
}
Object.defineProperty(GeoPoint.prototype, "latitude", {
/**
* The latitude of this `GeoPoint` instance.
*/
get: function () {
return this._lat;
},
enumerable: false,
configurable: true
});
Object.defineProperty(GeoPoint.prototype, "longitude", {
/**
* The longitude of this `GeoPoint` instance.
*/
get: function () {
return this._long;
},
enumerable: false,
configurable: true
});
/**
* Returns true if this `GeoPoint` is equal to the provided one.
*
* @param other - The `GeoPoint` to compare against.
* @returns true if this `GeoPoint` is equal to the provided one.
*/
GeoPoint.prototype.isEqual = function (other) {
return this._lat === other._lat && this._long === other._long;
};
/** Returns a JSON-serializable representation of this GeoPoint. */
GeoPoint.prototype.toJSON = function () {
return { latitude: this._lat, longitude: this._long };
};
/**
* Actually private to JS consumers of our API, so this function is prefixed
* with an underscore.
*/
GeoPoint.prototype._compareTo = function (other) {
return (primitiveComparator(this._lat, other._lat) ||
primitiveComparator(this._long, other._long));
};
return GeoPoint;
}());
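/*
 * Usage sketch (illustrative only): constructing and comparing GeoPoints.
 * Out-of-range or non-finite coordinates throw an INVALID_ARGUMENT error.
 *
 *   const sf = new GeoPoint(37.7749, -122.4194);
 *   const sameSpot = new GeoPoint(37.7749, -122.4194);
 *   sf.isEqual(sameSpot);   // true
 *   sf.toJSON();            // { latitude: 37.7749, longitude: -122.4194 }
 */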
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var RESERVED_FIELD_REGEX = /^__.*__$/;
/** The result of parsing document data (e.g. for a setData call). */
var ParsedSetData = /** @class */ (function () {
function ParsedSetData(data, fieldMask, fieldTransforms) {
this.data = data;
this.fieldMask = fieldMask;
this.fieldTransforms = fieldTransforms;
}
ParsedSetData.prototype.toMutation = function (key, precondition) {
if (this.fieldMask !== null) {
return new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms);
}
else {
return new SetMutation(key, this.data, precondition, this.fieldTransforms);
}
};
return ParsedSetData;
}());
/** The result of parsing "update" data (i.e. for an updateData call). */
var ParsedUpdateData = /** @class */ (function () {
function ParsedUpdateData(data,
// The fieldMask does not include document transforms.
fieldMask, fieldTransforms) {
this.data = data;
this.fieldMask = fieldMask;
this.fieldTransforms = fieldTransforms;
}
ParsedUpdateData.prototype.toMutation = function (key, precondition) {
return new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms);
};
return ParsedUpdateData;
}());
function isWrite(dataSource) {
switch (dataSource) {
case 0 /* Set */: // fall through
case 2 /* MergeSet */: // fall through
case 1 /* Update */:
return true;
case 3 /* Argument */:
case 4 /* ArrayArgument */:
return false;
default:
throw fail();
}
}
/** A "context" object passed around while parsing user data. */
var ParseContextImpl = /** @class */ (function () {
/**
* Initializes a ParseContext with the given source and path.
*
* @param settings - The settings for the parser.
* @param databaseId - The database ID of the Firestore instance.
* @param serializer - The serializer to use to generate the Value proto.
* @param ignoreUndefinedProperties - Whether to ignore undefined properties
* rather than throw.
* @param fieldTransforms - A mutable list of field transforms encountered
* while parsing the data.
* @param fieldMask - A mutable list of field paths encountered while parsing
* the data.
*
* TODO(b/34871131): We don't support array paths right now, so path can be
* null to indicate the context represents any location within an array (in
* which case certain features will not work and errors will be somewhat
* compromised).
*/
function ParseContextImpl(settings, databaseId, serializer, ignoreUndefinedProperties, fieldTransforms, fieldMask) {
this.settings = settings;
this.databaseId = databaseId;
this.serializer = serializer;
this.ignoreUndefinedProperties = ignoreUndefinedProperties;
// Minor hack: If fieldTransforms is undefined, we assume this is an
// external call and we need to validate the entire path.
if (fieldTransforms === undefined) {
this.validatePath();
}
this.fieldTransforms = fieldTransforms || [];
this.fieldMask = fieldMask || [];
}
Object.defineProperty(ParseContextImpl.prototype, "path", {
get: function () {
return this.settings.path;
},
enumerable: false,
configurable: true
});
Object.defineProperty(ParseContextImpl.prototype, "dataSource", {
get: function () {
return this.settings.dataSource;
},
enumerable: false,
configurable: true
});
/** Returns a new context with the specified settings overwritten. */
ParseContextImpl.prototype.contextWith = function (configuration) {
return new ParseContextImpl(Object.assign(Object.assign({}, this.settings), configuration), this.databaseId, this.serializer, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
};
ParseContextImpl.prototype.childContextForField = function (field) {
var _a;
var childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
var context = this.contextWith({ path: childPath, arrayElement: false });
context.validatePathSegment(field);
return context;
};
ParseContextImpl.prototype.childContextForFieldPath = function (field) {
var _a;
var childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
var context = this.contextWith({ path: childPath, arrayElement: false });
context.validatePath();
return context;
};
ParseContextImpl.prototype.childContextForArray = function (index) {
// TODO(b/34871131): We don't support array paths right now; so make path
// undefined.
return this.contextWith({ path: undefined, arrayElement: true });
};
ParseContextImpl.prototype.createError = function (reason) {
return createError(reason, this.settings.methodName, this.settings.hasConverter || false, this.path, this.settings.targetDoc);
};
/** Returns 'true' if 'fieldPath' was traversed when creating this context. */
ParseContextImpl.prototype.contains = function (fieldPath) {
return (this.fieldMask.find(function (field) { return fieldPath.isPrefixOf(field); }) !== undefined ||
this.fieldTransforms.find(function (transform) { return fieldPath.isPrefixOf(transform.field); }) !== undefined);
};
ParseContextImpl.prototype.validatePath = function () {
// TODO(b/34871131): Remove null check once we have proper paths for fields
// within arrays.
if (!this.path) {
return;
}
for (var i = 0; i < this.path.length; i++) {
this.validatePathSegment(this.path.get(i));
}
};
ParseContextImpl.prototype.validatePathSegment = function (segment) {
if (segment.length === 0) {
throw this.createError('Document fields must not be empty');
}
if (isWrite(this.dataSource) && RESERVED_FIELD_REGEX.test(segment)) {
throw this.createError('Document fields cannot begin and end with "__"');
}
};
return ParseContextImpl;
}());
/**
* Helper for parsing raw user input (provided via the API) into internal model
* classes.
*/
var UserDataReader = /** @class */ (function () {
function UserDataReader(databaseId, ignoreUndefinedProperties, serializer) {
this.databaseId = databaseId;
this.ignoreUndefinedProperties = ignoreUndefinedProperties;
this.serializer = serializer || newSerializer(databaseId);
}
/** Creates a new top-level parse context. */
UserDataReader.prototype.createContext = function (dataSource, methodName, targetDoc, hasConverter) {
if (hasConverter === void 0) { hasConverter = false; }
return new ParseContextImpl({
dataSource: dataSource,
methodName: methodName,
targetDoc: targetDoc,
path: FieldPath$1.emptyPath(),
arrayElement: false,
hasConverter: hasConverter
}, this.databaseId, this.serializer, this.ignoreUndefinedProperties);
};
return UserDataReader;
}());
function newUserDataReader(firestore) {
var settings = firestore._freezeSettings();
var serializer = newSerializer(firestore._databaseId);
return new UserDataReader(firestore._databaseId, !!settings.ignoreUndefinedProperties, serializer);
}
/** Parse document data from a set() call. */
function parseSetData(userDataReader, methodName, targetDoc, input, hasConverter, options) {
if (options === void 0) { options = {}; }
var context = userDataReader.createContext(options.merge || options.mergeFields
? 2 /* MergeSet */
: 0 /* Set */, methodName, targetDoc, hasConverter);
validatePlainObject('Data must be an object, but it was:', context, input);
var updateData = parseObject(input, context);
var fieldMask;
var fieldTransforms;
if (options.merge) {
fieldMask = new FieldMask(context.fieldMask);
fieldTransforms = context.fieldTransforms;
}
else if (options.mergeFields) {
var validatedFieldPaths = [];
for (var _i = 0, _d = options.mergeFields; _i < _d.length; _i++) {
var stringOrFieldPath = _d[_i];
var fieldPath = fieldPathFromArgument$1(methodName, stringOrFieldPath, targetDoc);
if (!context.contains(fieldPath)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Field '" + fieldPath + "' is specified in your field mask but missing from your input data.");
}
if (!fieldMaskContains(validatedFieldPaths, fieldPath)) {
validatedFieldPaths.push(fieldPath);
}
}
fieldMask = new FieldMask(validatedFieldPaths);
fieldTransforms = context.fieldTransforms.filter(function (transform) { return fieldMask.covers(transform.field); });
}
else {
fieldMask = null;
fieldTransforms = context.fieldTransforms;
}
return new ParsedSetData(new ObjectValue(updateData), fieldMask, fieldTransforms);
}
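/*
 * Usage sketch (illustrative only): how the merge options surface through
 * setDoc(), which feeds this parser. `setDoc` and `doc` are assumed to be
 * available from the modular API; the document path and fields are
 * hypothetical.
 *
 *   // Patches only the provided fields (fieldMask = fields seen while parsing).
 *   await setDoc(doc(db, 'users/alice'), { age: 30, city: 'Tokyo' }, { merge: true });
 *
 *   // Restricts the patch to the listed paths; each listed path must also be
 *   // present in the input data.
 *   await setDoc(doc(db, 'users/alice'), { age: 31, city: 'Osaka' }, { mergeFields: ['age'] });
 */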
var DeleteFieldValueImpl = /** @class */ (function (_super) {
tslib.__extends(DeleteFieldValueImpl, _super);
function DeleteFieldValueImpl() {
return _super !== null && _super.apply(this, arguments) || this;
}
DeleteFieldValueImpl.prototype._toFieldTransform = function (context) {
if (context.dataSource === 2 /* MergeSet */) {
// No transform to add for a delete, but we need to add it to our
// fieldMask so it gets deleted.
context.fieldMask.push(context.path);
}
else if (context.dataSource === 1 /* Update */) {
throw context.createError(this._methodName + "() can only appear at the top level " +
'of your update data');
}
else {
// We shouldn't encounter delete sentinels for queries or non-merge set() calls.
throw context.createError(this._methodName + "() cannot be used with set() unless you pass " +
'{merge:true}');
}
return null;
};
DeleteFieldValueImpl.prototype.isEqual = function (other) {
return other instanceof DeleteFieldValueImpl;
};
return DeleteFieldValueImpl;
}(FieldValue));
/**
* Creates a child context for parsing SerializableFieldValues.
*
* This is different than calling `ParseContext.contextWith` because it keeps
* the fieldTransforms and fieldMask separate.
*
* The created context has its `dataSource` set to `UserDataSource.Argument`.
* Although these values are used with writes, any elements in these FieldValues
* are not considered writes since they cannot contain any FieldValue sentinels,
* etc.
*
* @param fieldValue - The sentinel FieldValue for which to create a child
* context.
* @param context - The parent context.
* @param arrayElement - Whether or not the FieldValue is being parsed as an
* array element.
*/
function createSentinelChildContext(fieldValue, context, arrayElement) {
return new ParseContextImpl({
dataSource: 3 /* Argument */,
targetDoc: context.settings.targetDoc,
methodName: fieldValue._methodName,
arrayElement: arrayElement
}, context.databaseId, context.serializer, context.ignoreUndefinedProperties);
}
var ServerTimestampFieldValueImpl = /** @class */ (function (_super) {
tslib.__extends(ServerTimestampFieldValueImpl, _super);
function ServerTimestampFieldValueImpl() {
return _super !== null && _super.apply(this, arguments) || this;
}
ServerTimestampFieldValueImpl.prototype._toFieldTransform = function (context) {
return new FieldTransform(context.path, new ServerTimestampTransform());
};
ServerTimestampFieldValueImpl.prototype.isEqual = function (other) {
return other instanceof ServerTimestampFieldValueImpl;
};
return ServerTimestampFieldValueImpl;
}(FieldValue));
var ArrayUnionFieldValueImpl = /** @class */ (function (_super) {
tslib.__extends(ArrayUnionFieldValueImpl, _super);
function ArrayUnionFieldValueImpl(methodName, _elements) {
var _this = _super.call(this, methodName) || this;
_this._elements = _elements;
return _this;
}
ArrayUnionFieldValueImpl.prototype._toFieldTransform = function (context) {
var parseContext = createSentinelChildContext(this, context,
/*array=*/ true);
var parsedElements = this._elements.map(function (element) { return parseData(element, parseContext); });
var arrayUnion = new ArrayUnionTransformOperation(parsedElements);
return new FieldTransform(context.path, arrayUnion);
};
ArrayUnionFieldValueImpl.prototype.isEqual = function (other) {
// TODO(mrschmidt): Implement isEquals
return this === other;
};
return ArrayUnionFieldValueImpl;
}(FieldValue));
var ArrayRemoveFieldValueImpl = /** @class */ (function (_super) {
tslib.__extends(ArrayRemoveFieldValueImpl, _super);
function ArrayRemoveFieldValueImpl(methodName, _elements) {
var _this = _super.call(this, methodName) || this;
_this._elements = _elements;
return _this;
}
ArrayRemoveFieldValueImpl.prototype._toFieldTransform = function (context) {
var parseContext = createSentinelChildContext(this, context,
/*array=*/ true);
var parsedElements = this._elements.map(function (element) { return parseData(element, parseContext); });
var arrayRemove = new ArrayRemoveTransformOperation(parsedElements);
return new FieldTransform(context.path, arrayRemove);
};
ArrayRemoveFieldValueImpl.prototype.isEqual = function (other) {
// TODO(mrschmidt): Implement isEquals
return this === other;
};
return ArrayRemoveFieldValueImpl;
}(FieldValue));
var NumericIncrementFieldValueImpl = /** @class */ (function (_super) {
tslib.__extends(NumericIncrementFieldValueImpl, _super);
function NumericIncrementFieldValueImpl(methodName, _operand) {
var _this = _super.call(this, methodName) || this;
_this._operand = _operand;
return _this;
}
NumericIncrementFieldValueImpl.prototype._toFieldTransform = function (context) {
var numericIncrement = new NumericIncrementTransformOperation(context.serializer, toNumber(context.serializer, this._operand));
return new FieldTransform(context.path, numericIncrement);
};
NumericIncrementFieldValueImpl.prototype.isEqual = function (other) {
// TODO(mrschmidt): Implement isEquals
return this === other;
};
return NumericIncrementFieldValueImpl;
}(FieldValue));
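/*
 * Usage sketch (illustrative only): the public sentinels backed by the classes
 * above, assuming the factory functions (serverTimestamp, increment,
 * arrayUnion, deleteField) and `updateDoc`/`doc` are available from the
 * modular API. Field names are hypothetical.
 *
 *   await updateDoc(doc(db, 'users/alice'), {
 *     lastLogin: serverTimestamp(),   // ServerTimestampFieldValueImpl
 *     visits: increment(1),           // NumericIncrementFieldValueImpl
 *     tags: arrayUnion('beta'),       // ArrayUnionFieldValueImpl
 *     legacyField: deleteField()      // DeleteFieldValueImpl
 *   });
 */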
/** Parse update data from an update() call. */
function parseUpdateData(userDataReader, methodName, targetDoc, input) {
var context = userDataReader.createContext(1 /* Update */, methodName, targetDoc);
validatePlainObject('Data must be an object, but it was:', context, input);
var fieldMaskPaths = [];
var updateData = ObjectValue.empty();
forEach(input, function (key, value) {
var path = fieldPathFromDotSeparatedString(methodName, key, targetDoc);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
value = util.getModularInstance(value);
var childContext = context.childContextForFieldPath(path);
if (value instanceof DeleteFieldValueImpl) {
// Add it to the field mask, but don't add anything to updateData.
fieldMaskPaths.push(path);
}
else {
var parsedValue = parseData(value, childContext);
if (parsedValue != null) {
fieldMaskPaths.push(path);
updateData.set(path, parsedValue);
}
}
});
var mask = new FieldMask(fieldMaskPaths);
return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
}
/** Parse update data from a list of field/value arguments. */
function parseUpdateVarargs(userDataReader, methodName, targetDoc, field, value, moreFieldsAndValues) {
var context = userDataReader.createContext(1 /* Update */, methodName, targetDoc);
var keys = [fieldPathFromArgument$1(methodName, field, targetDoc)];
var values = [value];
if (moreFieldsAndValues.length % 2 !== 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Function " + methodName + "() needs to be called with an even number " +
'of arguments that alternate between field names and values.');
}
for (var i = 0; i < moreFieldsAndValues.length; i += 2) {
keys.push(fieldPathFromArgument$1(methodName, moreFieldsAndValues[i]));
values.push(moreFieldsAndValues[i + 1]);
}
var fieldMaskPaths = [];
var updateData = ObjectValue.empty();
// We iterate in reverse order to pick the last value for a field if the
// user specified the field multiple times.
for (var i = keys.length - 1; i >= 0; --i) {
if (!fieldMaskContains(fieldMaskPaths, keys[i])) {
var path = keys[i];
var value_1 = values[i];
// For Compat types, we have to "extract" the underlying types before
// performing validation.
value_1 = util.getModularInstance(value_1);
var childContext = context.childContextForFieldPath(path);
if (value_1 instanceof DeleteFieldValueImpl) {
// Add it to the field mask, but don't add anything to updateData.
fieldMaskPaths.push(path);
}
else {
var parsedValue = parseData(value_1, childContext);
if (parsedValue != null) {
fieldMaskPaths.push(path);
updateData.set(path, parsedValue);
}
}
}
}
var mask = new FieldMask(fieldMaskPaths);
return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
}
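/*
 * Usage sketch (illustrative only): the field/value varargs form handled by
 * parseUpdateVarargs(), assuming `updateDoc` and `doc` are available from the
 * modular API. Arguments must alternate between field paths and values, and
 * when a field is repeated the last value wins.
 *
 *   await updateDoc(doc(db, 'users/alice'),
 *                   'age', 31,
 *                   new FieldPath('address', 'city'), 'Tokyo');
 */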
/**
* Parse a "query value" (e.g. value in a where filter or a value in a cursor
* bound).
*
* @param allowArrays - Whether the query value is an array that may directly
* contain additional arrays (e.g. the operand of an `in` query).
*/
function parseQueryValue(userDataReader, methodName, input, allowArrays) {
if (allowArrays === void 0) { allowArrays = false; }
var context = userDataReader.createContext(allowArrays ? 4 /* ArrayArgument */ : 3 /* Argument */, methodName);
var parsed = parseData(input, context);
return parsed;
}
/**
* Parses user data to Protobuf Values.
*
* @param input - Data to be parsed.
* @param context - A context object representing the current path being parsed,
* the source of the data being parsed, etc.
* @returns The parsed value, or null if the value was a FieldValue sentinel
* that should not be included in the resulting parsed data.
*/
function parseData(input, context) {
// Unwrap the API type from the Compat SDK. This will return the API type
// from firestore-exp.
input = util.getModularInstance(input);
if (looksLikeJsonObject(input)) {
validatePlainObject('Unsupported field value:', context, input);
return parseObject(input, context);
}
else if (input instanceof FieldValue) {
// FieldValues usually parse into transforms (except FieldValue.delete())
// in which case we do not want to include this field in our parsed data
// (as doing so will overwrite the field directly prior to the transform
// trying to transform it). So we don't add this location to
// context.fieldMask and we return null as our parsing result.
parseSentinelFieldValue(input, context);
return null;
}
else if (input === undefined && context.ignoreUndefinedProperties) {
// If the input is undefined it can never participate in the fieldMask, so
// don't handle this below. If `ignoreUndefinedProperties` is false,
// `parseScalarValue` will reject an undefined value.
return null;
}
else {
// If context.path is null we are inside an array and we don't support
// field mask paths more granular than the top-level array.
if (context.path) {
context.fieldMask.push(context.path);
}
if (input instanceof Array) {
// TODO(b/34871131): Include the path containing the array in the error
// message.
// In the case of IN queries, the parsed data is an array (representing
// the set of values to be included for the IN query) that may directly
// contain additional arrays (each representing an individual field
// value), so we disable this validation.
if (context.settings.arrayElement &&
context.dataSource !== 4 /* ArrayArgument */) {
throw context.createError('Nested arrays are not supported');
}
return parseArray(input, context);
}
else {
return parseScalarValue(input, context);
}
}
}
function parseObject(obj, context) {
var fields = {};
if (isEmpty(obj)) {
// If we encounter an empty object, we explicitly add it to the update
// mask to ensure that the server creates a map entry.
if (context.path && context.path.length > 0) {
context.fieldMask.push(context.path);
}
}
else {
forEach(obj, function (key, val) {
var parsedValue = parseData(val, context.childContextForField(key));
if (parsedValue != null) {
fields[key] = parsedValue;
}
});
}
return { mapValue: { fields: fields } };
}
function parseArray(array, context) {
var values = [];
var entryIndex = 0;
for (var _i = 0, array_1 = array; _i < array_1.length; _i++) {
var entry = array_1[_i];
var parsedEntry = parseData(entry, context.childContextForArray(entryIndex));
if (parsedEntry == null) {
// Just include nulls in the array for fields being replaced with a
// sentinel.
parsedEntry = { nullValue: 'NULL_VALUE' };
}
values.push(parsedEntry);
entryIndex++;
}
return { arrayValue: { values: values } };
}
/**
* "Parses" the provided FieldValueImpl, adding any necessary transforms to
* context.fieldTransforms.
*/
function parseSentinelFieldValue(value, context) {
// Sentinels are only supported with writes, and not within arrays.
if (!isWrite(context.dataSource)) {
throw context.createError(value._methodName + "() can only be used with update() and set()");
}
if (!context.path) {
throw context.createError(value._methodName + "() is not currently supported inside arrays");
}
var fieldTransform = value._toFieldTransform(context);
if (fieldTransform) {
context.fieldTransforms.push(fieldTransform);
}
}
/**
* Helper to parse a scalar value (i.e. not an Object, Array, or FieldValue)
*
* @returns The parsed value
*/
function parseScalarValue(value, context) {
value = util.getModularInstance(value);
if (value === null) {
return { nullValue: 'NULL_VALUE' };
}
else if (typeof value === 'number') {
return toNumber(context.serializer, value);
}
else if (typeof value === 'boolean') {
return { booleanValue: value };
}
else if (typeof value === 'string') {
return { stringValue: value };
}
else if (value instanceof Date) {
var timestamp = Timestamp.fromDate(value);
return {
timestampValue: toTimestamp(context.serializer, timestamp)
};
}
else if (value instanceof Timestamp) {
// Firestore backend truncates precision down to microseconds. To ensure
// offline mode works the same with regards to truncation, perform the
// truncation immediately without waiting for the backend to do that.
var timestamp = new Timestamp(value.seconds, Math.floor(value.nanoseconds / 1000) * 1000);
return {
timestampValue: toTimestamp(context.serializer, timestamp)
};
}
else if (value instanceof GeoPoint) {
return {
geoPointValue: {
latitude: value.latitude,
longitude: value.longitude
}
};
}
else if (value instanceof Bytes) {
return { bytesValue: toBytes(context.serializer, value._byteString) };
}
else if (value instanceof DocumentReference) {
var thisDb = context.databaseId;
var otherDb = value.firestore._databaseId;
if (!otherDb.isEqual(thisDb)) {
throw context.createError('Document reference is for database ' +
(otherDb.projectId + "/" + otherDb.database + " but should be ") +
("for database " + thisDb.projectId + "/" + thisDb.database));
}
return {
referenceValue: toResourceName(value.firestore._databaseId || context.databaseId, value._key.path)
};
}
else {
throw context.createError("Unsupported field value: " + valueDescription(value));
}
}
/**
* Checks whether an object looks like a JSON object that should be converted
* into a struct. Normal class/prototype instances are considered to look like
* JSON objects since they should be converted to a struct value. Arrays, Dates,
* GeoPoints, etc. are not considered to look like JSON objects since they map
* to specific FieldValue types other than ObjectValue.
*/
function looksLikeJsonObject(input) {
return (typeof input === 'object' &&
input !== null &&
!(input instanceof Array) &&
!(input instanceof Date) &&
!(input instanceof Timestamp) &&
!(input instanceof GeoPoint) &&
!(input instanceof Bytes) &&
!(input instanceof DocumentReference) &&
!(input instanceof FieldValue));
}
function validatePlainObject(message, context, input) {
if (!looksLikeJsonObject(input) || !isPlainObject(input)) {
var description = valueDescription(input);
if (description === 'an object') {
// Massage the error if it was an object.
throw context.createError(message + ' a custom object');
}
else {
throw context.createError(message + ' ' + description);
}
}
}
/**
* Helper that calls fromDotSeparatedString() but wraps any error thrown.
*/
function fieldPathFromArgument$1(methodName, path, targetDoc) {
// If required, replace the FieldPath Compat class with the firestore-exp
// FieldPath.
path = util.getModularInstance(path);
if (path instanceof FieldPath) {
return path._internalPath;
}
else if (typeof path === 'string') {
return fieldPathFromDotSeparatedString(methodName, path);
}
else {
var message = 'Field path arguments must be of type string or FieldPath.';
throw createError(message, methodName,
/* hasConverter= */ false,
/* path= */ undefined, targetDoc);
}
}
/**
* Matches any characters in a field path string that are reserved.
*/
var FIELD_PATH_RESERVED = new RegExp('[~\\*/\\[\\]]');
/**
* Parses a dot-separated field path string, wrapping any error in a message
* that names the public method in which it was thrown.
* @param methodName - The publicly visible method name
* @param path - The dot-separated string form of a field path which will be
* split on dots.
* @param targetDoc - The document against which the field path will be
* evaluated.
*/
function fieldPathFromDotSeparatedString(methodName, path, targetDoc) {
var found = path.search(FIELD_PATH_RESERVED);
if (found >= 0) {
throw createError("Invalid field path (" + path + "). Paths must not contain " +
"'~', '*', '/', '[', or ']'", methodName,
/* hasConverter= */ false,
/* path= */ undefined, targetDoc);
}
try {
return new (FieldPath.bind.apply(FieldPath, tslib.__spreadArray([void 0], path.split('.'))))()._internalPath;
}
catch (e) {
throw createError("Invalid field path (" + path + "). Paths must not be empty, " +
"begin with '.', end with '.', or contain '..'", methodName,
/* hasConverter= */ false,
/* path= */ undefined, targetDoc);
}
}
function createError(reason, methodName, hasConverter, path, targetDoc) {
var hasPath = path && !path.isEmpty();
var hasDocument = targetDoc !== undefined;
var message = "Function " + methodName + "() called with invalid data";
if (hasConverter) {
message += ' (via `toFirestore()`)';
}
message += '. ';
var description = '';
if (hasPath || hasDocument) {
description += ' (found';
if (hasPath) {
description += " in field " + path;
}
if (hasDocument) {
description += " in document " + targetDoc;
}
description += ')';
}
return new FirestoreError(Code.INVALID_ARGUMENT, message + reason + description);
}
/** Checks whether the FieldPath `needle` is present in `haystack`. Runs in O(n). */
function fieldMaskContains(haystack, needle) {
return haystack.some(function (v) { return v.isEqual(needle); });
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
* database. The data can be extracted with `.data()` or `.get()` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/
var DocumentSnapshot$1 = /** @class */ (function () {
// Note: This class is a stripped-down version of the DocumentSnapshot in
// the legacy SDK. The changes are:
// - No support for SnapshotMetadata.
// - No support for SnapshotOptions.
/** @hideconstructor protected */
function DocumentSnapshot$1(_firestore, _userDataWriter, _key, _document, _converter) {
this._firestore = _firestore;
this._userDataWriter = _userDataWriter;
this._key = _key;
this._document = _document;
this._converter = _converter;
}
Object.defineProperty(DocumentSnapshot$1.prototype, "id", {
/** Property of the `DocumentSnapshot` that provides the document's ID. */
get: function () {
return this._key.path.lastSegment();
},
enumerable: false,
configurable: true
});
Object.defineProperty(DocumentSnapshot$1.prototype, "ref", {
/**
* The `DocumentReference` for the document included in the `DocumentSnapshot`.
*/
get: function () {
return new DocumentReference(this._firestore, this._converter, this._key);
},
enumerable: false,
configurable: true
});
/**
* Signals whether or not the document at the snapshot's location exists.
*
* @returns true if the document exists.
*/
DocumentSnapshot$1.prototype.exists = function () {
return this._document !== null;
};
/**
* Retrieves all fields in the document as an `Object`. Returns `undefined` if
* the document doesn't exist.
*
* @returns An `Object` containing all fields in the document or `undefined`
* if the document doesn't exist.
*/
DocumentSnapshot$1.prototype.data = function () {
if (!this._document) {
return undefined;
}
else if (this._converter) {
// We only want to use the converter and create a new DocumentSnapshot
// if a converter has been provided.
var snapshot = new QueryDocumentSnapshot$1(this._firestore, this._userDataWriter, this._key, this._document,
/* converter= */ null);
return this._converter.fromFirestore(snapshot);
}
else {
return this._userDataWriter.convertValue(this._document.data.value);
}
};
/**
* Retrieves the field specified by `fieldPath`. Returns `undefined` if the
* document or field doesn't exist.
*
* @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
* field.
* @returns The data at the specified field location or undefined if no such
* field exists in the document.
*/
// We are using `any` here to avoid an explicit cast by our users.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
DocumentSnapshot$1.prototype.get = function (fieldPath) {
if (this._document) {
var value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
if (value !== null) {
return this._userDataWriter.convertValue(value);
}
}
return undefined;
};
return DocumentSnapshot$1;
}());
/**
* A `QueryDocumentSnapshot` contains data read from a document in your
* Firestore database as part of a query. The document is guaranteed to exist
* and its data can be extracted with `.data()` or `.get()` to get a
* specific field.
*
* A `QueryDocumentSnapshot` offers the same API surface as a
* `DocumentSnapshot`. Since query results contain only existing documents, the
* `exists` property will always be true and `data()` will never return
* 'undefined'.
*/
var QueryDocumentSnapshot$1 = /** @class */ (function (_super) {
tslib.__extends(QueryDocumentSnapshot$1, _super);
function QueryDocumentSnapshot$1() {
return _super !== null && _super.apply(this, arguments) || this;
}
/**
* Retrieves all fields in the document as an `Object`.
*
* @override
* @returns An `Object` containing all fields in the document.
*/
QueryDocumentSnapshot$1.prototype.data = function () {
return _super.prototype.data.call(this);
};
return QueryDocumentSnapshot$1;
}(DocumentSnapshot$1));
/**
* Helper that calls fromDotSeparatedString() but wraps any error thrown.
*/
function fieldPathFromArgument(methodName, arg) {
if (typeof arg === 'string') {
return fieldPathFromDotSeparatedString(methodName, arg);
}
else if (arg instanceof FieldPath) {
return arg._internalPath;
}
else {
return arg._delegate._internalPath;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Metadata about a snapshot, describing the state of the snapshot.
*/
var SnapshotMetadata = /** @class */ (function () {
/** @hideconstructor */
function SnapshotMetadata(hasPendingWrites, fromCache) {
this.hasPendingWrites = hasPendingWrites;
this.fromCache = fromCache;
}
/**
* Returns true if this `SnapshotMetadata` is equal to the provided one.
*
* @param other - The `SnapshotMetadata` to compare against.
* @returns true if this `SnapshotMetadata` is equal to the provided one.
*/
SnapshotMetadata.prototype.isEqual = function (other) {
return (this.hasPendingWrites === other.hasPendingWrites &&
this.fromCache === other.fromCache);
};
return SnapshotMetadata;
}());
/**
* A `DocumentSnapshot` contains data read from a document in your Firestore
* database. The data can be extracted with `.data()` or `.get()` to
* get a specific field.
*
* For a `DocumentSnapshot` that points to a non-existing document, any data
* access will return 'undefined'. You can use the `exists()` method to
* explicitly verify a document's existence.
*/
var DocumentSnapshot = /** @class */ (function (_super) {
tslib.__extends(DocumentSnapshot, _super);
/** @hideconstructor protected */
function DocumentSnapshot(_firestore, userDataWriter, key, document, metadata, converter) {
var _this = _super.call(this, _firestore, userDataWriter, key, document, converter) || this;
_this._firestore = _firestore;
_this._firestoreImpl = _firestore;
_this.metadata = metadata;
return _this;
}
/**
* Signals whether or not the document at the snapshot's location exists.
* Returns true if the document exists.
*/
DocumentSnapshot.prototype.exists = function () {
return _super.prototype.exists.call(this);
};
/**
* Retrieves all fields in the document as an `Object`. Returns `undefined` if
* the document doesn't exist.
*
* By default, `FieldValue.serverTimestamp()` values that have not yet been
* set to their final value will be returned as `null`. You can override
* this by passing an options object.
*
* @param options - An options object to configure how data is retrieved from
* the snapshot (for example the desired behavior for server timestamps that
* have not yet been set to their final value).
* @returns An `Object` containing all fields in the document or `undefined` if
* the document doesn't exist.
*/
DocumentSnapshot.prototype.data = function (options) {
if (options === void 0) { options = {}; }
if (!this._document) {
return undefined;
}
else if (this._converter) {
// We only want to use the converter and create a new DocumentSnapshot
// if a converter has been provided.
var snapshot = new QueryDocumentSnapshot(this._firestore, this._userDataWriter, this._key, this._document, this.metadata,
/* converter= */ null);
return this._converter.fromFirestore(snapshot, options);
}
else {
return this._userDataWriter.convertValue(this._document.data.value, options.serverTimestamps);
}
};
/**
* Retrieves the field specified by `fieldPath`. Returns `undefined` if the
* document or field doesn't exist.
*
* By default, a `FieldValue.serverTimestamp()` that has not yet been set to
* its final value will be returned as `null`. You can override this by
* passing an options object.
*
* @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
* field.
* @param options - An options object to configure how the field is retrieved
* from the snapshot (for example the desired behavior for server timestamps
* that have not yet been set to their final value).
* @returns The data at the specified field location or undefined if no such
* field exists in the document.
*/
// We are using `any` here to avoid an explicit cast by our users.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
DocumentSnapshot.prototype.get = function (fieldPath, options) {
if (options === void 0) { options = {}; }
if (this._document) {
var value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
if (value !== null) {
return this._userDataWriter.convertValue(value, options.serverTimestamps);
}
}
return undefined;
};
return DocumentSnapshot;
}(DocumentSnapshot$1));
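/*
 * Usage sketch (illustrative only): reading a pending server timestamp with
 * the serverTimestamps option, assuming `getDoc` and `doc` are available from
 * the modular API and 'lastLogin' was written with serverTimestamp().
 *
 *   const snap = await getDoc(doc(db, 'users/alice'));
 *   if (snap.exists()) {
 *     snap.get('lastLogin');                                    // null while pending
 *     snap.get('lastLogin', { serverTimestamps: 'estimate' });  // locally estimated value
 *     snap.metadata.hasPendingWrites;                           // true until acknowledged
 *   }
 */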
/**
* A `QueryDocumentSnapshot` contains data read from a document in your
* Firestore database as part of a query. The document is guaranteed to exist
* and its data can be extracted with `.data()` or `.get()` to get a
* specific field.
*
* A `QueryDocumentSnapshot` offers the same API surface as a
* `DocumentSnapshot`. Since query results contain only existing documents, the
* `exists` property will always be true and `data()` will never return
* 'undefined'.
*/
var QueryDocumentSnapshot = /** @class */ (function (_super) {
tslib.__extends(QueryDocumentSnapshot, _super);
function QueryDocumentSnapshot() {
return _super !== null && _super.apply(this, arguments) || this;
}
/**
* Retrieves all fields in the document as an `Object`.
*
* By default, `FieldValue.serverTimestamp()` values that have not yet been
* set to their final value will be returned as `null`. You can override
* this by passing an options object.
*
* @override
* @param options - An options object to configure how data is retrieved from
* the snapshot (for example the desired behavior for server timestamps that
* have not yet been set to their final value).
* @returns An `Object` containing all fields in the document.
*/
QueryDocumentSnapshot.prototype.data = function (options) {
if (options === void 0) { options = {}; }
return _super.prototype.data.call(this, options);
};
return QueryDocumentSnapshot;
}(DocumentSnapshot));
/**
* A `QuerySnapshot` contains zero or more `DocumentSnapshot` objects
* representing the results of a query. The documents can be accessed as an
* array via the `docs` property or enumerated using the `forEach` method. The
* number of documents can be determined via the `empty` and `size`
* properties.
*/
var QuerySnapshot = /** @class */ (function () {
/** @hideconstructor */
function QuerySnapshot(_firestore, _userDataWriter, query, _snapshot) {
this._firestore = _firestore;
this._userDataWriter = _userDataWriter;
this._snapshot = _snapshot;
this.metadata = new SnapshotMetadata(_snapshot.hasPendingWrites, _snapshot.fromCache);
this.query = query;
}
Object.defineProperty(QuerySnapshot.prototype, "docs", {
/** An array of all the documents in the `QuerySnapshot`. */
get: function () {
var result = [];
this.forEach(function (doc) { return result.push(doc); });
return result;
},
enumerable: false,
configurable: true
});
Object.defineProperty(QuerySnapshot.prototype, "size", {
/** The number of documents in the `QuerySnapshot`. */
get: function () {
return this._snapshot.docs.size;
},
enumerable: false,
configurable: true
});
Object.defineProperty(QuerySnapshot.prototype, "empty", {
/** True if there are no documents in the `QuerySnapshot`. */
get: function () {
return this.size === 0;
},
enumerable: false,
configurable: true
});
/**
* Enumerates all of the documents in the `QuerySnapshot`.
*
* @param callback - A callback to be called with a `QueryDocumentSnapshot` for
* each document in the snapshot.
* @param thisArg - The `this` binding for the callback.
*/
QuerySnapshot.prototype.forEach = function (callback, thisArg) {
var _this = this;
this._snapshot.docs.forEach(function (doc) {
callback.call(thisArg, new QueryDocumentSnapshot(_this._firestore, _this._userDataWriter, doc.key, doc, new SnapshotMetadata(_this._snapshot.mutatedKeys.has(doc.key), _this._snapshot.fromCache), _this.query.converter));
});
};
/**
* Returns an array of the document changes since the last snapshot. If this
* is the first snapshot, all documents will be in the list as 'added'
* changes.
*
* @param options - `SnapshotListenOptions` that control whether metadata-only
* changes (i.e. only `DocumentSnapshot.metadata` changed) should trigger
* snapshot events.
*/
QuerySnapshot.prototype.docChanges = function (options) {
if (options === void 0) { options = {}; }
var includeMetadataChanges = !!options.includeMetadataChanges;
if (includeMetadataChanges && this._snapshot.excludesMetadataChanges) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'To include metadata changes with your document changes, you must ' +
'also pass { includeMetadataChanges:true } to onSnapshot().');
}
if (!this._cachedChanges ||
this._cachedChangesIncludeMetadataChanges !== includeMetadataChanges) {
this._cachedChanges = changesFromSnapshot(this, includeMetadataChanges);
this._cachedChangesIncludeMetadataChanges = includeMetadataChanges;
}
return this._cachedChanges;
};
return QuerySnapshot;
}());
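/*
 * Usage sketch (illustrative only): consuming docChanges() from a listener,
 * assuming `onSnapshot`, `query`, `collection` and `where` are available from
 * the modular API. Note that { includeMetadataChanges: true } must also be
 * passed to onSnapshot() for metadata-only changes to be reported here.
 *
 *   onSnapshot(query(collection(db, 'cities'), where('state', '==', 'CA')),
 *              { includeMetadataChanges: true },
 *              function (snapshot) {
 *                snapshot.docChanges({ includeMetadataChanges: true })
 *                  .forEach(function (change) {
 *                    console.log(change.type, change.oldIndex, change.newIndex, change.doc.id);
 *                  });
 *              });
 */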
/** Calculates the array of DocumentChanges for a given ViewSnapshot. */
function changesFromSnapshot(querySnapshot, includeMetadataChanges) {
if (querySnapshot._snapshot.oldDocs.isEmpty()) {
var index_1 = 0;
return querySnapshot._snapshot.docChanges.map(function (change) {
var doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
return {
type: 'added',
doc: doc,
oldIndex: -1,
newIndex: index_1++
};
});
}
else {
// A DocumentSet that is updated incrementally as changes are applied, used
// to look up the index of a document.
var indexTracker_1 = querySnapshot._snapshot.oldDocs;
return querySnapshot._snapshot.docChanges
.filter(function (change) { return includeMetadataChanges || change.type !== 3; } /* Metadata */)
.map(function (change) {
var doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
var oldIndex = -1;
var newIndex = -1;
if (change.type !== 0 /* Added */) {
oldIndex = indexTracker_1.indexOf(change.doc.key);
indexTracker_1 = indexTracker_1.delete(change.doc.key);
}
if (change.type !== 1 /* Removed */) {
indexTracker_1 = indexTracker_1.add(change.doc);
newIndex = indexTracker_1.indexOf(change.doc.key);
}
return {
type: resultChangeType(change.type),
doc: doc,
oldIndex: oldIndex,
newIndex: newIndex
};
});
}
}
function resultChangeType(type) {
switch (type) {
case 0 /* Added */:
return 'added';
case 2 /* Modified */:
case 3 /* Metadata */:
return 'modified';
case 1 /* Removed */:
return 'removed';
default:
return fail();
}
}
// TODO(firestoreexp): Add tests for snapshotEqual with different snapshot
// metadata
/**
* Returns true if the provided snapshots are equal.
*
* @param left - A snapshot to compare.
* @param right - A snapshot to compare.
* @returns true if the snapshots are equal.
*/
function snapshotEqual(left, right) {
if (left instanceof DocumentSnapshot && right instanceof DocumentSnapshot) {
return (left._firestore === right._firestore &&
left._key.isEqual(right._key) &&
(left._document === null
? right._document === null
: left._document.isEqual(right._document)) &&
left._converter === right._converter);
}
else if (left instanceof QuerySnapshot && right instanceof QuerySnapshot) {
return (left._firestore === right._firestore &&
queryEqual(left.query, right.query) &&
left.metadata.isEqual(right.metadata) &&
left._snapshot.isEqual(right._snapshot));
}
return false;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function validateHasExplicitOrderByForLimitToLast(query) {
if (hasLimitToLast(query) && query.explicitOrderBy.length === 0) {
throw new FirestoreError(Code.UNIMPLEMENTED, 'limitToLast() queries require specifying at least one orderBy() clause');
}
}
/**
* A `QueryConstraint` is used to narrow the set of documents returned by a
* Firestore query. `QueryConstraint`s are created by invoking {@link where},
* {@link orderBy}, {@link (startAt:1)}, {@link (startAfter:1)}, {@link
* (endBefore:1)}, {@link (endAt:1)}, {@link limit} or {@link limitToLast} and
* can then be passed to {@link query} to create a new query instance that
* also contains this `QueryConstraint`.
*/
var QueryConstraint = /** @class */ (function () {
function QueryConstraint() {
}
return QueryConstraint;
}());
/**
* Creates a new immutable instance of `Query` that is extended to also include
* additional query constraints.
*
* @param query - The Query instance to use as a base for the new constraints.
* @param queryConstraints - The list of `QueryConstraint`s to apply.
* @throws if any of the provided query constraints cannot be combined with the
* existing or new constraints.
*/
function query(query) {
var queryConstraints = [];
for (var _i = 1; _i < arguments.length; _i++) {
queryConstraints[_i - 1] = arguments[_i];
}
for (var _d = 0, queryConstraints_1 = queryConstraints; _d < queryConstraints_1.length; _d++) {
var constraint = queryConstraints_1[_d];
query = constraint._apply(query);
}
return query;
}
var QueryFilterConstraint = /** @class */ (function (_super) {
tslib.__extends(QueryFilterConstraint, _super);
function QueryFilterConstraint(_field, _op, _value) {
var _this = _super.call(this) || this;
_this._field = _field;
_this._op = _op;
_this._value = _value;
_this.type = 'where';
return _this;
}
QueryFilterConstraint.prototype._apply = function (query) {
var reader = newUserDataReader(query.firestore);
var filter = newQueryFilter(query._query, 'where', reader, query.firestore._databaseId, this._field, this._op, this._value);
return new Query(query.firestore, query.converter, queryWithAddedFilter(query._query, filter));
};
return QueryFilterConstraint;
}(QueryConstraint));
/**
* Creates a `QueryConstraint` that enforces that documents must contain the
* specified field and that the value should satisfy the relation constraint
* provided.
*
* @param fieldPath - The path to compare
* @param opStr - The operation string (e.g. "<", "<=", "==", ">", ">=",
* "!=").
* @param value - The value for comparison
* @returns The created `Query`.
*/
function where(fieldPath, opStr, value) {
var op = opStr;
var field = fieldPathFromArgument('where', fieldPath);
return new QueryFilterConstraint(field, op, value);
}
var QueryOrderByConstraint = /** @class */ (function (_super) {
tslib.__extends(QueryOrderByConstraint, _super);
function QueryOrderByConstraint(_field, _direction) {
var _this = _super.call(this) || this;
_this._field = _field;
_this._direction = _direction;
_this.type = 'orderBy';
return _this;
}
QueryOrderByConstraint.prototype._apply = function (query) {
var orderBy = newQueryOrderBy(query._query, this._field, this._direction);
return new Query(query.firestore, query.converter, queryWithAddedOrderBy(query._query, orderBy));
};
return QueryOrderByConstraint;
}(QueryConstraint));
/**
* Creates a `QueryConstraint` that sorts the query result by the
* specified field, optionally in descending order instead of ascending.
*
* @param fieldPath - The field to sort by.
* @param directionStr - Optional direction to sort by ('asc' or 'desc'). If
* not specified, order will be ascending.
* @returns The created `Query`.
*/
function orderBy(fieldPath, directionStr) {
if (directionStr === void 0) { directionStr = 'asc'; }
var direction = directionStr;
var path = fieldPathFromArgument('orderBy', fieldPath);
return new QueryOrderByConstraint(path, direction);
}
var QueryLimitConstraint = /** @class */ (function (_super) {
tslib.__extends(QueryLimitConstraint, _super);
function QueryLimitConstraint(type, _limit, _limitType) {
var _this = _super.call(this) || this;
_this.type = type;
_this._limit = _limit;
_this._limitType = _limitType;
return _this;
}
QueryLimitConstraint.prototype._apply = function (query) {
return new Query(query.firestore, query.converter, queryWithLimit(query._query, this._limit, this._limitType));
};
return QueryLimitConstraint;
}(QueryConstraint));
/**
* Creates a `QueryConstraint` that only returns the first matching documents.
*
* @param limit - The maximum number of items to return.
* @returns The created `QueryConstraint`.
*/
function limit(limit) {
validatePositiveNumber('limit', limit);
return new QueryLimitConstraint('limit', limit, "F" /* First */);
}
/**
* Creates a `QueryConstraint` that only returns the last matching documents.
*
* You must specify at least one `orderBy` clause for `limitToLast` queries,
* otherwise an exception will be thrown during execution.
*
* @param limit - The maximum number of items to return.
* @returns The created `QueryConstraint`.
*/
function limitToLast(limit) {
validatePositiveNumber('limitToLast', limit);
return new QueryLimitConstraint('limitToLast', limit, "L" /* Last */);
}
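/*
* Example (illustrative sketch): limit() keeps the first N results, while
* limitToLast() keeps the final N and requires at least one orderBy() clause.
* Collection and field names are hypothetical.
*
*   query(collection(db, 'cities'), orderBy('population'), limit(3));
*   query(collection(db, 'cities'), orderBy('population'), limitToLast(3));
*/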
var QueryStartAtConstraint = /** @class */ (function (_super) {
tslib.__extends(QueryStartAtConstraint, _super);
function QueryStartAtConstraint(type, _docOrFields, _before) {
var _this = _super.call(this) || this;
_this.type = type;
_this._docOrFields = _docOrFields;
_this._before = _before;
return _this;
}
QueryStartAtConstraint.prototype._apply = function (query) {
var bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._before);
return new Query(query.firestore, query.converter, queryWithStartAt(query._query, bound));
};
return QueryStartAtConstraint;
}(QueryConstraint));
function startAt() {
var docOrFields = [];
for (var _i = 0; _i < arguments.length; _i++) {
docOrFields[_i] = arguments[_i];
}
return new QueryStartAtConstraint('startAt', docOrFields, /*before=*/ true);
}
function startAfter() {
var docOrFields = [];
for (var _i = 0; _i < arguments.length; _i++) {
docOrFields[_i] = arguments[_i];
}
return new QueryStartAtConstraint('startAfter', docOrFields,
/*before=*/ false);
}
var QueryEndAtConstraint = /** @class */ (function (_super) {
tslib.__extends(QueryEndAtConstraint, _super);
function QueryEndAtConstraint(type, _docOrFields, _before) {
var _this = _super.call(this) || this;
_this.type = type;
_this._docOrFields = _docOrFields;
_this._before = _before;
return _this;
}
QueryEndAtConstraint.prototype._apply = function (query) {
var bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._before);
return new Query(query.firestore, query.converter, queryWithEndAt(query._query, bound));
};
return QueryEndAtConstraint;
}(QueryConstraint));
function endBefore() {
var docOrFields = [];
for (var _i = 0; _i < arguments.length; _i++) {
docOrFields[_i] = arguments[_i];
}
return new QueryEndAtConstraint('endBefore', docOrFields, /*before=*/ true);
}
function endAt() {
var docOrFields = [];
for (var _i = 0; _i < arguments.length; _i++) {
docOrFields[_i] = arguments[_i];
}
return new QueryEndAtConstraint('endAt', docOrFields, /*before=*/ false);
}
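/*
* Example (illustrative sketch): cursor constraints. startAt()/endAt() include
* the given position, startAfter()/endBefore() exclude it. A cursor can be a
* DocumentSnapshot or field values matching the orderBy() clauses. Names are
* hypothetical.
*
*   query(collection(db, 'cities'), orderBy('population'), startAt(1000000));
*   getDoc(doc(db, 'cities', 'SF')).then(function (snapshot) {
*       return getDocs(query(collection(db, 'cities'), orderBy('population'), startAfter(snapshot)));
*   });
*/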
/** Helper function to create a bound from a document or fields */
function newQueryBoundFromDocOrFields(query, methodName, docOrFields, before) {
docOrFields[0] = util.getModularInstance(docOrFields[0]);
if (docOrFields[0] instanceof DocumentSnapshot$1) {
return newQueryBoundFromDocument(query._query, query.firestore._databaseId, methodName, docOrFields[0]._document, before);
}
else {
var reader = newUserDataReader(query.firestore);
return newQueryBoundFromFields(query._query, query.firestore._databaseId, reader, methodName, docOrFields, before);
}
}
function newQueryFilter(query, methodName, dataReader, databaseId, fieldPath, op, value) {
var fieldValue;
if (fieldPath.isKeyField()) {
if (op === "array-contains" /* ARRAY_CONTAINS */ || op === "array-contains-any" /* ARRAY_CONTAINS_ANY */) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid Query. You can't perform '" + op + "' " +
'queries on FieldPath.documentId().');
}
else if (op === "in" /* IN */ || op === "not-in" /* NOT_IN */) {
validateDisjunctiveFilterElements(value, op);
var referenceList = [];
for (var _i = 0, value_2 = value; _i < value_2.length; _i++) {
var arrayValue = value_2[_i];
referenceList.push(parseDocumentIdValue(databaseId, query, arrayValue));
}
fieldValue = { arrayValue: { values: referenceList } };
}
else {
fieldValue = parseDocumentIdValue(databaseId, query, value);
}
}
else {
if (op === "in" /* IN */ ||
op === "not-in" /* NOT_IN */ ||
op === "array-contains-any" /* ARRAY_CONTAINS_ANY */) {
validateDisjunctiveFilterElements(value, op);
}
fieldValue = parseQueryValue(dataReader, methodName, value,
/* allowArrays= */ op === "in" /* IN */ || op === "not-in" /* NOT_IN */);
}
var filter = FieldFilter.create(fieldPath, op, fieldValue);
validateNewFilter(query, filter);
return filter;
}
function newQueryOrderBy(query, fieldPath, direction) {
if (query.startAt !== null) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call startAt() or startAfter() before ' +
'calling orderBy().');
}
if (query.endAt !== null) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call endAt() or endBefore() before ' +
'calling orderBy().');
}
var orderBy = new OrderBy(fieldPath, direction);
validateNewOrderBy(query, orderBy);
return orderBy;
}
/**
* Create a Bound from a query and a document.
*
* Note that the Bound will always include the key of the document
* and so only the provided document will compare equal to the returned
* position.
*
* Will throw if the document does not contain all fields of the order by
* of the query or if any of the fields in the order by are an uncommitted
* server timestamp.
*/
function newQueryBoundFromDocument(query, databaseId, methodName, doc, before) {
if (!doc) {
throw new FirestoreError(Code.NOT_FOUND, "Can't use a DocumentSnapshot that doesn't exist for " +
(methodName + "()."));
}
var components = [];
// Because people expect to continue/end a query at the exact document
// provided, we need to use the implicit sort order rather than the explicit
// sort order, because it's guaranteed to contain the document key. That way
// the position becomes unambiguous and the query continues/ends exactly at
// the provided document. Without the key (by using the explicit sort
// orders), multiple documents could match the position, yielding duplicate
// results.
for (var _i = 0, _d = queryOrderBy(query); _i < _d.length; _i++) {
var orderBy_5 = _d[_i];
if (orderBy_5.field.isKeyField()) {
components.push(refValue(databaseId, doc.key));
}
else {
var value = doc.data.field(orderBy_5.field);
if (isServerTimestamp(value)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You are trying to start or end a query using a ' +
'document for which the field "' +
orderBy_5.field +
'" is an uncommitted server timestamp. (Since the value of ' +
'this field is unknown, you cannot start/end a query with it.)');
}
else if (value !== null) {
components.push(value);
}
else {
var field = orderBy_5.field.canonicalString();
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. You are trying to start or end a query using a " +
("document for which the field '" + field + "' (used as the ") +
"orderBy) does not exist.");
}
}
}
return new Bound(components, before);
}
/**
* Converts a list of field values to a Bound for the given query.
*/
function newQueryBoundFromFields(query, databaseId, dataReader, methodName, values, before) {
// Use the explicit orderBy clauses because they have to match the query the user made
var orderBy = query.explicitOrderBy;
if (values.length > orderBy.length) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Too many arguments provided to " + methodName + "(). " +
"The number of arguments must be less than or equal to the " +
"number of orderBy() clauses");
}
var components = [];
for (var i = 0; i < values.length; i++) {
var rawValue = values[i];
var orderByComponent = orderBy[i];
if (orderByComponent.field.isKeyField()) {
if (typeof rawValue !== 'string') {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. Expected a string for document ID in " +
(methodName + "(), but got a " + typeof rawValue));
}
if (!isCollectionGroupQuery(query) && rawValue.indexOf('/') !== -1) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. When querying a collection and ordering by FieldPath.documentId(), " +
("the value passed to " + methodName + "() must be a plain document ID, but ") +
("'" + rawValue + "' contains a slash."));
}
var path = query.path.child(ResourcePath.fromString(rawValue));
if (!DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. When querying a collection group and ordering by " +
("FieldPath.documentId(), the value passed to " + methodName + "() must result in a ") +
("valid document path, but '" + path + "' is not because it contains an odd number ") +
"of segments.");
}
var key = new DocumentKey(path);
components.push(refValue(databaseId, key));
}
else {
var wrapped = parseQueryValue(dataReader, methodName, rawValue);
components.push(wrapped);
}
}
return new Bound(components, before);
}
/**
* Parses the given documentIdValue into a ReferenceValue, throwing
* appropriate errors if the value is anything other than a DocumentReference
* or String, or if the string is malformed.
*/
function parseDocumentIdValue(databaseId, query, documentIdValue) {
documentIdValue = util.getModularInstance(documentIdValue);
if (typeof documentIdValue === 'string') {
if (documentIdValue === '') {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. When querying with FieldPath.documentId(), you ' +
'must provide a valid document ID, but it was an empty string.');
}
if (!isCollectionGroupQuery(query) && documentIdValue.indexOf('/') !== -1) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. When querying a collection by " +
"FieldPath.documentId(), you must provide a plain document ID, but " +
("'" + documentIdValue + "' contains a '/' character."));
}
var path = query.path.child(ResourcePath.fromString(documentIdValue));
if (!DocumentKey.isDocumentKey(path)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. When querying a collection group by " +
"FieldPath.documentId(), the value provided must result in a valid document path, " +
("but '" + path + "' is not because it has an odd number of segments (" + path.length + ")."));
}
return refValue(databaseId, new DocumentKey(path));
}
else if (documentIdValue instanceof DocumentReference) {
return refValue(databaseId, documentIdValue._key);
}
else {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. When querying with FieldPath.documentId(), you must provide a valid " +
"string or a DocumentReference, but it was: " +
(valueDescription(documentIdValue) + "."));
}
}
/**
* Validates that the value passed into a disjunctive filter satisfies all
* array requirements.
*/
function validateDisjunctiveFilterElements(value, operator) {
if (!Array.isArray(value) || value.length === 0) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid Query. A non-empty array is required for ' +
("'" + operator.toString() + "' filters."));
}
if (value.length > 10) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid Query. '" + operator.toString() + "' filters support a " +
'maximum of 10 elements in the value array.');
}
}
/**
* Given an operator, returns the set of operators that cannot be used with it.
*
* Operators in a query must adhere to the following set of rules:
* 1. Only one array operator is allowed.
* 2. Only one disjunctive operator is allowed.
* 3. NOT_EQUAL cannot be used with another NOT_EQUAL operator.
* 4. NOT_IN cannot be used with array, disjunctive, or NOT_EQUAL operators.
*
* Array operators: ARRAY_CONTAINS, ARRAY_CONTAINS_ANY
* Disjunctive operators: IN, ARRAY_CONTAINS_ANY, NOT_IN
*/
function conflictingOps(op) {
switch (op) {
case "!=" /* NOT_EQUAL */:
return ["!=" /* NOT_EQUAL */, "not-in" /* NOT_IN */];
case "array-contains" /* ARRAY_CONTAINS */:
return [
"array-contains" /* ARRAY_CONTAINS */,
"array-contains-any" /* ARRAY_CONTAINS_ANY */,
"not-in" /* NOT_IN */
];
case "in" /* IN */:
return ["array-contains-any" /* ARRAY_CONTAINS_ANY */, "in" /* IN */, "not-in" /* NOT_IN */];
case "array-contains-any" /* ARRAY_CONTAINS_ANY */:
return [
"array-contains" /* ARRAY_CONTAINS */,
"array-contains-any" /* ARRAY_CONTAINS_ANY */,
"in" /* IN */,
"not-in" /* NOT_IN */
];
case "not-in" /* NOT_IN */:
return [
"array-contains" /* ARRAY_CONTAINS */,
"array-contains-any" /* ARRAY_CONTAINS_ANY */,
"in" /* IN */,
"not-in" /* NOT_IN */,
"!=" /* NOT_EQUAL */
];
default:
return [];
}
}
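/*
* Example (illustrative sketch): filter combinations rejected by the rules
* above; validateNewFilter() below throws for them. `ref` is a hypothetical
* collection reference.
*
*   // rejected: only one array operator is allowed per query
*   query(ref, where('a', 'array-contains', 1), where('b', 'array-contains-any', [2]));
*   // rejected: 'not-in' cannot be combined with '!='
*   query(ref, where('a', 'not-in', [1]), where('b', '!=', 2));
*/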
function validateNewFilter(query, filter) {
if (filter.isInequality()) {
var existingField = getInequalityFilterField(query);
if (existingField !== null && !existingField.isEqual(filter.field)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. All where filters with an inequality' +
' (<, <=, !=, not-in, >, or >=) must be on the same field. But you have' +
(" inequality filters on '" + existingField.toString() + "'") +
(" and '" + filter.field.toString() + "'"));
}
var firstOrderByField = getFirstOrderByField(query);
if (firstOrderByField !== null) {
validateOrderByAndInequalityMatch(query, filter.field, firstOrderByField);
}
}
var conflictingOp = findFilterOperator(query, conflictingOps(filter.op));
if (conflictingOp !== null) {
// Special case when it's a duplicate op to give a slightly clearer error message.
if (conflictingOp === filter.op) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You cannot use more than one ' +
("'" + filter.op.toString() + "' filter."));
}
else {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. You cannot use '" + filter.op.toString() + "' filters " +
("with '" + conflictingOp.toString() + "' filters."));
}
}
}
function validateNewOrderBy(query, orderBy) {
if (getFirstOrderByField(query) === null) {
// This is the first order by. It must match any inequality.
var inequalityField = getInequalityFilterField(query);
if (inequalityField !== null) {
validateOrderByAndInequalityMatch(query, inequalityField, orderBy.field);
}
}
}
function validateOrderByAndInequalityMatch(baseQuery, inequality, orderBy) {
if (!orderBy.isEqual(inequality)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, "Invalid query. You have a where filter with an inequality " +
("(<, <=, !=, not-in, >, or >=) on field '" + inequality.toString() + "' ") +
("and so you must also use '" + inequality.toString() + "' ") +
"as your first argument to orderBy(), but your first orderBy() " +
("is on field '" + orderBy.toString() + "' instead."));
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Converts Firestore's internal types to the JavaScript types that we expose
* to the user.
*
* @internal
*/
var AbstractUserDataWriter = /** @class */ (function () {
function AbstractUserDataWriter() {
}
AbstractUserDataWriter.prototype.convertValue = function (value, serverTimestampBehavior) {
if (serverTimestampBehavior === void 0) { serverTimestampBehavior = 'none'; }
switch (typeOrder(value)) {
case 0 /* NullValue */:
return null;
case 1 /* BooleanValue */:
return value.booleanValue;
case 2 /* NumberValue */:
return normalizeNumber(value.integerValue || value.doubleValue);
case 3 /* TimestampValue */:
return this.convertTimestamp(value.timestampValue);
case 4 /* ServerTimestampValue */:
return this.convertServerTimestamp(value, serverTimestampBehavior);
case 5 /* StringValue */:
return value.stringValue;
case 6 /* BlobValue */:
return this.convertBytes(normalizeByteString(value.bytesValue));
case 7 /* RefValue */:
return this.convertReference(value.referenceValue);
case 8 /* GeoPointValue */:
return this.convertGeoPoint(value.geoPointValue);
case 9 /* ArrayValue */:
return this.convertArray(value.arrayValue, serverTimestampBehavior);
case 10 /* ObjectValue */:
return this.convertObject(value.mapValue, serverTimestampBehavior);
default:
throw fail();
}
};
AbstractUserDataWriter.prototype.convertObject = function (mapValue, serverTimestampBehavior) {
var _this = this;
var result = {};
forEach(mapValue.fields, function (key, value) {
result[key] = _this.convertValue(value, serverTimestampBehavior);
});
return result;
};
AbstractUserDataWriter.prototype.convertGeoPoint = function (value) {
return new GeoPoint(normalizeNumber(value.latitude), normalizeNumber(value.longitude));
};
AbstractUserDataWriter.prototype.convertArray = function (arrayValue, serverTimestampBehavior) {
var _this = this;
return (arrayValue.values || []).map(function (value) { return _this.convertValue(value, serverTimestampBehavior); });
};
AbstractUserDataWriter.prototype.convertServerTimestamp = function (value, serverTimestampBehavior) {
switch (serverTimestampBehavior) {
case 'previous':
var previousValue = getPreviousValue(value);
if (previousValue == null) {
return null;
}
return this.convertValue(previousValue, serverTimestampBehavior);
case 'estimate':
return this.convertTimestamp(getLocalWriteTime(value));
default:
return null;
}
};
AbstractUserDataWriter.prototype.convertTimestamp = function (value) {
var normalizedValue = normalizeTimestamp(value);
return new Timestamp(normalizedValue.seconds, normalizedValue.nanos);
};
AbstractUserDataWriter.prototype.convertDocumentKey = function (name, expectedDatabaseId) {
var resourcePath = ResourcePath.fromString(name);
hardAssert(isValidResourceName(resourcePath));
var databaseId = new DatabaseId(resourcePath.get(1), resourcePath.get(3));
var key = new DocumentKey(resourcePath.popFirst(5));
if (!databaseId.isEqual(expectedDatabaseId)) {
// TODO(b/64130202): Somehow support foreign references.
logError("Document " + key + " contains a document " +
"reference within a different database (" +
(databaseId.projectId + "/" + databaseId.database + ") which is not ") +
"supported. It will be treated as a reference in the current " +
("database (" + expectedDatabaseId.projectId + "/" + expectedDatabaseId.database + ") ") +
"instead.");
}
return key;
};
return AbstractUserDataWriter;
}());
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Converts custom model object of type T into DocumentData by applying the
* converter if it exists.
*
* This function is used when converting user objects to DocumentData
* because we want to provide the user with a more specific error message if
* their set() or addDoc() call fails due to invalid data originating from a
* toFirestore() call.
*/
function applyFirestoreDataConverter(converter, value, options) {
var convertedValue;
if (converter) {
if (options && (options.merge || options.mergeFields)) {
// Cast to `any` in order to satisfy the union type constraint on
// toFirestore().
// eslint-disable-next-line @typescript-eslint/no-explicit-any
convertedValue = converter.toFirestore(value, options);
}
else {
convertedValue = converter.toFirestore(value);
}
}
else {
convertedValue = value;
}
return convertedValue;
}
var LiteUserDataWriter = /** @class */ (function (_super) {
tslib.__extends(LiteUserDataWriter, _super);
function LiteUserDataWriter(firestore) {
var _this = _super.call(this) || this;
_this.firestore = firestore;
return _this;
}
LiteUserDataWriter.prototype.convertBytes = function (bytes) {
return new Bytes(bytes);
};
LiteUserDataWriter.prototype.convertReference = function (name) {
var key = this.convertDocumentKey(name, this.firestore._databaseId);
return new DocumentReference(this.firestore, /* converter= */ null, key);
};
return LiteUserDataWriter;
}(AbstractUserDataWriter));
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A write batch, used to perform multiple writes as a single atomic unit.
*
* A `WriteBatch` object can be acquired by calling {@link writeBatch}. It
* provides methods for adding writes to the write batch. None of the writes
* will be committed (or visible locally) until {@link WriteBatch.commit} is
* called.
*/
var WriteBatch = /** @class */ (function () {
/** @hideconstructor */
function WriteBatch(_firestore, _commitHandler) {
this._firestore = _firestore;
this._commitHandler = _commitHandler;
this._mutations = [];
this._committed = false;
this._dataReader = newUserDataReader(_firestore);
}
WriteBatch.prototype.set = function (documentRef, data, options) {
this._verifyNotCommitted();
var ref = validateReference(documentRef, this._firestore);
var convertedValue = applyFirestoreDataConverter(ref.converter, data, options);
var parsed = parseSetData(this._dataReader, 'WriteBatch.set', ref._key, convertedValue, ref.converter !== null, options);
this._mutations.push(parsed.toMutation(ref._key, Precondition.none()));
return this;
};
WriteBatch.prototype.update = function (documentRef, fieldOrUpdateData, value) {
var moreFieldsAndValues = [];
for (var _i = 3; _i < arguments.length; _i++) {
moreFieldsAndValues[_i - 3] = arguments[_i];
}
this._verifyNotCommitted();
var ref = validateReference(documentRef, this._firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
fieldOrUpdateData = util.getModularInstance(fieldOrUpdateData);
var parsed;
if (typeof fieldOrUpdateData === 'string' ||
fieldOrUpdateData instanceof FieldPath) {
parsed = parseUpdateVarargs(this._dataReader, 'WriteBatch.update', ref._key, fieldOrUpdateData, value, moreFieldsAndValues);
}
else {
parsed = parseUpdateData(this._dataReader, 'WriteBatch.update', ref._key, fieldOrUpdateData);
}
this._mutations.push(parsed.toMutation(ref._key, Precondition.exists(true)));
return this;
};
/**
* Deletes the document referred to by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be deleted.
* @returns This `WriteBatch` instance. Used for chaining method calls.
*/
WriteBatch.prototype.delete = function (documentRef) {
this._verifyNotCommitted();
var ref = validateReference(documentRef, this._firestore);
this._mutations = this._mutations.concat(new DeleteMutation(ref._key, Precondition.none()));
return this;
};
/**
* Commits all of the writes in this write batch as a single atomic unit.
*
* The result of these writes will only be reflected in document reads that
* occur after the returned Promise resolves. If the client is offline, the
* write fails. If you would like to see local modifications or buffer writes
* until the client is online, use the full Firestore SDK.
*
* @returns A Promise resolved once all of the writes in the batch have been
* successfully written to the backend as an atomic unit (note that it won't
* resolve while you're offline).
*/
WriteBatch.prototype.commit = function () {
this._verifyNotCommitted();
this._committed = true;
if (this._mutations.length > 0) {
return this._commitHandler(this._mutations);
}
return Promise.resolve();
};
WriteBatch.prototype._verifyNotCommitted = function () {
if (this._committed) {
throw new FirestoreError(Code.FAILED_PRECONDITION, 'A write batch can no longer be used after commit() ' +
'has been called.');
}
};
return WriteBatch;
}());
function validateReference(documentRef, firestore) {
documentRef = util.getModularInstance(documentRef);
if (documentRef.firestore !== firestore) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Provided document reference is from a different Firestore instance.');
}
else {
return documentRef;
}
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// TODO(mrschmidt) Consider using `BaseTransaction` as the base class in the
// legacy SDK.
/**
* A reference to a transaction.
*
* The `Transaction` object passed to a transaction's `updateFunction` provides
* the methods to read and write data within the transaction context. See
* {@link runTransaction}.
*/
var Transaction$1 = /** @class */ (function () {
/** @hideconstructor */
function Transaction$1(_firestore, _transaction) {
this._firestore = _firestore;
this._transaction = _transaction;
this._dataReader = newUserDataReader(_firestore);
}
/**
* Reads the document referenced by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be read.
* @returns A `DocumentSnapshot` with the read data.
*/
Transaction$1.prototype.get = function (documentRef) {
var _this = this;
var ref = validateReference(documentRef, this._firestore);
var userDataWriter = new LiteUserDataWriter(this._firestore);
return this._transaction.lookup([ref._key]).then(function (docs) {
if (!docs || docs.length !== 1) {
return fail();
}
var doc = docs[0];
if (doc.isFoundDocument()) {
return new DocumentSnapshot$1(_this._firestore, userDataWriter, doc.key, doc, ref.converter);
}
else if (doc.isNoDocument()) {
return new DocumentSnapshot$1(_this._firestore, userDataWriter, ref._key, null, ref.converter);
}
else {
throw fail();
}
});
};
Transaction$1.prototype.set = function (documentRef, value, options) {
var ref = validateReference(documentRef, this._firestore);
var convertedValue = applyFirestoreDataConverter(ref.converter, value, options);
var parsed = parseSetData(this._dataReader, 'Transaction.set', ref._key, convertedValue, ref.converter !== null, options);
this._transaction.set(ref._key, parsed);
return this;
};
Transaction$1.prototype.update = function (documentRef, fieldOrUpdateData, value) {
var moreFieldsAndValues = [];
for (var _i = 3; _i < arguments.length; _i++) {
moreFieldsAndValues[_i - 3] = arguments[_i];
}
var ref = validateReference(documentRef, this._firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
fieldOrUpdateData = util.getModularInstance(fieldOrUpdateData);
var parsed;
if (typeof fieldOrUpdateData === 'string' ||
fieldOrUpdateData instanceof FieldPath) {
parsed = parseUpdateVarargs(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData, value, moreFieldsAndValues);
}
else {
parsed = parseUpdateData(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData);
}
this._transaction.update(ref._key, parsed);
return this;
};
/**
* Deletes the document referred to by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be deleted.
* @returns This `Transaction` instance. Used for chaining method calls.
*/
Transaction$1.prototype.delete = function (documentRef) {
var ref = validateReference(documentRef, this._firestore);
this._transaction.delete(ref._key);
return this;
};
return Transaction$1;
}());
/**
* @license
* Copyright 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
function isPartialObserver(obj) {
return implementsAnyMethods(obj, ['next', 'error', 'complete']);
}
/**
* Returns true if obj is an object and contains at least one of the specified
* methods.
*/
function implementsAnyMethods(obj, methods) {
if (typeof obj !== 'object' || obj === null) {
return false;
}
var object = obj;
for (var _i = 0, methods_1 = methods; _i < methods_1.length; _i++) {
var method = methods_1[_i];
if (method in object && typeof object[method] === 'function') {
return true;
}
}
return false;
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Reads the document referred to by this `DocumentReference`.
*
* Note: `getDoc()` attempts to provide up-to-date data when possible by waiting
* for data from the server, but it may return cached data or fail if you are
* offline and the server cannot be reached. To specify this behavior, invoke
* {@link getDocFromCache} or {@link getDocFromServer}.
*
* @param reference - The reference of the document to fetch.
* @returns A Promise resolved with a `DocumentSnapshot` containing the
* current document contents.
*/
function getDoc(reference) {
reference = cast(reference, DocumentReference);
var firestore = cast(reference.firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
return firestoreClientGetDocumentViaSnapshotListener(client, reference._key).then(function (snapshot) { return convertToDocSnapshot(firestore, reference, snapshot); });
}
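/*
* Example (illustrative sketch): reading a single document once.
* getDocFromCache() and getDocFromServer() take the same reference when
* cache-only or server-only behavior is needed. The path is hypothetical.
*
*   getDoc(doc(db, 'cities', 'SF')).then(function (snapshot) {
*       if (snapshot.exists()) {
*           console.log(snapshot.data());
*       }
*   });
*/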
var ExpUserDataWriter = /** @class */ (function (_super) {
tslib.__extends(ExpUserDataWriter, _super);
function ExpUserDataWriter(firestore) {
var _this = _super.call(this) || this;
_this.firestore = firestore;
return _this;
}
ExpUserDataWriter.prototype.convertBytes = function (bytes) {
return new Bytes(bytes);
};
ExpUserDataWriter.prototype.convertReference = function (name) {
var key = this.convertDocumentKey(name, this.firestore._databaseId);
return new DocumentReference(this.firestore, /* converter= */ null, key);
};
return ExpUserDataWriter;
}(AbstractUserDataWriter));
/**
* Reads the document referred to by this `DocumentReference` from cache.
* Returns an error if the document is not currently cached.
*
* @returns A Promise resolved with a `DocumentSnapshot` containing the
* current document contents.
*/
function getDocFromCache(reference) {
reference = cast(reference, DocumentReference);
var firestore = cast(reference.firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
var userDataWriter = new ExpUserDataWriter(firestore);
return firestoreClientGetDocumentFromLocalCache(client, reference._key).then(function (doc) { return new DocumentSnapshot(firestore, userDataWriter, reference._key, doc, new SnapshotMetadata(doc !== null && doc.hasLocalMutations,
/* fromCache= */ true), reference.converter); });
}
/**
* Reads the document referred to by this `DocumentReference` from the server.
* Returns an error if the network is not available.
*
* @returns A Promise resolved with a `DocumentSnapshot` containing the
* current document contents.
*/
function getDocFromServer(reference) {
reference = cast(reference, DocumentReference);
var firestore = cast(reference.firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
return firestoreClientGetDocumentViaSnapshotListener(client, reference._key, {
source: 'server'
}).then(function (snapshot) { return convertToDocSnapshot(firestore, reference, snapshot); });
}
/**
* Executes the query and returns the results as a `QuerySnapshot`.
*
* Note: `getDocs()` attempts to provide up-to-date data when possible by
* waiting for data from the server, but it may return cached data or fail if
* you are offline and the server cannot be reached. To specify this behavior,
* invoke {@link getDocsFromCache} or {@link getDocsFromServer}.
*
* @returns A Promise that will be resolved with the results of the query.
*/
function getDocs(query) {
query = cast(query, Query);
var firestore = cast(query.firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
var userDataWriter = new ExpUserDataWriter(firestore);
validateHasExplicitOrderByForLimitToLast(query._query);
return firestoreClientGetDocumentsViaSnapshotListener(client, query._query).then(function (snapshot) { return new QuerySnapshot(firestore, userDataWriter, query, snapshot); });
}
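/*
* Example (illustrative sketch): executing a query once. getDocsFromCache()
* and getDocsFromServer() accept the same query when a specific source is
* required. Collection and field names are hypothetical.
*
*   getDocs(query(collection(db, 'cities'), where('capital', '==', true))).then(function (snapshot) {
*       snapshot.forEach(function (docSnap) {
*           console.log(docSnap.id, docSnap.data());
*       });
*   });
*/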
/**
* Executes the query and returns the results as a `QuerySnapshot` from cache.
* Returns an empty result set if no documents matching the query are
* currently cached.
*
* @returns A Promise that will be resolved with the results of the query.
*/
function getDocsFromCache(query) {
query = cast(query, Query);
var firestore = cast(query.firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
var userDataWriter = new ExpUserDataWriter(firestore);
return firestoreClientGetDocumentsFromLocalCache(client, query._query).then(function (snapshot) { return new QuerySnapshot(firestore, userDataWriter, query, snapshot); });
}
/**
* Executes the query and returns the results as a `QuerySnapshot` from the
* server. Returns an error if the network is not available.
*
* @returns A Promise that will be resolved with the results of the query.
*/
function getDocsFromServer(query) {
query = cast(query, Query);
var firestore = cast(query.firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
var userDataWriter = new ExpUserDataWriter(firestore);
return firestoreClientGetDocumentsViaSnapshotListener(client, query._query, {
source: 'server'
}).then(function (snapshot) { return new QuerySnapshot(firestore, userDataWriter, query, snapshot); });
}
function setDoc(reference, data, options) {
reference = cast(reference, DocumentReference);
var firestore = cast(reference.firestore, Firestore);
var convertedValue = applyFirestoreDataConverter(reference.converter, data, options);
var dataReader = newUserDataReader(firestore);
var parsed = parseSetData(dataReader, 'setDoc', reference._key, convertedValue, reference.converter !== null, options);
var mutation = parsed.toMutation(reference._key, Precondition.none());
return executeWrite(firestore, [mutation]);
}
function updateDoc(reference, fieldOrUpdateData, value) {
var moreFieldsAndValues = [];
for (var _i = 3; _i < arguments.length; _i++) {
moreFieldsAndValues[_i - 3] = arguments[_i];
}
reference = cast(reference, DocumentReference);
var firestore = cast(reference.firestore, Firestore);
var dataReader = newUserDataReader(firestore);
// For Compat types, we have to "extract" the underlying types before
// performing validation.
fieldOrUpdateData = util.getModularInstance(fieldOrUpdateData);
var parsed;
if (typeof fieldOrUpdateData === 'string' ||
fieldOrUpdateData instanceof FieldPath) {
parsed = parseUpdateVarargs(dataReader, 'updateDoc', reference._key, fieldOrUpdateData, value, moreFieldsAndValues);
}
else {
parsed = parseUpdateData(dataReader, 'updateDoc', reference._key, fieldOrUpdateData);
}
var mutation = parsed.toMutation(reference._key, Precondition.exists(true));
return executeWrite(firestore, [mutation]);
}
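/*
* Example (illustrative sketch): the two call forms handled above. updateDoc()
* accepts either an update object or a field path followed by alternating
* field/value pairs; setDoc() with { merge: true } overlays fields instead of
* replacing the document. Paths and data are hypothetical.
*
*   setDoc(doc(db, 'cities', 'SF'), { name: 'San Francisco' }, { merge: true });
*   updateDoc(doc(db, 'cities', 'SF'), { population: 874961 });
*   updateDoc(doc(db, 'cities', 'SF'), 'population', 874961, 'state', 'CA');
*/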
/**
* Deletes the document referred to by the specified `DocumentReference`.
*
* @param reference - A reference to the document to delete.
* @returns A Promise resolved once the document has been successfully
* deleted from the backend (note that it won't resolve while you're offline).
*/
function deleteDoc(reference) {
var firestore = cast(reference.firestore, Firestore);
var mutations = [new DeleteMutation(reference._key, Precondition.none())];
return executeWrite(firestore, mutations);
}
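/*
* Example (illustrative sketch): deleting a document by reference. The path is
* hypothetical.
*
*   deleteDoc(doc(db, 'cities', 'SF')).then(function () {
*       console.log('Document deleted');
*   });
*/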
/**
* Add a new document to the specified `CollectionReference` with the given data,
* assigning it a document ID automatically.
*
* @param reference - A reference to the collection to add this document to.
* @param data - An Object containing the data for the new document.
* @returns A Promise resolved with a `DocumentReference` pointing to the
* newly created document after it has been written to the backend (note that it
* won't resolve while you're offline).
*/
function addDoc(reference, data) {
var firestore = cast(reference.firestore, Firestore);
var docRef = doc(reference);
var convertedValue = applyFirestoreDataConverter(reference.converter, data);
var dataReader = newUserDataReader(reference.firestore);
var parsed = parseSetData(dataReader, 'addDoc', docRef._key, convertedValue, reference.converter !== null, {});
var mutation = parsed.toMutation(docRef._key, Precondition.exists(false));
return executeWrite(firestore, [mutation]).then(function () { return docRef; });
}
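/*
* Example (illustrative sketch): adding a document with an auto-generated ID.
* The collection name and data are hypothetical.
*
*   addDoc(collection(db, 'cities'), { name: 'Tokyo', country: 'Japan' }).then(function (docRef) {
*       console.log('New document ID:', docRef.id);
*   });
*/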
function onSnapshot(reference) {
var args = [];
for (var _i = 1; _i < arguments.length; _i++) {
args[_i - 1] = arguments[_i];
}
var _a, _b, _c;
reference = util.getModularInstance(reference);
var options = {
includeMetadataChanges: false
};
var currArg = 0;
if (typeof args[currArg] === 'object' && !isPartialObserver(args[currArg])) {
options = args[currArg];
currArg++;
}
var internalOptions = {
includeMetadataChanges: options.includeMetadataChanges
};
if (isPartialObserver(args[currArg])) {
var userObserver = args[currArg];
args[currArg] = (_a = userObserver.next) === null || _a === void 0 ? void 0 : _a.bind(userObserver);
args[currArg + 1] = (_b = userObserver.error) === null || _b === void 0 ? void 0 : _b.bind(userObserver);
args[currArg + 2] = (_c = userObserver.complete) === null || _c === void 0 ? void 0 : _c.bind(userObserver);
}
var observer;
var firestore;
var internalQuery;
if (reference instanceof DocumentReference) {
firestore = cast(reference.firestore, Firestore);
internalQuery = newQueryForPath(reference._key.path);
observer = {
next: function (snapshot) {
if (args[currArg]) {
args[currArg](convertToDocSnapshot(firestore, reference, snapshot));
}
},
error: args[currArg + 1],
complete: args[currArg + 2]
};
}
else {
var query_5 = cast(reference, Query);
firestore = cast(query_5.firestore, Firestore);
internalQuery = query_5._query;
var userDataWriter_1 = new ExpUserDataWriter(firestore);
observer = {
next: function (snapshot) {
if (args[currArg]) {
args[currArg](new QuerySnapshot(firestore, userDataWriter_1, query_5, snapshot));
}
},
error: args[currArg + 1],
complete: args[currArg + 2]
};
validateHasExplicitOrderByForLimitToLast(reference._query);
}
var client = ensureFirestoreConfigured(firestore);
return firestoreClientListen(client, internalQuery, internalOptions, observer);
}
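/*
* Example (illustrative sketch): listening to a document with next and error
* callbacks; the returned function detaches the listener. The path is
* hypothetical.
*
*   var unsubscribe = onSnapshot(doc(db, 'cities', 'SF'), function (snapshot) {
*       console.log('current data:', snapshot.data());
*   }, function (error) {
*       console.error('listen failed:', error);
*   });
*   // later, to stop listening:
*   unsubscribe();
*/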
function onSnapshotsInSync(firestore, arg) {
firestore = cast(firestore, Firestore);
var client = ensureFirestoreConfigured(firestore);
var observer = isPartialObserver(arg)
? arg
: {
next: arg
};
return firestoreClientAddSnapshotsInSyncListener(client, observer);
}
/**
* Locally writes `mutations` on the async queue.
* @internal
*/
function executeWrite(firestore, mutations) {
var client = ensureFirestoreConfigured(firestore);
return firestoreClientWrite(client, mutations);
}
/**
* Converts a ViewSnapshot that contains the single document specified by `ref`
* to a DocumentSnapshot.
*/
function convertToDocSnapshot(firestore, ref, snapshot) {
var doc = snapshot.docs.get(ref._key);
var userDataWriter = new ExpUserDataWriter(firestore);
return new DocumentSnapshot(firestore, userDataWriter, ref._key, doc, new SnapshotMetadata(snapshot.hasPendingWrites, snapshot.fromCache), ref.converter);
}
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A reference to a transaction.
*
* The `Transaction` object passed to a transaction's `updateFunction` provides
* the methods to read and write data within the transaction context. See
* {@link runTransaction}.
*/
var Transaction = /** @class */ (function (_super) {
tslib.__extends(Transaction, _super);
// This class implements the same logic as the Transaction API in the Lite SDK
// but is subclassed in order to return its own DocumentSnapshot types.
/** @hideconstructor */
function Transaction(_firestore, _transaction) {
var _this = _super.call(this, _firestore, _transaction) || this;
_this._firestore = _firestore;
return _this;
}
/**
* Reads the document referenced by the provided {@link DocumentReference}.
*
* @param documentRef - A reference to the document to be read.
* @returns A `DocumentSnapshot` with the read data.
*/
Transaction.prototype.get = function (documentRef) {
var _this = this;
var ref = validateReference(documentRef, this._firestore);
var userDataWriter = new ExpUserDataWriter(this._firestore);
return _super.prototype.get.call(this, documentRef)
.then(function (liteDocumentSnapshot) { return new DocumentSnapshot(_this._firestore, userDataWriter, ref._key, liteDocumentSnapshot._document, new SnapshotMetadata(
/* hasPendingWrites= */ false,
/* fromCache= */ false), ref.converter); });
};
return Transaction;
}(Transaction$1));
/**
* Executes the given `updateFunction` and then attempts to commit the changes
* applied within the transaction. If any document read within the transaction
* has changed, Cloud Firestore retries the `updateFunction`. If it fails to
* commit after 5 attempts, the transaction fails.
*
* The maximum number of writes allowed in a single transaction is 500.
*
* @param firestore - A reference to the Firestore database to run this
* transaction against.
* @param updateFunction - The function to execute within the transaction
* context.
* @returns If the transaction completed successfully or was explicitly aborted
* (the `updateFunction` returned a failed promise), the promise returned by the
* `updateFunction` is returned here. Otherwise, if the transaction failed, a
* rejected promise with the corresponding failure error is returned.
*/
function runTransaction(firestore, updateFunction) {
var client = ensureFirestoreConfigured(firestore);
return firestoreClientTransaction(client, function (internalTransaction) { return updateFunction(new Transaction(firestore, internalTransaction)); });
}
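/*
* Example (illustrative sketch): a read-modify-write transaction. The document
* path and field are hypothetical.
*
*   runTransaction(db, function (transaction) {
*       var ref = doc(db, 'cities', 'SF');
*       return transaction.get(ref).then(function (snapshot) {
*           var current = snapshot.exists() ? snapshot.data().population : 0;
*           transaction.update(ref, { population: current + 1 });
*       });
*   });
*/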
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Returns a sentinel for use with {@link @firebase/firestore/lite#(updateDoc:1)} or
* {@link @firebase/firestore/lite#(setDoc:1)} with `{merge: true}` to mark a field for deletion.
*/
function deleteField() {
return new DeleteFieldValueImpl('deleteField');
}
/**
* Returns a sentinel used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link @firebase/firestore/lite#(updateDoc:1)} to
* include a server-generated timestamp in the written data.
*/
function serverTimestamp() {
return new ServerTimestampFieldValueImpl('serverTimestamp');
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to union the given elements with any array
* value that already exists on the server. Each specified element that doesn't
* already exist in the array will be added to the end. If the field being
* modified is not already an array it will be overwritten with an array
* containing exactly the specified elements.
*
* @param elements - The elements to union into the array.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`.
*/
function arrayUnion() {
var elements = [];
for (var _i = 0; _i < arguments.length; _i++) {
elements[_i] = arguments[_i];
}
// NOTE: We don't actually parse the data until it's used in set() or
// update() since we'd need the Firestore instance to do this.
return new ArrayUnionFieldValueImpl('arrayUnion', elements);
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to remove the given elements from any
* array value that already exists on the server. All instances of each element
* specified will be removed from the array. If the field being modified is not
* already an array it will be overwritten with an empty array.
*
* @param elements - The elements to remove from the array.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`
*/
function arrayRemove() {
var elements = [];
for (var _i = 0; _i < arguments.length; _i++) {
elements[_i] = arguments[_i];
}
// NOTE: We don't actually parse the data until it's used in set() or
// update() since we'd need the Firestore instance to do this.
return new ArrayRemoveFieldValueImpl('arrayRemove', elements);
}
/**
* Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
* @firebase/firestore/lite#(updateDoc:1)} that tells the server to increment the field's current value by
* the given value.
*
* If either the operand or the current field value uses floating point
* precision, all arithmetic follows IEEE 754 semantics. If both values are
* integers, values outside of JavaScript's safe number range
* (`Number.MIN_SAFE_INTEGER` to `Number.MAX_SAFE_INTEGER`) are also subject to
* precision loss. Furthermore, once processed by the Firestore backend, all
* integer operations are capped between -2^63 and 2^63-1.
*
* If the current field value is not of type `number`, or if the field does not
* yet exist, the transformation sets the field to the given value.
*
* @param n - The value to increment by.
* @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
* `updateDoc()`
*/
function increment(n) {
return new NumericIncrementFieldValueImpl('increment', n);
}
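/*
* Example (illustrative sketch): combining the field transforms above in a
* single update. The document path and field names are hypothetical.
*
*   updateDoc(doc(db, 'cities', 'SF'), {
*       population: increment(1),
*       regions: arrayUnion('west_coast'),
*       lastUpdated: serverTimestamp(),
*       obsoleteField: deleteField()
*   });
*/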
/**
* @license
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Creates a write batch, used for performing multiple writes as a single
* atomic operation. The maximum number of writes allowed in a single WriteBatch
* is 500.
*
* Unlike transactions, write batches are persisted offline and therefore are
* preferable when you don't need to condition your writes on read data.
*
* @returns A `WriteBatch` that can be used to atomically execute multiple
* writes.
*/
function writeBatch(firestore) {
firestore = cast(firestore, Firestore);
ensureFirestoreConfigured(firestore);
return new WriteBatch(firestore, function (mutations) { return executeWrite(firestore, mutations); });
}
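/*
* Example (illustrative sketch): batching several writes into one atomic
* commit. References and data are hypothetical.
*
*   var batch = writeBatch(db);
*   batch.set(doc(db, 'cities', 'LA'), { name: 'Los Angeles' });
*   batch.update(doc(db, 'cities', 'SF'), { population: 874961 });
*   batch.delete(doc(db, 'cities', 'NYC'));
*   batch.commit().then(function () {
*       console.log('Batch committed');
*   });
*/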
/**
* @license
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
registerFirestore('node');
exports.AbstractUserDataWriter = AbstractUserDataWriter;
exports.Bytes = Bytes;
exports.CACHE_SIZE_UNLIMITED = CACHE_SIZE_UNLIMITED;
exports.CollectionReference = CollectionReference;
exports.DocumentReference = DocumentReference;
exports.DocumentSnapshot = DocumentSnapshot;
exports.FieldPath = FieldPath;
exports.FieldValue = FieldValue;
exports.Firestore = Firestore;
exports.FirestoreError = FirestoreError;
exports.GeoPoint = GeoPoint;
exports.LoadBundleTask = LoadBundleTask;
exports.Query = Query;
exports.QueryConstraint = QueryConstraint;
exports.QueryDocumentSnapshot = QueryDocumentSnapshot;
exports.QuerySnapshot = QuerySnapshot;
exports.SnapshotMetadata = SnapshotMetadata;
exports.Timestamp = Timestamp;
exports.Transaction = Transaction;
exports.WriteBatch = WriteBatch;
exports.addDoc = addDoc;
exports.arrayRemove = arrayRemove;
exports.arrayUnion = arrayUnion;
exports.clearIndexedDbPersistence = clearIndexedDbPersistence;
exports.collection = collection;
exports.collectionGroup = collectionGroup;
exports.connectFirestoreEmulator = connectFirestoreEmulator;
exports.deleteDoc = deleteDoc;
exports.deleteField = deleteField;
exports.disableNetwork = disableNetwork;
exports.doc = doc;
exports.documentId = documentId;
exports.enableIndexedDbPersistence = enableIndexedDbPersistence;
exports.enableMultiTabIndexedDbPersistence = enableMultiTabIndexedDbPersistence;
exports.enableNetwork = enableNetwork;
exports.endAt = endAt;
exports.endBefore = endBefore;
exports.ensureFirestoreConfigured = ensureFirestoreConfigured;
exports.executeWrite = executeWrite;
exports.getDoc = getDoc;
exports.getDocFromCache = getDocFromCache;
exports.getDocFromServer = getDocFromServer;
exports.getDocs = getDocs;
exports.getDocsFromCache = getDocsFromCache;
exports.getDocsFromServer = getDocsFromServer;
exports.getFirestore = getFirestore;
exports.increment = increment;
exports.initializeFirestore = initializeFirestore;
exports.limit = limit;
exports.limitToLast = limitToLast;
exports.loadBundle = loadBundle;
exports.namedQuery = namedQuery;
exports.onSnapshot = onSnapshot;
exports.onSnapshotsInSync = onSnapshotsInSync;
exports.orderBy = orderBy;
exports.query = query;
exports.queryEqual = queryEqual;
exports.refEqual = refEqual;
exports.runTransaction = runTransaction;
exports.serverTimestamp = serverTimestamp;
exports.setDoc = setDoc;
exports.setLogLevel = setLogLevel;
exports.snapshotEqual = snapshotEqual;
exports.startAfter = startAfter;
exports.startAt = startAt;
exports.terminate = terminate;
exports.updateDoc = updateDoc;
exports.waitForPendingWrites = waitForPendingWrites;
exports.where = where;
exports.writeBatch = writeBatch;
//# sourceMappingURL=index.node.cjs.js.map