initial vitepress site with basic nav

mrflos 2023-05-20 19:37:42 +03:00
parent a7df2e049d
commit 2029f16583
1900 changed files with 1014692 additions and 0 deletions


@@ -0,0 +1,483 @@
'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
var requesterCommon = require('@algolia/requester-common');
function createMappedRequestOptions(requestOptions, timeout) {
const options = requestOptions || {};
const data = options.data || {};
Object.keys(options).forEach(key => {
if (['timeout', 'headers', 'queryParameters', 'data', 'cacheable'].indexOf(key) === -1) {
data[key] = options[key]; // eslint-disable-line functional/immutable-data
}
});
return {
data: Object.entries(data).length > 0 ? data : undefined,
timeout: options.timeout || timeout,
headers: options.headers || {},
queryParameters: options.queryParameters || {},
cacheable: options.cacheable,
};
}
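/*
* A minimal sketch of how the mapping above behaves (illustrative only, based
* on the implementation of `createMappedRequestOptions` in this file): reserved
* keys keep their own slots, while any other request-option key - for example
* a hypothetical `createIfNotExists` flag - is folded into `data`.
*
*   const mapped = createMappedRequestOptions({ timeout: 10, createIfNotExists: true });
*   // mapped.timeout         => 10
*   // mapped.data            => { createIfNotExists: true }
*   // mapped.headers         => {}
*   // mapped.queryParameters => {}
*   // mapped.cacheable       => undefined
*/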
const CallEnum = {
/**
* If the host is read only.
*/
Read: 1,
/**
* If the host is write only.
*/
Write: 2,
/**
* If the host is both read and write.
*/
Any: 3,
};
const HostStatusEnum = {
Up: 1,
Down: 2,
Timeouted: 3,
};
// By default, API clients at Algolia have an expiration delay
// of 5 minutes. In the JavaScript client, we use 2 minutes.
const EXPIRATION_DELAY = 2 * 60 * 1000;
function createStatefulHost(host, status = HostStatusEnum.Up) {
return {
...host,
status,
lastUpdate: Date.now(),
};
}
function isStatefulHostUp(host) {
return host.status === HostStatusEnum.Up || Date.now() - host.lastUpdate > EXPIRATION_DELAY;
}
function isStatefulHostTimeouted(host) {
return (host.status === HostStatusEnum.Timeouted && Date.now() - host.lastUpdate <= EXPIRATION_DELAY);
}
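/*
* A short sketch of the host-state lifecycle implied above (illustrative only;
* the host url is a placeholder): a host marked `Down` is skipped for
* EXPIRATION_DELAY (2 minutes), after which `isStatefulHostUp` considers it up
* again, while a `Timeouted` host is instead moved to the end of the host list
* during that window.
*
*   const failed = createStatefulHost({ protocol: 'https', url: 'example.algolia.net', accept: CallEnum.Any }, HostStatusEnum.Down);
*   isStatefulHostUp(failed); // false right after the failure
*   // ...and once Date.now() - failed.lastUpdate > EXPIRATION_DELAY:
*   // isStatefulHostUp(failed) => true, so the host is tried again
*/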
function createStatelessHost(options) {
if (typeof options === 'string') {
return {
protocol: 'https',
url: options,
accept: CallEnum.Any,
};
}
return {
protocol: options.protocol || 'https',
url: options.url,
accept: options.accept || CallEnum.Any,
};
}
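/*
* The two accepted host shapes, as normalized by `createStatelessHost`
* (illustrative only; the url is a placeholder):
*
*   createStatelessHost('example-dsn.algolia.net');
*   // => { protocol: 'https', url: 'example-dsn.algolia.net', accept: CallEnum.Any }
*
*   createStatelessHost({ url: 'example-dsn.algolia.net', accept: CallEnum.Read, protocol: 'http' });
*   // => { protocol: 'http', url: 'example-dsn.algolia.net', accept: CallEnum.Read }
*/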
function createRetryableOptions(hostsCache, statelessHosts) {
return Promise.all(statelessHosts.map(statelessHost => {
return hostsCache.get(statelessHost, () => {
return Promise.resolve(createStatefulHost(statelessHost));
});
})).then(statefulHosts => {
const hostsUp = statefulHosts.filter(host => isStatefulHostUp(host));
const hostsTimeouted = statefulHosts.filter(host => isStatefulHostTimeouted(host));
/**
* Note that we put the hosts that previously timed out at the end of the list.
*/
const hostsAvailable = [...hostsUp, ...hostsTimeouted];
const statelessHostsAvailable = hostsAvailable.length > 0
? hostsAvailable.map(host => createStatelessHost(host))
: statelessHosts;
return {
getTimeout(timeoutsCount, baseTimeout) {
/**
* Imagine that you have 4 hosts and the timeouts increase
* in the following way: 1 (timed out) > 4 (timed out) > 5 (200).
*
* Note that, on the very next request, we start from the previous timeout:
*
* 5 (timed out) > 6 (timed out) > 7 ...
*
* This strategy may need to be reviewed, but it is the strategy used in our
* current v3 version.
*/
const timeoutMultiplier = hostsTimeouted.length === 0 && timeoutsCount === 0
? 1
: hostsTimeouted.length + 3 + timeoutsCount;
return timeoutMultiplier * baseTimeout;
},
statelessHosts: statelessHostsAvailable,
};
});
}
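/*
* A worked example of the multiplier above (illustrative only), assuming no
* previously timed-out hosts and a base timeout of 2 seconds: the multiplier is
* 1 while no timeout has happened, then 0 + 3 + 1 = 4 after the first timeout,
* 0 + 3 + 2 = 5 after the second, and so on.
*
*   createRetryableOptions(hostsCache, statelessHosts).then(options => {
*     options.getTimeout(0, 2); // => 2  (1 * 2 seconds)
*     options.getTimeout(1, 2); // => 8  (4 * 2 seconds)
*     options.getTimeout(2, 2); // => 10 (5 * 2 seconds)
*   });
*/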
const isNetworkError = ({ isTimedOut, status }) => {
return !isTimedOut && ~~status === 0;
};
const isRetryable = (response) => {
const status = response.status;
const isTimedOut = response.isTimedOut;
return (isTimedOut || isNetworkError(response) || (~~(status / 100) !== 2 && ~~(status / 100) !== 4));
};
const isSuccess = ({ status }) => {
return ~~(status / 100) === 2;
};
const retryDecision = (response, outcomes) => {
if (isRetryable(response)) {
return outcomes.onRetry(response);
}
if (isSuccess(response)) {
return outcomes.onSuccess(response);
}
return outcomes.onFail(response);
};
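/*
* How the predicates above classify a few representative responses
* (illustrative only):
*
*   isRetryable({ isTimedOut: true, status: 0 });    // true  - timeout
*   isRetryable({ isTimedOut: false, status: 0 });   // true  - network error
*   isRetryable({ isTimedOut: false, status: 500 }); // true  - 5xx is retried on the next host
*   isRetryable({ isTimedOut: false, status: 404 }); // false - 4xx is returned as a failure
*   isSuccess({ status: 200 });                      // true
*/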
function retryableRequest(transporter, statelessHosts, request, requestOptions) {
const stackTrace = []; // eslint-disable-line functional/prefer-readonly-type
/**
* First, we prepare the parts of the payload that do not depend on the host.
*/
const data = serializeData(request, requestOptions);
const headers = serializeHeaders(transporter, requestOptions);
const method = request.method;
// On `GET`, the data is proxied to query parameters.
const dataQueryParameters = request.method !== requesterCommon.MethodEnum.Get
? {}
: {
...request.data,
...requestOptions.data,
};
const queryParameters = {
'x-algolia-agent': transporter.userAgent.value,
...transporter.queryParameters,
...dataQueryParameters,
...requestOptions.queryParameters,
};
let timeoutsCount = 0; // eslint-disable-line functional/no-let
const retry = (hosts, // eslint-disable-line functional/prefer-readonly-type
getTimeout) => {
/**
* We iterate over the hosts until there is no host left.
*/
const host = hosts.pop(); // eslint-disable-line functional/immutable-data
if (host === undefined) {
throw createRetryError(stackTraceWithoutCredentials(stackTrace));
}
const payload = {
data,
headers,
method,
url: serializeUrl(host, request.path, queryParameters),
connectTimeout: getTimeout(timeoutsCount, transporter.timeouts.connect),
responseTimeout: getTimeout(timeoutsCount, requestOptions.timeout),
};
/**
* The stackFrame is pushed to the stackTrace so we
* can keep information about the onRetry and onFail
* decisions.
*/
const pushToStackTrace = (response) => {
const stackFrame = {
request: payload,
response,
host,
triesLeft: hosts.length,
};
// eslint-disable-next-line functional/immutable-data
stackTrace.push(stackFrame);
return stackFrame;
};
const decisions = {
onSuccess: response => deserializeSuccess(response),
onRetry(response) {
const stackFrame = pushToStackTrace(response);
/**
* If the response is a timeout, we increment the number of
* timeouts so we can increase the timeout later.
*/
if (response.isTimedOut) {
timeoutsCount++;
}
return Promise.all([
/**
* Failures are individually sent to the logger, allowing
* the end user to debug / store stack frames even
* when a retry error does not happen.
*/
transporter.logger.info('Retryable failure', stackFrameWithoutCredentials(stackFrame)),
/**
* We also store the state of the host in failure cases. If the host is
* down, it will remain down for the next 2 minutes. In a timeout situation,
* this host will be added to the end of the list of hosts on the next request.
*/
transporter.hostsCache.set(host, createStatefulHost(host, response.isTimedOut ? HostStatusEnum.Timeouted : HostStatusEnum.Down)),
]).then(() => retry(hosts, getTimeout));
},
onFail(response) {
pushToStackTrace(response);
throw deserializeFailure(response, stackTraceWithoutCredentials(stackTrace));
},
};
return transporter.requester.send(payload).then(response => {
return retryDecision(response, decisions);
});
};
/**
* Finally, for each retryable host, perform the request until we get a non-
* retryable response. Some notes here:
*
* 1. The hosts are reversed so we can use a `pop` later on => more performant.
* 2. We also get from the retryable options a timeout multiplier that is tailored
* for the current context.
*/
return createRetryableOptions(transporter.hostsCache, statelessHosts).then(options => {
return retry([...options.statelessHosts].reverse(), options.getTimeout);
});
}
function createTransporter(options) {
const { hostsCache, logger, requester, requestsCache, responsesCache, timeouts, userAgent, hosts, queryParameters, headers, } = options;
const transporter = {
hostsCache,
logger,
requester,
requestsCache,
responsesCache,
timeouts,
userAgent,
headers,
queryParameters,
hosts: hosts.map(host => createStatelessHost(host)),
read(request, requestOptions) {
/**
* First, we compute the user request options. Keep in mind
* that, using request options, the user is able to modify the entire
* payload of the request, such as headers, query parameters, and others.
*/
const mappedRequestOptions = createMappedRequestOptions(requestOptions, transporter.timeouts.read);
const createRetryableRequest = () => {
/**
* Then, we prepare a function factory that contains the construction of
* the retryable request. At this point, we do *not* perform the actual
* request yet, but we want to have the function factory ready.
*/
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Read) !== 0), request, mappedRequestOptions);
};
/**
* Once we have the function factory ready, we need to determine whether the
* request is "cacheable" - that is, whether it should be cached. Note that, once again,
* the user can force this option.
*/
const cacheable = mappedRequestOptions.cacheable !== undefined
? mappedRequestOptions.cacheable
: request.cacheable;
/**
* If it is not "cacheable", we immediately trigger the retryable request; no
* need to check cache implementations.
*/
if (cacheable !== true) {
return createRetryableRequest();
}
/**
* If the request is "cacheable", we first need to compute the key to ask
* the cache implementations whether this request is in progress or the
* response already exists in the cache.
*/
const key = {
request,
mappedRequestOptions,
transporter: {
queryParameters: transporter.queryParameters,
headers: transporter.headers,
},
};
/**
* With the computed key, we first ask the responses cache
* implementation whether this request has been resolved before.
*/
return transporter.responsesCache.get(key, () => {
/**
* If the request has never been resolved before, we ask whether there
* is a current request with the same key in progress.
*/
return transporter.requestsCache.get(key, () => {
return (transporter.requestsCache
/**
* Finally, if there is no request in progress with the same key,
* this `createRetryableRequest()` will actually trigger the
* retryable request.
*/
.set(key, createRetryableRequest())
.then(response => Promise.all([transporter.requestsCache.delete(key), response]), err => Promise.all([transporter.requestsCache.delete(key), Promise.reject(err)]))
.then(([_, response]) => response));
});
}, {
/**
* Of course, once we get this response back from the server, we
* tell the responses cache to store the received response
* to be used later.
*/
miss: response => transporter.responsesCache.set(key, response),
});
},
write(request, requestOptions) {
/**
* On write requests, no cache mechanisms are applied, and we
* proxy the request immediately to the requester.
*/
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Write) !== 0), request, createMappedRequestOptions(requestOptions, transporter.timeouts.write));
},
};
return transporter;
}
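/*
* A minimal wiring sketch (illustrative only - the cache, logger and requester
* below are hand-rolled stand-ins that merely satisfy the calls made in this
* file; the real client builds them from the @algolia/cache-*, @algolia/logger-*
* and @algolia/requester-* packages, and the application id, API key, hosts and
* path are placeholders):
*
*   const passthroughCache = () => ({
*     get: (key, defaultValue, events = { miss: () => Promise.resolve() }) =>
*       defaultValue().then(value => Promise.all([value, events.miss(value)])).then(([value]) => value),
*     set: (key, value) => Promise.resolve(value),
*     delete: () => Promise.resolve(),
*   });
*
*   const transporter = createTransporter({
*     hostsCache: passthroughCache(),
*     requestsCache: passthroughCache(),
*     responsesCache: passthroughCache(),
*     logger: { debug: () => Promise.resolve(), info: () => Promise.resolve(), error: () => Promise.resolve() },
*     requester: { send: () => Promise.resolve({ content: '{}', status: 200, isTimedOut: false }) },
*     timeouts: { connect: 1, read: 2, write: 30 },
*     userAgent: createUserAgent('4.x.x'),
*     hosts: [{ url: 'APP_ID-dsn.algolia.net', accept: CallEnum.Read }, { url: 'APP_ID.algolia.net', accept: CallEnum.Write }],
*     headers: { 'x-algolia-application-id': 'APP_ID', 'x-algolia-api-key': 'API_KEY' },
*     queryParameters: {},
*   });
*
*   transporter
*     .read({ method: requesterCommon.MethodEnum.Get, path: '1/indexes/products/settings', cacheable: true })
*     .then(result => console.log(result));
*/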
function createUserAgent(version) {
const userAgent = {
value: `Algolia for JavaScript (${version})`,
add(options) {
const addedUserAgent = `; ${options.segment}${options.version !== undefined ? ` (${options.version})` : ''}`;
if (userAgent.value.indexOf(addedUserAgent) === -1) {
// eslint-disable-next-line functional/immutable-data
userAgent.value = `${userAgent.value}${addedUserAgent}`;
}
return userAgent;
},
};
return userAgent;
}
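/*
* The user agent string grows as integrations register themselves, and adding
* the same segment twice is a no-op (illustrative only; the segment name and
* versions are placeholders):
*
*   const ua = createUserAgent('4.0.0').add({ segment: 'Vue InstantSearch', version: '2.0.0' });
*   // ua.value === 'Algolia for JavaScript (4.0.0); Vue InstantSearch (2.0.0)'
*   ua.add({ segment: 'Vue InstantSearch', version: '2.0.0' }); // value is unchanged
*/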
function deserializeSuccess(response) {
// eslint-disable-next-line functional/no-try-statement
try {
return JSON.parse(response.content);
}
catch (e) {
throw createDeserializationError(e.message, response);
}
}
function deserializeFailure({ content, status }, stackFrame) {
// eslint-disable-next-line functional/no-let
let message = content;
// eslint-disable-next-line functional/no-try-statement
try {
message = JSON.parse(content).message;
}
catch (e) {
// ..
}
return createApiError(message, status, stackFrame);
}
// eslint-disable-next-line functional/prefer-readonly-type
function encode(format, ...args) {
// eslint-disable-next-line functional/no-let
let i = 0;
return format.replace(/%s/g, () => encodeURIComponent(args[i++]));
}
function serializeUrl(host, path, queryParameters) {
const queryParametersAsString = serializeQueryParameters(queryParameters);
// eslint-disable-next-line functional/no-let
let url = `${host.protocol}://${host.url}/${path.charAt(0) === '/' ? path.substr(1) : path}`;
if (queryParametersAsString.length) {
url += `?${queryParametersAsString}`;
}
return url;
}
function serializeQueryParameters(parameters) {
const isObjectOrArray = (value) => Object.prototype.toString.call(value) === '[object Object]' ||
Object.prototype.toString.call(value) === '[object Array]';
return Object.keys(parameters)
.map(key => encode('%s=%s', key, isObjectOrArray(parameters[key]) ? JSON.stringify(parameters[key]) : parameters[key]))
.join('&');
}
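/*
* Query parameter serialization percent-encodes keys and values, and
* JSON-stringifies object and array values before encoding (illustrative only):
*
*   serializeQueryParameters({ query: 'mobile phone', tagFilters: ['promo', 'sale'] });
*   // => 'query=mobile%20phone&tagFilters=%5B%22promo%22%2C%22sale%22%5D'
*/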
function serializeData(request, requestOptions) {
if (request.method === requesterCommon.MethodEnum.Get ||
(request.data === undefined && requestOptions.data === undefined)) {
return undefined;
}
const data = Array.isArray(request.data)
? request.data
: { ...request.data, ...requestOptions.data };
return JSON.stringify(data);
}
function serializeHeaders(transporter, requestOptions) {
const headers = {
...transporter.headers,
...requestOptions.headers,
};
const serializedHeaders = {};
Object.keys(headers).forEach(header => {
const value = headers[header];
// @ts-ignore
// eslint-disable-next-line functional/immutable-data
serializedHeaders[header.toLowerCase()] = value;
});
return serializedHeaders;
}
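/*
* Header serialization lower-cases every header name, with request-level
* headers overriding transporter-level ones (illustrative only; the values are
* placeholders):
*
*   serializeHeaders(
*     { headers: { 'X-Algolia-API-Key': 'transporter-level-key' } },
*     { headers: { 'X-Algolia-API-Key': 'request-level-key' } }
*   );
*   // => { 'x-algolia-api-key': 'request-level-key' }
*/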
function stackTraceWithoutCredentials(stackTrace) {
return stackTrace.map(stackFrame => stackFrameWithoutCredentials(stackFrame));
}
function stackFrameWithoutCredentials(stackFrame) {
const modifiedHeaders = stackFrame.request.headers['x-algolia-api-key']
? { 'x-algolia-api-key': '*****' }
: {};
return {
...stackFrame,
request: {
...stackFrame.request,
headers: {
...stackFrame.request.headers,
...modifiedHeaders,
},
},
};
}
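/*
* Credential scrubbing only masks the API key header; the rest of the frame is
* preserved (illustrative only; the values are placeholders):
*
*   stackFrameWithoutCredentials({
*     request: { url: 'https://example.algolia.net/1/indexes', headers: { 'x-algolia-api-key': 'secret' } },
*     response: { content: '', status: 500, isTimedOut: false },
*     host: { protocol: 'https', url: 'example.algolia.net', accept: CallEnum.Any },
*     triesLeft: 2,
*   }).request.headers['x-algolia-api-key'];
*   // => '*****'
*/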
function createApiError(message, status, transporterStackTrace) {
return {
name: 'ApiError',
message,
status,
transporterStackTrace,
};
}
function createDeserializationError(message, response) {
return {
name: 'DeserializationError',
message,
response,
};
}
function createRetryError(transporterStackTrace) {
return {
name: 'RetryError',
message: 'Unreachable hosts - your application id may be incorrect. If the error persists, contact support@algolia.com.',
transporterStackTrace,
};
}
exports.CallEnum = CallEnum;
exports.HostStatusEnum = HostStatusEnum;
exports.createApiError = createApiError;
exports.createDeserializationError = createDeserializationError;
exports.createMappedRequestOptions = createMappedRequestOptions;
exports.createRetryError = createRetryError;
exports.createStatefulHost = createStatefulHost;
exports.createStatelessHost = createStatelessHost;
exports.createTransporter = createTransporter;
exports.createUserAgent = createUserAgent;
exports.deserializeFailure = deserializeFailure;
exports.deserializeSuccess = deserializeSuccess;
exports.isStatefulHostTimeouted = isStatefulHostTimeouted;
exports.isStatefulHostUp = isStatefulHostUp;
exports.serializeData = serializeData;
exports.serializeHeaders = serializeHeaders;
exports.serializeQueryParameters = serializeQueryParameters;
exports.serializeUrl = serializeUrl;
exports.stackFrameWithoutCredentials = stackFrameWithoutCredentials;
exports.stackTraceWithoutCredentials = stackTraceWithoutCredentials;

node_modules/@algolia/transporter/dist/transporter.d.ts

@@ -0,0 +1,381 @@
import { Cache } from '@algolia/cache-common';
import { Logger } from '@algolia/logger-common';
import { MethodType } from '@algolia/requester-common';
import { Request as Request_2 } from '@algolia/requester-common';
import { Requester } from '@algolia/requester-common';
import { Response } from '@algolia/requester-common';
export declare type ApiError = Error & {
/**
* The http status code.
*/
readonly status: number;
/**
* Contains a report of the stack frames from the
* execution of a certain request.
*/
readonly transporterStackTrace: readonly StackFrame[];
};
export declare const CallEnum: Readonly<Record<string, CallType>>;
export declare type CallType = 1 | 2 | 3;
export declare function createApiError(message: string, status: number, transporterStackTrace: readonly StackFrame[]): ApiError;
export declare function createDeserializationError(message: string, response: Response): DeserializationError;
export declare function createMappedRequestOptions(requestOptions?: RequestOptions, timeout?: number): MappedRequestOptions;
export declare function createRetryError(transporterStackTrace: readonly StackFrame[]): RetryError;
export declare function createStatefulHost(host: StatelessHost, status?: HostStatusType): StatefulHost;
export declare function createStatelessHost(options: HostOptions): StatelessHost;
export declare function createTransporter(options: TransporterOptions): Transporter;
export declare function createUserAgent(version: string): UserAgent;
export declare type DeserializationError = Error & {
/**
* The raw response from the server.
*/
readonly response: Response;
};
export declare function deserializeFailure({ content, status }: Response, stackFrame: readonly StackFrame[]): Error;
export declare function deserializeSuccess<TObject>(response: Response): TObject;
export declare type Headers = Readonly<Record<string, string>>;
export declare type HostOptions = string | {
/**
* The url of the server, without the protocol.
*/
readonly url: string;
/**
* The type of host. Defaults to `Any`.
*/
readonly accept?: CallType;
/**
* The protocol. Defaults to `https`.
*/
readonly protocol?: string;
};
export declare const HostStatusEnum: Readonly<Record<string, HostStatusType>>;
export declare type HostStatusType = 1 | 2 | 3;
export declare function isStatefulHostTimeouted(host: StatefulHost): boolean;
export declare function isStatefulHostUp(host: StatefulHost): boolean;
export declare type MappedRequestOptions = {
/**
* If the request should be cached.
*/
readonly cacheable: boolean | undefined;
/**
* The `read` or `write` timeout of the request.
*/
readonly timeout: number | undefined;
/**
* The headers of the request.
*/
readonly headers: Record<string, string>;
/**
* The query parameters of the request.
*/
readonly queryParameters: Record<string, any>;
/**
* The data to be transferred to the server.
*/
readonly data?: Record<string, string>;
};
export declare type QueryParameters = Readonly<Record<string, string>>;
export declare type Request = {
/**
* The method of the request. `GET`, etc.
*/
readonly method: MethodType;
/**
* The path of the request, e.g. `/1/indexes`.
*/
readonly path: string;
/**
* The data to transfer to the server.
*/
readonly data?: Record<string, any> | ReadonlyArray<Record<string, any>>;
/**
* If the response should persist on cache.
*/
readonly cacheable?: boolean;
};
export declare type RequestOptions = {
/**
* If the given request should persist in the cache. Keep in mind
* that some methods may have this option enabled by default.
*/
readonly cacheable?: boolean;
/**
* Custom timeout for the request. Note that, in normal situations,
* the given timeout will be applied, but the transporter layer may
* increase this timeout if there is a need for it.
*/
readonly timeout?: number;
/**
* Custom headers for the request. These headers are
* going to be merged with the transporter headers.
*/
readonly headers?: Readonly<Record<string, string>>;
/**
* Custom query parameters for the request. These query parameters are
* going to be merged with the transporter query parameters.
*/
readonly queryParameters?: Record<string, any>;
/**
* Custom data for the request. This data is
* going to be merged with the transporter data.
*/
readonly data?: Record<string, any>;
/**
* Additional request body values. They are only taken into
* consideration in `POST` and `PUT` requests.
*/
[key: string]: any;
};
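/*
* A sketch of a typical value (illustrative only; `getRankingInfo` simply
* stands for any extra key, which ends up in the request body or the query
* string depending on the HTTP method):
*
*   const requestOptions: RequestOptions = {
*     cacheable: false,
*     timeout: 2,
*     headers: { 'X-Forwarded-For': '203.0.113.7' },
*     queryParameters: { clickAnalytics: 'true' },
*     getRankingInfo: true,
*   };
*/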
export declare type RetryError = Error & {
/**
* Contains a report of the stack frames from the
* execution of a certain request.
*/
readonly transporterStackTrace: readonly StackFrame[];
};
export declare function serializeData(request: Request, requestOptions: RequestOptions): string | undefined;
export declare function serializeHeaders(transporter: Transporter, requestOptions: RequestOptions): Headers;
export declare function serializeQueryParameters(parameters: Readonly<Record<string, any>>): string;
export declare function serializeUrl(host: StatelessHost, path: string, queryParameters: Readonly<Record<string, string>>): string;
export declare type StackFrame = {
/**
* The request made.
*/
readonly request: Request_2;
/**
* The received response.
*/
readonly response: Response;
/**
* The host associated with the `request` and the `response`.
*/
readonly host: StatelessHost;
/**
* The number of tries left.
*/
readonly triesLeft: number;
};
export declare function stackFrameWithoutCredentials(stackFrame: StackFrame): StackFrame;
export declare function stackTraceWithoutCredentials(stackTrace: readonly StackFrame[]): readonly StackFrame[];
export declare type StatefulHost = StatelessHost & {
/**
* Holds the last time this host failed, in milliseconds elapsed
* since the UNIX epoch. This failure can be because of a
* timeout error or because the host is not available.
*/
readonly lastUpdate: number;
/**
* Holds the host status. Note that, depending on the `lastUpdate`,
* a host may be considered as `Up` by the transporter layer.
*/
readonly status: HostStatusType;
};
export declare type StatelessHost = {
/**
* The protocol of the stateless host. Either `http` or `https`.
*/
readonly protocol: string;
/**
* The url, without protocol.
*/
readonly url: string;
/**
* The type of the host.
*/
readonly accept: CallType;
};
export declare type Timeouts = {
/**
* The timeout to establish a connection with the server.
*/
readonly connect: number;
/**
* The timeout to receive the response on read requests.
*/
readonly read: number;
/**
* The timeout to receive the response on write requests.
*/
readonly write: number;
};
export declare type Transporter = {
/**
* The cache of the hosts. Usually used to persist
* the state of the host when it's down.
*/
readonly hostsCache: Cache;
/**
* The logger instance to send events of the transporter.
*/
readonly logger: Logger;
/**
* The underlying requester used. Should differ
* depending on the environment where the client
* will be used.
*/
readonly requester: Requester;
/**
* The cache of the requests. When requests are
* `cacheable`, the returned promise persists
* in this cache to be shared by similar requests
* before being resolved.
*/
readonly requestsCache: Cache;
/**
* The cache of the responses. When requests are
* `cacheable`, the returned responses persist
* in this cache to be shared by similar requests.
*/
readonly responsesCache: Cache;
/**
* The timeouts used by the requester. The transporter
* layer may increase these timeouts as defined by the
* retry strategy.
*/
readonly timeouts: Timeouts;
/**
* The user agent used. Sent in the query parameters.
*/
readonly userAgent: UserAgent;
/**
* The headers used on each request.
*/
readonly headers: Headers;
/**
* The query parameters used on each request.
*/
readonly queryParameters: QueryParameters;
/**
* The hosts used by the retry strategy.
*
* @readonly
*/
hosts: readonly StatelessHost[];
/**
* Performs a read request using read hosts.
*/
readonly read: <TResponse>(request: Request, requestOptions?: RequestOptions) => Readonly<Promise<TResponse>>;
/**
* Performs a write request using write hosts.
*/
readonly write: <TResponse>(request: Request, requestOptions?: RequestOptions) => Readonly<Promise<TResponse>>;
};
export declare type TransporterOptions = {
/**
* The cache of the hosts. Usually used to persist
* the state of the host when it's down.
*/
readonly hostsCache: Cache;
/**
* The logger instance to send events of the transporter.
*/
readonly logger: Logger;
/**
* The underlying requester used. Should differ
* depending on the environment where the client
* will be used.
*/
readonly requester: Requester;
/**
* The cache of the requests. When requests are
* `cacheable`, the returned promise persists
* in this cache to be shared by similar requests
* before being resolved.
*/
readonly requestsCache: Cache;
/**
* The cache of the responses. When requests are
* `cacheable`, the returned responses persist
* in this cache to be shared by similar requests.
*/
readonly responsesCache: Cache;
/**
* The timeouts used by the requester. The transporter
* layer may increase these timeouts as defined by the
* retry strategy.
*/
readonly timeouts: Timeouts;
/**
* The hosts used by the requester.
*/
readonly hosts: readonly HostOptions[];
/**
* The headers used by the requester. The transporter
* layer may add some extra headers during the request
* for the user agent, among others.
*/
readonly headers: Headers;
/**
* The query parameters used by the requester. The transporter
* layer may add some extra query parameters during the request
* for the user agent, among others.
*/
readonly queryParameters: QueryParameters;
/**
* The user agent used. Sent on query parameters.
*/
readonly userAgent: UserAgent;
};
export declare type UserAgent = {
/**
* The raw value of the user agent.
*
* @readonly
*/
value: string;
/**
* Mutates the current user agent, adding the given user agent options.
*/
readonly add: (options: UserAgentOptions) => UserAgent;
};
export declare type UserAgentOptions = {
/**
* The segment. Usually the integration name.
*/
readonly segment: string;
/**
* The version. Usually the integration version.
*/
readonly version?: string;
};
export { }


@@ -0,0 +1,460 @@
import { MethodEnum } from '@algolia/requester-common';
function createMappedRequestOptions(requestOptions, timeout) {
const options = requestOptions || {};
const data = options.data || {};
Object.keys(options).forEach(key => {
if (['timeout', 'headers', 'queryParameters', 'data', 'cacheable'].indexOf(key) === -1) {
data[key] = options[key]; // eslint-disable-line functional/immutable-data
}
});
return {
data: Object.entries(data).length > 0 ? data : undefined,
timeout: options.timeout || timeout,
headers: options.headers || {},
queryParameters: options.queryParameters || {},
cacheable: options.cacheable,
};
}
const CallEnum = {
/**
* If the host is read only.
*/
Read: 1,
/**
* If the host is write only.
*/
Write: 2,
/**
* If the host is both read and write.
*/
Any: 3,
};
const HostStatusEnum = {
Up: 1,
Down: 2,
Timeouted: 3,
};
// By default, API clients at Algolia have an expiration delay
// of 5 minutes. In the JavaScript client, we use 2 minutes.
const EXPIRATION_DELAY = 2 * 60 * 1000;
function createStatefulHost(host, status = HostStatusEnum.Up) {
return {
...host,
status,
lastUpdate: Date.now(),
};
}
function isStatefulHostUp(host) {
return host.status === HostStatusEnum.Up || Date.now() - host.lastUpdate > EXPIRATION_DELAY;
}
function isStatefulHostTimeouted(host) {
return (host.status === HostStatusEnum.Timeouted && Date.now() - host.lastUpdate <= EXPIRATION_DELAY);
}
function createStatelessHost(options) {
if (typeof options === 'string') {
return {
protocol: 'https',
url: options,
accept: CallEnum.Any,
};
}
return {
protocol: options.protocol || 'https',
url: options.url,
accept: options.accept || CallEnum.Any,
};
}
function createRetryableOptions(hostsCache, statelessHosts) {
return Promise.all(statelessHosts.map(statelessHost => {
return hostsCache.get(statelessHost, () => {
return Promise.resolve(createStatefulHost(statelessHost));
});
})).then(statefulHosts => {
const hostsUp = statefulHosts.filter(host => isStatefulHostUp(host));
const hostsTimeouted = statefulHosts.filter(host => isStatefulHostTimeouted(host));
/**
* Note that we put the hosts that previously timed out at the end of the list.
*/
const hostsAvailable = [...hostsUp, ...hostsTimeouted];
const statelessHostsAvailable = hostsAvailable.length > 0
? hostsAvailable.map(host => createStatelessHost(host))
: statelessHosts;
return {
getTimeout(timeoutsCount, baseTimeout) {
/**
* Imagine that you have 4 hosts and the timeouts increase
* in the following way: 1 (timed out) > 4 (timed out) > 5 (200).
*
* Note that, on the very next request, we start from the previous timeout:
*
* 5 (timed out) > 6 (timed out) > 7 ...
*
* This strategy may need to be reviewed, but it is the strategy used in our
* current v3 version.
*/
const timeoutMultiplier = hostsTimeouted.length === 0 && timeoutsCount === 0
? 1
: hostsTimeouted.length + 3 + timeoutsCount;
return timeoutMultiplier * baseTimeout;
},
statelessHosts: statelessHostsAvailable,
};
});
}
const isNetworkError = ({ isTimedOut, status }) => {
return !isTimedOut && ~~status === 0;
};
const isRetryable = (response) => {
const status = response.status;
const isTimedOut = response.isTimedOut;
return (isTimedOut || isNetworkError(response) || (~~(status / 100) !== 2 && ~~(status / 100) !== 4));
};
const isSuccess = ({ status }) => {
return ~~(status / 100) === 2;
};
const retryDecision = (response, outcomes) => {
if (isRetryable(response)) {
return outcomes.onRetry(response);
}
if (isSuccess(response)) {
return outcomes.onSuccess(response);
}
return outcomes.onFail(response);
};
function retryableRequest(transporter, statelessHosts, request, requestOptions) {
const stackTrace = []; // eslint-disable-line functional/prefer-readonly-type
/**
* First, we prepare the parts of the payload that do not depend on the host.
*/
const data = serializeData(request, requestOptions);
const headers = serializeHeaders(transporter, requestOptions);
const method = request.method;
// On `GET`, the data is proxied to query parameters.
const dataQueryParameters = request.method !== MethodEnum.Get
? {}
: {
...request.data,
...requestOptions.data,
};
const queryParameters = {
'x-algolia-agent': transporter.userAgent.value,
...transporter.queryParameters,
...dataQueryParameters,
...requestOptions.queryParameters,
};
let timeoutsCount = 0; // eslint-disable-line functional/no-let
const retry = (hosts, // eslint-disable-line functional/prefer-readonly-type
getTimeout) => {
/**
* We iterate over the hosts until there is no host left.
*/
const host = hosts.pop(); // eslint-disable-line functional/immutable-data
if (host === undefined) {
throw createRetryError(stackTraceWithoutCredentials(stackTrace));
}
const payload = {
data,
headers,
method,
url: serializeUrl(host, request.path, queryParameters),
connectTimeout: getTimeout(timeoutsCount, transporter.timeouts.connect),
responseTimeout: getTimeout(timeoutsCount, requestOptions.timeout),
};
/**
* The stackFrame is pushed to the stackTrace so we
* can keep information about the onRetry and onFail
* decisions.
*/
const pushToStackTrace = (response) => {
const stackFrame = {
request: payload,
response,
host,
triesLeft: hosts.length,
};
// eslint-disable-next-line functional/immutable-data
stackTrace.push(stackFrame);
return stackFrame;
};
const decisions = {
onSuccess: response => deserializeSuccess(response),
onRetry(response) {
const stackFrame = pushToStackTrace(response);
/**
* If the response is a timeout, we increment the number of
* timeouts so we can increase the timeout later.
*/
if (response.isTimedOut) {
timeoutsCount++;
}
return Promise.all([
/**
* Failures are individually sent to the logger, allowing
* the end user to debug / store stack frames even
* when a retry error does not happen.
*/
transporter.logger.info('Retryable failure', stackFrameWithoutCredentials(stackFrame)),
/**
* We also store the state of the host in failure cases. If the host is
* down, it will remain down for the next 2 minutes. In a timeout situation,
* this host will be added to the end of the list of hosts on the next request.
*/
transporter.hostsCache.set(host, createStatefulHost(host, response.isTimedOut ? HostStatusEnum.Timeouted : HostStatusEnum.Down)),
]).then(() => retry(hosts, getTimeout));
},
onFail(response) {
pushToStackTrace(response);
throw deserializeFailure(response, stackTraceWithoutCredentials(stackTrace));
},
};
return transporter.requester.send(payload).then(response => {
return retryDecision(response, decisions);
});
};
/**
* Finally, for each retryable host, perform the request until we get a non-
* retryable response. Some notes here:
*
* 1. The hosts are reversed so we can use a `pop` later on => more performant.
* 2. We also get from the retryable options a timeout multiplier that is tailored
* for the current context.
*/
return createRetryableOptions(transporter.hostsCache, statelessHosts).then(options => {
return retry([...options.statelessHosts].reverse(), options.getTimeout);
});
}
function createTransporter(options) {
const { hostsCache, logger, requester, requestsCache, responsesCache, timeouts, userAgent, hosts, queryParameters, headers, } = options;
const transporter = {
hostsCache,
logger,
requester,
requestsCache,
responsesCache,
timeouts,
userAgent,
headers,
queryParameters,
hosts: hosts.map(host => createStatelessHost(host)),
read(request, requestOptions) {
/**
* First, we compute the user request options. Keep in mind
* that, using request options, the user is able to modify the entire
* payload of the request, such as headers, query parameters, and others.
*/
const mappedRequestOptions = createMappedRequestOptions(requestOptions, transporter.timeouts.read);
const createRetryableRequest = () => {
/**
* Then, we prepare a function factory that contains the construction of
* the retryable request. At this point, we do *not* perform the actual
* request yet, but we want to have the function factory ready.
*/
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Read) !== 0), request, mappedRequestOptions);
};
/**
* Once we have the function factory ready, we need to determine whether the
* request is "cacheable" - that is, whether it should be cached. Note that, once again,
* the user can force this option.
*/
const cacheable = mappedRequestOptions.cacheable !== undefined
? mappedRequestOptions.cacheable
: request.cacheable;
/**
* If it is not "cacheable", we immediately trigger the retryable request; no
* need to check cache implementations.
*/
if (cacheable !== true) {
return createRetryableRequest();
}
/**
* If the request is "cacheable", we first need to compute the key to ask
* the cache implementations whether this request is in progress or the
* response already exists in the cache.
*/
const key = {
request,
mappedRequestOptions,
transporter: {
queryParameters: transporter.queryParameters,
headers: transporter.headers,
},
};
/**
* With the computed key, we first ask the responses cache
* implementation whether this request has been resolved before.
*/
return transporter.responsesCache.get(key, () => {
/**
* If the request has never been resolved before, we ask whether there
* is a current request with the same key in progress.
*/
return transporter.requestsCache.get(key, () => {
return (transporter.requestsCache
/**
* Finally, if there is no request in progress with the same key,
* this `createRetryableRequest()` will actually trigger the
* retryable request.
*/
.set(key, createRetryableRequest())
.then(response => Promise.all([transporter.requestsCache.delete(key), response]), err => Promise.all([transporter.requestsCache.delete(key), Promise.reject(err)]))
.then(([_, response]) => response));
});
}, {
/**
* Of course, once we get this response back from the server, we
* tell the responses cache to store the received response
* to be used later.
*/
miss: response => transporter.responsesCache.set(key, response),
});
},
write(request, requestOptions) {
/**
* On write requests, no cache mechanisms are applied, and we
* proxy the request immediately to the requester.
*/
return retryableRequest(transporter, transporter.hosts.filter(host => (host.accept & CallEnum.Write) !== 0), request, createMappedRequestOptions(requestOptions, transporter.timeouts.write));
},
};
return transporter;
}
function createUserAgent(version) {
const userAgent = {
value: `Algolia for JavaScript (${version})`,
add(options) {
const addedUserAgent = `; ${options.segment}${options.version !== undefined ? ` (${options.version})` : ''}`;
if (userAgent.value.indexOf(addedUserAgent) === -1) {
// eslint-disable-next-line functional/immutable-data
userAgent.value = `${userAgent.value}${addedUserAgent}`;
}
return userAgent;
},
};
return userAgent;
}
function deserializeSuccess(response) {
// eslint-disable-next-line functional/no-try-statement
try {
return JSON.parse(response.content);
}
catch (e) {
throw createDeserializationError(e.message, response);
}
}
function deserializeFailure({ content, status }, stackFrame) {
// eslint-disable-next-line functional/no-let
let message = content;
// eslint-disable-next-line functional/no-try-statement
try {
message = JSON.parse(content).message;
}
catch (e) {
// ..
}
return createApiError(message, status, stackFrame);
}
// eslint-disable-next-line functional/prefer-readonly-type
function encode(format, ...args) {
// eslint-disable-next-line functional/no-let
let i = 0;
return format.replace(/%s/g, () => encodeURIComponent(args[i++]));
}
function serializeUrl(host, path, queryParameters) {
const queryParametersAsString = serializeQueryParameters(queryParameters);
// eslint-disable-next-line functional/no-let
let url = `${host.protocol}://${host.url}/${path.charAt(0) === '/' ? path.substr(1) : path}`;
if (queryParametersAsString.length) {
url += `?${queryParametersAsString}`;
}
return url;
}
function serializeQueryParameters(parameters) {
const isObjectOrArray = (value) => Object.prototype.toString.call(value) === '[object Object]' ||
Object.prototype.toString.call(value) === '[object Array]';
return Object.keys(parameters)
.map(key => encode('%s=%s', key, isObjectOrArray(parameters[key]) ? JSON.stringify(parameters[key]) : parameters[key]))
.join('&');
}
function serializeData(request, requestOptions) {
if (request.method === MethodEnum.Get ||
(request.data === undefined && requestOptions.data === undefined)) {
return undefined;
}
const data = Array.isArray(request.data)
? request.data
: { ...request.data, ...requestOptions.data };
return JSON.stringify(data);
}
function serializeHeaders(transporter, requestOptions) {
const headers = {
...transporter.headers,
...requestOptions.headers,
};
const serializedHeaders = {};
Object.keys(headers).forEach(header => {
const value = headers[header];
// @ts-ignore
// eslint-disable-next-line functional/immutable-data
serializedHeaders[header.toLowerCase()] = value;
});
return serializedHeaders;
}
function stackTraceWithoutCredentials(stackTrace) {
return stackTrace.map(stackFrame => stackFrameWithoutCredentials(stackFrame));
}
function stackFrameWithoutCredentials(stackFrame) {
const modifiedHeaders = stackFrame.request.headers['x-algolia-api-key']
? { 'x-algolia-api-key': '*****' }
: {};
return {
...stackFrame,
request: {
...stackFrame.request,
headers: {
...stackFrame.request.headers,
...modifiedHeaders,
},
},
};
}
function createApiError(message, status, transporterStackTrace) {
return {
name: 'ApiError',
message,
status,
transporterStackTrace,
};
}
function createDeserializationError(message, response) {
return {
name: 'DeserializationError',
message,
response,
};
}
function createRetryError(transporterStackTrace) {
return {
name: 'RetryError',
message: 'Unreachable hosts - your application id may be incorrect. If the error persists, contact support@algolia.com.',
transporterStackTrace,
};
}
export { CallEnum, HostStatusEnum, createApiError, createDeserializationError, createMappedRequestOptions, createRetryError, createStatefulHost, createStatelessHost, createTransporter, createUserAgent, deserializeFailure, deserializeSuccess, isStatefulHostTimeouted, isStatefulHostUp, serializeData, serializeHeaders, serializeQueryParameters, serializeUrl, stackFrameWithoutCredentials, stackTraceWithoutCredentials };
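/*
* The ESM build above is consumed with a plain import (illustrative only;
* whether a given bundler picks this file or the CommonJS build depends on the
* package metadata, which is not shown here):
*
*   import { createTransporter, createUserAgent, CallEnum } from '@algolia/transporter';
*/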